/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
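
/*
 * Fast-path mapped LUN lookup for an I/O command: the se_dev_entry is
 * found under rcu_read_lock(), and on success the command pins the LUN
 * via the per-cpu se_lun->lun_ref before the read lock is dropped.
 */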
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_device *dev;
	struct se_dev_entry *deve;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
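
/*
 * Same lookup for task management requests; no lun_ref is taken here,
 * and there is no virtual LUN 0 fallback for TMRs.
 */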
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
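
/*
 * A mapped LUN is read-only if either the underlying LUN was exported
 * read-only, or this initiator's se_dev_entry carries
 * TRANSPORT_LUNFLAGS_READ_ONLY.
 */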
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	if (cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY)
		return true;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
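
/*
 * Release all mapped LUN entries for a node ACL, e.g. at ACL shutdown.
 * Holding lun_entry_mutex satisfies the rcu_dereference_check() below
 * in place of rcu_read_lock().
 */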
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}
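
/*
 * Toggle an existing mapped LUN between READ_WRITE and READ_ONLY under
 * lun_entry_mutex; RCU readers observe the updated lun_flags without
 * taking the mutex.
 */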
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}
/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u32 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
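
/*
 * kref release callback for se_dev_entry->pr_kref: wakes the waiter
 * blocked on pr_comp (see the dynamic -> explicit conversion below and
 * core_disable_device_list_for_node()) once the last PR reference drops.
 */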
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);

	complete(&deve->pr_comp);
}
/* core_enable_device_list_for_node():
 *
 * Map a LUN into an initiator node ACL, replacing any existing dynamic
 * entry for the same mapped_lun in place via RCU.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->alua_port_list);
	INIT_LIST_HEAD(&new->ua_list);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
				" for dynamic -> explicit NodeACL conversion:"
				" %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock_bh(&port->sep_alua_lock);
		list_del(&orig->alua_port_list);
		list_add_tail(&new->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		kfree_rcu(orig, rcu_head);
		return 0;
	}
rcu_assign_pointer(new->se_lun, lun);
rcu_assign_pointer(new->se_lun_acl, lun_acl);
hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
mutex_unlock(&nacl->lun_entry_mutex);
spin_lock_bh(&port->sep_alua_lock);
list_add_tail(&new->alua_port_list, &port->sep_alua_list);
spin_unlock_bh(&port->sep_alua_lock);
return 0;
}
/*
* Called with se_node_acl->lun_entry_mutex held.
*/
void core_disable_device_list_for_node(
struct se_lun *lun,
struct se_dev_entry *orig,
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_port *port = lun->lun_sep;
/*
* If the MappedLUN entry is being disabled, the entry in
* port->sep_alua_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*
* deve->se_lun_acl will be NULL for demo-mode created LUNs that have
* not been explicitly converted to MappedLUNs -> struct se_lun_acl,
* but we still remove deve->alua_port_list from port->sep_alua_list.
* This also means that active UAs and NodeACL context specific PR
* metadata for the demo-mode MappedLUN *deve will be released below.
*/
spin_lock_bh(&port->sep_alua_lock);
list_del(&orig->alua_port_list);
spin_unlock_bh(&port->sep_alua_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
*/
core_scsi3_ua_release_all(orig);
hlist_del_rcu(&orig->link);
rcu_assign_pointer(orig->se_lun, NULL);
rcu_assign_pointer(orig->se_lun_acl, NULL);
orig->lun_flags = 0;
orig->creation_time = 0;
orig->attach_count--;
/*
* Before firing off RCU callback, wait for any in process SPEC_I_PT=1
* or REGISTER_AND_MOVE PR operation to complete.
*/
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
}
/* core_clear_lun_from_tpg():
*
* Walk every NodeACL in @tpg and disable any mapped LUN entry that
* still points at @lun. Called when @lun is being removed from @tpg.
*/
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
spin_unlock_irq(&tpg->acl_node_lock);
mutex_lock(&nacl->lun_entry_mutex);
hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
lockdep_is_held(&nacl->lun_entry_mutex));
if (lun != tmp_lun)
continue;
core_disable_device_list_for_node(lun, deve, nacl, tpg);
}
mutex_unlock(&nacl->lun_entry_mutex);
spin_lock_irq(&tpg->acl_node_lock);
}
spin_unlock_irq(&tpg->acl_node_lock);
}
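/*
* Allocate a struct se_port and assign it the next unique, non-zero
* RELATIVE TARGET PORT IDENTIFIER for this device.  Returns
* ERR_PTR(-ENOMEM) on allocation failure, or ERR_PTR(-ENOSPC) once
* all 0xffff identifiers are already in use.
*/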
static struct se_port *core_alloc_port(struct se_device *dev)
{
struct se_port *port, *port_tmp;
port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
if (!port) {
pr_err("Unable to allocate struct se_port\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&port->sep_alua_list);
INIT_LIST_HEAD(&port->sep_list);
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
spin_lock_init(&port->sep_alua_lock);
mutex_init(&port->sep_tg_pt_md_mutex);
spin_lock(&dev->se_port_lock);
if (dev->dev_port_count == 0x0000ffff) {
pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
return ERR_PTR(-ENOSPC);
}
again:
/*
* Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
* Here is the table from spc4r17 section 7.7.3.8.
*
* Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
*
* Code Description
* 0h Reserved
* 1h Relative port 1, historically known as port A
* 2h Relative port 2, historically known as port B
* 3h to FFFFh Relative port 3 through 65 535
*/
port->sep_rtpi = dev->dev_rpti_counter++;
if (!port->sep_rtpi)
goto again;
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
/*
* Make sure RELATIVE TARGET PORT IDENTIFIER is unique
* for 16-bit wrap.
*/
if (port->sep_rtpi == port_tmp->sep_rtpi)
goto again;
}
spin_unlock(&dev->se_port_lock);
return port;
}
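/*
* Bind @port to @tpg and @lun, add it to the device's port list and,
* for backends that are neither passthrough nor internal-use only,
* attach it to the default ALUA target port group.
*/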
static void core_export_port(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_port *port,
struct se_lun *lun)
{
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
spin_lock(&lun->lun_sep_lock);
port->sep_tpg = tpg;
port->sep_lun = lun;
lun->lun_sep = port;
spin_unlock(&lun->lun_sep_lock);
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
pr_err("Unable to allocate t10_alua_tg_pt"
"_gp_member_t\n");
return;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
}
dev->dev_port_count++;
port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
* Called with struct se_device->se_port_lock spinlock held.
*/
static void core_release_port(struct se_device *dev, struct se_port *port)
__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
/*
* Wait for any port reference for PR ALL_TG_PT=1 operation
* to complete in __core_scsi3_alloc_registration()
*/
spin_unlock(&dev->se_port_lock);
while (atomic_read(&port->sep_tg_pt_ref_cnt))
cpu_relax();
spin_lock(&dev->se_port_lock);
core_alua_free_tg_pt_gp_mem(port);
list_del(&port->sep_list);
dev->dev_port_count--;
kfree(port);
}
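/*
* Export @dev as @lun within @tpg: allocate a port, record the
* device/RTPI linkage in the LUN, bump export_count, and publish the
* port via core_export_port().
*/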
int core_dev_export(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun)
{
struct se_hba *hba = dev->se_hba;
struct se_port *port;
port = core_alloc_port(dev);
if (IS_ERR(port))
return PTR_ERR(port);
lun->lun_index = dev->dev_index;
lun->lun_se_dev = dev;
lun->lun_rtpi = port->sep_rtpi;
spin_lock(&hba->device_lock);
dev->export_count++;
spin_unlock(&hba->device_lock);
core_export_port(dev, tpg, port, lun);
return 0;
}
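/*
* Reverse of core_dev_export(): release the LUN's port and drop the
* device's export_count.  Silently returns if the LUN has no backing
* device.
*/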
void core_dev_unexport(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun)
{
struct se_hba *hba = dev->se_hba;
struct se_port *port = lun->lun_sep;
spin_lock(&lun->lun_sep_lock);
if (lun->lun_se_dev == NULL) {
spin_unlock(&lun->lun_sep_lock);
return;
}
spin_unlock(&lun->lun_sep_lock);
spin_lock(&dev->se_port_lock);
core_release_port(dev, port);
spin_unlock(&dev->se_port_lock);
spin_lock(&hba->device_lock);
dev->export_count--;
spin_unlock(&hba->device_lock);
lun->lun_sep = NULL;
lun->lun_se_dev = NULL;
}
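/*
* Free every cached T10 VPD descriptor hanging off the device's
* t10_wwn list.
*/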
static void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
spin_lock(&dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
&dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
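/*
* Worked example (assuming 4 KiB pages): with block_size = 512,
* alignment = max(1, 4096 / 512) = 8 sectors, so a max_sectors of
* 1023 is rounded down to 1016.
*/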
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
u32 aligned_max_sectors;
u32 alignment;
/*
* Limit max_sectors to a PAGE_SIZE aligned value for modern
* transport_allocate_data_tasks() operation.
*/
alignment = max(1ul, PAGE_SIZE / block_size);
aligned_max_sectors = rounddown(max_sectors, alignment);
if (max_sectors != aligned_max_sectors)
pr_info("Rounding down aligned max_sectors from %u to %u\n",
max_sectors, aligned_max_sectors);
return aligned_max_sectors;
}
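/*
* Report whether @dev has a volatile write cache: prefer the backend's
* get_write_cache() callback, otherwise fall back to the
* emulate_write_cache device attribute.
*/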
bool se_dev_check_wce(struct se_device *dev)
{
bool wce = false;
if (dev->transport->get_write_cache)
wce = dev->transport->get_write_cache(dev);
else if (dev->dev_attrib.emulate_write_cache > 0)
wce = true;
return wce;
}
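/*
* The se_dev_set_*() functions below back the per-device configfs
* attributes.  Each validates its input, most refuse changes while the
* device is exported (export_count != 0), and all return 0 on success
* or a negative errno.
*/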
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
dev, dev->dev_attrib.max_unmap_lba_count);
return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
dev->dev_attrib.max_unmap_block_desc_count =
max_unmap_block_desc_count;
pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
dev, dev->dev_attrib.max_unmap_block_desc_count);
return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
dev->dev_attrib.unmap_granularity = unmap_granularity;
pr_debug("dev[%p]: Set unmap_granularity: %u\n",
dev, dev->dev_attrib.unmap_granularity);
return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);
int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
dev, dev->dev_attrib.unmap_granularity_alignment);
return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
int se_dev_set_max_write_same_len(
struct se_device *dev,
u32 max_write_same_len)
{
dev->dev_attrib.max_write_same_len = max_write_same_len;
pr_debug("dev[%p]: Set max_write_same_len: %u\n",
dev, dev->dev_attrib.max_write_same_len);
return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);
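/*
* Derive the 16-byte INQUIRY MODEL field from the backstore's configfs
* item name, truncating over-long names with a warning.
*/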
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
const char *configname;
configname = config_item_name(&dev->dev_group.cg_item);
if (strlen(configname) >= 16) {
pr_warn("dev[%p]: Backstore name '%s' is too long for "
"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
configname);
}
snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}
int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
if (dev->export_count) {
pr_err("dev[%p]: Unable to change model alias"
" while export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (flag) {
dev_set_t10_wwn_model_alias(dev);
} else {
strncpy(&dev->t10_wwn.model[0],
dev->transport->inquiry_prod, 16);
}
dev->dev_attrib.emulate_model_alias = flag;
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
printk_once(KERN_WARNING
"ignoring deprecated emulate_dpo attribute\n");
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (flag &&
dev->transport->get_write_cache) {
pr_warn("emulate_fua_write not supported for this device, ignoring\n");
return 0;
}
if (dev->export_count) {
pr_err("emulate_fua_write cannot be changed with active"
" exports: %d\n", dev->export_count);
return -EINVAL;
}
dev->dev_attrib.emulate_fua_write = flag;
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, dev->dev_attrib.emulate_fua_write);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
printk_once(KERN_WARNING
"ignoring deprecated emulate_fua_read attribute\n");
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (flag &&
dev->transport->get_write_cache) {
pr_err("emulate_write_cache not supported for this device\n");
return -EINVAL;
}
if (dev->export_count) {
pr_err("emulate_write_cache cannot be changed with active"
" exports: %d\n", dev->export_count);
return -EINVAL;
}
dev->dev_attrib.emulate_write_cache = flag;
pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
dev, dev->dev_attrib.emulate_write_cache);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1) && (flag != 2)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
dev, dev->dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TAS while"
" export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
dev->dev_attrib.emulate_tas = flag;
pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
dev->dev_attrib.emulate_tpu = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
dev->dev_attrib.emulate_tpws = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
dev->dev_attrib.emulate_caw = flag;
pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);
int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
if (flag != 0 && flag != 1) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
dev->dev_attrib.emulate_3pc = flag;
pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);
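/*
* Select the emulated T10 Protection Information (DIF) type: 0 turns
* protection off, 1 and 3 select TYPE1/TYPE3, and TYPE2 is rejected.
* The request is ignored when hardware DIF is active, and refused for
* unconfigured devices, active exports, or backends without
* init_prot()/free_prot() support.
*/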
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
int rc, old_prot = dev->dev_attrib.pi_prot_type;
if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
pr_err("Illegal value %d for pi_prot_type\n", flag);
return -EINVAL;
}
if (flag == 2) {
pr_err("DIF TYPE2 protection currently not supported\n");
return -ENOSYS;
}
if (dev->dev_attrib.hw_pi_prot_type) {
pr_warn("DIF protection enabled on underlying hardware,"
" ignoring\n");
return 0;
}
if (!dev->transport->init_prot || !dev->transport->free_prot) {
/* 0 is only allowed value for non-supporting backends */
if (flag == 0)
return 0;
pr_err("DIF protection not supported by backend: %s\n",
dev->transport->name);
return -ENOSYS;
}
if (!(dev->dev_flags & DF_CONFIGURED)) {
pr_err("DIF protection requires device to be configured\n");
return -ENODEV;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device PROT type while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
dev->dev_attrib.pi_prot_type = flag;
if (flag && !old_prot) {
rc = dev->transport->init_prot(dev);
if (rc) {
dev->dev_attrib.pi_prot_type = old_prot;
return rc;
}
} else if (!flag && old_prot) {
dev->transport->free_prot(dev);
}
pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);
int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
int rc;
if (!flag)
return 0;
if (flag != 1) {
pr_err("Illegal value %d for pi_prot_format\n", flag);
return -EINVAL;
}
if (!dev->transport->format_prot) {
pr_err("DIF protection format not supported by backend %s\n",
dev->transport->name);
return -ENOSYS;
}
if (!(dev->dev_flags & DF_CONFIGURED)) {
pr_err("DIF protection format requires device to be configured\n");
return -ENODEV;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to format SE Device PROT type while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
rc = dev->transport->format_prot(dev);
if (rc)
return rc;
pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
dev->dev_attrib.enforce_pr_isids = flag;
pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -EINVAL;
}
if (dev->export_count) {
pr_err("dev[%p]: Unable to set force_pr_aptpl while"
" export_count is %d\n", dev, dev->export_count);
return -EINVAL;
}
dev->dev_attrib.force_pr_aptpl = flag;
pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -EINVAL;
}
dev->dev_attrib.is_nonrot = flag;
pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
if (flag != 0) {
printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
" reordering not implemented\n", dev);
return -ENOSYS;
}
dev->dev_attrib.emulate_rest_reord = flag;
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
* Note: this can only be called on an unexported SE Device Object.
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
" export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
if (!queue_depth) {
pr_err("dev[%p]: Illegal ZERO value for queue"
"_depth\n", dev);
return -EINVAL;
}
if (queue_depth > dev->dev_attrib.queue_depth) {
if (queue_depth > dev->dev_attrib.hw_queue_depth) {
pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
dev->dev_attrib.hw_queue_depth);
return -EINVAL;
}
}
dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device"
" optimal_sectors while export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than hw_max_sectors: %u\n", dev,
optimal_sectors, dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
dev->dev_attrib.optimal_sectors = optimal_sectors;
pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
if (dev->export_count) {
pr_err("dev[%p]: Unable to change SE Device block_size"
" while export_count is %d\n",
dev, dev->export_count);
return -EINVAL;
}
if ((block_size != 512) &&
(block_size != 1024) &&
(block_size != 2048) &&
(block_size != 4096)) {
pr_err("dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
dev, block_size);
return -EINVAL;
}
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
if (dev->dev_attrib.max_bytes_per_io)
dev->dev_attrib.hw_max_sectors =
dev->dev_attrib.max_bytes_per_io / block_size;
return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);
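/*
* Allocate and activate @unpacked_lun for @dev within @tpg, then
* refresh the LUN mappings of any dynamically generated (demo-mode)
* NodeACLs.
*/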
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
struct se_device *dev,
u32 unpacked_lun)
{
struct se_lun *lun;
int rc;
lun = core_tpg_alloc_lun(tpg, unpacked_lun);
if (IS_ERR(lun))
return lun;
rc = core_tpg_add_lun(tpg, lun,
TRANSPORT_LUNFLAGS_READ_WRITE, dev);
if (rc < 0)
return ERR_PTR(rc);
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
spin_lock_irq(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl &&
(!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
spin_unlock_irq(&tpg->acl_node_lock);
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock);
}
}
spin_unlock_irq(&tpg->acl_node_lock);
}
return lun;
}
/* core_dev_del_lun():
*
* Deactivate @lun and remove it from @tpg via core_tpg_remove_lun().
*/
void core_dev_del_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
tpg->se_tpg_tfo->get_fabric_name());
core_tpg_remove_lun(tpg, lun);
}
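/*
* Look up @unpacked_lun in @tpg; the LUN is only returned while it is
* still in TRANSPORT_LUN_STATUS_FREE, i.e. before it has been
* activated.
*/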
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
"_PER_TPG-1: %u for Target Portal Group: %hu\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
pr_err("%s Logical Unit Number: %u is not free on"
" Target Portal Group: %hu, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
/* core_dev_get_lun():
*
* Look up @unpacked_lun in @tpg; only a LUN in
* TRANSPORT_LUN_STATUS_ACTIVE is returned.
*/
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
"_TPG-1: %u for Target Portal Group: %hu\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
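/*
* Allocate a struct se_lun_acl for @nacl and @mapped_lun.  On failure
* NULL is returned and *ret carries a negative errno.
*/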
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_node_acl *nacl,
u32 mapped_lun,
int *ret)
{
struct se_lun_acl *lacl;
if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
pr_err("%s InitiatorName exceeds maximum size.\n",
tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!lacl) {
pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
nacl->initiatorname);
return lacl;
}
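/*
* Map @lacl onto an active @unpacked_lun.  A read-write request against
* a read-only LUN is silently demoted to read-only, and any matching
* APTPL PR pre-registrations are enabled for the new mapping.
*/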
int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
u32 unpacked_lun,
u32 lun_access)
{
struct se_lun *lun;
struct se_node_acl *nacl;
lun = core_dev_get_lun(tpg, unpacked_lun);
if (!lun) {
pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
return -EINVAL;
}
nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
lacl->se_lun = lun;
if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
lun_access, nacl, tpg) < 0)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
atomic_inc_mb(&lun->lun_acl_count);
spin_unlock(&lun->lun_acl_lock);
pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
lacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL..
*/
core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
lacl->mapped_lun);
return 0;
}
int core_dev_del_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun *lun,
struct se_lun_acl *lacl)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
nacl = lacl->se_lun_nacl;
if (!nacl)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
list_del(&lacl->lacl_list);
atomic_dec_mb(&lun->lun_acl_count);
spin_unlock(&lun->lun_acl_lock);
mutex_lock(&nacl->lun_entry_mutex);
deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
if (deve)
core_disable_device_list_for_node(lun, deve, nacl, tpg);
mutex_unlock(&nacl->lun_entry_mutex);
lacl->se_lun = NULL;
pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %u\n",
tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lacl->initiatorname, lacl->mapped_lun);
return 0;
}
void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg),
tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->t10_wwn;
char buf[17];
int i, device_type;
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
for (i = 0; i < 8; i++)
if (wwn->vendor[i] >= 0x20)
buf[i] = wwn->vendor[i];
else
buf[i] = ' ';
buf[i] = '\0';
pr_debug(" Vendor: %s\n", buf);
for (i = 0; i < 16; i++)
if (wwn->model[i] >= 0x20)
buf[i] = wwn->model[i];
else
buf[i] = ' ';
buf[i] = '\0';
pr_debug(" Model: %s\n", buf);
for (i = 0; i < 4; i++)
if (wwn->revision[i] >= 0x20)
buf[i] = wwn->revision[i];
else
buf[i] = ' ';
buf[i] = '\0';
pr_debug(" Revision: %s\n", buf);
device_type = dev->transport->get_device_type(dev);
pr_debug(" Type: %s ", scsi_device_type(device_type));
}
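/*
 * Illustrative ring buffer output from scsi_dump_inquiry() -- a sketch, not
 * captured from a real boot. For a virtual backend the vendor defaults to
 * "LIO-ORG" per target_configure_device() below; the model and revision
 * values shown here are example placeholders for the backend's
 * inquiry_prod/inquiry_rev strings:
 *
 *   Vendor: LIO-ORG
 *   Model: FILEIO
 *   Revision: 4.0
 *   Type: Direct-Access
 */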
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
struct se_device *dev;
struct se_lun *xcopy_lun;
dev = hba->transport->alloc_device(hba, name);
if (!dev)
return NULL;
dev->dev_link_magic = SE_DEV_LINK_MAGIC;
dev->se_hba = hba;
dev->transport = hba->transport;
dev->prot_length = sizeof(struct se_dif_v1_tuple);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
INIT_LIST_HEAD(&dev->g_dev_node);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
sema_init(&dev->caw_sem, 1);
atomic_set(&dev->dev_ordered_id, 0);
INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&dev->t10_pr.registration_list);
INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
spin_lock_init(&dev->t10_pr.registration_lock);
spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
spin_lock_init(&dev->t10_alua.lba_map_lock);
dev->t10_wwn.t10_dev = dev;
dev->t10_alua.t10_dev = dev;
dev->dev_attrib.da_dev = dev;
dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
dev->dev_attrib.emulate_dpo = 1;
dev->dev_attrib.emulate_fua_write = 1;
dev->dev_attrib.emulate_fua_read = 1;
dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
dev->dev_attrib.is_nonrot = DA_IS_NONROT;
dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
dev->dev_attrib.max_unmap_block_desc_count =
DA_MAX_UNMAP_BLOCK_DESC_COUNT;
dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
xcopy_lun = &dev->xcopy_lun;
xcopy_lun->lun_se_dev = dev;
init_completion(&xcopy_lun->lun_shutdown_comp);
INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
spin_lock_init(&xcopy_lun->lun_acl_lock);
spin_lock_init(&xcopy_lun->lun_sep_lock);
init_completion(&xcopy_lun->lun_ref_comp);
return dev;
}
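/*
 * Sketch of the expected caller lifecycle for the device allocated above,
 * mirroring what core_dev_setup_virtual_lun0() does below; the hba handle,
 * device name and params string here are hypothetical:
 *
 *	struct se_device *dev = target_alloc_device(hba, "my_dev");
 *	if (!dev)
 *		return -ENOMEM;
 *	hba->transport->set_configfs_dev_params(dev, params, strlen(params));
 *	ret = target_configure_device(dev);
 *	if (ret)
 *		target_free_device(dev);
 */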
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
int ret;
if (dev->dev_flags & DF_CONFIGURED) {
pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
ret = dev->transport->configure_device(dev);
if (ret)
goto out;
/*
* XXX: there is not much point in having two different values here.
*/
dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
/*
* Align max_hw_sectors down to PAGE_SIZE I/O transfers
*/
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
ret = core_setup_alua(dev);
if (ret)
goto out;
/*
* Start up the TMR workqueue for this struct se_device
*/
dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
dev->transport->name);
if (!dev->tmr_wq) {
pr_err("Unable to create tmr workqueue for %s\n",
dev->transport->name);
ret = -ENOMEM;
goto out_free_alua;
}
/*
* Setup work_queue for QUEUE_FULL
*/
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
/*
* Preload the initial INQUIRY const values if we are doing
* anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
* passthrough because this is being provided by the backend LLD.
*/
if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
strncpy(&dev->t10_wwn.model[0],
dev->transport->inquiry_prod, 16);
strncpy(&dev->t10_wwn.revision[0],
dev->transport->inquiry_rev, 4);
}
scsi_dump_inquiry(dev);
spin_lock(&hba->device_lock);
hba->dev_count++;
spin_unlock(&hba->device_lock);
mutex_lock(&g_device_mutex);
list_add_tail(&dev->g_dev_node, &g_device_list);
mutex_unlock(&g_device_mutex);
dev->dev_flags |= DF_CONFIGURED;
return 0;
out_free_alua:
core_alua_free_lu_gp_mem(dev);
out:
se_release_vpd_for_dev(dev);
return ret;
}
void target_free_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
WARN_ON(!list_empty(&dev->dev_sep_list));
if (dev->dev_flags & DF_CONFIGURED) {
destroy_workqueue(dev->tmr_wq);
mutex_lock(&g_device_mutex);
list_del(&dev->g_dev_node);
mutex_unlock(&g_device_mutex);
spin_lock(&hba->device_lock);
hba->dev_count--;
spin_unlock(&hba->device_lock);
}
core_alua_free_lu_gp_mem(dev);
core_alua_set_lba_map(dev, NULL, 0, 0);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
if (dev->transport->free_prot)
dev->transport->free_prot(dev);
dev->transport->free_device(dev);
}
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
char buf[] = "rd_pages=8,rd_nullio=1";
int ret;
hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
dev = target_alloc_device(hba, "virt_lun0");
if (!dev) {
ret = -ENOMEM;
goto out_free_hba;
}
hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));
ret = target_configure_device(dev);
if (ret)
goto out_free_se_dev;
lun0_hba = hba;
g_lun0_dev = dev;
return 0;
out_free_se_dev:
target_free_device(dev);
out_free_hba:
core_delete_hba(hba);
return ret;
}
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = lun0_hba;
if (!hba)
return;
if (g_lun0_dev)
target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
/*
* Common CDB parsing for kernel and user passthrough.
*/
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
unsigned char *cdb = cmd->t_task_cdb;
/*
* Clear a lun set in the cdb if the initiator talking to us spoke
* an old standards version, as we can't assume the underlying device
* won't choke up on it.
*/
switch (cdb[0]) {
case READ_10: /* SBC - RDProtect */
case READ_12: /* SBC - RDProtect */
case READ_16: /* SBC - RDProtect */
case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
case VERIFY: /* SBC - VRProtect */
case VERIFY_16: /* SBC - VRProtect */
case WRITE_VERIFY: /* SBC - VRProtect */
case WRITE_VERIFY_12: /* SBC - VRProtect */
case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
break;
default:
cdb[1] &= 0x1f; /* clear logical unit number */
break;
}
/*
* For REPORT LUNS we always need to emulate the response; for everything
* else, pass it up.
*/
if (cdb[0] == REPORT_LUNS) {
cmd->execute_cmd = spc_emulate_report_luns;
return TCM_NO_SENSE;
}
/* Set DATA_CDB flag for ops that should have it */
switch (cdb[0]) {
case READ_6:
case READ_10:
case READ_12:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
case WRITE_16:
case WRITE_VERIFY:
case WRITE_VERIFY_12:
case 0x8e: /* WRITE_VERIFY_16 */
case COMPARE_AND_WRITE:
case XDWRITEREAD_10:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
case VARIABLE_LENGTH_CMD:
switch (get_unaligned_be16(&cdb[8])) {
case READ_32:
case WRITE_32:
case 0x0c: /* WRITE_VERIFY_32 */
case XDWRITEREAD_32:
cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
break;
}
}
cmd->execute_cmd = exec_cmd;
return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
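/*
 * Usage sketch (hypothetical backend, not part of this file): a kernel or
 * user passthrough backend implements its ->parse_cdb() by delegating to
 * passthrough_parse_cdb() with its own execution callback, roughly as the
 * pSCSI backend does:
 *
 *	static sense_reason_t my_execute_cmd(struct se_cmd *cmd);
 *
 *	static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
 *	{
 *		return passthrough_parse_cdb(cmd, my_execute_cmd);
 *	}
 */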