From 9375b1bfd2555c8bc828d394a4419a212b46ba71 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Mon, 1 Aug 2011 23:29:11 +0200 Subject: [PATCH 01/62] target: Remove unneeded version.h includes It was pointed out by 'make versioncheck' that some includes of linux/version.h are not needed in drivers/target/. This patch removes them. Signed-off-by: Jesper Juhl Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 1 - drivers/target/target_core_configfs.c | 1 - drivers/target/target_core_fabric_configfs.c | 1 - drivers/target/target_core_file.c | 1 - drivers/target/target_core_iblock.c | 1 - drivers/target/target_core_pr.c | 1 - drivers/target/target_core_pscsi.c | 1 - drivers/target/target_core_rd.c | 1 - drivers/target/target_core_stat.c | 1 - drivers/target/target_core_tmr.c | 1 - drivers/target/target_core_transport.c | 1 - drivers/target/target_core_ua.c | 1 - drivers/target/tcm_fc/tfc_cmd.c | 1 - drivers/target/tcm_fc/tfc_conf.c | 1 - drivers/target/tcm_fc/tfc_io.c | 1 - drivers/target/tcm_fc/tfc_sess.c | 1 - 16 files changed, 16 deletions(-) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 98c98a3a0250..007c6c298b8b 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -24,7 +24,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index b2575d8568cc..f37e2b9cbbd7 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -23,7 +23,6 @@ #include #include -#include #include #include #include diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 55bbe0847a6d..09b6f8729f91 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -22,7 +22,6 @@ #include #include -#include #include #include #include diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index bc1b33639b8d..99c9db003394 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -26,7 +26,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7e1234105442..7f0cc53b4581 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -27,7 +27,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 7fd3a161f7cc..0c4f783f924c 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -25,7 +25,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 2b7b0da9146d..77d725886410 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -26,7 +26,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index e567e129c697..1ab69f32e664 100644 --- a/drivers/target/target_core_rd.c 
+++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index a8d6e1dee938..874152aed94a 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 27d4925e51c3..662473c54043 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -24,7 +24,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a4b0a8d27f25..4b2d6bf87a12 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -26,7 +26,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 31e3c652527e..50a480db7a66 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -24,7 +24,6 @@ * ******************************************************************************/ -#include #include #include #include diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 80fbcde00cb6..3c6c5a1c7dd0 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -19,7 +19,6 @@ #include #include -#include #include #include #include diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8fa39b74f22c..b30eace70d35 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -23,7 +23,6 @@ #include #include -#include #include #include #include diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index d35ea5a3d56c..1369b1cb103d 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -28,7 +28,6 @@ #include #include -#include #include #include #include diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index dbb5eaeee399..326921385aff 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -19,7 +19,6 @@ #include #include -#include #include #include #include From 79a7fef26431830e22e282053d050af790117db8 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 28 Sep 2011 22:12:07 -0700 Subject: [PATCH 02/62] target: Prevent cmd->se_queue_node double add This patch addresses a bug with the lio-core-2.6.git conversion of transport_add_cmd_to_queue() to use a single embedded list_head, instead of individual struct se_queue_req allocations allowing a single se_cmd to be added to the queue mulitple times. This was changed in the following: commit 2a9e4d5ca5d99f4c600578d6285d45142e7e5208 Author: Andy Grover Date: Tue Apr 26 17:45:51 2011 -0700 target: Embed qr in struct se_cmd The problem is that some target code still assumes performing multiple adds is allowed via transport_add_cmd_to_queue(), which ends up causing list corruption in qobj->qobj_list code. 
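For illustration only (a minimal userspace sketch of the idiom involved, not the driver code; every name below is hypothetical, and it assumes the node is initialized self-linked when the command is set up), the double-add hazard on an embedded list node and the remove-before-add guard look roughly like this:

#include <stdbool.h>
#include <stddef.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *n) { n->prev = n->next = n; }

/* A self-linked node is not queued; anything else is already on a list. */
static bool list_is_queued(const struct list_node *n) { return n->next != n; }

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);               /* leave the node self-linked so it can be re-added */
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Queue a command whose node may already be linked: unlink it first. */
static void queue_cmd_node(struct list_node *cmd_node, struct list_node *queue)
{
	if (list_is_queued(cmd_node))
		list_del_init(cmd_node);    /* guard against the double add */
	list_add_tail(cmd_node, queue);
	/*
	 * Without the guard, a second list_add_tail() rewrites cmd_node's
	 * pointers while its old neighbours still point at it, which is
	 * the kind of queue-list corruption described above.
	 */
}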
This patch addresses this by removing an existing struct se_cmd from the list before the add, and removes an unnecessary list walk in transport_remove_cmd_from_queue(). It also changes cmd->t_transport_queue_active to use explicit sets instead of increment/decrement to prevent confusion during exception path handling. Signed-off-by: Roland Dreier Cc: Andy Grover Cc: stable@kernel.org Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 2 +- drivers/target/target_core_transport.c | 31 +++++++++++++------------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 662473c54043..98b12a8923f0 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -339,7 +339,7 @@ int core_tmr_lun_reset( atomic_dec(&cmd->t_transport_queue_active); atomic_dec(&qobj->queue_cnt); - list_del(&cmd->se_queue_node); + list_del_init(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4b2d6bf87a12..046da7f38230 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -620,8 +620,6 @@ static void transport_add_cmd_to_queue( struct se_queue_obj *qobj = &dev->dev_queue_obj; unsigned long flags; - INIT_LIST_HEAD(&cmd->se_queue_node); - if (t_state) { spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = t_state; @@ -630,15 +628,21 @@ static void transport_add_cmd_to_queue( } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); + + /* If the cmd is already on the list, remove it before we add it */ + if (!list_empty(&cmd->se_queue_node)) + list_del(&cmd->se_queue_node); + else + atomic_inc(&qobj->queue_cnt); + if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; list_add(&cmd->se_queue_node, &qobj->qobj_list); } else list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); - atomic_inc(&cmd->t_transport_queue_active); + atomic_set(&cmd->t_transport_queue_active, 1); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - atomic_inc(&qobj->queue_cnt); wake_up_interruptible(&qobj->thread_wq); } @@ -655,9 +659,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj) } cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - atomic_dec(&cmd->t_transport_queue_active); + atomic_set(&cmd->t_transport_queue_active, 0); - list_del(&cmd->se_queue_node); + list_del_init(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -667,7 +671,6 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj) static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj) { - struct se_cmd *t; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -675,14 +678,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } - - list_for_each_entry(t, &qobj->qobj_list, se_queue_node) - if (t == cmd) { - atomic_dec(&cmd->t_transport_queue_active); - atomic_dec(&qobj->queue_cnt); - list_del(&cmd->se_queue_node); - break; - } + atomic_set(&cmd->t_transport_queue_active, 0); + atomic_dec(&qobj->queue_cnt); + list_del_init(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); if (atomic_read(&cmd->t_transport_queue_active)) { @@ -1066,7 +1064,7 @@ 
static void transport_release_all_cmds(struct se_device *dev) list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, se_queue_node) { t_state = cmd->t_state; - list_del(&cmd->se_queue_node); + list_del_init(&cmd->se_queue_node); spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); @@ -1597,6 +1595,7 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_ordered_node); INIT_LIST_HEAD(&cmd->se_qf_node); + INIT_LIST_HEAD(&cmd->se_queue_node); INIT_LIST_HEAD(&cmd->t_task_list); init_completion(&cmd->transport_lun_fe_stop_comp); From d050ffb922c782f092234611b9019e95024481ab Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 28 Sep 2011 21:37:29 -0700 Subject: [PATCH 03/62] target: Re-org of core_tmr_lun_reset This patch is a re-organization of core_tmr_lun_reset() logic to properly scan the active tmr_list, dev->state_task_list and qobj->qobj_list with the relevant locks held, and perform a list_move_tail onto separate local scope lists before performing the full drain. This involves breaking out the code into three separate list-specific functions: core_tmr_drain_tmr_list(), core_tmr_drain_task_list() and core_tmr_drain_cmd_list(). (nab: Include target: Remove non-active tasks from execute list during LUN_RESET patch to address original breakage) Reported-by: Roland Dreier Cc: Roland Dreier Cc: Christoph Hellwig Cc: stable@kernel.org Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 197 ++++++++++++++++++++----------- 1 file changed, 125 insertions(+), 72 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 98b12a8923f0..ed0b1ff99110 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -66,15 +66,16 @@ void core_tmr_release_req( struct se_tmr_req *tmr) { struct se_device *dev = tmr->tmr_dev; + unsigned long flags; if (!dev) { kmem_cache_free(se_tmr_req_cache, tmr); return; } - spin_lock_irq(&dev->se_tmr_lock); + spin_lock_irqsave(&dev->se_tmr_lock, flags); list_del(&tmr->tmr_list); - spin_unlock_irq(&dev->se_tmr_lock); + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); kmem_cache_free(se_tmr_req_cache, tmr); } @@ -99,54 +100,20 @@ static void core_tmr_handle_tas_abort( transport_cmd_finish_abort(cmd, 0); } -int core_tmr_lun_reset( +static void core_tmr_drain_tmr_list( struct se_device *dev, struct se_tmr_req *tmr, - struct list_head *preempt_and_abort_list, - struct se_cmd *prout_cmd) + struct list_head *preempt_and_abort_list) { - struct se_cmd *cmd, *tcmd; - struct se_node_acl *tmr_nacl = NULL; - struct se_portal_group *tmr_tpg = NULL; - struct se_queue_obj *qobj = &dev->dev_queue_obj; + LIST_HEAD(drain_tmr_list); struct se_tmr_req *tmr_p, *tmr_pp; - struct se_task *task, *task_tmp; + struct se_cmd *cmd; unsigned long flags; - int fe_count, tas; - /* - * TASK_ABORTED status bit, this is configurable via ConfigFS - * struct se_device attributes. spc4r17 section 7.4.6 Control mode page - * - * A task aborted status (TAS) bit set to zero specifies that aborted - * tasks shall be terminated by the device server without any response - * to the application client. A TAS bit set to one specifies that tasks - * aborted by the actions of an I_T nexus other than the I_T nexus on - * which the command was received shall be completed with TASK ABORTED - * status (see SAM-4). 
- */ - tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; - /* - * Determine if this se_tmr is coming from a $FABRIC_MOD - * or struct se_device passthrough.. - */ - if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; - tmr_tpg = tmr->task_cmd->se_sess->se_tpg; - if (tmr_nacl && tmr_tpg) { - pr_debug("LUN_RESET: TMR caller fabric: %s" - " initiator port %s\n", - tmr_tpg->se_tpg_tfo->get_fabric_name(), - tmr_nacl->initiatorname); - } - } - pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", - (preempt_and_abort_list) ? "Preempt" : "TMR", - dev->transport->name, tas); /* * Release all pending and outgoing TMRs aside from the received * LUN_RESET tmr.. */ - spin_lock_irq(&dev->se_tmr_lock); + spin_lock_irqsave(&dev->se_tmr_lock, flags); list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) { /* * Allow the received TMR to return with FUNCTION_COMPLETE. @@ -168,29 +135,48 @@ int core_tmr_lun_reset( (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; - spin_unlock_irq(&dev->se_tmr_lock); - spin_lock_irqsave(&cmd->t_state_lock, flags); + spin_lock(&cmd->t_state_lock); if (!atomic_read(&cmd->t_transport_active)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - spin_lock_irq(&dev->se_tmr_lock); + spin_unlock(&cmd->t_state_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - spin_lock_irq(&dev->se_tmr_lock); + spin_unlock(&cmd->t_state_lock); continue; } + spin_unlock(&cmd->t_state_lock); + + list_move_tail(&tmr->tmr_list, &drain_tmr_list); + } + spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + + while (!list_empty(&drain_tmr_list)) { + tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); + list_del(&tmr->tmr_list); + cmd = tmr_p->task_cmd; + pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," " Response: 0x%02x, t_state: %d\n", - (preempt_and_abort_list) ? "Preempt" : "", tmr_p, - tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + (preempt_and_abort_list) ? "Preempt" : "", tmr, + tmr->function, tmr->response, cmd->t_state); transport_cmd_finish_abort_tmr(cmd); - spin_lock_irq(&dev->se_tmr_lock); } - spin_unlock_irq(&dev->se_tmr_lock); +} + +static void core_tmr_drain_task_list( + struct se_device *dev, + struct se_cmd *prout_cmd, + struct se_node_acl *tmr_nacl, + int tas, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_task_list); + struct se_cmd *cmd; + struct se_task *task, *task_tmp; + unsigned long flags; + int fe_count; /* * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status. 
* This is following sam4r17, section 5.6 Aborting commands, Table 38 @@ -235,9 +221,23 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - list_del(&task->t_state_list); + list_move_tail(&task->t_state_list, &drain_task_list); atomic_set(&task->task_state_active, 0); - spin_unlock_irqrestore(&dev->execute_task_lock, flags); + /* + * Remove from task execute list before processing drain_task_list + */ + if (atomic_read(&task->task_execute_queue) != 0) { + list_del(&task->t_execute_list); + atomic_set(&task->task_execute_queue, 0); + atomic_dec(&dev->execute_tasks); + } + } + spin_unlock_irqrestore(&dev->execute_task_lock, flags); + + while (!list_empty(&drain_task_list)) { + task = list_entry(drain_task_list.next, struct se_task, t_state_list); + list_del(&task->t_state_list); + cmd = task->task_se_cmd; spin_lock_irqsave(&cmd->t_state_lock, flags); pr_debug("LUN_RESET: %s cmd: %p task: %p" @@ -274,20 +274,14 @@ int core_tmr_lun_reset( atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); - } else { - if (atomic_read(&task->task_execute_queue) != 0) - transport_remove_task_from_execute_queue(task, dev); } __transport_stop_task_timer(task, &flags); if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, atomic_read(&cmd->t_task_cdbs_ex_left)); - - spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } fe_count = atomic_read(&cmd->t_fe_count); @@ -297,22 +291,31 @@ int core_tmr_lun_reset( " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); atomic_set(&cmd->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - spin_lock_irqsave(&dev->execute_task_lock, flags); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); continue; } pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); atomic_set(&cmd->t_transport_aborted, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); - spin_lock_irqsave(&dev->execute_task_lock, flags); + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); } - spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + +static void core_tmr_drain_cmd_list( + struct se_device *dev, + struct se_cmd *prout_cmd, + struct se_node_acl *tmr_nacl, + int tas, + struct list_head *preempt_and_abort_list) +{ + LIST_HEAD(drain_cmd_list); + struct se_queue_obj *qobj = &dev->dev_queue_obj; + struct se_cmd *cmd, *tcmd; + unsigned long flags; /* * Release all commands remaining in the struct se_device cmd queue. * @@ -337,10 +340,15 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - atomic_dec(&cmd->t_transport_queue_active); + atomic_set(&cmd->t_transport_queue_active, 0); atomic_dec(&qobj->queue_cnt); + list_move_tail(&cmd->se_queue_node, &drain_cmd_list); + } + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + + while (!list_empty(&drain_cmd_list)) { + cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); list_del_init(&cmd->se_queue_node); - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? 
@@ -353,9 +361,53 @@ int core_tmr_lun_reset( core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, atomic_read(&cmd->t_fe_count)); - spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); +} + +int core_tmr_lun_reset( + struct se_device *dev, + struct se_tmr_req *tmr, + struct list_head *preempt_and_abort_list, + struct se_cmd *prout_cmd) +{ + struct se_node_acl *tmr_nacl = NULL; + struct se_portal_group *tmr_tpg = NULL; + int tas; + /* + * TASK_ABORTED status bit, this is configurable via ConfigFS + * struct se_device attributes. spc4r17 section 7.4.6 Control mode page + * + * A task aborted status (TAS) bit set to zero specifies that aborted + * tasks shall be terminated by the device server without any response + * to the application client. A TAS bit set to one specifies that tasks + * aborted by the actions of an I_T nexus other than the I_T nexus on + * which the command was received shall be completed with TASK ABORTED + * status (see SAM-4). + */ + tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; + /* + * Determine if this se_tmr is coming from a $FABRIC_MOD + * or struct se_device passthrough.. + */ + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { + tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; + tmr_tpg = tmr->task_cmd->se_sess->se_tpg; + if (tmr_nacl && tmr_tpg) { + pr_debug("LUN_RESET: TMR caller fabric: %s" + " initiator port %s\n", + tmr_tpg->se_tpg_tfo->get_fabric_name(), + tmr_nacl->initiatorname); + } + } + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", + (preempt_and_abort_list) ? "Preempt" : "TMR", + dev->transport->name, tas); + + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); + core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, + preempt_and_abort_list); + core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, + preempt_and_abort_list); /* * Clear any legacy SPC-2 reservation when called during * LOGICAL UNIT RESET @@ -378,3 +430,4 @@ int core_tmr_lun_reset( dev->transport->name); return 0; } + From b0e062aec578c756d1aea4b5809294488366a6e8 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 11 Oct 2011 06:02:48 +0000 Subject: [PATCH 04/62] target: Prevent TRANSPORT_FREE_CMD_INTR processing in core_tmr_drain_cmd_list This patch contains a bugfix for TMR LUN_RESET related to TRANSPORT_FREE_CMD_INTR operation, where core_tmr_drain_cmd_list() will now skip processing for this case to prevent an ABORT_TASK status from being returned for descriptors that are already queued up to be released by processing thread context. Cc: Roland Dreier Cc: Christoph Hellwig Cc: stable@kernel.org Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index ed0b1ff99110..d04cc1016ebf 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -339,6 +339,16 @@ static void core_tmr_drain_cmd_list( */ if (prout_cmd == cmd) continue; + /* + * Skip direct processing of TRANSPORT_FREE_CMD_INTR for + * HW target mode fabrics. 
+ */ + spin_lock(&cmd->t_state_lock); + if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) { + spin_unlock(&cmd->t_state_lock); + continue; + } + spin_unlock(&cmd->t_state_lock); atomic_set(&cmd->t_transport_queue_active, 0); atomic_dec(&qobj->queue_cnt); From 77039d1eafbbc192df71ee84b157b8973766737d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 29 Sep 2011 01:01:35 -0700 Subject: [PATCH 05/62] target: Fix transport_cmd_finish_abort queue removal bug This patch fixes a bug in LUN_RESET operation with transport_cmd_finish_abort() where transport_remove_cmd_from_queue() was incorrectly being called, causing descriptors with t_state == TRANSPORT_FREE_CMD_INTR to be incorrectly removed from qobj->qobj_list during process context release. This change ensures the descriptor is only removed via transport_remove_cmd_from_queue() when doing a direct release via transport_generic_remove(). Cc: stable@kernel.org Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 046da7f38230..009547b35574 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -593,13 +593,14 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) return; - if (remove) + if (remove) { + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); transport_generic_remove(cmd, 0); + } } void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) From c252f003470a99d319db4ebd12f4a9e4710a65db Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 29 Sep 2011 14:22:13 -0700 Subject: [PATCH 06/62] target: Prevent transport_send_task_abort when CHECK_CONDITION status This patch fixes a bug where transport_send_task_abort() could be called during LUN_RESET to return SAM_STAT_TASK_ABORTED + tfo->queue_status(), when SCF_SENT_CHECK_CONDITION -> tfo->queue_status() has already been sent from within another context via transport_send_check_condition_and_sense(). Cc: stable@kernel.org Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 009547b35574..af1e0a5f9bc6 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4919,6 +4919,15 @@ EXPORT_SYMBOL(transport_check_aborted_status); void transport_send_task_abort(struct se_cmd *cmd) { + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + /* * If there are still expected incoming fabric WRITEs, we wait * until until they have completed before sending a TASK_ABORTED From 4ca495e0630b6fd960f94f89a9f13ad32b91bc96 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Sep 2011 21:50:49 +0200 Subject: [PATCH 07/62] tfm_fc: use transport_handle_cdb_direct ft_send_work is always called from workqueue context, which means we can handle the CDB directly instead of doing another context switch. 
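As a design note, the trade-off can be sketched generically as follows (illustration only; the function and type names below are made up and are not the tcm_fc or target core API): when the caller already runs in sleepable process context, dispatching inline skips one queue/wake-up/context-switch round trip.

#include <stdbool.h>

struct cmd;                               /* opaque command handle (hypothetical) */

void handle_cdb_inline(struct cmd *c);    /* assumed: runs the CDB submission path directly */
void queue_cmd_to_thread(struct cmd *c);  /* assumed: defers the command to a kernel thread */

static void dispatch_cmd(struct cmd *c, bool in_process_context)
{
	if (in_process_context)
		handle_cdb_inline(c);     /* e.g. called from a workqueue item */
	else
		queue_cmd_to_thread(c);   /* pay for the extra context switch */
}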
Signed-off-by: Christoph Hellwig Cc: Kiran Patil Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 3c6c5a1c7dd0..c2d148d80cf7 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -639,7 +639,7 @@ static void ft_send_work(struct work_struct *work) transport_generic_free_cmd(se_cmd, 0, 0); return; } - transport_generic_handle_cdb(se_cmd); + transport_handle_cdb_direct(se_cmd); return; err: From acf3ecc4a1c7460662757c07ee1ec625760d3ae6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Sep 2011 21:50:56 +0200 Subject: [PATCH 08/62] iscsi-target: always call transport_handle_cdb_direct iscsit_task_reassign_complete is always called from the TX thread, so handle the CDB directly instead of offloading it. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_tmr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index db1fe1ec84df..c3617d8ff3ea 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -318,7 +318,7 @@ static int iscsit_task_reassign_complete_read( pr_debug("READ ITT: 0x%08x: t_state: %d never sent to" " transport\n", cmd->init_task_tag, cmd->se_cmd.t_state); - transport_generic_handle_cdb(se_cmd); + transport_handle_cdb_direct(se_cmd); return 0; } From 680b73c5f2fb60336707b53b2b2792d2c01b69dc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 12 Sep 2011 21:51:14 +0200 Subject: [PATCH 09/62] target: remove transport_generic_handle_cdb Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 23 +++-------------------- include/target/target_core_transport.h | 1 - 2 files changed, 3 insertions(+), 21 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index af1e0a5f9bc6..f0f21bb28400 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1728,24 +1728,6 @@ int transport_generic_allocate_tasks( } EXPORT_SYMBOL(transport_generic_allocate_tasks); -/* - * Used by fabric module frontends not defining a TFO->new_cmd_map() - * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis - */ -int transport_generic_handle_cdb( - struct se_cmd *cmd) -{ - if (!cmd->se_lun) { - dump_stack(); - pr_err("cmd->se_lun is NULL\n"); - return -EINVAL; - } - - transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); - return 0; -} -EXPORT_SYMBOL(transport_generic_handle_cdb); - static void transport_generic_request_failure(struct se_cmd *, struct se_device *, int, int); /* @@ -5205,6 +5187,9 @@ get_cmd: continue; switch (cmd->t_state) { + case TRANSPORT_NEW_CMD: + BUG(); + break; case TRANSPORT_NEW_CMD_MAP: if (!cmd->se_tfo->new_cmd_map) { pr_err("cmd->se_tfo->new_cmd_map is" @@ -5219,8 +5204,6 @@ get_cmd: DMA_TO_DEVICE)); break; } - /* Fall through */ - case TRANSPORT_NEW_CMD: ret = transport_generic_new_cmd(cmd); if (ret == -EAGAIN) break; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 46aae4f94ede..0482a28629ff 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -171,7 +171,6 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd); void 
transport_kunmap_first_data_page(struct se_cmd *cmd); extern void transport_free_se_cmd(struct se_cmd *); extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); -extern int transport_generic_handle_cdb(struct se_cmd *); extern int transport_handle_cdb_direct(struct se_cmd *); extern int transport_generic_handle_cdb_map(struct se_cmd *); extern int transport_generic_handle_data(struct se_cmd *); From 31afc39c0c93edec5a117371f1bd2a264cceafac Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Sep 2011 23:08:11 +0200 Subject: [PATCH 10/62] target: don't opencode transport_release_cmd in transport_release_fe_cmd Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f0f21bb28400..0c1d004548b1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3794,8 +3794,7 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) transport_release_tasks(cmd); free_pages: transport_free_pages(cmd); - transport_free_se_cmd(cmd); - cmd->se_tfo->release_cmd(cmd); + transport_release_cmd(cmd); } static int From 2dbc43d256c5371ebc294e3534620663eb80a5ce Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Sep 2011 23:08:19 +0200 Subject: [PATCH 11/62] target: remove transport_free_se_cmd It is only called by transport_release_cmd, so inline it there. Also add a kerneldoc comment for transport_release_cmd while we are at it. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 25 +++++++++++-------------- include/target/target_core_transport.h | 1 - 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0c1d004548b1..49df0971a37f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1640,19 +1640,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; } -void transport_free_se_cmd( - struct se_cmd *se_cmd) -{ - if (se_cmd->se_tmr_req) - core_tmr_release_req(se_cmd->se_tmr_req); - /* - * Check and free any extended CDB buffer that was allocated - */ - if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) - kfree(se_cmd->t_task_cdb); -} -EXPORT_SYMBOL(transport_free_se_cmd); - static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); /* transport_generic_allocate_tasks(): @@ -4371,11 +4358,21 @@ queue_full: return ret; } +/** + * transport_release_cmd - free a command + * @cmd: command to free + * + * This routine unconditionally frees a command, and reference counting + * or list removal must be done in the caller. 
+ */ void transport_release_cmd(struct se_cmd *cmd) { BUG_ON(!cmd->se_tfo); - transport_free_se_cmd(cmd); + if (cmd->se_tmr_req) + core_tmr_release_req(cmd->se_tmr_req); + if (cmd->t_task_cdb != cmd->__t_task_cdb) + kfree(cmd->t_task_cdb); cmd->se_tfo->release_cmd(cmd); } EXPORT_SYMBOL(transport_release_cmd); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 0482a28629ff..99a671e0f9da 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -169,7 +169,6 @@ extern void transport_init_se_cmd(struct se_cmd *, unsigned char *); void *transport_kmap_first_data_page(struct se_cmd *cmd); void transport_kunmap_first_data_page(struct se_cmd *cmd); -extern void transport_free_se_cmd(struct se_cmd *); extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); extern int transport_handle_cdb_direct(struct se_cmd *); extern int transport_generic_handle_cdb_map(struct se_cmd *); From d3df7825aed2e69e12732f9e32ef9093b01302d8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Sep 2011 23:08:32 +0200 Subject: [PATCH 12/62] target: simplify transport_generic_remove Instead of duplicating the code from transport_release_fe_cmd re-use it by allowing transport_release_fe_cmd to return wether it actually freed the command or not. Also rename transport_release_fe_cmd to transport_put_cmd and add a kerneldoc comment for it to make the use case more obvious. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 33 +++++++++++--------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 49df0971a37f..bee923fd5750 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -88,7 +88,7 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd, static int transport_generic_get_mem(struct se_cmd *cmd); static int transport_generic_remove(struct se_cmd *cmd, int session_reinstatement); -static void transport_release_fe_cmd(struct se_cmd *cmd); +static bool transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); @@ -1074,7 +1074,7 @@ static void transport_release_all_cmds(struct se_device *dev) cmd->se_tfo->get_task_tag(cmd), cmd->se_tfo->get_cmd_state(cmd), t_state); - transport_release_fe_cmd(cmd); + transport_put_cmd(cmd); bug_out = 1; spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); @@ -3762,12 +3762,18 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) return 0; } -static void transport_release_fe_cmd(struct se_cmd *cmd) +/** + * transport_put_cmd - release a reference to a command + * @cmd: command to release + * + * This routine releases our reference to the command and frees it if possible. 
+ */ +static bool transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; if (transport_dec_and_check(cmd)) - return; + return false; spin_lock_irqsave(&cmd->t_state_lock, flags); if (!atomic_read(&cmd->transport_dev_active)) { @@ -3779,9 +3785,12 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); + return true; + free_pages: transport_free_pages(cmd); transport_release_cmd(cmd); + return true; } static int @@ -3789,7 +3798,7 @@ transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) { unsigned long flags; - if (transport_dec_and_check(cmd)) { + if (!transport_put_cmd(cmd)) { if (session_reinstatement) { spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); @@ -3799,20 +3808,6 @@ transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) return 1; } - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!atomic_read(&cmd->transport_dev_active)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - goto free_pages; - } - atomic_set(&cmd->transport_dev_active, 0); - transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - transport_release_tasks(cmd); - -free_pages: - transport_free_pages(cmd); - transport_release_cmd(cmd); return 0; } From 4911e3ccbec047ed1f728e19a70ad87729a3fb01 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Sep 2011 23:08:42 +0200 Subject: [PATCH 13/62] target: simplify transport_put_cmd Inline two simple functions only used by it, and replace a goto with a simple if else construct. Note that the code moved from transport_dec_and_check seems fairly buggy - the atomic_read check on a variable where we'd do an atomic_dec_and_test looks racy if we'll ever get someone increment it without the lock held around them (which it looks like we do), and not decrementing the second counter if the first one doesn't hit zero also at least needs an explanation. 
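To make that concern concrete, here is a sketch of the check-then-decrement shape being questioned, written with C11 atomics as a stand-in for the kernel's atomic_t API (the names are illustrative, not the target core code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ref_count;

/* Returns true when the caller should go on to free the object. */
static bool put_ref_check_then_dec(void)
{
	/*
	 * Racy window: another context can increment ref_count between the
	 * load below and the decrement, so the "count is zero, skip the
	 * decrement and free" decision may be based on a stale value unless
	 * every increment happens under the same lock as this check.
	 */
	if (atomic_load(&ref_count) == 0)
		return true;

	/* atomic_fetch_sub(...) == 1 is the C11 spelling of atomic_dec_and_test(). */
	return atomic_fetch_sub(&ref_count, 1) == 1;
}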
(nab: Fix transport_put_cmd breakage) Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 61 +++++++++----------------- 1 file changed, 20 insertions(+), 41 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index bee923fd5750..a650100d997a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3732,36 +3732,6 @@ static inline void transport_free_pages(struct se_cmd *cmd) cmd->t_bidi_data_nents = 0; } -static inline void transport_release_tasks(struct se_cmd *cmd) -{ - transport_free_dev_tasks(cmd); -} - -static inline int transport_dec_and_check(struct se_cmd *cmd) -{ - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (atomic_read(&cmd->t_fe_count)) { - if (!atomic_dec_and_test(&cmd->t_fe_count)) { - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - return 1; - } - } - - if (atomic_read(&cmd->t_se_count)) { - if (!atomic_dec_and_test(&cmd->t_se_count)) { - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - return 1; - } - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - return 0; -} - /** * transport_put_cmd - release a reference to a command * @cmd: command to release @@ -3771,26 +3741,35 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) static bool transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; - - if (transport_dec_and_check(cmd)) - return false; + int free_tasks = 0; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!atomic_read(&cmd->transport_dev_active)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - goto free_pages; + if (atomic_read(&cmd->t_fe_count)) { + if (!atomic_dec_and_test(&cmd->t_fe_count)) + goto out_busy; + } + + if (atomic_read(&cmd->t_se_count)) { + if (!atomic_dec_and_test(&cmd->t_se_count)) + goto out_busy; + } + + if (atomic_read(&cmd->transport_dev_active)) { + atomic_set(&cmd->transport_dev_active, 0); + transport_all_task_dev_remove_state(cmd); + free_tasks = 1; } - atomic_set(&cmd->transport_dev_active, 0); - transport_all_task_dev_remove_state(cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_release_tasks(cmd); - return true; + if (free_tasks != 0) + transport_free_dev_tasks(cmd); -free_pages: transport_free_pages(cmd); transport_release_cmd(cmd); return true; +out_busy: + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return false; } static int From e6a2573f1f5d66f0456c433afdfc63f33fdf9008 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Sep 2011 23:08:50 +0200 Subject: [PATCH 14/62] target: remove transport_generic_remove All callers that never have the session_reinstatement flag set can trivially be converted to transport_put_cmd. Opencode the session reinstatement code in transport_generic_free_cmd, which was the only caller ever asking for it. 
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 44 +++++++++----------------- 1 file changed, 15 insertions(+), 29 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a650100d997a..4f21b88b85b1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -86,8 +86,6 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd, enum dma_data_direction data_direction, struct scatterlist *sgl, unsigned int nents); static int transport_generic_get_mem(struct se_cmd *cmd); -static int transport_generic_remove(struct se_cmd *cmd, - int session_reinstatement); static bool transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); @@ -599,7 +597,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) return; if (remove) { transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); } } @@ -610,7 +608,7 @@ void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) if (transport_cmd_check_stop_to_fabric(cmd)) return; - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); } static void transport_add_cmd_to_queue( @@ -2072,7 +2070,7 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) unsigned long flags; /* - * Reset cmd->t_se_count to allow transport_generic_remove() + * Reset cmd->t_se_count to allow transport_put_cmd() * to allow last call to free memory resources. */ spin_lock_irqsave(&cmd->t_state_lock, flags); @@ -2083,7 +2081,7 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) } spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); } static inline u32 transport_lba_21(unsigned char *cdb) @@ -3772,24 +3770,6 @@ out_busy: return false; } -static int -transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) -{ - unsigned long flags; - - if (!transport_put_cmd(cmd)) { - if (session_reinstatement) { - spin_lock_irqsave(&cmd->t_state_lock, flags); - transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - } - return 1; - } - - return 0; -} - /* * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of * allocating in the core. 
@@ -4379,7 +4359,13 @@ void transport_generic_free_cmd( transport_free_dev_tasks(cmd); - transport_generic_remove(cmd, session_reinstatement); + if (!transport_put_cmd(cmd) && session_reinstatement) { + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } } } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -5066,7 +5052,7 @@ static void transport_processing_shutdown(struct se_device *dev) transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); } spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -5094,7 +5080,7 @@ static void transport_processing_shutdown(struct se_device *dev) transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); } spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -5117,7 +5103,7 @@ static void transport_processing_shutdown(struct se_device *dev) } else { transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); } } } @@ -5192,7 +5178,7 @@ get_cmd: transport_generic_complete_ok(cmd); break; case TRANSPORT_REMOVE: - transport_generic_remove(cmd, 0); + transport_put_cmd(cmd); break; case TRANSPORT_FREE_CMD_INTR: transport_generic_free_cmd(cmd, 0, 0); From 82f1c8a4e7739eae9f1c32c2c419efdc19b8af41 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Sep 2011 23:09:01 +0200 Subject: [PATCH 15/62] target: push session reinstatement out of transport_generic_free_cmd Push session reinstatement out of transport_generic_free_cmd into the only caller that actually needs it. Clean up transport_generic_free_cmd a bit, and remove the useless comment. I'd love to add a more useful kerneldoc comment for it, but as this point I'm still a bit confused in where it stands in the command release stack. 
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 2 +- drivers/target/loopback/tcm_loop.c | 4 +-- drivers/target/target_core_transport.c | 37 +++++++++----------------- drivers/target/tcm_fc/tfc_cmd.c | 12 ++++----- include/target/target_core_transport.h | 2 +- 5 files changed, 23 insertions(+), 34 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 6a4ea29c2f36..7dc2cfe9431c 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3547,7 +3547,7 @@ get_immediate: iscsit_release_cmd(cmd); else transport_generic_free_cmd(&cmd->se_cmd, - 1, 0); + 1); goto get_immediate; case ISTATE_SEND_NOPIN_WANT_RESPONSE: spin_unlock_bh(&cmd->istate_lock); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index aa2d67997235..f0e701d27bd1 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd) * Release the struct se_cmd, which will make a callback to release * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() */ - transport_generic_free_cmd(se_cmd, 0, 0); + transport_generic_free_cmd(se_cmd, 0); } static void tcm_loop_release_cmd(struct se_cmd *se_cmd) @@ -388,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) SUCCESS : FAILED; release: if (se_cmd) - transport_generic_free_cmd(se_cmd, 1, 0); + transport_generic_free_cmd(se_cmd, 1); else kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); kfree(tl_tmr); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4f21b88b85b1..db2f8987a046 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4331,42 +4331,25 @@ void transport_release_cmd(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_release_cmd); -/* transport_generic_free_cmd(): - * - * Called from processing frontend to release storage engine resources - */ -void transport_generic_free_cmd( - struct se_cmd *cmd, - int wait_for_tasks, - int session_reinstatement) +bool transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) transport_release_cmd(cmd); else { core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); - if (cmd->se_lun) { -#if 0 - pr_debug("cmd: %p ITT: 0x%08x contains" - " cmd->se_lun\n", cmd, - cmd->se_tfo->get_task_tag(cmd)); -#endif + if (cmd->se_lun) transport_lun_remove_cmd(cmd); - } if (wait_for_tasks && cmd->transport_wait_for_tasks) cmd->transport_wait_for_tasks(cmd, 0, 0); transport_free_dev_tasks(cmd); - if (!transport_put_cmd(cmd) && session_reinstatement) { - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - } + return transport_put_cmd(cmd); } + + return true; } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -4631,7 +4614,13 @@ remove: if (!remove_cmd) return; - transport_generic_free_cmd(cmd, 0, session_reinstatement); + if (!transport_generic_free_cmd(cmd, 0) && session_reinstatement) { + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + transport_all_task_dev_remove_state(cmd); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + } } static int transport_get_sense_codes( @@ -5181,7 +5170,7 @@ get_cmd: transport_put_cmd(cmd); break; case TRANSPORT_FREE_CMD_INTR: - 
transport_generic_free_cmd(cmd, 0, 0); + transport_generic_free_cmd(cmd, 0); break; case TRANSPORT_PROCESS_TMR: transport_generic_do_tmr(cmd); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index c2d148d80cf7..7f2ee5a0ed79 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -114,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd) void ft_check_stop_free(struct se_cmd *se_cmd) { - transport_generic_free_cmd(se_cmd, 0, 0); + transport_generic_free_cmd(se_cmd, 0); } /* @@ -269,7 +269,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) /* XXX need to find cmd if queued */ cmd->se_cmd.t_state = TRANSPORT_REMOVE; cmd->seq = NULL; - transport_generic_free_cmd(&cmd->se_cmd, 0, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0); return; } @@ -287,7 +287,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) __func__, fh->fh_r_ctl); ft_invl_hw_context(cmd); fc_frame_free(fp); - transport_generic_free_cmd(&cmd->se_cmd, 0, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0); break; } } @@ -420,7 +420,7 @@ static void ft_send_tm(struct ft_cmd *cmd) sess = cmd->sess; transport_send_check_condition_and_sense(&cmd->se_cmd, cmd->se_cmd.scsi_sense_reason, 0); - transport_generic_free_cmd(&cmd->se_cmd, 0, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0); ft_sess_put(sess); return; } @@ -627,7 +627,7 @@ static void ft_send_work(struct work_struct *work) if (ret == -ENOMEM) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - transport_generic_free_cmd(se_cmd, 0, 0); + transport_generic_free_cmd(se_cmd, 0); return; } if (ret == -EINVAL) { @@ -636,7 +636,7 @@ static void ft_send_work(struct work_struct *work) else transport_send_check_condition_and_sense(se_cmd, se_cmd->scsi_sense_reason, 0); - transport_generic_free_cmd(se_cmd, 0, 0); + transport_generic_free_cmd(se_cmd, 0); return; } transport_handle_cdb_direct(se_cmd); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 99a671e0f9da..a113129fa22e 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -184,7 +184,7 @@ extern int transport_check_aborted_status(struct se_cmd *, int); extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); extern void transport_send_task_abort(struct se_cmd *); extern void transport_release_cmd(struct se_cmd *); -extern void transport_generic_free_cmd(struct se_cmd *, int, int); +extern bool transport_generic_free_cmd(struct se_cmd *, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, From 39c05f321a4b27f3036392eed68bd94ce2267155 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 8 Oct 2011 13:59:52 -0700 Subject: [PATCH 16/62] target: Remove session_reinstatement parameter from ->transport_wait_for_tasks This patch removes the unnecessary session_reinstatement parameter from se_cmd->transport_wait_for_tasks(), logic in transport_generic_wait_for_tasks, and usage within iscsi-target code. This also includes the removal of the 'bool' return from transport_put_cmd() + transport_generic_free_cmd() that is no longer necessary. 
Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 4 +-- drivers/target/iscsi/iscsi_target_erl2.c | 15 +++++------ drivers/target/target_core_transport.c | 33 ++++++++---------------- include/target/target_core_base.h | 2 +- include/target/target_core_transport.h | 2 +- 5 files changed, 22 insertions(+), 34 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7dc2cfe9431c..354a8339a3f9 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3960,7 +3960,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd(). */ if (cmd->tmr_req && se_cmd->transport_wait_for_tasks) - se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); + se_cmd->transport_wait_for_tasks(se_cmd, 1); else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) transport_release_cmd(se_cmd); else @@ -3976,7 +3976,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) se_cmd = &cmd->se_cmd; if (se_cmd->transport_wait_for_tasks) - se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); + se_cmd->transport_wait_for_tasks(se_cmd, 1); spin_lock_bh(&conn->cmd_lock); } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 91a4d170bda4..000356baf0b5 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -148,7 +148,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) iscsit_release_cmd(cmd); else cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + &cmd->se_cmd, 1); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -175,7 +175,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) iscsit_release_cmd(cmd); else cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + &cmd->se_cmd, 1); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -265,7 +265,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( iscsit_release_cmd(cmd); else cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 0); + &cmd->se_cmd, 1); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -324,7 +324,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) iscsit_release_cmd(cmd); else cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + &cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -383,7 +383,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsit_release_cmd(cmd); else cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 0); + &cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); continue; } @@ -409,7 +409,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsit_release_cmd(cmd); else cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1, 1); + &cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); continue; } @@ -436,8 +436,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && cmd->se_cmd.transport_wait_for_tasks) - cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, - 0, 0); + cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, 0); /* * Add the struct iscsi_cmd to the connection recovery cmd list */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 
db2f8987a046..e5905269c850 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -86,7 +86,7 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd, enum dma_data_direction data_direction, struct scatterlist *sgl, unsigned int nents); static int transport_generic_get_mem(struct se_cmd *cmd); -static bool transport_put_cmd(struct se_cmd *cmd); +static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); @@ -1638,7 +1638,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; } -static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); +static void transport_generic_wait_for_tasks(struct se_cmd *, int); /* transport_generic_allocate_tasks(): * @@ -2504,7 +2504,7 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } -static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); +static void transport_nop_wait_for_tasks(struct se_cmd *, int); static inline u32 transport_get_sectors_6( unsigned char *cdb, @@ -3736,7 +3736,7 @@ static inline void transport_free_pages(struct se_cmd *cmd) * * This routine releases our reference to the command and frees it if possible. */ -static bool transport_put_cmd(struct se_cmd *cmd) +static void transport_put_cmd(struct se_cmd *cmd) { unsigned long flags; int free_tasks = 0; @@ -3764,10 +3764,9 @@ static bool transport_put_cmd(struct se_cmd *cmd) transport_free_pages(cmd); transport_release_cmd(cmd); - return true; + return; out_busy: spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return false; } /* @@ -4331,7 +4330,7 @@ void transport_release_cmd(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_release_cmd); -bool transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) transport_release_cmd(cmd); @@ -4342,21 +4341,18 @@ bool transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) transport_lun_remove_cmd(cmd); if (wait_for_tasks && cmd->transport_wait_for_tasks) - cmd->transport_wait_for_tasks(cmd, 0, 0); + cmd->transport_wait_for_tasks(cmd, 0); transport_free_dev_tasks(cmd); - return transport_put_cmd(cmd); + transport_put_cmd(cmd); } - - return true; } EXPORT_SYMBOL(transport_generic_free_cmd); static void transport_nop_wait_for_tasks( struct se_cmd *cmd, - int remove_cmd, - int session_reinstatement) + int remove_cmd) { return; } @@ -4537,8 +4533,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) */ static void transport_generic_wait_for_tasks( struct se_cmd *cmd, - int remove_cmd, - int session_reinstatement) + int remove_cmd) { unsigned long flags; @@ -4614,13 +4609,7 @@ remove: if (!remove_cmd) return; - if (!transport_generic_free_cmd(cmd, 0) && session_reinstatement) { - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - } + transport_generic_free_cmd(cmd, 0); } static int transport_get_sense_codes( diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 27040653005e..c10e351bc1f5 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -480,7 +480,7 @@ struct se_cmd { struct target_core_fabric_ops *se_tfo; int 
(*transport_emulate_cdb)(struct se_cmd *); void (*transport_split_cdb)(unsigned long long, u32, unsigned char *); - void (*transport_wait_for_tasks)(struct se_cmd *, int, int); + void (*transport_wait_for_tasks)(struct se_cmd *, int); void (*transport_complete_callback)(struct se_cmd *); int (*transport_qf_callback)(struct se_cmd *); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index a113129fa22e..e67feeb88b69 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -184,7 +184,7 @@ extern int transport_check_aborted_status(struct se_cmd *, int); extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); extern void transport_send_task_abort(struct se_cmd *); extern void transport_release_cmd(struct se_cmd *); -extern bool transport_generic_free_cmd(struct se_cmd *, int); +extern void transport_generic_free_cmd(struct se_cmd *, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, From b6b4e61ff4e26d6721b2607ea23402825a38c402 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 19 Sep 2011 11:11:21 +0300 Subject: [PATCH 17/62] target: simplify target_parse_naa_6h_vendor_specific() This patch adds a minor simplfication in target_parse_naa_6h_vendor_specific() to remove direct isxdigit() + ctype.h usage. (nab: Fix next assignment breakage in for loop) Signed-off-by: Andy Shevchenko Cc: Nicholas Bellinger Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_cdb.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index f04d4ef99dca..0d02391d1fb1 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -24,7 +24,6 @@ */ #include -#include #include #include @@ -156,11 +155,12 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) } static void -target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_off) +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) { unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; - unsigned char *buf = buf_off; - int cnt = 0, next = 1; + int cnt; + bool next = true; + /* * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field @@ -169,19 +169,18 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf_of * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure * per device uniqeness. */ - while (*p != '\0') { - if (cnt >= 13) - break; - if (!isxdigit(*p)) { - p++; + for (cnt = 0; *p && cnt < 13; p++) { + int val = hex_to_bin(*p); + + if (val < 0) continue; - } - if (next != 0) { - buf[cnt++] |= hex_to_bin(*p++); - next = 0; + + if (next) { + next = false; + buf[cnt++] |= val; } else { - buf[cnt] = hex_to_bin(*p++) << 4; - next = 1; + next = true; + buf[cnt] = val << 4; } } } From dbbf3e94c2b26988d3c41af63e50189e9133eb28 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 25 Sep 2011 14:56:24 -0400 Subject: [PATCH 18/62] target: cleanup iblock bio submission Move the entirely bio allocation, mapping and submission into ->do_task. 
This a) avoids blocking the I/O submission thread unessecarily, and b) simplifies the code greatly Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_iblock.c | 194 +++++++++------------------- drivers/target/target_core_iblock.h | 1 - 2 files changed, 59 insertions(+), 136 deletions(-) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7f0cc53b4581..dcf93f85977a 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -374,45 +374,6 @@ static int iblock_emulated_fua_read(struct se_device *dev) return 0; } -static int iblock_do_task(struct se_task *task) -{ - struct se_device *dev = task->task_se_cmd->se_dev; - struct iblock_req *req = IBLOCK_REQ(task); - struct bio *bio = req->ib_bio, *nbio = NULL; - struct blk_plug plug; - int rw; - - if (task->task_data_direction == DMA_TO_DEVICE) { - /* - * Force data to disk if we pretend to not have a volatile - * write cache, or the initiator set the Force Unit Access bit. - */ - if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || - (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_tasks_fua)) - rw = WRITE_FUA; - else - rw = WRITE; - } else { - rw = READ; - } - - blk_start_plug(&plug); - while (bio) { - nbio = bio->bi_next; - bio->bi_next = NULL; - pr_debug("Calling submit_bio() task: %p bio: %p" - " bio->bi_sector: %llu\n", task, bio, - (unsigned long long)bio->bi_sector); - - submit_bio(rw, bio); - bio = nbio; - } - blk_finish_plug(&plug); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} - static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) { struct iblock_dev *ibd = dev->dev_ptr; @@ -424,20 +385,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) static void iblock_free_task(struct se_task *task) { - struct iblock_req *req = IBLOCK_REQ(task); - struct bio *bio, *hbio = req->ib_bio; - /* - * We only release the bio(s) here if iblock_bio_done() has not called - * bio_put() -> iblock_bio_destructor(). 
- */ - while (hbio != NULL) { - bio = hbio; - hbio = hbio->bi_next; - bio->bi_next = NULL; - bio_put(bio); - } - - kfree(req); + kfree(IBLOCK_REQ(task)); } enum { @@ -556,20 +504,16 @@ static void iblock_bio_destructor(struct bio *bio) bio_free(bio, ib_dev->ibd_bio_set); } -static struct bio *iblock_get_bio( - struct se_task *task, - struct iblock_req *ib_req, - struct iblock_dev *ib_dev, - int *ret, - sector_t lba, - u32 sg_num) +static struct bio * +iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) { + struct iblock_dev *ib_dev = task->se_dev->dev_ptr; + struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio; bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); if (!bio) { pr_err("Unable to allocate memory for bio\n"); - *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; return NULL; } @@ -590,17 +534,33 @@ static struct bio *iblock_get_bio( return bio; } -static int iblock_map_data_SG(struct se_task *task) +static int iblock_do_task(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - struct iblock_dev *ib_dev = task->se_dev->dev_ptr; - struct iblock_req *ib_req = IBLOCK_REQ(task); - struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; + struct bio *bio; + struct bio_list list; struct scatterlist *sg; - int ret = 0; u32 i, sg_num = task->task_sg_nents; sector_t block_lba; + struct blk_plug plug; + int rw; + + if (task->task_data_direction == DMA_TO_DEVICE) { + /* + * Force data to disk if we pretend to not have a volatile + * write cache, or the initiator set the Force Unit Access bit. + */ + if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && + task->task_se_cmd->t_tasks_fua)) + rw = WRITE_FUA; + else + rw = WRITE; + } else { + rw = READ; + } + /* * Do starting conversion up from non 512-byte blocksize with * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. @@ -619,63 +579,43 @@ static int iblock_map_data_SG(struct se_task *task) return PYX_TRANSPORT_LU_COMM_FAILURE; } - bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); + bio = iblock_get_bio(task, block_lba, sg_num); if (!bio) - return ret; + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + + bio_list_init(&list); + bio_list_add(&list, bio); - ib_req->ib_bio = bio; - hbio = tbio = bio; - /* - * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist - * from task->task_sg -> struct scatterlist memory. - */ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { - pr_debug("task: %p bio: %p Calling bio_add_page(): page:" - " %p len: %u offset: %u\n", task, bio, sg_page(sg), - sg->length, sg->offset); -again: - ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); - if (ret != sg->length) { - - pr_debug("*** Set bio->bi_sector: %llu\n", - (unsigned long long)bio->bi_sector); - pr_debug("** task->task_size: %u\n", - task->task_size); - pr_debug("*** bio->bi_max_vecs: %u\n", - bio->bi_max_vecs); - pr_debug("*** bio->bi_vcnt: %u\n", - bio->bi_vcnt); - - bio = iblock_get_bio(task, ib_req, ib_dev, &ret, - block_lba, sg_num); + /* + * XXX: if the length the device accepts is shorter than the + * length of the S/G list entry this will cause and + * endless loop. Better hope no driver uses huge pages. 
+ */ + while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) + != sg->length) { + bio = iblock_get_bio(task, block_lba, sg_num); if (!bio) goto fail; - - tbio = tbio->bi_next = bio; - pr_debug("-----------------> Added +1 bio: %p to" - " list, Going to again\n", bio); - goto again; + bio_list_add(&list, bio); } + /* Always in 512 byte units for Linux/Block */ block_lba += sg->length >> IBLOCK_LBA_SHIFT; sg_num--; - pr_debug("task: %p bio-add_page() passed!, decremented" - " sg_num to %u\n", task, sg_num); - pr_debug("task: %p bio_add_page() passed!, increased lba" - " to %llu\n", task, (unsigned long long)block_lba); - pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" - " %u\n", task, bio->bi_vcnt); } - return 0; + blk_start_plug(&plug); + while ((bio = bio_list_pop(&list))) + submit_bio(rw, bio); + blk_finish_plug(&plug); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + fail: - while (hbio) { - bio = hbio; - hbio = hbio->bi_next; - bio->bi_next = NULL; + while ((bio = bio_list_pop(&list))) bio_put(bio); - } - return ret; + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } static unsigned char *iblock_get_cdb(struct se_task *task) @@ -706,6 +646,7 @@ static void iblock_bio_done(struct bio *bio, int err) { struct se_task *task = bio->bi_private; struct iblock_req *ibr = IBLOCK_REQ(task); + /* * Set -EIO if !BIO_UPTODATE and the passed is still err=0 */ @@ -720,41 +661,24 @@ static void iblock_bio_done(struct bio *bio, int err) */ atomic_inc(&ibr->ib_bio_err_cnt); smp_mb__after_atomic_inc(); - bio_put(bio); - /* - * Wait to complete the task until the last bio as completed. - */ - if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) - return; - - ibr->ib_bio = NULL; - transport_complete_task(task, 0); - return; } - pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", - task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); - /* - * bio_put() will call iblock_bio_destructor() to release the bio back - * to ibr->ib_bio_set. - */ + bio_put(bio); - /* - * Wait to complete the task until the last bio as completed. - */ + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) return; - /* - * Return GOOD status for task if zero ib_bio_err_cnt exists. - */ - ibr->ib_bio = NULL; - transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); + + pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", + task, bio, task->task_lba, + (unsigned long long)bio->bi_sector, err); + + transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt)); } static struct se_subsystem_api iblock_template = { .name = "iblock", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, - .map_data_SG = iblock_map_data_SG, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .allocate_virtdevice = iblock_allocate_virtdevice, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index a121cd1b6575..7a76f663385c 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -11,7 +11,6 @@ struct iblock_req { unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE]; atomic_t ib_bio_cnt; atomic_t ib_bio_err_cnt; - struct bio *ib_bio; } ____cacheline_aligned; #define IBDF_HAS_UDEV_PATH 0x01 From 02b1a7463420e1cebe86c6755c44d3bd9489829e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 25 Sep 2011 14:56:32 -0400 Subject: [PATCH 19/62] target: cleanup pscsi request submission Move the entirely request allocation, mapping and submission into ->do_task. 
This a) avoids blocking the I/O submission thread unessecarily, and b) simplifies the code greatly Note that the code seems to have various error handling issues, mostly related to bidi handling in the current form. I've added comments about those but not tried to fix them in this commit. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pscsi.c | 265 ++++++++++------------------- drivers/target/target_core_pscsi.h | 1 - 2 files changed, 92 insertions(+), 174 deletions(-) diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 77d725886410..b6d609362d62 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -776,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb) return &pt->pscsi_task; } -static inline void pscsi_blk_init_request( - struct se_task *task, - struct pscsi_plugin_task *pt, - struct request *req, - int bidi_read) -{ - /* - * Defined as "scsi command" in include/linux/blkdev.h. - */ - req->cmd_type = REQ_TYPE_BLOCK_PC; - /* - * For the extra BIDI-COMMAND READ struct request we do not - * need to setup the remaining structure members - */ - if (bidi_read) - return; - /* - * Setup the done function pointer for struct request, - * also set the end_io_data pointer.to struct se_task. - */ - req->end_io = pscsi_req_done; - req->end_io_data = task; - /* - * Load the referenced struct se_task's SCSI CDB into - * include/linux/blkdev.h:struct request->cmd - */ - req->cmd_len = scsi_command_size(pt->pscsi_cdb); - req->cmd = &pt->pscsi_cdb[0]; - /* - * Setup pointer for outgoing sense data. - */ - req->sense = &pt->pscsi_sense[0]; - req->sense_len = 0; -} - -/* - * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB -*/ -static int pscsi_blk_get_request(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - - pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, - (task->task_data_direction == DMA_TO_DEVICE), - GFP_KERNEL); - if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { - pr_err("PSCSI: blk_get_request() failed: %ld\n", - IS_ERR(pt->pscsi_req)); - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - /* - * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, - * and setup rq callback, CDB and sense. - */ - pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - return 0; -} - -/* pscsi_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int pscsi_do_task(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - /* - * Set the struct request->timeout value based on peripheral - * device type from SCSI. - */ - if (pdv->pdv_sd->type == TYPE_DISK) - pt->pscsi_req->timeout = PS_TIMEOUT_DISK; - else - pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; - - pt->pscsi_req->retries = PS_RETRY; - /* - * Queue the struct request into the struct scsi_device->request_queue. 
- * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd - * descriptor - */ - blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, - (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), - pscsi_req_done); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} - static void pscsi_free_task(struct se_task *task) { struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -1048,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num) return bio; } -static int __pscsi_map_SG( - struct se_task *task, - struct scatterlist *task_sg, - u32 task_sg_num, - int bidi_read) +static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, + struct bio **hbio) { - struct pscsi_plugin_task *pt = PSCSI_TASK(task); struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; + u32 task_sg_num = task->task_sg_nents; + struct bio *bio = NULL, *tbio = NULL; struct page *page; struct scatterlist *sg; u32 data_len = task->task_size, i, len, bytes, off; @@ -1065,19 +973,8 @@ static int __pscsi_map_SG( int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; int rw = (task->task_data_direction == DMA_TO_DEVICE); - if (!task->task_size) - return 0; - /* - * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup - * the bio_vec maplist from task->task_sg -> - * struct scatterlist memory. The struct se_task->task_sg[] currently needs - * to be attached to struct bios for submission to Linux/SCSI using - * struct request to struct scsi_device->request_queue. - * - * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI - * is ported to upstream SCSI passthrough functionality that accepts - * struct scatterlist->page_link or struct page as a paraemeter. - */ + *hbio = NULL; + pr_debug("PSCSI: nr_pages: %d\n", nr_pages); for_each_sg(task_sg, sg, task_sg_num, i) { @@ -1114,8 +1011,8 @@ static int __pscsi_map_SG( * bios need to be added to complete a given * struct se_task */ - if (!hbio) - hbio = tbio = bio; + if (!*hbio) + *hbio = tbio = bio; else tbio = tbio->bi_next = bio; } @@ -1151,82 +1048,108 @@ static int __pscsi_map_SG( off = 0; } } - /* - * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND - * primary SCSI WRITE poayload mapped for struct se_task->task_sg[] - */ - if (!bidi_read) { - /* - * Starting with v2.6.31, call blk_make_request() passing in *hbio to - * allocate the pSCSI task a struct request. - */ - pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, - hbio, GFP_KERNEL); - if (!pt->pscsi_req) { - pr_err("pSCSI: blk_make_request() failed\n"); - goto fail; - } - /* - * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, - * and setup rq callback, CDB and sense. 
- */ - pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - - return task->task_sg_nents; - } - /* - * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND - * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[] - */ - pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, - hbio, GFP_KERNEL); - if (!pt->pscsi_req->next_rq) { - pr_err("pSCSI: blk_make_request() failed for BIDI\n"); - goto fail; - } - pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); return task->task_sg_nents; fail: - while (hbio) { - bio = hbio; - hbio = hbio->bi_next; + while (*hbio) { + bio = *hbio; + *hbio = (*hbio)->bi_next; bio->bi_next = NULL; - bio_endio(bio, 0); + bio_endio(bio, 0); /* XXX: should be error */ } return ret; } -/* - * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. - */ -static int pscsi_map_SG(struct se_task *task) +static int pscsi_do_task(struct se_task *task) { + struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct pscsi_plugin_task *pt = PSCSI_TASK(task); + struct request *req; + struct bio *hbio; int ret; - /* - * Setup the main struct request for the task->task_sg[] payload - */ + if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { + req = blk_get_request(pdv->pdv_sd->request_queue, + (task->task_data_direction == DMA_TO_DEVICE), + GFP_KERNEL); + if (!req || IS_ERR(req)) { + pr_err("PSCSI: blk_get_request() failed: %ld\n", + req ? IS_ERR(req) : -ENOMEM); + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + } else { + BUG_ON(!task->task_size); - ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); - if (ret >= 0 && task->task_sg_bidi) { /* - * If present, set up the extra BIDI-COMMAND SCSI READ - * struct request and payload. + * Setup the main struct request for the task->task_sg[] payload */ - ret = __pscsi_map_SG(task, task->task_sg_bidi, - task->task_sg_nents, 1); + ret = pscsi_map_sg(task, task->task_sg, &hbio); + if (ret < 0) + return PYX_TRANSPORT_LU_COMM_FAILURE; + + req = blk_make_request(pdv->pdv_sd->request_queue, hbio, + GFP_KERNEL); + if (!req) { + pr_err("pSCSI: blk_make_request() failed\n"); + goto fail; + } + + if (task->task_sg_bidi) { + /* + * If present, set up the extra BIDI-COMMAND SCSI READ + * struct request and payload. + */ + ret = pscsi_map_sg(task, task->task_sg_bidi, &hbio); + if (ret < 0) { + /* XXX: free the main request? */ + return PYX_TRANSPORT_LU_COMM_FAILURE; + } + + /* + * Setup the secondary pt->pscsi_req->next_rq used for the extra + * BIDI READ payload. + */ + req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, + hbio, GFP_KERNEL); + if (!req) { + pr_err("pSCSI: blk_make_request() failed for BIDI\n"); + /* XXX: free the main request? 
*/ + goto fail; + } + + req->next_rq->cmd_type = REQ_TYPE_BLOCK_PC; + } } - if (ret < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; - return 0; + req->cmd_type = REQ_TYPE_BLOCK_PC; + req->end_io = pscsi_req_done; + req->end_io_data = task; + req->cmd_len = scsi_command_size(pt->pscsi_cdb); + req->cmd = &pt->pscsi_cdb[0]; + req->sense = &pt->pscsi_sense[0]; + req->sense_len = 0; + if (pdv->pdv_sd->type == TYPE_DISK) + req->timeout = PS_TIMEOUT_DISK; + else + req->timeout = PS_TIMEOUT_OTHER; + req->retries = PS_RETRY; + + blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, + (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), + pscsi_req_done); + + return PYX_TRANSPORT_SENT_TO_TRANSPORT; + +fail: + while (hbio) { + struct bio *bio = hbio; + hbio = hbio->bi_next; + bio->bi_next = NULL; + bio_endio(bio, 0); /* XXX: should be error */ + } + return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } -static int pscsi_CDB_none(struct se_task *task) -{ - return pscsi_blk_get_request(task); -} /* pscsi_get_cdb(): * @@ -1334,16 +1257,12 @@ static void pscsi_req_done(struct request *req, int uptodate) __blk_put_request(req->q, req->next_rq); __blk_put_request(req->q, req); - pt->pscsi_req = NULL; } static struct se_subsystem_api pscsi_template = { .name = "pscsi", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, - .cdb_none = pscsi_CDB_none, - .map_control_SG = pscsi_map_SG, - .map_data_SG = pscsi_map_SG, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index ebf4f1ae2c83..fdc17b6aefb3 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -27,7 +27,6 @@ struct pscsi_plugin_task { int pscsi_direction; int pscsi_result; u32 pscsi_resid; - struct request *pscsi_req; unsigned char pscsi_cdb[0]; } ____cacheline_aligned; From a3eedc227bfa7c9e21ef3cebe164d06a4c507a71 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 25 Sep 2011 14:56:43 -0400 Subject: [PATCH 20/62] target: remove unused se_subsystem_api methods The cdb_none, map_data_SG and map_control_SG methods have no callers left and can be removed now. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 32 ++------------------------ include/target/target_core_transport.h | 12 ---------- 2 files changed, 2 insertions(+), 42 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index e5905269c850..ae1efc7c9ec6 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4023,7 +4023,7 @@ static int transport_allocate_data_tasks( struct se_task *task; struct se_device *dev = cmd->se_dev; unsigned long flags; - int task_count, i, ret; + int task_count, i; sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; struct scatterlist *sg; @@ -4101,20 +4101,6 @@ static int transport_allocate_data_tasks( list_add_tail(&task->t_list, &cmd->t_task_list); spin_unlock_irqrestore(&cmd->t_state_lock, flags); } - /* - * Now perform the memory map of task->task_sg[] into backend - * subsystem memory.. 
- */ - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_sent)) - continue; - if (!dev->transport->map_data_SG) - continue; - - ret = dev->transport->map_data_SG(task); - if (ret < 0) - return 0; - } return task_count; } @@ -4126,7 +4112,6 @@ transport_allocate_control_task(struct se_cmd *cmd) unsigned char *cdb; struct se_task *task; unsigned long flags; - int ret = 0; task = transport_generic_get_task(cmd, cmd->data_direction); if (!task) @@ -4153,21 +4138,8 @@ transport_allocate_control_task(struct se_cmd *cmd) list_add_tail(&task->t_list, &cmd->t_task_list); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { - if (dev->transport->map_control_SG) - ret = dev->transport->map_control_SG(task); - } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { - if (dev->transport->cdb_none) - ret = dev->transport->cdb_none(task); - } else { - pr_err("target: Unknown control cmd type!\n"); - BUG(); - } - /* Success! Return number of tasks allocated */ - if (ret == 0) - return 1; - return ret; + return 1; } static u32 transport_allocate_tasks( diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index e67feeb88b69..549b6b332b1b 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -233,18 +233,6 @@ struct se_subsystem_api { * Used for global se_subsystem_api list_head */ struct list_head sub_api_list; - /* - * For SCF_SCSI_NON_DATA_CDB - */ - int (*cdb_none)(struct se_task *); - /* - * For SCF_SCSI_DATA_SG_IO_CDB - */ - int (*map_data_SG)(struct se_task *); - /* - * For SCF_SCSI_CONTROL_SG_IO_CDB - */ - int (*map_control_SG)(struct se_task *); /* * attach_hba(): */ From f2b56afd406b455fba339a35f43bfc4ada198073 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 30 Sep 2011 14:39:54 +0300 Subject: [PATCH 21/62] iscsi-target: use native hex2bin for chap_string_to_hex This patch converts chap_string_to_hex() to use hex2bin() instead of the internal chap_asciihex_to_binaryhex(). (nab: Fix up minor compile breakage + typo) Signed-off-by: Andy Shevchenko Cc: "Nicholas A. Bellinger" Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_auth.c | 34 +++--------------------- 1 file changed, 3 insertions(+), 31 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 11fd74307811..beb39469e7f1 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -18,6 +18,7 @@ * GNU General Public License for more details. 
******************************************************************************/ +#include #include #include #include @@ -27,40 +28,11 @@ #include "iscsi_target_nego.h" #include "iscsi_target_auth.h" -static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2]) -{ - unsigned char result = 0; - /* - * MSB - */ - if ((val[0] >= 'a') && (val[0] <= 'f')) - result = ((val[0] - 'a' + 10) & 0xf) << 4; - else - if ((val[0] >= 'A') && (val[0] <= 'F')) - result = ((val[0] - 'A' + 10) & 0xf) << 4; - else /* digit */ - result = ((val[0] - '0') & 0xf) << 4; - /* - * LSB - */ - if ((val[1] >= 'a') && (val[1] <= 'f')) - result |= ((val[1] - 'a' + 10) & 0xf); - else - if ((val[1] >= 'A') && (val[1] <= 'F')) - result |= ((val[1] - 'A' + 10) & 0xf); - else /* digit */ - result |= ((val[1] - '0') & 0xf); - - return result; -} - static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) { - int i, j = 0; + int j = DIV_ROUND_UP(len, 2); - for (i = 0; i < len; i += 2) { - dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]); - } + hex2bin(dst, src, j); dst[j] = '\0'; return j; From 635a2b3f3e561278cb5b837ea305e50e3fa7f063 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 30 Sep 2011 14:45:40 +0300 Subject: [PATCH 22/62] tcm_fc: remove custom hex_to_bin in ft_parse_wwn This patch converts ft_parse_wwn() to use hex_to_bin() instead of custom conversion code. (Andy: Re-add missing strict && isupper(c) check) Signed-off-by: Andy Shevchenko Cc: "Nicholas A. Bellinger" Signed-off-by: Nicholas Bellinger --- drivers/target/tcm_fc/tfc_conf.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index b30eace70d35..5f770412ca40 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -70,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) { const char *cp; char c; - u32 nibble; u32 byte = 0; u32 pos = 0; u32 err; + int val; *wwn = 0; for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { @@ -94,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) return cp - name; } err = 3; - if (isdigit(c)) - nibble = c - '0'; - else if (isxdigit(c) && (islower(c) || !strict)) - nibble = tolower(c) - 'a' + 10; - else + val = hex_to_bin(c); + if (val < 0 || (strict && isupper(c))) goto fail; - *wwn = (*wwn << 4) | nibble; + *wwn = (*wwn << 4) | val; } err = 4; fail: From 942d82646e16725ac366d44087b8c992f2cb2190 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sun, 2 Oct 2011 01:59:13 +0300 Subject: [PATCH 23/62] target: Make pscsi_create_virtdevice use ERR_CAST This patch changes pscsi_create_virtdevice() to properly return ERR_CAST instead of a raw pointer upon scsi_host_lookup() failure. 
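
For reference, ERR_CAST() from include/linux/err.h simply re-types an ERR_PTR()-encoded errno so it can be returned through a differently typed pointer. A minimal sketch of the idiom follows; struct foo, struct bar, foo_lookup() and foo_to_bar() are made-up names used only for illustration, while IS_ERR()/ERR_CAST() are the real helpers:

	#include <linux/err.h>

	struct foo;
	struct bar;

	struct foo *foo_lookup(int id);		/* hypothetical; may return ERR_PTR(-ENODEV) */
	struct bar *foo_to_bar(struct foo *f);	/* hypothetical conversion helper */

	struct bar *bar_lookup(int id)
	{
		struct foo *f = foo_lookup(id);

		if (IS_ERR(f))
			return ERR_CAST(f);	/* error path: hand back the encoded errno */

		return foo_to_bar(f);		/* success path: real conversion */
	}

A raw (struct bar *) cast of the error value would compile just as well, but ERR_CAST() makes it explicit that an errno, not a valid object, is crossing the type boundary, which is the distinction the pscsi one-liner restores.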
Signed-off-by: Dan Carpenter Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pscsi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index b6d609362d62..3898fb7d317c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -566,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice( if (IS_ERR(sh)) { pr_err("pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); - return (struct se_device *) sh; + return ERR_CAST(sh); } } } else { From dd503a5fcc0dfb8b5fd887bd967b6f431176864b Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Thu, 6 Oct 2011 09:56:16 -0700 Subject: [PATCH 24/62] target: Have core_tmr_alloc_req() take an explicit GFP_xxx flag Testing in_interrupt() to know when sleeping is allowed is not really reliable (since eg it won't be true if the caller is holding a spinlock). Instead have the caller tell core_tmr_alloc_req() what GFP_xxx to use; every caller except tcm_qla2xxx can use GFP_KERNEL. Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_util.c | 3 ++- drivers/target/loopback/tcm_loop.c | 2 +- drivers/target/target_core_tmr.c | 6 +++--- drivers/target/tcm_fc/tfc_cmd.c | 2 +- include/target/target_core_tmr.h | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index f00137f377b2..c4be6223b9cc 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( } se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, - (void *)cmd->tmr_req, tcm_function); + (void *)cmd->tmr_req, tcm_function, + GFP_KERNEL); if (!se_cmd->se_tmr_req) goto out; diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index f0e701d27bd1..5b870c316b9c 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -366,7 +366,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) * Allocate the LUN_RESET TMR */ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, - TMR_LUN_RESET); + TMR_LUN_RESET, GFP_KERNEL); if (IS_ERR(se_cmd->se_tmr_req)) goto release; /* diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index d04cc1016ebf..efc5ec7da57c 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -43,12 +43,12 @@ struct se_tmr_req *core_tmr_alloc_req( struct se_cmd *se_cmd, void *fabric_tmr_ptr, - u8 function) + u8 function, + gfp_t gfp_flags) { struct se_tmr_req *tmr; - tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? 
- GFP_ATOMIC : GFP_KERNEL); + tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags); if (!tmr) { pr_err("Unable to allocate struct se_tmr_req\n"); return ERR_PTR(-ENOMEM); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 7f2ee5a0ed79..55a278ed1111 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -396,7 +396,7 @@ static void ft_send_tm(struct ft_cmd *cmd) } pr_debug("alloc tm cmd fn %d\n", tm_func); - tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); + tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL); if (!tmr) { pr_debug("alloc failed\n"); ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h index bd5596807478..d5876e17d3fb 100644 --- a/include/target/target_core_tmr.h +++ b/include/target/target_core_tmr.h @@ -27,7 +27,7 @@ enum tcm_tmrsp_table { extern struct kmem_cache *se_tmr_req_cache; -extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8); +extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); extern void core_tmr_release_req(struct se_tmr_req *); extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *, struct list_head *, struct se_cmd *); From d14921d6ad192868184686b3af5bb99cf3380510 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 9 Oct 2011 01:00:58 -0700 Subject: [PATCH 25/62] target: Convert ->transport_wait_for_tasks usage to transport_generic_free_cmd This patch converts se_cmd->transport_wait_for_tasks(se_cmd, 1) usage to use transport_generic_free_cmd() directly in target-core and iscsi-target fabric usage. The includes: *) Removal of the optional transport_generic_free_cmd() call from within transport_generic_wait_for_tasks() *) Usage of existing SCF_SUPPORTED_SAM_OPCODE to determine when transport_generic_wait_for_tasks() processing may occur instead of checking se_cmd->transport_wait_for_tasks() *) Move transport_generic_wait_for_tasks() call ahead of core_dec_lacl_count() and transport_lun_remove_cmd() in transport_generic_free_cmd() to follow existing logic for iscsi-target w/ se_cmd->transport_wait_for_tasks(se_cmd, 1) *) Removal of se_cmd->transport_wait_for_tasks() function pointer *) Rename transport_generic_wait_for_tasks() -> transport_wait_for_tasks(), and add docbook comment. *) Add EXPORT_SYMBOL for transport_wait_for_tasks() For the case in iscsi_target_erl2.c:iscsit_prepare_cmds_for_realligance() where se_cmd->transport_wait_for_tasks(se_cmd, 0) is called, this patch adds a direct call to transport_wait_for_tasks(). 
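
Condensed to its essence, the call-site conversion looks like this (a simplified before/after distilled from the hunks that follow, not a verbatim quote):

	/* before: each fabric caller chased the per-command method pointer */
	if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) ||
	    !cmd->se_cmd.transport_wait_for_tasks)
		iscsit_release_cmd(cmd);
	else
		cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, 1);

	/* after: quiesce-and-release is a single core entry point */
	if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD))
		iscsit_release_cmd(cmd);
	else
		transport_generic_free_cmd(&cmd->se_cmd, 1);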
(hch: Fix transport_generic_free_cmd() usage in iscsit_release_commands_from_conn) (nab: Add patch: Ensure that TMRs hit wait_for_tasks logic during release) Reported-by: Christoph Hellwig Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 12 +--- drivers/target/iscsi/iscsi_target_erl2.c | 41 ++++-------- drivers/target/target_core_transport.c | 82 ++++++++++-------------- include/target/target_core_base.h | 1 - include/target/target_core_transport.h | 1 + 5 files changed, 51 insertions(+), 86 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 354a8339a3f9..e4b9ba296dcf 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3940,7 +3940,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) { struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; struct iscsi_session *sess = conn->sess; - struct se_cmd *se_cmd; /* * We expect this function to only ever be called from either RX or TX * thread context via iscsit_close_connection() once the other context @@ -3953,16 +3952,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); - se_cmd = &cmd->se_cmd; /* * Special cases for active iSCSI TMR, and * transport_lookup_cmd_lun() failing from * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd(). */ - if (cmd->tmr_req && se_cmd->transport_wait_for_tasks) - se_cmd->transport_wait_for_tasks(se_cmd, 1); - else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) - transport_release_cmd(se_cmd); + if (cmd->tmr_req) + transport_generic_free_cmd(&cmd->se_cmd, 1); else iscsit_release_cmd(cmd); @@ -3973,10 +3969,8 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); - se_cmd = &cmd->se_cmd; - if (se_cmd->transport_wait_for_tasks) - se_cmd->transport_wait_for_tasks(se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 000356baf0b5..c3803b2fdd65 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,12 +143,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_list); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) + if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) iscsit_release_cmd(cmd); else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -170,12 +168,10 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_list); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) + if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) iscsit_release_cmd(cmd); else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -260,12 +256,10 @@ void iscsit_discard_cr_cmds_by_expstatsn( 
iscsit_remove_cmd_from_connection_recovery(cmd, sess); spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) + if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) iscsit_release_cmd(cmd); else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -319,12 +313,10 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) + if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) iscsit_release_cmd(cmd); else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -378,12 +370,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) + if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) iscsit_release_cmd(cmd); else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); continue; } @@ -404,12 +394,10 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || - !(cmd->se_cmd.transport_wait_for_tasks)) + if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) iscsit_release_cmd(cmd); else - cmd->se_cmd.transport_wait_for_tasks( - &cmd->se_cmd, 1); + transport_generic_free_cmd(&cmd->se_cmd, 1); spin_lock_bh(&conn->cmd_lock); continue; } @@ -434,9 +422,8 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsit_free_all_datain_reqs(cmd); - if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && - cmd->se_cmd.transport_wait_for_tasks) - cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, 0); + if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) + transport_wait_for_tasks(&cmd->se_cmd); /* * Add the struct iscsi_cmd to the connection recovery cmd list */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ae1efc7c9ec6..f8de042e532e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1638,8 +1638,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; } -static void transport_generic_wait_for_tasks(struct se_cmd *, int); - /* transport_generic_allocate_tasks(): * * Called from fabric RX Thread. @@ -1651,12 +1649,6 @@ int transport_generic_allocate_tasks( int ret; transport_generic_prepare_cdb(cdb); - - /* - * This is needed for early exceptions. 
- */ - cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD @@ -1739,7 +1731,7 @@ int transport_handle_cdb_direct( * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() * in existing usage to ensure that outstanding descriptors are handled - * correctly during shutdown via transport_generic_wait_for_tasks() + * correctly during shutdown via transport_wait_for_tasks() * * Also, we don't take cmd->t_state_lock here as we only expect * this to be called for initial descriptor submission. @@ -1819,11 +1811,6 @@ EXPORT_SYMBOL(transport_generic_handle_data); int transport_generic_handle_tmr( struct se_cmd *cmd) { - /* - * This is needed for early exceptions. - */ - cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); return 0; } @@ -2504,8 +2491,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } -static void transport_nop_wait_for_tasks(struct se_cmd *, int); - static inline u32 transport_get_sectors_6( unsigned char *cdb, struct se_cmd *cmd, @@ -2780,7 +2765,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) static int transport_handle_reservation_conflict(struct se_cmd *cmd) { - cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT; cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; @@ -2881,8 +2865,6 @@ static int transport_generic_cmd_sequencer( * Check for an existing UNIT ATTENTION condition */ if (core_scsi3_ua_check(cmd, cdb) < 0) { - cmd->transport_wait_for_tasks = - &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; return -EINVAL; @@ -2892,7 +2874,6 @@ static int transport_generic_cmd_sequencer( */ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq); if (ret != 0) { - cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; /* * Set SCSI additional sense code (ASC) to 'LUN Not Accessible'; * The ALUA additional sense code qualifier (ASCQ) is determined @@ -3396,7 +3377,6 @@ static int transport_generic_cmd_sequencer( pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", cmd->se_tfo->get_fabric_name(), cdb[0]); - cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; goto out_unsupported_cdb; } @@ -4304,17 +4284,20 @@ EXPORT_SYMBOL(transport_release_cmd); void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { + if (wait_for_tasks && cmd->se_tmr_req) + transport_wait_for_tasks(cmd); + transport_release_cmd(cmd); - else { + } else { + if (wait_for_tasks) + transport_wait_for_tasks(cmd); + core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); if (cmd->se_lun) transport_lun_remove_cmd(cmd); - if (wait_for_tasks && cmd->transport_wait_for_tasks) - cmd->transport_wait_for_tasks(cmd, 0); - transport_free_dev_tasks(cmd); transport_put_cmd(cmd); @@ -4322,13 +4305,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) } EXPORT_SYMBOL(transport_generic_free_cmd); -static void transport_nop_wait_for_tasks( - struct se_cmd *cmd, - int remove_cmd) -{ - return; -} - /* 
transport_lun_wait_for_tasks(): * * Called from ConfigFS context to stop the passed struct se_cmd to allow @@ -4498,21 +4474,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) return 0; } -/* transport_generic_wait_for_tasks(): +/** + * transport_wait_for_tasks - wait for completion to occur + * @cmd: command to wait * - * Called from frontend or passthrough context to wait for storage engine - * to pause and/or release frontend generated struct se_cmd. + * Called from frontend fabric context to wait for storage engine + * to pause and/or release frontend generated struct se_cmd. */ -static void transport_generic_wait_for_tasks( - struct se_cmd *cmd, - int remove_cmd) +void transport_wait_for_tasks(struct se_cmd *cmd) { unsigned long flags; - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) - return; - spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + /* + * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE + * has been set in transport_set_supported_SAM_opcode(). + */ + if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } /* * If we are already stopped due to an external event (ie: LUN shutdown) * sleep until the connection can have the passed struct se_cmd back. @@ -4552,8 +4537,10 @@ static void transport_generic_wait_for_tasks( atomic_set(&cmd->transport_lun_stop, 0); } if (!atomic_read(&cmd->t_transport_active) || - atomic_read(&cmd->t_transport_aborted)) - goto remove; + atomic_read(&cmd->t_transport_aborted)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } atomic_set(&cmd->t_transport_stop, 1); @@ -4576,13 +4563,10 @@ static void transport_generic_wait_for_tasks( pr_debug("wait_for_tasks: Stopped wait_for_compltion(" "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); -remove: - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (!remove_cmd) - return; - transport_generic_free_cmd(cmd, 0); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } +EXPORT_SYMBOL(transport_wait_for_tasks); static int transport_get_sense_codes( struct se_cmd *cmd, diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index c10e351bc1f5..872cafcb2681 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -480,7 +480,6 @@ struct se_cmd { struct target_core_fabric_ops *se_tfo; int (*transport_emulate_cdb)(struct se_cmd *); void (*transport_split_cdb)(unsigned long long, u32, unsigned char *); - void (*transport_wait_for_tasks)(struct se_cmd *, int); void (*transport_complete_callback)(struct se_cmd *); int (*transport_qf_callback)(struct se_cmd *); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 549b6b332b1b..371c24aff23e 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -180,6 +180,7 @@ extern void __transport_stop_task_timer(struct se_task *, unsigned long *); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); extern int transport_clear_lun_from_sessions(struct se_lun *); +extern void transport_wait_for_tasks(struct se_cmd *); extern int transport_check_aborted_status(struct se_cmd *, int); extern int transport_send_check_condition_and_sense(struct se_cmd *, 
u8, int); extern void transport_send_task_abort(struct se_cmd *); From d270190a75e5b859c6d8eed8d4095fbb45f78264 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 9 Oct 2011 01:48:14 -0700 Subject: [PATCH 26/62] iscsi-target: Remove SCF_SE_LUN_CMD flag abuses This patch removes a number of SCF_SE_LUN_CMD flag abuses within iscsi-target code to determine when iscsit_release_cmd() or transport_generic_free_cmd() should be called while releasing an individual iscsi_cmd descriptor. In the place of SCF_SE_LUN_CMD checks, this patch converts existing code to use a new iscsit_free_cmd() that inspects iscsi_cmd->iscsi_opcode types to determine which of the above functions should be invoked. It also removes the now unnecessary special case checking in iscsit_release_commands_from_conn(). (hch: Use iscsit_free_cmd instead of open-coded alternative) Reported-by: Christoph Hellwig Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 31 ++------------------- drivers/target/iscsi/iscsi_target_erl2.c | 35 +++++------------------- drivers/target/iscsi/iscsi_target_util.c | 17 ++++++++++++ drivers/target/iscsi/iscsi_target_util.h | 1 + 4 files changed, 28 insertions(+), 56 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index e4b9ba296dcf..d86e18dc163a 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3538,16 +3538,8 @@ get_immediate: spin_lock_bh(&conn->cmd_lock); list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - /* - * Determine if a struct se_cmd is assoicated with - * this struct iscsi_cmd. - */ - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && - !(cmd->tmr_req)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, - 1); + + iscsit_free_cmd(cmd); goto get_immediate; case ISTATE_SEND_NOPIN_WANT_RESPONSE: spin_unlock_bh(&cmd->istate_lock); @@ -3947,30 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) */ spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) { - list_del(&cmd->i_list); - spin_unlock_bh(&conn->cmd_lock); - iscsit_increment_maxcmdsn(cmd, sess); - /* - * Special cases for active iSCSI TMR, and - * transport_lookup_cmd_lun() failing from - * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd(). 
- */ - if (cmd->tmr_req) - transport_generic_free_cmd(&cmd->se_cmd, 1); - else - iscsit_release_cmd(cmd); - - spin_lock_bh(&conn->cmd_lock); - continue; - } list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); } diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index c3803b2fdd65..0b8404c30125 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,10 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_list); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -168,10 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_del(&cmd->i_list); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -256,10 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( iscsit_remove_cmd_from_connection_recovery(cmd, sess); spin_unlock(&cr->conn_recovery_cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock(&cr->conn_recovery_cmd_lock); } spin_unlock(&cr->conn_recovery_cmd_lock); @@ -313,10 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); } spin_unlock_bh(&conn->cmd_lock); @@ -369,11 +357,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); continue; } @@ -393,11 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) { list_del(&cmd->i_list); spin_unlock_bh(&conn->cmd_lock); - - if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) - iscsit_release_cmd(cmd); - else - transport_generic_free_cmd(&cmd->se_cmd, 1); + iscsit_free_cmd(cmd); spin_lock_bh(&conn->cmd_lock); continue; } @@ -422,8 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) iscsit_free_all_datain_reqs(cmd); - if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) - transport_wait_for_tasks(&cmd->se_cmd); + transport_wait_for_tasks(&cmd->se_cmd); /* * Add the struct iscsi_cmd to the connection recovery cmd list */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index c4be6223b9cc..02348f727bd4 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -840,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd 
*cmd) kmem_cache_free(lio_cmd_cache, cmd); } +void iscsit_free_cmd(struct iscsi_cmd *cmd) +{ + /* + * Determine if a struct se_cmd is assoicated with + * this struct iscsi_cmd. + */ + switch (cmd->iscsi_opcode) { + case ISCSI_OP_SCSI_CMD: + case ISCSI_OP_SCSI_TMFUNC: + transport_generic_free_cmd(&cmd->se_cmd, 1); + break; + default: + iscsit_release_cmd(cmd); + break; + } +} + int iscsit_check_session_usage_count(struct iscsi_session *sess) { spin_lock_bh(&sess->session_usage_lock); diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 2cd49d607bda..835bf7de0281 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void iscsit_free_cmd(struct iscsi_cmd *); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); extern void iscsit_inc_session_usage_count(struct iscsi_session *); From 8dc52b54207f361f7abf6cbe26f5199ae8b7cf23 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 9 Oct 2011 02:02:51 -0700 Subject: [PATCH 27/62] target: Merge transport_cmd_finish_abort_tmr into transport_cmd_finish_abort This patch merges transport_cmd_finish_abort_tmr() logic into a single transport_cmd_finish_abort() function by adding a cmd->se_tmr_req check around transport_lun_remove_cmd(), and updates the single caller within core_tmr_drain_tmr_list(). Reported-by: Christoph Hellwig Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 2 +- drivers/target/target_core_transport.c | 13 ++----------- include/target/target_core_transport.h | 1 - 3 files changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index efc5ec7da57c..b8dc10fd4ef7 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -161,7 +161,7 @@ static void core_tmr_drain_tmr_list( (preempt_and_abort_list) ? 
"Preempt" : "", tmr, tmr->function, tmr->response, cmd->t_state); - transport_cmd_finish_abort_tmr(cmd); + transport_cmd_finish_abort(cmd, 1); } } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f8de042e532e..36fb39c2ce03 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -591,7 +591,8 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_lun_remove_cmd(cmd); + if (!cmd->se_tmr_req) + transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) return; @@ -601,16 +602,6 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) } } -void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) -{ - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); - - if (transport_cmd_check_stop_to_fabric(cmd)) - return; - - transport_put_cmd(cmd); -} - static void transport_add_cmd_to_queue( struct se_cmd *cmd, int t_state) diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 371c24aff23e..c91516910e3e 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -134,7 +134,6 @@ extern void transport_free_session(struct se_session *); extern void transport_deregister_session_configfs(struct se_session *); extern void transport_deregister_session(struct se_session *); extern void transport_cmd_finish_abort(struct se_cmd *, int); -extern void transport_cmd_finish_abort_tmr(struct se_cmd *); extern void transport_complete_sync_cache(struct se_cmd *, int); extern void transport_complete_task(struct se_task *, int); extern void transport_add_task_to_execute_queue(struct se_task *, From ce8762f6cd1e0e6d87a6d0b536635993aef0a697 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 9 Oct 2011 02:19:01 -0700 Subject: [PATCH 28/62] target: Remove legacy + unused device active I/O shutdown code This patch removes the legacy device active I/O shutdown code that was originally called from transport_processing_thread() context during shutdown including transport_processing_shutdown() and transport_release_all_cmds(). This is due to the fact that in modern configfs control plane code by the time shutdown of an se_device instance in transport_processing_thread() is allowed to occur via: rmdir /sys/kernel/config/target/core/$HBA/$DEV all active I/O will already have been ceased while removing active configfs fabric Port/LUN symlinks. Eg: the removal of an active se_device is protected by inter-module VFS references from active Port/LUN symlinks. Two WARN_ON() checks have been added in their place before exiting transport_processing_thread() to watch out for any leaked descriptors. 
Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 220 +------------------------ 1 file changed, 2 insertions(+), 218 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 36fb39c2ce03..ee73f1095a12 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1040,41 +1040,6 @@ void transport_dump_dev_state( *bl += sprintf(b + *bl, " "); } -/* transport_release_all_cmds(): - * - * - */ -static void transport_release_all_cmds(struct se_device *dev) -{ - struct se_cmd *cmd, *tcmd; - int bug_out = 0, t_state; - unsigned long flags; - - spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); - list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, - se_queue_node) { - t_state = cmd->t_state; - list_del_init(&cmd->se_queue_node); - spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, - flags); - - pr_err("Releasing ITT: 0x%08x, i_state: %u," - " t_state: %u directly\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), t_state); - - transport_put_cmd(cmd); - bug_out = 1; - - spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); - } - spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); -#if 0 - if (bug_out) - BUG(); -#endif -} - void transport_dump_vpd_proto_id( struct t10_vpd *vpd, unsigned char *p_buf, @@ -4859,180 +4824,6 @@ int transport_generic_do_tmr(struct se_cmd *cmd) return 0; } -/* - * Called with spin_lock_irq(&dev->execute_task_lock); held - * - */ -static struct se_task * -transport_get_task_from_state_list(struct se_device *dev) -{ - struct se_task *task; - - if (list_empty(&dev->state_task_list)) - return NULL; - - list_for_each_entry(task, &dev->state_task_list, t_state_list) - break; - - list_del(&task->t_state_list); - atomic_set(&task->task_state_active, 0); - - return task; -} - -static void transport_processing_shutdown(struct se_device *dev) -{ - struct se_cmd *cmd; - struct se_task *task; - unsigned long flags; - /* - * Empty the struct se_device's struct se_task state list. 
- */ - spin_lock_irqsave(&dev->execute_task_lock, flags); - while ((task = transport_get_task_from_state_list(dev))) { - if (!task->task_se_cmd) { - pr_err("task->task_se_cmd is NULL!\n"); - continue; - } - cmd = task->task_se_cmd; - - spin_unlock_irqrestore(&dev->execute_task_lock, flags); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - - pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," - " i_state: %d, t_state/def_t_state:" - " %d/%d cdb: 0x%02x\n", cmd, task, - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, cmd->deferred_t_state, - cmd->t_task_cdb[0]); - pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" - " %d t_task_cdbs_sent: %d -- t_transport_active: %d" - " t_transport_stop: %d t_transport_sent: %d\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->t_task_list_num, - atomic_read(&cmd->t_task_cdbs_left), - atomic_read(&cmd->t_task_cdbs_sent), - atomic_read(&cmd->t_transport_active), - atomic_read(&cmd->t_transport_stop), - atomic_read(&cmd->t_transport_sent)); - - if (atomic_read(&task->task_active)) { - atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - - pr_debug("Waiting for task: %p to shutdown for dev:" - " %p\n", task, dev); - wait_for_completion(&task->task_stop_comp); - pr_debug("Completed task: %p shutdown for dev: %p\n", - task, dev); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_dec(&cmd->t_task_cdbs_left); - - atomic_set(&task->task_active, 0); - atomic_set(&task->task_stop, 0); - } else { - if (atomic_read(&task->task_execute_queue) != 0) - transport_remove_task_from_execute_queue(task, dev); - } - __transport_stop_task_timer(task, &flags); - - if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - - pr_debug("Skipping task: %p, dev: %p for" - " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task_cdbs_ex_left)); - - spin_lock_irqsave(&dev->execute_task_lock, flags); - continue; - } - - if (atomic_read(&cmd->t_transport_active)) { - pr_debug("got t_transport_active = 1 for task: %p, dev:" - " %p\n", task, dev); - - if (atomic_read(&cmd->t_fe_count)) { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - transport_send_check_condition_and_sense( - cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, - 0); - transport_remove_cmd_from_queue(cmd, - &cmd->se_dev->dev_queue_obj); - - transport_lun_remove_cmd(cmd); - transport_cmd_check_stop(cmd, 1, 0); - } else { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - - transport_remove_cmd_from_queue(cmd, - &cmd->se_dev->dev_queue_obj); - - transport_lun_remove_cmd(cmd); - - if (transport_cmd_check_stop(cmd, 1, 0)) - transport_put_cmd(cmd); - } - - spin_lock_irqsave(&dev->execute_task_lock, flags); - continue; - } - pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", - task, dev); - - if (atomic_read(&cmd->t_fe_count)) { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - transport_send_check_condition_and_sense(cmd, - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - transport_remove_cmd_from_queue(cmd, - &cmd->se_dev->dev_queue_obj); - - transport_lun_remove_cmd(cmd); - transport_cmd_check_stop(cmd, 1, 0); - } else { - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - - transport_remove_cmd_from_queue(cmd, - &cmd->se_dev->dev_queue_obj); - transport_lun_remove_cmd(cmd); - - if (transport_cmd_check_stop(cmd, 1, 0)) - transport_put_cmd(cmd); - } - - spin_lock_irqsave(&dev->execute_task_lock, flags); - } - 
spin_unlock_irqrestore(&dev->execute_task_lock, flags); - /* - * Empty the struct se_device's struct se_cmd list. - */ - while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { - - pr_debug("From Device Queue: cmd: %p t_state: %d\n", - cmd, cmd->t_state); - - if (atomic_read(&cmd->t_fe_count)) { - transport_send_check_condition_and_sense(cmd, - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - - transport_lun_remove_cmd(cmd); - transport_cmd_check_stop(cmd, 1, 0); - } else { - transport_lun_remove_cmd(cmd); - if (transport_cmd_check_stop(cmd, 1, 0)) - transport_put_cmd(cmd); - } - } -} - /* transport_processing_thread(): * * @@ -5052,14 +4843,6 @@ static int transport_processing_thread(void *param) if (ret < 0) goto out; - spin_lock_irq(&dev->dev_status_lock); - if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { - spin_unlock_irq(&dev->dev_status_lock); - transport_processing_shutdown(dev); - continue; - } - spin_unlock_irq(&dev->dev_status_lock); - get_cmd: __transport_execute_tasks(dev); @@ -5135,7 +4918,8 @@ get_cmd: } out: - transport_release_all_cmds(dev); + WARN_ON(!list_empty(&dev->state_task_list)); + WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list)); dev->process_thread = NULL; return 0; } From 0a020436d8baf412bcf5997aee7796276ea773ae Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 10 Oct 2011 19:44:05 -0700 Subject: [PATCH 29/62] loopback: Prevent uninitialized use of tl_tpg in tcm_loop_queuecommand This patch fixes a bug with tcm_loop where performing a scsi_host rescan was causing an oops due to a received scsi_cmnd->device->id value not matching a previously configured tcm_loop_tpg entry in tcm_loop_hba->tl_hba_tpgs[] obtained from within tcm_loop_queuecommand() code. This fix adds an explicit check for tcm_loop_tpg->tl_hba in order to ensure tcm_loop_make_naa_tpg() has already been invoked to initialize a given tcm_loop_tpg entry, and also adds an explicit clear of tcm_loop_tpg->tl_hba from within the tcm_loop_drop_naa_tpg() release path.
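In essence the fix is a single early-exit guard at the top of tcm_loop_queuecommand(); a minimal sketch, condensed from the diff further below, looks like this:

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	/* TPG was never set up by tcm_loop_make_naa_tpg(), so fail the
	 * command instead of dereferencing an uninitialized entry. */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		sc->scsi_done(sc);
		return 0;
	}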
This bug was manifesting itself with the following OOPs: [176289.430909] BUG: unable to handle kernel NULL pointer dereference at 0000000000000090 [176289.431337] IP: [] transport_processing_thread+0x1e3/0x794 [target_core_mod] [176289.431399] PGD 22e9b067 PUD 23375067 PMD 0 [176289.431399] Oops: 0000 [#1] SMP [176289.431815] CPU 1 [176289.431815] Modules linked in: tcm_loop target_core_stgt target_core_pscsi target_core_file target_core_iblock target_core_mod crc32c ib_cm ib_sa ib_mad ib_core qla2xxx scsi_tgt configfs fcoe libfcoe libfc scsi_transport_fc ipv6 iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi sr_mod cdrom sd_mod ata_piix libata e1000 mptspi mptscsih mptbase [last unloaded: target_core_mod] [176289.431815] [176289.431815] Pid: 12339, comm: LIO_iblock Tainted: G W 3.1.0-rc8+ [176289.431815] RIP: 0010:[] [] transport_processing_thread+0x1e3/0x794 [target_core_mod] [176289.431815] RSP: 0018:ffff880023bfbe10 EFLAGS: 00010283 [176289.431815] RAX: 0000000000000000 RBX: ffff88002d600040 RCX: ffff88002d600108 [176289.431815] RDX: ffff88000c9e50bc RSI: 0000000000000246 RDI: 0000000000000246 [176289.431815] RBP: ffff880023bfbee0 R08: ffff88002d600108 R09: 0000000000000000 [176289.431815] R10: ffff88002fc8cc80 R11: ffffffff81671b60 R12: ffff88002d600108 [176289.431815] R13: ffff88000c9e4f38 R14: ffff88000c9e50b8 R15: 0000000000000000 [176289.431815] FS: 0000000000000000(0000) GS:ffff88002fc80000(0000) knlGS:0000000000000000 [176289.431815] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [176289.431815] CR2: 0000000000000090 CR3: 000000002a33f000 CR4: 00000000000006e0 [176289.431815] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [176289.431815] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [176289.431815] Process LIO_iblock (pid: 12339, threadinfo ffff880023bfa000, task ffff88002a2e0000) [176289.431815] Stack: [176289.431815] 0000000000011280 0000000000000246 ffff88002a2e0000 ffff880023a58900 [176289.431815] ffff880023bfbed0 ffff880023bfa000 ffff880023bfa000 ffff88000c9e50d0 [176289.431815] ffff88000c9e50c0 ffff88000c9e50bc ffff880023bfa000 ffff880023bfbfd8 [176289.431815] Call Trace: [176289.431815] [] ? wake_up_bit+0x25/0x25 [176289.431815] [] ? transport_handle_cdb_direct+0x92/0x92 [target_core_mod] [176289.431815] [] kthread+0x7d/0x85 [176289.431815] [] kernel_thread_helper+0x4/0x10 [176289.431815] [] ? kthread_worker_fn+0x16d/0x16d [176289.431815] [] ? gs_change+0x13/0x13 [176289.431815] Code: 67 05 00 00 41 8b 84 24 4c ff ff ff ff c8 83 f8 11 0f 87 f0 04 00 00 89 c0 ff 24 c5 b0 c6 39 a0 0f 0b eb fe 48 8b 83 d8 00 00 00 [176289.431815] RIP [] transport_processing_thread+0x1e3/0x794 [target_core_mod] [176289.431815] RSP [176289.431815] CR2: 0000000000000090 [176295.041004] ---[ end trace 85dc6865b23b8f3e ]--- Reported-by: Paolo Bonzini Cc: Paolo Bonzini Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/tcm_loop.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 5b870c316b9c..b15d8cbf630b 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -290,6 +290,15 @@ static int tcm_loop_queuecommand( */ tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host); tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; + /* + * Ensure that this tl_tpg reference from the incoming sc->device->id + * has already been configured via tcm_loop_make_naa_tpg(). 
+ */ + if (!tl_tpg->tl_hba) { + set_host_byte(sc, DID_NO_CONNECT); + sc->scsi_done(sc); + return 0; + } se_tpg = &tl_tpg->tl_se_tpg; /* * Determine the SAM Task Attribute and allocate tl_cmd and @@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg( */ core_tpg_deregister(se_tpg); + tl_tpg->tl_hba = NULL; + tl_tpg->tl_tpgt = 0; + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); From c0427f155614908ca1147cd5b6a0d5cdcaef8327 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:06:56 -0400 Subject: [PATCH 30/62] target: Cleanup unused target_core_base.h bits This is a squashed version of the following target_core_base.h cleanup patches: target: remove the unused SHUTDOWN_SIGS defintion target: remove unused se_mem leftovers target: remove the unused map_func_t typedef target: move TRANSPORT_IOV_DATA_BUFFER to the iscsi-specific code Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target.c | 2 +- drivers/target/iscsi/iscsi_target_core.h | 3 +++ drivers/target/target_core_transport.c | 3 --- include/target/target_core_base.h | 3 --- include/target/target_core_transport.h | 7 ------- 5 files changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index d86e18dc163a..4d01768fcd90 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd) u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 : cmd->se_cmd.t_data_nents; - iov_count += TRANSPORT_IOV_DATA_BUFFER; + iov_count += ISCSI_IOV_DATA_BUFFER; cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL); if (!cmd->iov_data) { diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 470ed551eeb5..84818d7a7057 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -57,6 +57,9 @@ #define TA_PROD_MODE_WRITE_PROTECT 0 #define TA_CACHE_CORE_NPS 0 + +#define ISCSI_IOV_DATA_BUFFER 5 + enum tpg_np_network_transport_table { ISCSI_TCP = 0, ISCSI_SCTP_TCP = 1, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ee73f1095a12..89b85dd36cce 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -69,9 +69,6 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -/* Used for transport_dev_get_map_*() */ -typedef int (*map_func_t)(struct se_task *, u32); - static int transport_generic_write_pending(struct se_cmd *); static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 872cafcb2681..c00224e8d6c4 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -10,10 +10,7 @@ #include #define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" -#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) -/* Used by transport_generic_allocate_iovecs() */ -#define TRANSPORT_IOV_DATA_BUFFER 5 /* Maximum Number of LUNs per Target Portal Group */ /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ #define TRANSPORT_MAX_LUNS_PER_TPG 256 diff --git 
a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c91516910e3e..ccd85b380d6b 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -110,11 +110,8 @@ #define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs)) -struct se_mem; struct se_subsystem_api; -extern struct kmem_cache *se_mem_cache; - extern int init_se_kmem_caches(void); extern void release_se_kmem_caches(void); extern u32 scsi_get_new_index(scsi_index_t); @@ -186,10 +183,6 @@ extern void transport_send_task_abort(struct se_cmd *); extern void transport_release_cmd(struct se_cmd *); extern void transport_generic_free_cmd(struct se_cmd *, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); -extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); -extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, - struct scatterlist *, struct se_mem *, - struct se_mem **, u32 *, u32 *); extern void transport_do_task_sg_chain(struct se_cmd *); extern void transport_generic_process_write(struct se_cmd *); extern int transport_generic_new_cmd(struct se_cmd *); From 42bf829eee0e36371a3df43978b14572c716cbe7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:07:00 -0400 Subject: [PATCH 31/62] target: Cleanup unused se_task bits This is a squashed version of the following se_task cleanup patches: target: remove the unused task_state_flags field in se_task target: remove the unused se_obj_ptr field in se_task target: remove the se_dev field in se_task Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_file.c | 10 ++++--- drivers/target/target_core_iblock.c | 4 +-- drivers/target/target_core_pscsi.c | 6 ++--- drivers/target/target_core_rd.c | 6 ++--- drivers/target/target_core_transport.c | 36 ++++++++++---------------- include/target/target_core_base.h | 3 --- include/target/target_core_transport.h | 3 --- 7 files changed, 27 insertions(+), 41 deletions(-) diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 99c9db003394..524f41afea48 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -272,13 +272,14 @@ fd_alloc_task(unsigned char *cdb) static int fd_do_readv(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; loff_t pos = (task->task_lba * - task->se_dev->se_sub_dev->se_dev_attrib.block_size); + se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); @@ -324,13 +325,14 @@ static int fd_do_readv(struct se_task *task) static int fd_do_writev(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; + struct fd_dev *dev = se_dev->dev_ptr; struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; loff_t pos = (task->task_lba * - task->se_dev->se_sub_dev->se_dev_attrib.block_size); + se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); diff --git 
a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index dcf93f85977a..21a3677984fb 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -499,7 +499,7 @@ static ssize_t iblock_show_configfs_dev_params( static void iblock_bio_destructor(struct bio *bio) { struct se_task *task = bio->bi_private; - struct iblock_dev *ib_dev = task->se_dev->dev_ptr; + struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; bio_free(bio, ib_dev->ibd_bio_set); } @@ -507,7 +507,7 @@ static void iblock_bio_destructor(struct bio *bio) static struct bio * iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num) { - struct iblock_dev *ib_dev = task->se_dev->dev_ptr; + struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio; diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 3898fb7d317c..b347862bf801 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -676,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task) */ static int pscsi_transport_complete(struct se_task *task) { - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; struct scsi_device *sd = pdv->pdv_sd; int result; struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -962,7 +962,7 @@ static inline struct bio *pscsi_get_bio(int sg_num) static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, struct bio **hbio) { - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; u32 task_sg_num = task->task_sg_nents; struct bio *bio = NULL, *tbio = NULL; struct page *page; @@ -1062,7 +1062,7 @@ fail: static int pscsi_do_task(struct se_task *task) { - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; + struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; struct pscsi_plugin_task *pt = PSCSI_TASK(task); struct request *req; struct bio *hbio; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 1ab69f32e664..daf95dbe12ec 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -350,7 +350,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) static int rd_MEMCPY_read(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; + struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -466,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req) static int rd_MEMCPY_write(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; + struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -581,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req) */ static int rd_MEMCPY_do_task(struct se_task *task) { - struct se_device *dev = task->se_dev; + struct se_device *dev = task->task_se_cmd->se_dev; struct rd_request *req = RD_REQ(task); unsigned long long lba; int ret; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 89b85dd36cce..c935c72da7be 100644 --- a/drivers/target/target_core_transport.c +++ 
b/drivers/target/target_core_transport.c @@ -432,15 +432,14 @@ EXPORT_SYMBOL(transport_deregister_session); */ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { - struct se_device *dev; + struct se_device *dev = cmd->se_dev; struct se_task *task; unsigned long flags; - list_for_each_entry(task, &cmd->t_task_list, t_list) { - dev = task->se_dev; - if (!dev) - continue; + if (!dev) + return; + list_for_each_entry(task, &cmd->t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -708,7 +707,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache); void transport_complete_task(struct se_task *task, int success) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = task->se_dev; + struct se_device *dev = cmd->se_dev; int t_state; unsigned long flags; #if 0 @@ -886,14 +885,12 @@ static void __transport_add_task_to_execute_queue( static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) { - struct se_device *dev; + struct se_device *dev = cmd->se_dev; struct se_task *task; unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry(task, &cmd->t_task_list, t_list) { - dev = task->se_dev; - if (atomic_read(&task->task_state_active)) continue; @@ -1522,7 +1519,6 @@ transport_generic_get_task(struct se_cmd *cmd, INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); task->task_se_cmd = cmd; - task->se_dev = dev; task->task_data_direction = data_direction; return task; @@ -1802,7 +1798,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_remove_task_from_execute_queue(task, - task->se_dev); + cmd->se_dev); pr_debug("task_no[%d] - Removed from execute queue\n", task->task_no); @@ -2130,7 +2126,7 @@ static void transport_task_timeout_handler(unsigned long data) */ static void transport_start_task_timer(struct se_task *task) { - struct se_device *dev = task->se_dev; + struct se_device *dev = task->task_se_cmd->se_dev; int timeout; if (task->task_flags & TF_RUNNING) @@ -2656,13 +2652,16 @@ out: static int transport_get_sense_data(struct se_cmd *cmd) { unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; - struct se_device *dev; + struct se_device *dev = cmd->se_dev; struct se_task *task = NULL, *task_tmp; unsigned long flags; u32 offset = 0; WARN_ON(!cmd->se_lun); + if (!dev) + return 0; + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2671,14 +2670,9 @@ static int transport_get_sense_data(struct se_cmd *cmd) list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - if (!task->task_sense) continue; - dev = task->se_dev; - if (!dev) - continue; - if (!dev->transport->get_sense_buffer) { pr_err("dev->transport->get_sense_buffer" " is NULL\n"); @@ -3628,11 +3622,7 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (task->se_dev) - task->se_dev->transport->free_task(task); - else - pr_err("task[%u] - task->se_dev is NULL\n", - task->task_no); + cmd->se_dev->transport->free_task(task); spin_lock_irqsave(&cmd->t_state_lock, flags); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index c00224e8d6c4..6c49db403205 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -405,7 +405,6 @@ struct se_task { 
u8 task_scsi_status; u8 task_flags; int task_error_status; - int task_state_flags; bool task_padded_sg; unsigned long long task_lba; u32 task_no; @@ -413,7 +412,6 @@ struct se_task { u32 task_size; enum dma_data_direction task_data_direction; struct se_cmd *task_se_cmd; - struct se_device *se_dev; struct completion task_stop_comp; atomic_t task_active; atomic_t task_execute_queue; @@ -422,7 +420,6 @@ struct se_task { atomic_t task_stop; atomic_t task_state_active; struct timer_list task_timer; - struct se_device *se_obj_ptr; struct list_head t_list; struct list_head t_execute_list; struct list_head t_state_list; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index ccd85b380d6b..c93cf0ae37a4 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -45,9 +45,6 @@ #define TRANSPORT_TIMEOUT_TYPE_TAPE 600 #define TRANSPORT_TIMEOUT_TYPE_OTHER 300 -/* For se_task->task_state_flags */ -#define TSF_EXCEPTION_CLEARED 0x01 - /* * struct se_subsystem_dev->su_dev_flags */ From 6c76bf951cb099f5573954b1f56c1121c3a41c72 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:07:03 -0400 Subject: [PATCH 32/62] target: make more use of the task_flags field in se_task Replace various atomic_t variables that were mostly under t_state_lock with new flags in task_flags. Note that the execution error path didn't take t_state_lock before, so add it there. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 9 ++-- drivers/target/target_core_transport.c | 64 +++++++++++++------------- include/target/target_core_base.h | 14 +++--- 3 files changed, 43 insertions(+), 44 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index b8dc10fd4ef7..b5c18648fa22 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -259,8 +259,8 @@ static void core_tmr_drain_task_list( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); - if (atomic_read(&task->task_active)) { - atomic_set(&task->task_stop, 1); + if (task->task_flags & TF_ACTIVE) { + task->task_flags |= TF_REQUEST_STOP; spin_unlock_irqrestore( &cmd->t_state_lock, flags); @@ -269,11 +269,10 @@ static void core_tmr_drain_task_list( wait_for_completion(&task->task_stop_comp); pr_debug("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_dec(&cmd->t_task_cdbs_left); - - atomic_set(&task->task_active, 0); - atomic_set(&task->task_stop, 0); + task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); } __transport_stop_task_timer(task, &flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c935c72da7be..165a60c875ae 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -440,7 +440,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) return; list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_active)) + if (task->task_flags & TF_ACTIVE) continue; if (!atomic_read(&task->task_state_active)) @@ -718,7 +718,7 @@ void transport_complete_task(struct se_task *task, int success) atomic_inc(&dev->depth_left); spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_set(&task->task_active, 0); + task->task_flags &= ~TF_ACTIVE; /* * See if any sense data exists, if so set the TASK_SENSE flag. 
@@ -737,14 +737,14 @@ void transport_complete_task(struct se_task *task, int success) * See if we are waiting for outstanding struct se_task * to complete for an exception condition */ - if (atomic_read(&task->task_stop)) { + if (task->task_flags & TF_REQUEST_STOP) { /* * Decrement cmd->t_se_count if this task had * previously thrown its timeout exception handler. */ - if (atomic_read(&task->task_timeout)) { + if (task->task_flags & TF_TIMEOUT) { atomic_dec(&cmd->t_se_count); - atomic_set(&task->task_timeout, 0); + task->task_flags &= ~TF_TIMEOUT; } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -756,7 +756,7 @@ void transport_complete_task(struct se_task *task, int success) * left counter to determine when the struct se_cmd is ready to be queued to * the processing thread. */ - if (atomic_read(&task->task_timeout)) { + if (task->task_flags & TF_TIMEOUT) { if (!atomic_dec_and_test( &cmd->t_task_cdbs_timeout_left)) { spin_unlock_irqrestore(&cmd->t_state_lock, @@ -1793,8 +1793,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) * If the struct se_task has not been sent and is not active, * remove the struct se_task from the execution queue. */ - if (!atomic_read(&task->task_sent) && - !atomic_read(&task->task_active)) { + if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_remove_task_from_execute_queue(task, @@ -1810,8 +1809,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) * If the struct se_task is active, sleep until it is returned * from the plugin. */ - if (atomic_read(&task->task_active)) { - atomic_set(&task->task_stop, 1); + if (task->task_flags & TF_ACTIVE) { + task->task_flags |= TF_REQUEST_STOP; spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -1823,9 +1822,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_dec(&cmd->t_task_cdbs_left); - - atomic_set(&task->task_active, 0); - atomic_set(&task->task_stop, 0); + task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); } else { pr_debug("task_no[%d] - Did nothing\n", task->task_no); ret++; @@ -2074,18 +2071,18 @@ static void transport_task_timeout_handler(unsigned long data) pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); spin_lock_irqsave(&cmd->t_state_lock, flags); - if (task->task_flags & TF_STOP) { + if (task->task_flags & TF_TIMER_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - task->task_flags &= ~TF_RUNNING; + task->task_flags &= ~TF_TIMER_RUNNING; /* * Determine if transport_complete_task() has already been called. 
*/ - if (!atomic_read(&task->task_active)) { - pr_debug("transport task: %p cmd: %p timeout task_active" - " == 0\n", task, cmd); + if (!(task->task_flags & TF_ACTIVE)) { + pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n", + task, cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } @@ -2094,12 +2091,12 @@ static void transport_task_timeout_handler(unsigned long data) atomic_inc(&cmd->t_transport_timeout); cmd->t_tasks_failed = 1; - atomic_set(&task->task_timeout, 1); + task->task_flags |= TF_TIMEOUT; task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; task->task_scsi_status = 1; - if (atomic_read(&task->task_stop)) { - pr_debug("transport task: %p cmd: %p timeout task_stop" + if (task->task_flags & TF_REQUEST_STOP) { + pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP" " == 1\n", task, cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); @@ -2129,7 +2126,7 @@ static void transport_start_task_timer(struct se_task *task) struct se_device *dev = task->task_se_cmd->se_dev; int timeout; - if (task->task_flags & TF_RUNNING) + if (task->task_flags & TF_TIMER_RUNNING) return; /* * If the task_timeout is disabled, exit now. @@ -2143,7 +2140,7 @@ static void transport_start_task_timer(struct se_task *task) task->task_timer.data = (unsigned long) task; task->task_timer.function = transport_task_timeout_handler; - task->task_flags |= TF_RUNNING; + task->task_flags |= TF_TIMER_RUNNING; add_timer(&task->task_timer); #if 0 pr_debug("Starting task timer for cmd: %p task: %p seconds:" @@ -2158,17 +2155,17 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { struct se_cmd *cmd = task->task_se_cmd; - if (!task->task_flags & TF_RUNNING) + if (!(task->task_flags & TF_TIMER_RUNNING)) return; - task->task_flags |= TF_STOP; + task->task_flags |= TF_TIMER_STOP; spin_unlock_irqrestore(&cmd->t_state_lock, *flags); del_timer_sync(&task->task_timer); spin_lock_irqsave(&cmd->t_state_lock, *flags); - task->task_flags &= ~TF_RUNNING; - task->task_flags &= ~TF_STOP; + task->task_flags &= ~TF_TIMER_RUNNING; + task->task_flags &= ~TF_TIMER_STOP; } static void transport_stop_all_task_timers(struct se_cmd *cmd) @@ -2360,8 +2357,7 @@ check_depth: cmd = task->task_se_cmd; spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_set(&task->task_active, 1); - atomic_set(&task->task_sent, 1); + task->task_flags |= (TF_ACTIVE | TF_SENT); atomic_inc(&cmd->t_task_cdbs_sent); if (atomic_read(&cmd->t_task_cdbs_sent) == @@ -2379,7 +2375,9 @@ check_depth: error = cmd->transport_emulate_cdb(cmd); if (error != 0) { cmd->transport_error_status = error; - atomic_set(&task->task_active, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + task->task_flags &= ~TF_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); atomic_set(&cmd->transport_sent, 0); transport_stop_tasks_for_cmd(cmd); transport_generic_request_failure(cmd, dev, 0, 1); @@ -2415,7 +2413,9 @@ check_depth: if (error != 0) { cmd->transport_error_status = error; - atomic_set(&task->task_active, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + task->task_flags &= ~TF_ACTIVE; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); atomic_set(&cmd->transport_sent, 0); transport_stop_tasks_for_cmd(cmd); transport_generic_request_failure(cmd, dev, 0, 1); @@ -3613,7 +3613,7 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - if 
(atomic_read(&task->task_active)) + if (task->task_flags & TF_ACTIVE) continue; kfree(task->task_sg_bidi); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 6c49db403205..5e3dd1418bac 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -72,9 +72,13 @@ enum transport_tpg_type_table { }; /* Used for generate timer flags */ -enum timer_flags_table { - TF_RUNNING = 0x01, - TF_STOP = 0x02, +enum se_task_flags { + TF_ACTIVE = (1 << 0), + TF_SENT = (1 << 1), + TF_TIMEOUT = (1 << 2), + TF_REQUEST_STOP = (1 << 3), + TF_TIMER_RUNNING = (1 << 4), + TF_TIMER_STOP = (1 << 5), }; /* Special transport agnostic struct se_cmd->t_states */ @@ -413,11 +417,7 @@ struct se_task { enum dma_data_direction task_data_direction; struct se_cmd *task_se_cmd; struct completion task_stop_comp; - atomic_t task_active; atomic_t task_execute_queue; - atomic_t task_timeout; - atomic_t task_sent; - atomic_t task_stop; atomic_t task_state_active; struct timer_list task_timer; struct list_head t_list; From 04629b7bde553e3703577779f53cb0ba1eddd2c0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:07:04 -0400 Subject: [PATCH 33/62] target: Remove unnecessary se_task members This is a squashed version of the following unnecessary se_task structure member removal patches: target: remove the task_execute_queue field in se_task Instead of using a separate flag we can simply do list_empty checks on t_execute_list if we make sure to always use list_del_init to remove a task from the list. Also factor some duplicate code into a new __transport_remove_task_from_execute_queue helper. target: remove the read-only task_no field in se_task The task_no field never was initialized and only used in debug printks, so kill it. target: remove the task_padded_sg field in se_task This field is only checked in one place and not actually needed there. Rationale: - transport_do_task_sg_chain asserts that we have task_sg_chaining set early on - we only make use of the sg_prev_nents field we calculate based on it if there is another sg list that gets chained onto this one, which never happens for the last (or only) task.
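For the first of these removals, the key observation is that list membership itself can serve as the flag, provided tasks are always removed with list_del_init(); the new helper and its typical caller, both taken from the diff below, look roughly like this:

void __transport_remove_task_from_execute_queue(struct se_task *task,
		struct se_device *dev)
{
	/* list_del_init() re-initializes the entry, so a later list_empty()
	 * check replaces the old task_execute_queue flag. */
	list_del_init(&task->t_execute_list);
	atomic_dec(&dev->execute_tasks);
}

	/* caller side, e.g. in core_tmr_drain_task_list() */
	if (!list_empty(&task->t_execute_list))
		__transport_remove_task_from_execute_queue(task, dev);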
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 7 +-- drivers/target/target_core_transport.c | 59 +++++++++++--------------- include/target/target_core_base.h | 3 -- include/target/target_core_transport.h | 2 + 4 files changed, 28 insertions(+), 43 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index b5c18648fa22..b8a780fff367 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -226,11 +226,8 @@ static void core_tmr_drain_task_list( /* * Remove from task execute list before processing drain_task_list */ - if (atomic_read(&task->task_execute_queue) != 0) { - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); - } + if (!list_empty(&task->t_execute_list)) + __transport_remove_task_from_execute_queue(task, dev); } spin_unlock_irqrestore(&dev->execute_task_lock, flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 165a60c875ae..4a787a0ab068 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -915,38 +915,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) spin_lock_irqsave(&dev->execute_task_lock, flags); list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_execute_queue)) + if (!list_empty(&task->t_execute_list)) continue; /* * __transport_add_task_to_execute_queue() handles the * SAM Task Attribute emulation if enabled */ __transport_add_task_to_execute_queue(task, task_prev, dev); - atomic_set(&task->task_execute_queue, 1); task_prev = task; } spin_unlock_irqrestore(&dev->execute_task_lock, flags); } -/* transport_remove_task_from_execute_queue(): - * - * - */ +void __transport_remove_task_from_execute_queue(struct se_task *task, + struct se_device *dev) +{ + list_del_init(&task->t_execute_list); + atomic_dec(&dev->execute_tasks); +} + void transport_remove_task_from_execute_queue( struct se_task *task, struct se_device *dev) { unsigned long flags; - if (atomic_read(&task->task_execute_queue) == 0) { - dump_stack(); + if (WARN_ON(list_empty(&task->t_execute_list))) return; - } spin_lock_irqsave(&dev->execute_task_lock, flags); - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); + __transport_remove_task_from_execute_queue(task, dev); spin_unlock_irqrestore(&dev->execute_task_lock, flags); } @@ -1787,8 +1785,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - pr_debug("task_no[%d] - Processing task %p\n", - task->task_no, task); + pr_debug("Processing task %p\n", task); /* * If the struct se_task has not been sent and is not active, * remove the struct se_task from the execution queue. 
@@ -1799,8 +1796,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) transport_remove_task_from_execute_queue(task, cmd->se_dev); - pr_debug("task_no[%d] - Removed from execute queue\n", - task->task_no); + pr_debug("Task %p removed from execute queue\n", task); spin_lock_irqsave(&cmd->t_state_lock, flags); continue; } @@ -1814,17 +1810,15 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); - pr_debug("task_no[%d] - Waiting to complete\n", - task->task_no); + pr_debug("Task %p waiting to complete\n", task); wait_for_completion(&task->task_stop_comp); - pr_debug("task_no[%d] - Stopped successfully\n", - task->task_no); + pr_debug("Task %p stopped successfully\n", task); spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_dec(&cmd->t_task_cdbs_left); task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); } else { - pr_debug("task_no[%d] - Did nothing\n", task->task_no); + pr_debug("Task %p - did nothing\n", task); ret++; } @@ -2347,9 +2341,7 @@ check_depth: } task = list_first_entry(&dev->execute_task_list, struct se_task, t_execute_list); - list_del(&task->t_execute_list); - atomic_set(&task->task_execute_queue, 0); - atomic_dec(&dev->execute_tasks); + __transport_remove_task_from_execute_queue(task, dev); spin_unlock_irq(&dev->execute_task_lock); atomic_dec(&dev->depth_left); @@ -2681,9 +2673,9 @@ static int transport_get_sense_data(struct se_cmd *cmd) sense_buffer = dev->transport->get_sense_buffer(task); if (!sense_buffer) { - pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" + pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate" " sense buffer for task with sense\n", - cmd->se_tfo->get_task_tag(cmd), task->task_no); + cmd->se_tfo->get_task_tag(cmd), task); continue; } spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -3897,15 +3889,13 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) /* * For the padded tasks, use the extra SGL vector allocated * in transport_allocate_data_tasks() for the sg_prev_nents - * offset into sg_chain() above.. The last task of a - * multi-task list, or a single task will not have - * task->task_sg_padded set.. + * offset into sg_chain() above. + * + * We do not need the padding for the last task (or a single + * task), but in that case we will never use the sg_prev_nents + * value below which would be incorrect. 
*/ - if (task->task_padded_sg) - sg_prev_nents = (task->task_sg_nents + 1); - else - sg_prev_nents = task->task_sg_nents; - + sg_prev_nents = (task->task_sg_nents + 1); sg_prev = task->task_sg; } /* @@ -3992,7 +3982,6 @@ static int transport_allocate_data_tasks( */ if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { task_sg_nents_padded = (task->task_sg_nents + 1); - task->task_padded_sg = 1; } else task_sg_nents_padded = task->task_sg_nents; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 5e3dd1418bac..45291b23bc03 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -409,15 +409,12 @@ struct se_task { u8 task_scsi_status; u8 task_flags; int task_error_status; - bool task_padded_sg; unsigned long long task_lba; - u32 task_no; u32 task_sectors; u32 task_size; enum dma_data_direction task_data_direction; struct se_cmd *task_se_cmd; struct completion task_stop_comp; - atomic_t task_execute_queue; atomic_t task_state_active; struct timer_list task_timer; struct list_head t_list; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c93cf0ae37a4..c5eb259c2e2f 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -135,6 +135,8 @@ extern void transport_add_task_to_execute_queue(struct se_task *, struct se_device *); extern void transport_remove_task_from_execute_queue(struct se_task *, struct se_device *); +extern void __transport_remove_task_from_execute_queue(struct se_task *, + struct se_device *); unsigned char *transport_dump_cmd_direction(struct se_cmd *); extern void transport_dump_dev_state(struct se_device *, char *, int *); extern void transport_dump_dev_info(struct se_device *, struct se_lun *, From 3189b067eeae4646f3c7fa0ed0d14659a682baa8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:07:07 -0400 Subject: [PATCH 34/62] target: pack struct se_task more tightly Rearrange the fields in se_task to avoid holes. Also increase the flags field to 16 bits as we have the space for it, and this makes adding new flags safer. 
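The motivation is the usual structure-packing argument: on a 64-bit kernel, interleaving pointer-sized and narrower fields leaves alignment holes, while grouping members by size removes them. The following is a generic illustration of that effect with invented field names, not the actual se_task layout:

/* Illustrative only -- not the real se_task definition. */
struct badly_packed {
	u8	flag;	/* 1 byte + 7 bytes of padding before the pointer */
	void	*ptr;	/* 8 bytes */
	u32	len;	/* 4 bytes + 4 bytes of padding before the pointer */
	void	*ptr2;	/* 8 bytes */
};			/* 32 bytes on x86-64 */

struct tightly_packed {
	void	*ptr;	/* 8 bytes */
	void	*ptr2;	/* 8 bytes */
	u32	len;	/* 4 bytes */
	u8	flag;	/* 1 byte + 3 bytes of tail padding */
};			/* 24 bytes on x86-64 */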
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- include/target/target_core_base.h | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 45291b23bc03..a6c23485068e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -402,24 +402,24 @@ struct se_queue_obj { } ____cacheline_aligned; struct se_task { - unsigned char task_sense; - struct scatterlist *task_sg; - u32 task_sg_nents; - struct scatterlist *task_sg_bidi; - u8 task_scsi_status; - u8 task_flags; - int task_error_status; unsigned long long task_lba; - u32 task_sectors; - u32 task_size; + u32 task_sectors; + u32 task_size; + struct se_cmd *task_se_cmd; + struct scatterlist *task_sg; + struct scatterlist *task_sg_bidi; + u32 task_sg_nents; + u16 task_flags; + u8 task_sense; + u8 task_scsi_status; + int task_error_status; enum dma_data_direction task_data_direction; - struct se_cmd *task_se_cmd; - struct completion task_stop_comp; - atomic_t task_state_active; + atomic_t task_state_active; struct timer_list task_timer; - struct list_head t_list; - struct list_head t_execute_list; - struct list_head t_state_list; + struct list_head t_list; + struct list_head t_execute_list; + struct list_head t_state_list; + struct completion task_stop_comp; } ____cacheline_aligned; struct se_cmd { From 6193f06e6fe27c9475e407cb3cf2b0d4cd2725b0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:09:11 -0400 Subject: [PATCH 35/62] target: make the ->get_cdb method optional The most commonly used file, iblock and rd backends have no use for a per-task CDB and thus don't need a method to copy it into their otherwise unused CDB fields. 
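Making the method optional only requires the core to test for it before use; the data-task allocation path, condensed from the transport_allocate_data_tasks() hunk below, becomes:

	if (dev->transport->get_cdb) {
		unsigned char *cdb = dev->transport->get_cdb(task);

		memcpy(cdb, cmd->t_task_cdb,
			scsi_command_size(cmd->t_task_cdb));
		/* Update the per-task cdb with this task's lba/sectors */
		cmd->transport_split_cdb(task->task_lba,
			task->task_sectors, cdb);
	}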
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_file.c | 12 ------------ drivers/target/target_core_file.h | 4 +--- drivers/target/target_core_iblock.c | 6 ------ drivers/target/target_core_iblock.h | 1 - drivers/target/target_core_rd.c | 12 ------------ drivers/target/target_core_rd.h | 2 -- drivers/target/target_core_transport.c | 26 ++++++++++++++------------ 7 files changed, 15 insertions(+), 48 deletions(-) diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 524f41afea48..98aa5836448b 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -609,17 +609,6 @@ static ssize_t fd_show_configfs_dev_params( return bl; } -/* fd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *fd_get_cdb(struct se_task *task) -{ - struct fd_request *req = FILE_REQ(task); - - return req->fd_scsi_cdb; -} - /* fd_get_device_rev(): (Part of se_subsystem_api_t template) * * @@ -667,7 +656,6 @@ static struct se_subsystem_api fileio_template = { .check_configfs_dev_params = fd_check_configfs_dev_params, .set_configfs_dev_params = fd_set_configfs_dev_params, .show_configfs_dev_params = fd_show_configfs_dev_params, - .get_cdb = fd_get_cdb, .get_device_rev = fd_get_device_rev, .get_device_type = fd_get_device_type, .get_blocks = fd_get_blocks, diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index daebd710b893..59e6e73106c2 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -14,9 +14,7 @@ struct fd_request { struct se_task fd_task; - /* SCSI CDB from iSCSI Command PDU */ - unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; -} ____cacheline_aligned; +}; #define FBDF_HAS_PATH 0x01 #define FBDF_HAS_SIZE 0x02 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 21a3677984fb..bf074c4b7a36 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -618,11 +618,6 @@ fail: return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } -static unsigned char *iblock_get_cdb(struct se_task *task) -{ - return IBLOCK_REQ(task)->ib_scsi_cdb; -} - static u32 iblock_get_device_rev(struct se_device *dev) { return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ @@ -696,7 +691,6 @@ static struct se_subsystem_api iblock_template = { .check_configfs_dev_params = iblock_check_configfs_dev_params, .set_configfs_dev_params = iblock_set_configfs_dev_params, .show_configfs_dev_params = iblock_show_configfs_dev_params, - .get_cdb = iblock_get_cdb, .get_device_rev = iblock_get_device_rev, .get_device_type = iblock_get_device_type, .get_blocks = iblock_get_blocks, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 7a76f663385c..5cf1860c10d0 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -8,7 +8,6 @@ struct iblock_req { struct se_task ib_task; - unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE]; atomic_t ib_bio_cnt; atomic_t ib_bio_err_cnt; } ____cacheline_aligned; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index daf95dbe12ec..5158d3846f19 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -691,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params( return bl; } -/* rd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *rd_get_cdb(struct se_task *task) -{ - struct rd_request 
*req = RD_REQ(task); - - return req->rd_scsi_cdb; -} - static u32 rd_get_device_rev(struct se_device *dev) { return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ @@ -735,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = { .check_configfs_dev_params = rd_check_configfs_dev_params, .set_configfs_dev_params = rd_set_configfs_dev_params, .show_configfs_dev_params = rd_show_configfs_dev_params, - .get_cdb = rd_get_cdb, .get_device_rev = rd_get_device_rev, .get_device_type = rd_get_device_type, .get_blocks = rd_get_blocks, diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 0d027732cd00..784e56a04100 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -22,8 +22,6 @@ void rd_module_exit(void); struct rd_request { struct se_task rd_task; - /* SCSI CDB from iSCSI Command PDU */ - unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; /* Offset from start of page */ u32 rd_offset; /* Starting page in Ramdisk for request */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4a787a0ab068..895969650a27 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3932,7 +3932,6 @@ static int transport_allocate_data_tasks( struct scatterlist *sgl, unsigned int sgl_nents) { - unsigned char *cdb = NULL; struct se_task *task; struct se_device *dev = cmd->se_dev; unsigned long flags; @@ -3959,14 +3958,17 @@ static int transport_allocate_data_tasks( task->task_sectors = min(sectors, dev_max_sectors); task->task_size = task->task_sectors * sector_size; - cdb = dev->transport->get_cdb(task); - BUG_ON(!cdb); + if (dev->transport->get_cdb) { + unsigned char *cdb = dev->transport->get_cdb(task); - memcpy(cdb, cmd->t_task_cdb, - scsi_command_size(cmd->t_task_cdb)); + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); + + /* Update new cdb with updated lba/sectors */ + cmd->transport_split_cdb(task->task_lba, + task->task_sectors, cdb); + } - /* Update new cdb with updated lba/sectors */ - cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); /* * This now assumes that passed sg_ents are in PAGE_SIZE chunks * in order to calculate the number per task SGL entries @@ -4021,7 +4023,6 @@ static int transport_allocate_control_task(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *cdb; struct se_task *task; unsigned long flags; @@ -4029,10 +4030,11 @@ transport_allocate_control_task(struct se_cmd *cmd) if (!task) return -ENOMEM; - cdb = dev->transport->get_cdb(task); - BUG_ON(!cdb); - memcpy(cdb, cmd->t_task_cdb, - scsi_command_size(cmd->t_task_cdb)); + if (dev->transport->get_cdb) { + unsigned char *cdb = dev->transport->get_cdb(task); + + memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb)); + } task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, GFP_KERNEL); From 485fd0d1e3b8010b538bd0b209f3592acc825677 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:09:12 -0400 Subject: [PATCH 36/62] target: replace ->get_cdb with a target_get_task_cdb helper Instead of calling out to the backends from the core to get a per-task CDB and then modify it for the LBA/len pair used for this CDB provide a helper that writes the adjusted CDB into a provided buffer and call this method from ->do_task in pscsi. 
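The helper itself is short; together with its single caller in pscsi_do_task() it amounts to the following sketch, condensed from the diff below:

void target_get_task_cdb(struct se_task *task, unsigned char *cdb)
{
	struct se_cmd *cmd = task->task_se_cmd;

	memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb));
	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
		cmd->transport_split_cdb(task->task_lba,
					 task->task_sectors, cdb);
}

	/* pscsi_do_task() now builds the CDB into its own buffer */
	target_get_task_cdb(task, pt->pscsi_cdb);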
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_cdb.c | 16 ++++++++++++++++ drivers/target/target_core_pscsi.c | 15 ++------------- drivers/target/target_core_transport.c | 18 ------------------ include/target/target_core_transport.h | 5 +---- 4 files changed, 19 insertions(+), 35 deletions(-) diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 0d02391d1fb1..fb0b6308038b 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -1265,3 +1265,19 @@ transport_emulate_control_cdb(struct se_task *task) return PYX_TRANSPORT_SENT_TO_TRANSPORT; } + +/* + * Write a CDB into @cdb that is based on the one the intiator sent us, + * but updated to only cover the sectors that the current task handles. + */ +void target_get_task_cdb(struct se_task *task, unsigned char *cdb) +{ + struct se_cmd *cmd = task->task_se_cmd; + + memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb)); + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + cmd->transport_split_cdb(task->task_lba, task->task_sectors, + cdb); + } +} +EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index b347862bf801..936b9fec4cca 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1068,6 +1068,8 @@ static int pscsi_do_task(struct se_task *task) struct bio *hbio; int ret; + target_get_task_cdb(task, pt->pscsi_cdb); + if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { req = blk_get_request(pdv->pdv_sd->request_queue, (task->task_data_direction == DMA_TO_DEVICE), @@ -1150,18 +1152,6 @@ fail: return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } - -/* pscsi_get_cdb(): - * - * - */ -static unsigned char *pscsi_get_cdb(struct se_task *task) -{ - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - - return pt->pscsi_cdb; -} - /* pscsi_get_sense_buffer(): * * @@ -1276,7 +1266,6 @@ static struct se_subsystem_api pscsi_template = { .check_configfs_dev_params = pscsi_check_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, - .get_cdb = pscsi_get_cdb, .get_sense_buffer = pscsi_get_sense_buffer, .get_device_rev = pscsi_get_device_rev, .get_device_type = pscsi_get_device_type, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 895969650a27..3025a6f5ab56 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3958,17 +3958,6 @@ static int transport_allocate_data_tasks( task->task_sectors = min(sectors, dev_max_sectors); task->task_size = task->task_sectors * sector_size; - if (dev->transport->get_cdb) { - unsigned char *cdb = dev->transport->get_cdb(task); - - memcpy(cdb, cmd->t_task_cdb, - scsi_command_size(cmd->t_task_cdb)); - - /* Update new cdb with updated lba/sectors */ - cmd->transport_split_cdb(task->task_lba, - task->task_sectors, cdb); - } - /* * This now assumes that passed sg_ents are in PAGE_SIZE chunks * in order to calculate the number per task SGL entries @@ -4022,7 +4011,6 @@ static int transport_allocate_data_tasks( static int transport_allocate_control_task(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_dev; struct se_task *task; unsigned long flags; @@ -4030,12 +4018,6 @@ transport_allocate_control_task(struct se_cmd *cmd) if (!task) return -ENOMEM; - if (dev->transport->get_cdb) { - unsigned char *cdb = 
dev->transport->get_cdb(task); - - memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb)); - } - task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, GFP_KERNEL); if (!task->task_sg) { diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c5eb259c2e2f..171c2359bc79 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -190,6 +190,7 @@ extern int transport_generic_do_tmr(struct se_cmd *); extern int core_alua_check_nonop_delay(struct se_cmd *); /* From target_core_cdb.c */ extern int transport_emulate_control_cdb(struct se_task *); +extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb); /* * Each se_transport_task_t can have N number of possible struct se_task's @@ -308,10 +309,6 @@ struct se_subsystem_api { */ ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *, char *); - /* - * get_cdb(): - */ - unsigned char *(*get_cdb)(struct se_task *); /* * get_device_rev(): */ From b937d27052e5759b1308782166fe47bc76e05b4d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Oct 2011 11:09:13 -0400 Subject: [PATCH 37/62] target: remove the ->transport_split_cdb callback in se_cmd Add a switch statement implementing the CDB LBA/len update directly in target_get_task_cdb and remove the old ->transport_split_cdb callback and all its implementations. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/Makefile | 1 - drivers/target/target_core_cdb.c | 39 ++++++++- drivers/target/target_core_scdb.c | 105 ------------------------- drivers/target/target_core_scdb.h | 10 --- drivers/target/target_core_transport.c | 11 --- include/target/target_core_base.h | 1 - 6 files changed, 36 insertions(+), 131 deletions(-) delete mode 100644 drivers/target/target_core_scdb.c delete mode 100644 drivers/target/target_core_scdb.h diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 1060c7b7f803..62e54053bcd8 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -6,7 +6,6 @@ target_core_mod-y := target_core_configfs.o \ target_core_hba.o \ target_core_pr.o \ target_core_alua.o \ - target_core_scdb.o \ target_core_tmr.o \ target_core_tpg.o \ target_core_transport.o \ diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index fb0b6308038b..575cf7216f52 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -1273,11 +1273,44 @@ transport_emulate_control_cdb(struct se_task *task) void target_get_task_cdb(struct se_task *task, unsigned char *cdb) { struct se_cmd *cmd = task->task_se_cmd; + unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); - memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb)); + memcpy(cdb, cmd->t_task_cdb, cdb_len); if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - cmd->transport_split_cdb(task->task_lba, task->task_sectors, - cdb); + unsigned long long lba = task->task_lba; + u32 sectors = task->task_sectors; + + switch (cdb_len) { + case 6: + /* 21-bit LBA and 8-bit sectors */ + cdb[1] = (lba >> 16) & 0x1f; + cdb[2] = (lba >> 8) & 0xff; + cdb[3] = lba & 0xff; + cdb[4] = sectors & 0xff; + break; + case 10: + /* 32-bit LBA and 16-bit sectors */ + put_unaligned_be32(lba, &cdb[2]); + put_unaligned_be16(sectors, &cdb[7]); + break; + case 12: + /* 32-bit LBA and 32-bit sectors */ + put_unaligned_be32(lba, &cdb[2]); + put_unaligned_be32(sectors, &cdb[6]); + break; + case 16: + /* 64-bit LBA and 
32-bit sectors */ + put_unaligned_be64(lba, &cdb[2]); + put_unaligned_be32(sectors, &cdb[10]); + break; + case 32: + /* 64-bit LBA and 32-bit sectors, extended CDB */ + put_unaligned_be64(lba, &cdb[12]); + put_unaligned_be32(sectors, &cdb[28]); + break; + default: + BUG(); + } } } EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c deleted file mode 100644 index 72843441d4fa..000000000000 --- a/drivers/target/target_core_scdb.c +++ /dev/null @@ -1,105 +0,0 @@ -/******************************************************************************* - * Filename: target_core_scdb.c - * - * This file contains the generic target engine Split CDB related functions. - * - * Copyright (c) 2004-2005 PyX Technologies, Inc. - * Copyright (c) 2005, 2006, 2007 SBE, Inc. - * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - ******************************************************************************/ - -#include -#include -#include -#include - -#include -#include - -#include "target_core_scdb.h" - -/* split_cdb_XX_6(): - * - * 21-bit LBA w/ 8-bit SECTORS - */ -void split_cdb_XX_6( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - cdb[1] = (lba >> 16) & 0x1f; - cdb[2] = (lba >> 8) & 0xff; - cdb[3] = lba & 0xff; - cdb[4] = sectors & 0xff; -} - -/* split_cdb_XX_10(): - * - * 32-bit LBA w/ 16-bit SECTORS - */ -void split_cdb_XX_10( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be16(sectors, &cdb[7]); -} - -/* split_cdb_XX_12(): - * - * 32-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_12( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[6]); -} - -/* split_cdb_XX_16(): - * - * 64-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_16( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be64(lba, &cdb[2]); - put_unaligned_be32(sectors, &cdb[10]); -} - -/* - * split_cdb_XX_32(): - * - * 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 - */ -void split_cdb_XX_32( - unsigned long long lba, - u32 sectors, - unsigned char *cdb) -{ - put_unaligned_be64(lba, &cdb[12]); - put_unaligned_be32(sectors, &cdb[28]); -} diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h deleted file mode 100644 index 48e9ccc9585e..000000000000 --- a/drivers/target/target_core_scdb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef TARGET_CORE_SCDB_H -#define TARGET_CORE_SCDB_H - -extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); -extern 
void split_cdb_XX_12(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); - -#endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 3025a6f5ab56..cb022a1b22b3 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -54,7 +54,6 @@ #include "target_core_alua.h" #include "target_core_hba.h" #include "target_core_pr.h" -#include "target_core_scdb.h" #include "target_core_ua.h" static int sub_api_initialized; @@ -2851,7 +2850,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_6; cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2860,7 +2858,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_10; cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2869,7 +2866,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_12; cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2878,7 +2874,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_16; cmd->t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2887,7 +2882,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_6; cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; @@ -2896,7 +2890,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_10; cmd->t_task_lba = transport_lba_32(cdb); cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2906,7 +2899,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_12; cmd->t_task_lba = transport_lba_32(cdb); cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2916,7 +2908,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_16; cmd->t_task_lba = transport_lba_64(cdb); cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2929,7 +2920,6 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->transport_split_cdb = &split_cdb_XX_10; cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; passthrough = (dev->transport->transport_type == @@ -2964,7 +2954,6 @@ static int transport_generic_cmd_sequencer( * Use WRITE_32 and READ_32 opcodes for the 
emulated * XDWRITE_READ_32 logic. */ - cmd->transport_split_cdb = &split_cdb_XX_32; cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index a6c23485068e..16d7a4985639 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -470,7 +470,6 @@ struct se_cmd { struct list_head se_queue_node; struct target_core_fabric_ops *se_tfo; int (*transport_emulate_cdb)(struct se_cmd *); - void (*transport_split_cdb)(unsigned long long, u32, unsigned char *); void (*transport_complete_callback)(struct se_cmd *); int (*transport_qf_callback)(struct se_cmd *); From 6b20fa9aaf0c2f69ee6f9648e20ab2be0206705e Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 18 Oct 2011 23:48:04 -0700 Subject: [PATCH 38/62] target: Fix REPORT TARGET PORT GROUPS handling with small allocation length This patch fixes a bug with the handling of REPORT TARGET PORT GROUPS containing a smaller allocation length than the payload requires causing memory writes beyond the end of the buffer. This patch checks for the minimum 4 byte length for the response payload length, and also checks upon each loop of T10_ALUA(su_dev)->tg_pt_gps_list to ensure the Target port group and Target port descriptor list is able to fit into the remaining allocation length. If the response payload exceeds the allocation length length, then rd_len is still increments to indicate to the initiator that the payload has been truncated. Reported-by: Roland Dreier Cc: stable@kernel.org Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 007c6c298b8b..8f4447749c71 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -67,12 +67,32 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) unsigned char *buf; u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first Target port group descriptor */ + /* + * Need at least 4 bytes of response data or else we can't + * even fit the return data length. + */ + if (cmd->data_length < 4) { + pr_warn("REPORT TARGET PORT GROUPS allocation length %u" + " too small\n", cmd->data_length); + return -EINVAL; + } buf = transport_kmap_first_data_page(cmd); spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { + /* + * Check if the Target port group and Target port descriptor list + * based on tg_pt_gp_members count will fit into the response payload. + * Otherwise, bump rd_len to let the initiator know we have exceeded + * the allocation length and the response is truncated. + */ + if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > + cmd->data_length) { + rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); + continue; + } /* * PREF: Preferred target port bit, determine if this * bit should be set for port group. From df5fa691ce61aedd3e4dbcf960ee44f05b797d8b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Oct 2011 07:29:58 -0400 Subject: [PATCH 39/62] target: make iblock_emulate_sync_cache asynchronous Do not block the submitting thread when handling a SYNCHRONIZE CACHE command, but implement it asynchronously by sending the FLUSH command ourself and calling transport_complete_sync_cache from the completion handler. 
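A condensed, annotated restatement of the asynchronous pattern the diff below adopts (function names shortened for illustration; the fields and helpers are the ones used in the patch): the flush goes out as a zero-segment WRITE_FLUSH bio, and status is reported from the bio completion handler instead of blocking the submitting thread.

static void example_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;	/* NULL when IMMED was set */

	if (cmd)
		transport_complete_sync_cache(cmd, err == 0);
	bio_put(bio);
}

static void example_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	if (immed)				/* IMMED: ack GOOD before flushing */
		transport_complete_sync_cache(cmd, 1);

	bio = bio_alloc(GFP_KERNEL, 0);		/* zero segments: pure cache flush */
	bio->bi_end_io = example_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)				/* otherwise defer the ack to completion */
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);		/* returns without waiting */
}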
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_iblock.c | 37 ++++++++++++++++------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index bf074c4b7a36..d9ad2a216eaa 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -313,37 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( return blocks_long; } +static void iblock_end_io_flush(struct bio *bio, int err) +{ + struct se_cmd *cmd = bio->bi_private; + + if (err) + pr_err("IBLOCK: cache flush failed: %d\n", err); + + if (cmd) + transport_complete_sync_cache(cmd, err == 0); + bio_put(bio); +} + /* - * Emulate SYCHRONIZE_CACHE_* + * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must + * always flush the whole cache. */ static void iblock_emulate_sync_cache(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; int immed = (cmd->t_task_cdb[1] & 0x2); - sector_t error_sector; - int ret; + struct bio *bio; /* * If the Immediate bit is set, queue up the GOOD response - * for this SYNCHRONIZE_CACHE op + * for this SYNCHRONIZE_CACHE op. */ if (immed) transport_complete_sync_cache(cmd, 1); - /* - * blkdev_issue_flush() does not support a specifying a range, so - * we have to flush the entire cache. - */ - ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); - if (ret != 0) { - pr_err("IBLOCK: block_issue_flush() failed: %d " - " error_sector: %llu\n", ret, - (unsigned long long)error_sector); - } - + bio = bio_alloc(GFP_KERNEL, 0); + bio->bi_end_io = iblock_end_io_flush; + bio->bi_bdev = ib_dev->ibd_bd; if (!immed) - transport_complete_sync_cache(cmd, ret == 0); + bio->bi_private = cmd; + submit_bio(WRITE_FLUSH, bio); } /* From f55918fa3202a646dad2404f7de008108edc5048 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Oct 2011 07:30:17 -0400 Subject: [PATCH 40/62] target: clean up the backend interface to caching parameters Remove the dpo_emulated, fua_write_emulated, fua_read_emulated and write_cache_emulated methods, and replace them with a simple bitfields in se_subsystem_api in those cases where they ever returned one. 
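As a usage sketch (hypothetical backend; only the two flag names and TRANSPORT_PLUGIN_VHBA_PDEV come from the surrounding code), a backend that emulates write cache and FUA writes now just sets the bits in its se_subsystem_api template instead of supplying four trivial callbacks:

static struct se_subsystem_api example_template = {
	.name			= "example",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	/* DPO and FUA-read emulation simply default to off now */
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	/* ... remaining attach/alloc/do_task methods as before ... */
};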
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_device.c | 55 +++++++------------------- drivers/target/target_core_file.c | 33 +--------------- drivers/target/target_core_iblock.c | 34 +--------------- include/target/target_core_transport.h | 20 ++-------- 4 files changed, 23 insertions(+), 119 deletions(-) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ca6e4a4df134..81352b7f9130 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -972,36 +972,24 @@ int se_dev_set_unmap_granularity_alignment( int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->dpo_emulated == NULL) { - pr_err("dev->transport->dpo_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->dpo_emulated(dev) == 0) { - pr_err("dev->transport->dpo_emulated not supported\n"); - return -EINVAL; - } - dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; - pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" - " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); - return 0; + + pr_err("dpo_emulated not supported\n"); + return -EINVAL; } int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->fua_write_emulated == NULL) { - pr_err("dev->transport->fua_write_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->fua_write_emulated(dev) == 0) { - pr_err("dev->transport->fua_write_emulated not supported\n"); + + if (dev->transport->fua_write_emulated == 0) { + pr_err("fua_write_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; @@ -1012,36 +1000,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->fua_read_emulated == NULL) { - pr_err("dev->transport->fua_read_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->fua_read_emulated(dev) == 0) { - pr_err("dev->transport->fua_read_emulated not supported\n"); - return -EINVAL; - } - dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; - pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", - dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); - return 0; + + pr_err("ua read emulated not supported\n"); + return -EINVAL; } int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { - if ((flag != 0) && (flag != 1)) { + if (flag != 0 && flag != 1) { pr_err("Illegal value %d\n", flag); return -EINVAL; } - if (dev->transport->write_cache_emulated == NULL) { - pr_err("dev->transport->write_cache_emulated is NULL\n"); - return -EINVAL; - } - if (dev->transport->write_cache_emulated(dev) == 0) { - pr_err("dev->transport->write_cache_emulated not supported\n"); + if (dev->transport->write_cache_emulated == 0) { + pr_err("write_cache_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 98aa5836448b..19a0be9c6570 100644 --- a/drivers/target/target_core_file.c +++ 
b/drivers/target/target_core_file.c @@ -399,33 +399,6 @@ static void fd_emulate_sync_cache(struct se_task *task) transport_complete_sync_cache(cmd, ret == 0); } -/* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int fd_emulated_write_cache(struct se_device *dev) -{ - return 1; -} - -static int fd_emulated_dpo(struct se_device *dev) -{ - return 0; -} -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int fd_emulated_fua_write(struct se_device *dev) -{ - return 1; -} - -static int fd_emulated_fua_read(struct se_device *dev) -{ - return 0; -} - /* * WRITE Force Unit Access (FUA) emulation on a per struct se_task * LBA range basis.. @@ -640,15 +613,13 @@ static struct se_subsystem_api fileio_template = { .name = "fileio", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .write_cache_emulated = 1, + .fua_write_emulated = 1, .attach_hba = fd_attach_hba, .detach_hba = fd_detach_hba, .allocate_virtdevice = fd_allocate_virtdevice, .create_virtdevice = fd_create_virtdevice, .free_device = fd_free_device, - .dpo_emulated = fd_emulated_dpo, - .fua_write_emulated = fd_emulated_fua_write, - .fua_read_emulated = fd_emulated_fua_read, - .write_cache_emulated = fd_emulated_write_cache, .alloc_task = fd_alloc_task, .do_task = fd_do_task, .do_sync_cache = fd_emulate_sync_cache, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d9ad2a216eaa..41ad02b5fb87 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -351,34 +351,6 @@ static void iblock_emulate_sync_cache(struct se_task *task) submit_bio(WRITE_FLUSH, bio); } -/* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int iblock_emulated_write_cache(struct se_device *dev) -{ - return 1; -} - -static int iblock_emulated_dpo(struct se_device *dev) -{ - return 0; -} - -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int iblock_emulated_fua_write(struct se_device *dev) -{ - return 1; -} - -static int iblock_emulated_fua_read(struct se_device *dev) -{ - return 0; -} - static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) { struct iblock_dev *ibd = dev->dev_ptr; @@ -679,15 +651,13 @@ static struct se_subsystem_api iblock_template = { .name = "iblock", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, + .write_cache_emulated = 1, + .fua_write_emulated = 1, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .allocate_virtdevice = iblock_allocate_virtdevice, .create_virtdevice = iblock_create_virtdevice, .free_device = iblock_free_device, - .dpo_emulated = iblock_emulated_dpo, - .fua_write_emulated = iblock_emulated_fua_write, - .fua_read_emulated = iblock_emulated_fua_read, - .write_cache_emulated = iblock_emulated_write_cache, .alloc_task = iblock_alloc_task, .do_task = iblock_do_task, .do_discard = iblock_do_discard, diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 171c2359bc79..9c7e32870f87 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -218,6 +218,10 @@ struct se_subsystem_api { * Transport Type. 
*/ u8 transport_type; + + unsigned int fua_write_emulated : 1; + unsigned int write_cache_emulated : 1; + /* * struct module for struct se_hba references */ @@ -253,22 +257,6 @@ struct se_subsystem_api { */ void (*free_device)(void *); - /* - * dpo_emulated(): - */ - int (*dpo_emulated)(struct se_device *); - /* - * fua_write_emulated(): - */ - int (*fua_write_emulated)(struct se_device *); - /* - * fua_read_emulated(): - */ - int (*fua_read_emulated)(struct se_device *); - /* - * write_cache_emulated(): - */ - int (*write_cache_emulated)(struct se_device *); /* * transport_complete(): * From e057f53308a5f071556ee80586b99ee755bf07f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:41 -0400 Subject: [PATCH 41/62] target: remove the transport_qf_callback se_cmd callback Remove the need for the transport_qf_callback callback by making sure we have specific states with specific handlers for the two queue full cases. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 79 ++++++++++++-------------- include/target/target_core_base.h | 2 +- 2 files changed, 37 insertions(+), 44 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index cb022a1b22b3..4ce205040b15 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -72,9 +72,8 @@ static int transport_generic_write_pending(struct se_cmd *); static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); -static int transport_complete_qf(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, - struct se_device *dev, int (*qf_callback)(struct se_cmd *)); + struct se_device *dev); static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); static u32 transport_allocate_tasks(struct se_cmd *cmd, @@ -969,7 +968,7 @@ static void target_qf_do_work(struct work_struct *work) pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, - (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : + (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" : "UNKNOWN"); /* @@ -1968,8 +1967,8 @@ check_stop: return; queue_full: - cmd->t_state = TRANSPORT_COMPLETE_OK; - transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); + cmd->t_state = TRANSPORT_COMPLETE_QF_OK; + transport_handle_queue_full(cmd, cmd->se_dev); } static void transport_direct_request_timeout(struct se_cmd *cmd) @@ -3431,12 +3430,19 @@ static void transport_complete_task_attr(struct se_cmd *cmd) wake_up_interruptible(&dev->dev_queue_obj.thread_wq); } -static int transport_complete_qf(struct se_cmd *cmd) +static void transport_complete_qf(struct se_cmd *cmd) { int ret = 0; - if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) - return cmd->se_tfo->queue_status(cmd); + transport_stop_all_task_timers(cmd); + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + transport_complete_task_attr(cmd); + + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + ret = cmd->se_tfo->queue_status(cmd); + if (ret) + goto out; + } switch (cmd->data_direction) { case DMA_FROM_DEVICE: @@ -3446,7 +3452,7 @@ static int transport_complete_qf(struct se_cmd *cmd) if (cmd->t_bidi_data_sg) { ret = cmd->se_tfo->queue_data_in(cmd); if (ret < 0) - return ret; + break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: @@ -3456,17 +3462,21 @@ static int transport_complete_qf(struct se_cmd *cmd) break; } - return ret; +out: + if (ret < 0) { + transport_handle_queue_full(cmd, cmd->se_dev); + return; + } + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); } static void transport_handle_queue_full( struct se_cmd *cmd, - struct se_device *dev, - int (*qf_callback)(struct se_cmd *)) + struct se_device *dev) { spin_lock_irq(&dev->qf_cmd_lock); cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; - cmd->transport_qf_callback = qf_callback; list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc(&dev->dev_qf_count); smp_mb__after_atomic_inc(); @@ -3492,14 +3502,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) schedule_work(&cmd->se_dev->qf_work_queue); - if (cmd->transport_qf_callback) { - ret = cmd->transport_qf_callback(cmd); - if (ret < 0) - goto queue_full; - - cmd->transport_qf_callback = NULL; - goto done; - } /* * Check if we need to retrieve a sense buffer from * the struct se_cmd in question. 
@@ -3575,7 +3577,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) break; } -done: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -3583,7 +3584,8 @@ done: queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); - transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); + cmd->t_state = TRANSPORT_COMPLETE_QF_OK; + transport_handle_queue_full(cmd, cmd->se_dev); } static void transport_free_dev_tasks(struct se_cmd *cmd) @@ -4110,15 +4112,15 @@ void transport_generic_process_write(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_generic_process_write); -static int transport_write_pending_qf(struct se_cmd *cmd) +static void transport_write_pending_qf(struct se_cmd *cmd) { - return cmd->se_tfo->write_pending(cmd); + if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) { + pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", + cmd); + transport_handle_queue_full(cmd, cmd->se_dev); + } } -/* transport_generic_write_pending(): - * - * - */ static int transport_generic_write_pending(struct se_cmd *cmd) { unsigned long flags; @@ -4128,17 +4130,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd) cmd->t_state = TRANSPORT_WRITE_PENDING; spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (cmd->transport_qf_callback) { - ret = cmd->transport_qf_callback(cmd); - if (ret == -EAGAIN) - goto queue_full; - else if (ret < 0) - return ret; - - cmd->transport_qf_callback = NULL; - return 0; - } - /* * Clear the se_cmd for WRITE_PENDING status in order to set * cmd->t_transport_active=0 so that transport_generic_handle_data @@ -4163,8 +4154,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); cmd->t_state = TRANSPORT_COMPLETE_QF_WP; - transport_handle_queue_full(cmd, cmd->se_dev, - transport_write_pending_qf); + transport_handle_queue_full(cmd, cmd->se_dev); return ret; } @@ -4851,7 +4841,10 @@ get_cmd: transport_generic_request_timeout(cmd); break; case TRANSPORT_COMPLETE_QF_WP: - transport_generic_write_pending(cmd); + transport_write_pending_qf(cmd); + break; + case TRANSPORT_COMPLETE_QF_OK: + transport_complete_qf(cmd); break; default: pr_err("Unknown t_state: %d deferred_t_state:" diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 16d7a4985639..2d47aa9f762a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -102,6 +102,7 @@ enum transport_state_table { TRANSPORT_NEW_CMD_MAP = 16, TRANSPORT_FREE_CMD_INTR = 17, TRANSPORT_COMPLETE_QF_WP = 18, + TRANSPORT_COMPLETE_QF_OK = 19, }; /* Used for struct se_cmd->se_cmd_flags */ @@ -471,7 +472,6 @@ struct se_cmd { struct target_core_fabric_ops *se_tfo; int (*transport_emulate_cdb)(struct se_cmd *); void (*transport_complete_callback)(struct se_cmd *); - int (*transport_qf_callback)(struct se_cmd *); unsigned char *t_task_cdb; unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; From f7a5cc0b310af887f5391ba886d3d9254ac8920a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:42 -0400 Subject: [PATCH 42/62] target: remove SCF_EMULATE_QUEUE_FULL Add a new boolean at_head parameter to transport_add_cmd_to_queue and thus obsolete the SCF_EMULATE_QUEUE_FULL flag. 
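Schematically, the flag dance is replaced by an explicit argument at the two kinds of call sites; shown here as bare call shapes, both of which appear in the diff below:

/* Normal submission paths: append at the tail of the device queue. */
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);

/* Queue-full retry from target_qf_do_work(): requeue at the head. */
transport_add_cmd_to_queue(cmd, cmd->t_state, true);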
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 32 +++++++++++--------------- include/target/target_core_base.h | 1 - 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4ce205040b15..b5c3daf12169 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -596,9 +596,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) } } -static void transport_add_cmd_to_queue( - struct se_cmd *cmd, - int t_state) +static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, + bool at_head) { struct se_device *dev = cmd->se_dev; struct se_queue_obj *qobj = &dev->dev_queue_obj; @@ -619,10 +618,9 @@ static void transport_add_cmd_to_queue( else atomic_inc(&qobj->queue_cnt); - if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { - cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + if (at_head) list_add(&cmd->se_queue_node, &qobj->qobj_list); - } else + else list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); atomic_set(&cmd->t_transport_queue_active, 1); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -764,7 +762,7 @@ void transport_complete_task(struct se_task *task, int success) t_state = TRANSPORT_COMPLETE_TIMEOUT; spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_add_cmd_to_queue(cmd, t_state); + transport_add_cmd_to_queue(cmd, t_state, false); return; } atomic_dec(&cmd->t_task_cdbs_timeout_left); @@ -796,7 +794,7 @@ void transport_complete_task(struct se_task *task, int success) } spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_add_cmd_to_queue(cmd, t_state); + transport_add_cmd_to_queue(cmd, t_state, false); } EXPORT_SYMBOL(transport_complete_task); @@ -971,11 +969,8 @@ static void target_qf_do_work(struct work_struct *work) (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" : "UNKNOWN"); - /* - * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd - * has been added to head of queue - */ - transport_add_cmd_to_queue(cmd, cmd->t_state); + + transport_add_cmd_to_queue(cmd, cmd->t_state, true); } } @@ -1714,7 +1709,7 @@ int transport_generic_handle_cdb_map( return -EINVAL; } - transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); + transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false); return 0; } EXPORT_SYMBOL(transport_generic_handle_cdb_map); @@ -1744,7 +1739,7 @@ int transport_generic_handle_data( if (transport_check_aborted_status(cmd, 1) != 0) return 0; - transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false); return 0; } EXPORT_SYMBOL(transport_generic_handle_data); @@ -1756,7 +1751,7 @@ EXPORT_SYMBOL(transport_generic_handle_data); int transport_generic_handle_tmr( struct se_cmd *cmd) { - transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); + transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false); return 0; } EXPORT_SYMBOL(transport_generic_handle_tmr); @@ -1764,7 +1759,7 @@ EXPORT_SYMBOL(transport_generic_handle_tmr); void transport_generic_free_cmd_intr( struct se_cmd *cmd) { - transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); + transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false); } EXPORT_SYMBOL(transport_generic_free_cmd_intr); @@ -2107,7 +2102,7 @@ static void transport_task_timeout_handler(unsigned long data) cmd->t_state = TRANSPORT_COMPLETE_FAILURE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); + transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false); } /* @@ -3476,7 +3471,6 @@ static void transport_handle_queue_full( struct se_device *dev) { spin_lock_irq(&dev->qf_cmd_lock); - cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc(&dev->dev_qf_count); smp_mb__after_atomic_inc(); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 2d47aa9f762a..77b3de846340 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -127,7 +127,6 @@ enum se_cmd_flags_table { SCF_UNUSED = 0x00100000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, SCF_EMULATE_CDB_ASYNC = 0x01000000, - SCF_EMULATE_QUEUE_FULL = 0x02000000, }; /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ From 3df8d40ba3fea72c35ab092c091b19a599df1e81 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:43 -0400 Subject: [PATCH 43/62] target: do not pass the queue object to transport_remove_cmd_from_queue We always operated on the same queue, so move finding it into the function, just like we do for all other helpers operating on it. 
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b5c3daf12169..5fe38e4a5025 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -82,8 +82,7 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd, struct scatterlist *sgl, unsigned int nents); static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, - struct se_queue_obj *qobj); +static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); static void transport_stop_all_task_timers(struct se_cmd *cmd); @@ -591,7 +590,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) if (transport_cmd_check_stop_to_fabric(cmd)) return; if (remove) { - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd); transport_put_cmd(cmd); } } @@ -650,9 +649,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj) return cmd; } -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, - struct se_queue_obj *qobj) +static void transport_remove_cmd_from_queue(struct se_cmd *cmd) { + struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -4232,7 +4231,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", cmd->se_tfo->get_task_tag(cmd)); } - transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd); return 0; } From b7b8bef7f8c1c9b3358127608e867db7cd928022 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:44 -0400 Subject: [PATCH 44/62] target: use transport_cmd_check_stop_to_fabric consistently Change one remaining user of transport_cmd_check_stop(cmd, 2, 0) to the transport_cmd_check_stop_to_fabric wrapper. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5fe38e4a5025..0bfef095c98d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4752,7 +4752,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd) cmd->t_state = TRANSPORT_ISTATE_PROCESSING; cmd->se_tfo->queue_tm_rsp(cmd); - transport_cmd_check_stop(cmd, 2, 0); + transport_cmd_check_stop_to_fabric(cmd); return 0; } From 0c2cfe5fe78e682d6235a1d32a363460b1c77528 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:45 -0400 Subject: [PATCH 45/62] target: fix list walking in transport_free_dev_tasks list_for_each_entry_safe only protects against deletions from the list, but not against any concurrent modifications. Given that we drop t_state_lock inside the loop it is not safe in transport_free_dev_tasks. Instead of use a local dispose_list that we move all tasks that are to be deleted to. This is safe because we never do list_emptry checks on t_list to check if a command is on the list anywhere. 
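The fix below uses a common idiom: while holding the lock, move the victims onto a private list, then free them after the lock is dropped, so the walk never races with anything that happens while the lock is released. A generic sketch of the idiom, with hypothetical names (struct item, it->active), not TCM code:

struct item {
	struct list_head	node;
	bool			active;
};

static void dispose_inactive(struct list_head *live, spinlock_t *lock)
{
	struct item *it, *tmp;
	unsigned long flags;
	LIST_HEAD(dispose_list);		/* private: nobody else can modify it */

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(it, tmp, live, node) {
		if (!it->active)
			list_move_tail(&it->node, &dispose_list);
	}
	spin_unlock_irqrestore(lock, flags);	/* victims are off 'live' now */

	while (!list_empty(&dispose_list)) {
		it = list_first_entry(&dispose_list, struct item, node);
		list_del(&it->node);
		kfree(it);			/* freeing can happen without the lock */
	}
}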
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0bfef095c98d..4dc492d6ae1b 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3585,23 +3585,26 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) { struct se_task *task, *task_tmp; unsigned long flags; + LIST_HEAD(dispose_list); spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - if (task->task_flags & TF_ACTIVE) - continue; + if (!(task->task_flags & TF_ACTIVE)) + list_move_tail(&task->t_list, &dispose_list); + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + while (!list_empty(&dispose_list)) { + task = list_first_entry(&dispose_list, struct se_task, t_list); kfree(task->task_sg_bidi); kfree(task->task_sg); list_del(&task->t_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); cmd->se_dev->transport->free_task(task); - spin_lock_irqsave(&cmd->t_state_lock, flags); } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline void transport_free_sgl(struct scatterlist *sgl, int nents) From cdbb70bb4c17dad0ee23a357030021892a0f60f0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:46 -0400 Subject: [PATCH 46/62] target: factor some duplicate code for stopping a task Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 19 ++--------- drivers/target/target_core_transport.c | 47 +++++++++++++++----------- include/target/target_core_transport.h | 1 + 3 files changed, 31 insertions(+), 36 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index b8a780fff367..4e963da74a0d 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -236,7 +236,6 @@ static void core_tmr_drain_task_list( list_del(&task->t_state_list); cmd = task->task_se_cmd; - spin_lock_irqsave(&cmd->t_state_lock, flags); pr_debug("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", @@ -256,22 +255,8 @@ static void core_tmr_drain_task_list( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); - if (task->task_flags & TF_ACTIVE) { - task->task_flags |= TF_REQUEST_STOP; - spin_unlock_irqrestore( - &cmd->t_state_lock, flags); - - pr_debug("LUN_RESET: Waiting for task: %p to shutdown" - " for dev: %p\n", task, dev); - wait_for_completion(&task->task_stop_comp); - pr_debug("LUN_RESET Completed task: %p shutdown for" - " dev: %p\n", task, dev); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_dec(&cmd->t_task_cdbs_left); - task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); - } - __transport_stop_task_timer(task, &flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + target_stop_task(task, &flags); if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4dc492d6ae1b..6dab8198ace8 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1762,6 +1762,33 @@ void transport_generic_free_cmd_intr( } EXPORT_SYMBOL(transport_generic_free_cmd_intr); +/* + * If the task is active, request 
it to be stopped and sleep until it + * has completed. + */ +bool target_stop_task(struct se_task *task, unsigned long *flags) +{ + struct se_cmd *cmd = task->task_se_cmd; + bool was_active = false; + + if (task->task_flags & TF_ACTIVE) { + task->task_flags |= TF_REQUEST_STOP; + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + + pr_debug("Task %p waiting to complete\n", task); + wait_for_completion(&task->task_stop_comp); + pr_debug("Task %p stopped successfully\n", task); + + spin_lock_irqsave(&cmd->t_state_lock, *flags); + atomic_dec(&cmd->t_task_cdbs_left); + task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); + was_active = true; + } + + __transport_stop_task_timer(task, flags); + return was_active; +} + static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) { struct se_task *task, *task_tmp; @@ -1793,28 +1820,10 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) continue; } - /* - * If the struct se_task is active, sleep until it is returned - * from the plugin. - */ - if (task->task_flags & TF_ACTIVE) { - task->task_flags |= TF_REQUEST_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); - - pr_debug("Task %p waiting to complete\n", task); - wait_for_completion(&task->task_stop_comp); - pr_debug("Task %p stopped successfully\n", task); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - atomic_dec(&cmd->t_task_cdbs_left); - task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); - } else { + if (!target_stop_task(task, &flags)) { pr_debug("Task %p - did nothing\n", task); ret++; } - - __transport_stop_task_timer(task, &flags); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 9c7e32870f87..1ba5835426fc 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -171,6 +171,7 @@ extern int transport_generic_handle_data(struct se_cmd *); extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); extern void transport_generic_free_cmd_intr(struct se_cmd *); +extern bool target_stop_task(struct se_task *task, unsigned long *flags); extern void __transport_stop_task_timer(struct se_task *, unsigned long *); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); From e99d48a62bfc6e64548e0d5085240c5024eca471 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:47 -0400 Subject: [PATCH 47/62] target: remove TF_TIMER_STOP TF_TIMER_STOP is useless as it only helps to mitigate a tiny race during deleting the timer. But given that we have cleared TF_ACTIVE at this point we already have another mitigation a few lines down the function. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 6 ------ include/target/target_core_base.h | 1 - 2 files changed, 7 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6dab8198ace8..a36f4451e5db 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2066,10 +2066,6 @@ static void transport_task_timeout_handler(unsigned long data) pr_debug("transport task timeout fired! 
task: %p cmd: %p\n", task, cmd); spin_lock_irqsave(&cmd->t_state_lock, flags); - if (task->task_flags & TF_TIMER_STOP) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return; - } task->task_flags &= ~TF_TIMER_RUNNING; /* @@ -2153,14 +2149,12 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) if (!(task->task_flags & TF_TIMER_RUNNING)) return; - task->task_flags |= TF_TIMER_STOP; spin_unlock_irqrestore(&cmd->t_state_lock, *flags); del_timer_sync(&task->task_timer); spin_lock_irqsave(&cmd->t_state_lock, *flags); task->task_flags &= ~TF_TIMER_RUNNING; - task->task_flags &= ~TF_TIMER_STOP; } static void transport_stop_all_task_timers(struct se_cmd *cmd) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 77b3de846340..8f02a65de75f 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -78,7 +78,6 @@ enum se_task_flags { TF_TIMEOUT = (1 << 2), TF_REQUEST_STOP = (1 << 3), TF_TIMER_RUNNING = (1 << 4), - TF_TIMER_STOP = (1 << 5), }; /* Special transport agnostic struct se_cmd->t_states */ From cc5d0f0f61645ca43d9a7320ec2f268bad5016c5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:48 -0400 Subject: [PATCH 48/62] target: stop task timers earlier Currently we stop the timers for all tasks in a command fairly late during I/O completion, which is fairly pointless and requires all kinds of safety checks. Instead delete pending timers early on in transport_complete_task, thus ensuring no new timers fire after that. We take t_state_lock a bit later in that function, thus making sure currently running timers are out of the critical section. To be completely sure the timer has finished we also add another del_timer_sync call when freeing the task. This also allows removing TF_TIMER_RUNNING as it would be equivalent to TF_ACTIVE now.
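The reasoning rests on the split between del_timer() and del_timer_sync(); a generic sketch of why the patch needs both calls (hypothetical struct and function names, standard kernel timer API):

struct my_task {
	struct timer_list	timer;
	spinlock_t		lock;
	bool			done;
};

static void example_complete(struct my_task *t)
{
	unsigned long flags;

	/* Stops a pending timer, but does not wait for a handler that is
	 * already running on another CPU; cheap, and safe near the lock. */
	del_timer(&t->timer);

	spin_lock_irqsave(&t->lock, flags);
	t->done = true;
	spin_unlock_irqrestore(&t->lock, flags);
}

static void example_free(struct my_task *t)
{
	/* The plain del_timer() above may have raced with an expiring timer,
	 * so wait for any handler to finish before the memory goes away.
	 * Must not be called while holding a lock the handler takes. */
	del_timer_sync(&t->timer);
	kfree(t);
}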
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 64 ++++++-------------------- include/target/target_core_base.h | 1 - include/target/target_core_transport.h | 1 - 3 files changed, 14 insertions(+), 52 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index a36f4451e5db..19ea1c38d70a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -84,7 +84,6 @@ static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_stop_all_task_timers(struct se_cmd *cmd); int init_se_kmem_caches(void) { @@ -712,6 +711,8 @@ void transport_complete_task(struct se_task *task, int success) if (dev) atomic_inc(&dev->depth_left); + del_timer(&task->task_timer); + spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags &= ~TF_ACTIVE; @@ -1507,6 +1508,7 @@ transport_generic_get_task(struct se_cmd *cmd, INIT_LIST_HEAD(&task->t_list); INIT_LIST_HEAD(&task->t_execute_list); INIT_LIST_HEAD(&task->t_state_list); + init_timer(&task->task_timer); init_completion(&task->task_stop_comp); task->task_se_cmd = cmd; task->task_data_direction = data_direction; @@ -1776,6 +1778,7 @@ bool target_stop_task(struct se_task *task, unsigned long *flags) spin_unlock_irqrestore(&cmd->t_state_lock, *flags); pr_debug("Task %p waiting to complete\n", task); + del_timer_sync(&task->task_timer); wait_for_completion(&task->task_stop_comp); pr_debug("Task %p stopped successfully\n", task); @@ -1785,7 +1788,6 @@ bool target_stop_task(struct se_task *task, unsigned long *flags) was_active = true; } - __transport_stop_task_timer(task, flags); return was_active; } @@ -1860,8 +1862,6 @@ static void transport_generic_request_failure( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); - transport_stop_all_task_timers(cmd); - if (dev) atomic_inc(&dev->depth_left); /* @@ -2066,7 +2066,6 @@ static void transport_task_timeout_handler(unsigned long data) pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); spin_lock_irqsave(&cmd->t_state_lock, flags); - task->task_flags &= ~TF_TIMER_RUNNING; /* * Determine if transport_complete_task() has already been called. @@ -2109,16 +2108,11 @@ static void transport_task_timeout_handler(unsigned long data) transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false); } -/* - * Called with cmd->t_state_lock held. - */ static void transport_start_task_timer(struct se_task *task) { struct se_device *dev = task->task_se_cmd->se_dev; int timeout; - if (task->task_flags & TF_TIMER_RUNNING) - return; /* * If the task_timeout is disabled, exit now. */ @@ -2126,47 +2120,10 @@ static void transport_start_task_timer(struct se_task *task) if (!timeout) return; - init_timer(&task->task_timer); task->task_timer.expires = (get_jiffies_64() + timeout * HZ); task->task_timer.data = (unsigned long) task; task->task_timer.function = transport_task_timeout_handler; - - task->task_flags |= TF_TIMER_RUNNING; add_timer(&task->task_timer); -#if 0 - pr_debug("Starting task timer for cmd: %p task: %p seconds:" - " %d\n", task->task_se_cmd, task, timeout); -#endif -} - -/* - * Called with spin_lock_irq(&cmd->t_state_lock) held. 
- */ -void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) -{ - struct se_cmd *cmd = task->task_se_cmd; - - if (!(task->task_flags & TF_TIMER_RUNNING)) - return; - - spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - - del_timer_sync(&task->task_timer); - - spin_lock_irqsave(&cmd->t_state_lock, *flags); - task->task_flags &= ~TF_TIMER_RUNNING; -} - -static void transport_stop_all_task_timers(struct se_cmd *cmd) -{ - struct se_task *task = NULL, *task_tmp; - unsigned long flags; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_for_each_entry_safe(task, task_tmp, - &cmd->t_task_list, t_list) - __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline int transport_tcq_window_closed(struct se_device *dev) @@ -2365,6 +2322,7 @@ check_depth: spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags &= ~TF_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); + del_timer_sync(&task->task_timer); atomic_set(&cmd->transport_sent, 0); transport_stop_tasks_for_cmd(cmd); transport_generic_request_failure(cmd, dev, 0, 1); @@ -2403,6 +2361,7 @@ check_depth: spin_lock_irqsave(&cmd->t_state_lock, flags); task->task_flags &= ~TF_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); + del_timer_sync(&task->task_timer); atomic_set(&cmd->transport_sent, 0); transport_stop_tasks_for_cmd(cmd); transport_generic_request_failure(cmd, dev, 0, 1); @@ -3431,7 +3390,6 @@ static void transport_complete_qf(struct se_cmd *cmd) { int ret = 0; - transport_stop_all_task_timers(cmd); if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); @@ -3601,6 +3559,14 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) while (!list_empty(&dispose_list)) { task = list_first_entry(&dispose_list, struct se_task, t_list); + /* + * We already cancelled all pending timers in + * transport_complete_task, but that was just a pure del_timer, + * so do a full del_timer_sync here to make sure any handler + * that was running at that point has finished execution. 
+ */ + del_timer_sync(&task->task_timer); + kfree(task->task_sg_bidi); kfree(task->task_sg); @@ -4820,7 +4786,6 @@ get_cmd: transport_generic_process_write(cmd); break; case TRANSPORT_COMPLETE_OK: - transport_stop_all_task_timers(cmd); transport_generic_complete_ok(cmd); break; case TRANSPORT_REMOVE: @@ -4836,7 +4801,6 @@ get_cmd: transport_generic_request_failure(cmd, NULL, 1, 1); break; case TRANSPORT_COMPLETE_TIMEOUT: - transport_stop_all_task_timers(cmd); transport_generic_request_timeout(cmd); break; case TRANSPORT_COMPLETE_QF_WP: diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 8f02a65de75f..027193650749 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -77,7 +77,6 @@ enum se_task_flags { TF_SENT = (1 << 1), TF_TIMEOUT = (1 << 2), TF_REQUEST_STOP = (1 << 3), - TF_TIMER_RUNNING = (1 << 4), }; /* Special transport agnostic struct se_cmd->t_states */ diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 1ba5835426fc..c8538c5d3fbf 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -172,7 +172,6 @@ extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); extern void transport_generic_free_cmd_intr(struct se_cmd *); extern bool target_stop_task(struct se_task *task, unsigned long *flags); -extern void __transport_stop_task_timer(struct se_task *, unsigned long *); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); extern int transport_clear_lun_from_sessions(struct se_lun *); From 4499dda85890e6726def812febaab5dc064cc920 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:49 -0400 Subject: [PATCH 49/62] target: move depth_left manipulation out of transport_generic_request_failure We only need to decrement dev->depth_left if failing a command from __transport_execute_tasks. Instead of doing it first thing in transport_generic_request_failure and requiring a pseudo-flag argument for it just opencode the decrement in the two callers (which should be factored into a single one anyway) Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 19ea1c38d70a..53d332c87dcf 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1646,8 +1646,8 @@ int transport_generic_allocate_tasks( } EXPORT_SYMBOL(transport_generic_allocate_tasks); -static void transport_generic_request_failure(struct se_cmd *, - struct se_device *, int, int); +static void transport_generic_request_failure(struct se_cmd *, int, int); + /* * Used by fabric module frontends to queue tasks directly. 
* Many only be used from process context only @@ -1689,7 +1689,7 @@ int transport_handle_cdb_direct( return 0; else if (ret < 0) { cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, NULL, 0, + transport_generic_request_failure(cmd, 0, (cmd->data_direction != DMA_TO_DEVICE)); } return 0; @@ -1837,7 +1837,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ static void transport_generic_request_failure( struct se_cmd *cmd, - struct se_device *dev, int complete, int sc) { @@ -1862,8 +1861,6 @@ static void transport_generic_request_failure( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); - if (dev) - atomic_inc(&dev->depth_left); /* * For SAM Task Attribute emulation for failed struct se_cmd */ @@ -2230,7 +2227,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, NULL, 0, 1); + transport_generic_request_failure(cmd, 0, 1); return 0; } @@ -2325,7 +2322,8 @@ check_depth: del_timer_sync(&task->task_timer); atomic_set(&cmd->transport_sent, 0); transport_stop_tasks_for_cmd(cmd); - transport_generic_request_failure(cmd, dev, 0, 1); + atomic_inc(&dev->depth_left); + transport_generic_request_failure(cmd, 0, 1); goto check_depth; } /* @@ -2364,7 +2362,8 @@ check_depth: del_timer_sync(&task->task_timer); atomic_set(&cmd->transport_sent, 0); transport_stop_tasks_for_cmd(cmd); - transport_generic_request_failure(cmd, dev, 0, 1); + atomic_inc(&dev->depth_left); + transport_generic_request_failure(cmd, 0, 1); } } @@ -4767,7 +4766,7 @@ get_cmd: ret = cmd->se_tfo->new_cmd_map(cmd); if (ret < 0) { cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, NULL, + transport_generic_request_failure(cmd, 0, (cmd->data_direction != DMA_TO_DEVICE)); break; @@ -4777,7 +4776,7 @@ get_cmd: break; else if (ret < 0) { cmd->transport_error_status = ret; - transport_generic_request_failure(cmd, NULL, + transport_generic_request_failure(cmd, 0, (cmd->data_direction != DMA_TO_DEVICE)); } @@ -4798,7 +4797,7 @@ get_cmd: transport_generic_do_tmr(cmd); break; case TRANSPORT_COMPLETE_FAILURE: - transport_generic_request_failure(cmd, NULL, 1, 1); + transport_generic_request_failure(cmd, 1, 1); break; case TRANSPORT_COMPLETE_TIMEOUT: transport_generic_request_timeout(cmd); From bfaf40ada2e15bc972cab4cd5452a88720e30647 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:50 -0400 Subject: [PATCH 50/62] target: remove the TRANSPORT_REMOVE state We never queue an command with this state, and only set it in a completely bogus place in tcm_fc. 
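For reference, once the bogus state assignment is dropped, the exchange-error path in ft_recv_seq() simply gives up the sequence and frees the command; condensed from the tfc_cmd.c hunk below, it reads:

	if (IS_ERR(fp)) {
		/* XXX need to find cmd if queued */
		cmd->seq = NULL;
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		return;
	}
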
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 3 --- drivers/target/tcm_fc/tfc_cmd.c | 1 - include/target/target_core_base.h | 1 - 3 files changed, 5 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 53d332c87dcf..0d055f08044e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4787,9 +4787,6 @@ get_cmd: case TRANSPORT_COMPLETE_OK: transport_generic_complete_ok(cmd); break; - case TRANSPORT_REMOVE: - transport_put_cmd(cmd); - break; case TRANSPORT_FREE_CMD_INTR: transport_generic_free_cmd(cmd, 0); break; diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 55a278ed1111..6195026cc7b0 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -267,7 +267,6 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) if (IS_ERR(fp)) { /* XXX need to find cmd if queued */ - cmd->se_cmd.t_state = TRANSPORT_REMOVE; cmd->seq = NULL; transport_generic_free_cmd(&cmd->se_cmd, 0); return; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 027193650749..ced065c1428d 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -95,7 +95,6 @@ enum transport_state_table { TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_ISTATE_PROCESSED = 12, TRANSPORT_KILL = 13, - TRANSPORT_REMOVE = 14, TRANSPORT_FREE = 15, TRANSPORT_NEW_CMD_MAP = 16, TRANSPORT_FREE_CMD_INTR = 17, From f2da9dbdb54f2e9fa00dd01af6ff2ab06b4d90b7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:51 -0400 Subject: [PATCH 51/62] target: remove TRANSPORT_DEFERRED_CMD state We never check for this state, and it makes testing for a completed state much harder given that it overrides the existing state. Also remove the unused deferred_t_state which is related to it. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 6 +++--- drivers/target/target_core_transport.c | 22 ++++++++-------------- include/target/target_core_base.h | 3 --- 3 files changed, 11 insertions(+), 20 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 4e963da74a0d..532ce317406a 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -237,12 +237,12 @@ static void core_tmr_drain_task_list( cmd = task->task_se_cmd; pr_debug("LUN_RESET: %s cmd: %p task: %p" - " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" - "def_t_state: %d/%d cdb: 0x%02x\n", + " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" + "cdb: 0x%02x\n", (preempt_and_abort_list) ? 
"Preempt" : "", cmd, task, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, cmd->t_task_cdb[0]); + cmd->t_task_cdb[0]); pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0d055f08044e..87beae6c76a0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -478,8 +478,6 @@ static int transport_cmd_check_stop( " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - cmd->deferred_t_state = cmd->t_state; - cmd->t_state = TRANSPORT_DEFERRED_CMD; atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); @@ -497,8 +495,6 @@ static int transport_cmd_check_stop( " TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); - cmd->deferred_t_state = cmd->t_state; - cmd->t_state = TRANSPORT_DEFERRED_CMD; if (transport_off == 2) transport_all_task_dev_remove_state(cmd); @@ -1845,10 +1841,9 @@ static void transport_generic_request_failure( pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->t_task_cdb[0]); - pr_debug("-----[ i_state: %d t_state/def_t_state:" - " %d/%d transport_error_status: %d\n", + pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n", cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, cmd->deferred_t_state, + cmd->t_state, cmd->transport_error_status); pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" @@ -4404,10 +4399,9 @@ void transport_wait_for_tasks(struct se_cmd *cmd) atomic_set(&cmd->t_transport_stop, 1); pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" - " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" - " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state); + " i_state: %d, t_state: %d, t_transport_stop = TRUE\n", + cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -4806,9 +4800,9 @@ get_cmd: transport_complete_qf(cmd); break; default: - pr_err("Unknown t_state: %d deferred_t_state:" - " %d for ITT: 0x%08x i_state: %d on SE LUN:" - " %u\n", cmd->t_state, cmd->deferred_t_state, + pr_err("Unknown t_state: %d for ITT: 0x%08x " + "i_state: %d on SE LUN: %u\n", + cmd->t_state, cmd->se_tfo->get_task_tag(cmd), cmd->se_tfo->get_cmd_state(cmd), cmd->se_lun->unpacked_lun); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index ced065c1428d..6e2dc3ebff37 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -83,7 +83,6 @@ enum se_task_flags { enum transport_state_table { TRANSPORT_NO_STATE = 0, TRANSPORT_NEW_CMD = 1, - TRANSPORT_DEFERRED_CMD = 2, TRANSPORT_WRITE_PENDING = 3, TRANSPORT_PROCESS_WRITE = 4, TRANSPORT_PROCESSING = 5, @@ -434,8 +433,6 @@ struct se_cmd { int sam_task_attr; /* Transport protocol dependent state, see transport_state_table */ enum transport_state_table t_state; - /* Transport protocol dependent state for out of order CmdSNs */ - int deferred_t_state; /* Transport specific error status */ int transport_error_status; /* See se_cmd_flags_table */ From 59aaad1ec44d9a77c32b873b001f31c5af47fcc7 Mon Sep 17 
00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:52 -0400 Subject: [PATCH 52/62] target: remove unused TRANSPORT_ states Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- include/target/target_core_base.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 6e2dc3ebff37..07104bf0a9c8 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -90,11 +90,7 @@ enum transport_state_table { TRANSPORT_COMPLETE_FAILURE = 7, TRANSPORT_COMPLETE_TIMEOUT = 8, TRANSPORT_PROCESS_TMR = 9, - TRANSPORT_TMR_COMPLETE = 10, TRANSPORT_ISTATE_PROCESSING = 11, - TRANSPORT_ISTATE_PROCESSED = 12, - TRANSPORT_KILL = 13, - TRANSPORT_FREE = 15, TRANSPORT_NEW_CMD_MAP = 16, TRANSPORT_FREE_CMD_INTR = 17, TRANSPORT_COMPLETE_QF_WP = 18, From 35e0e757537b9239172e35db773dd062727fd612 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 17 Oct 2011 13:56:53 -0400 Subject: [PATCH 53/62] target: use a workqueue for I/O completions Instead of abusing the target processing thread for offloading I/O completion in the backends to user context add a new workqueue. This means completions can be processed as fast as available CPU time allows it, including in parallel with other completions and more importantly I/O submission or QUEUE FULL retries. This should give much better performance especially on loaded systems. As a fallout we can merge all the completed states into a single one. On the downside this change complicates lun reset handling a bit by requiring us to cancel a work item only for those states that have it initialized. The alternative would be to either always initialize the work item to a dummy handler, or always use the same handler and do a switch on the state. The long term solution will be a flag that says that the command has an initialized work item, but that's only going to be useful once we have more users. Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_tmr.c | 10 ++ drivers/target/target_core_transport.c | 165 +++++++++++++------------ include/target/target_core_base.h | 6 +- 3 files changed, 98 insertions(+), 83 deletions(-) diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 532ce317406a..570b144a1edb 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -255,6 +255,16 @@ static void core_tmr_drain_task_list( atomic_read(&cmd->t_transport_stop), atomic_read(&cmd->t_transport_sent)); + /* + * If the command may be queued onto a workqueue cancel it now. + * + * This is equivalent to removal from the execute queue in the + * loop above, but we do it down here given that + * cancel_work_sync may block. 
+ */ + if (cmd->t_state == TRANSPORT_COMPLETE) + cancel_work_sync(&cmd->work); + spin_lock_irqsave(&cmd->t_state_lock, flags); target_stop_task(task, &flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 87beae6c76a0..774ff00b1110 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -58,6 +58,7 @@ static int sub_api_initialized; +static struct workqueue_struct *target_completion_wq; static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; @@ -84,6 +85,8 @@ static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); +static void transport_generic_request_failure(struct se_cmd *, int, int); +static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) { @@ -99,7 +102,7 @@ int init_se_kmem_caches(void) if (!se_tmr_req_cache) { pr_err("kmem_cache_create() for struct se_tmr_req" " failed\n"); - goto out; + goto out_free_cmd_cache; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), @@ -107,14 +110,14 @@ int init_se_kmem_caches(void) if (!se_sess_cache) { pr_err("kmem_cache_create() for struct se_session" " failed\n"); - goto out; + goto out_free_tmr_req_cache; } se_ua_cache = kmem_cache_create("se_ua_cache", sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL); if (!se_ua_cache) { pr_err("kmem_cache_create() for struct se_ua failed\n"); - goto out; + goto out_free_sess_cache; } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), @@ -122,7 +125,7 @@ int init_se_kmem_caches(void) if (!t10_pr_reg_cache) { pr_err("kmem_cache_create() for struct t10_pr_registration" " failed\n"); - goto out; + goto out_free_ua_cache; } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), @@ -130,7 +133,7 @@ int init_se_kmem_caches(void) if (!t10_alua_lu_gp_cache) { pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" " failed\n"); - goto out; + goto out_free_pr_reg_cache; } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", sizeof(struct t10_alua_lu_gp_member), @@ -138,7 +141,7 @@ int init_se_kmem_caches(void) if (!t10_alua_lu_gp_mem_cache) { pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" "cache failed\n"); - goto out; + goto out_free_lu_gp_cache; } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", sizeof(struct t10_alua_tg_pt_gp), @@ -146,7 +149,7 @@ int init_se_kmem_caches(void) if (!t10_alua_tg_pt_gp_cache) { pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "cache failed\n"); - goto out; + goto out_free_lu_gp_mem_cache; } t10_alua_tg_pt_gp_mem_cache = kmem_cache_create( "t10_alua_tg_pt_gp_mem_cache", @@ -156,34 +159,41 @@ int init_se_kmem_caches(void) if (!t10_alua_tg_pt_gp_mem_cache) { pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "mem_t failed\n"); - goto out; + goto out_free_tg_pt_gp_cache; } + target_completion_wq = alloc_workqueue("target_completion", + WQ_MEM_RECLAIM, 0); + if (!target_completion_wq) + goto out_free_tg_pt_gp_mem_cache; + return 0; + +out_free_tg_pt_gp_mem_cache: + kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); +out_free_tg_pt_gp_cache: + 
kmem_cache_destroy(t10_alua_tg_pt_gp_cache); +out_free_lu_gp_mem_cache: + kmem_cache_destroy(t10_alua_lu_gp_mem_cache); +out_free_lu_gp_cache: + kmem_cache_destroy(t10_alua_lu_gp_cache); +out_free_pr_reg_cache: + kmem_cache_destroy(t10_pr_reg_cache); +out_free_ua_cache: + kmem_cache_destroy(se_ua_cache); +out_free_sess_cache: + kmem_cache_destroy(se_sess_cache); +out_free_tmr_req_cache: + kmem_cache_destroy(se_tmr_req_cache); +out_free_cmd_cache: + kmem_cache_destroy(se_cmd_cache); out: - if (se_cmd_cache) - kmem_cache_destroy(se_cmd_cache); - if (se_tmr_req_cache) - kmem_cache_destroy(se_tmr_req_cache); - if (se_sess_cache) - kmem_cache_destroy(se_sess_cache); - if (se_ua_cache) - kmem_cache_destroy(se_ua_cache); - if (t10_pr_reg_cache) - kmem_cache_destroy(t10_pr_reg_cache); - if (t10_alua_lu_gp_cache) - kmem_cache_destroy(t10_alua_lu_gp_cache); - if (t10_alua_lu_gp_mem_cache) - kmem_cache_destroy(t10_alua_lu_gp_mem_cache); - if (t10_alua_tg_pt_gp_cache) - kmem_cache_destroy(t10_alua_tg_pt_gp_cache); - if (t10_alua_tg_pt_gp_mem_cache) - kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); return -ENOMEM; } void release_se_kmem_caches(void) { + destroy_workqueue(target_completion_wq); kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); @@ -689,6 +699,33 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) } EXPORT_SYMBOL(transport_complete_sync_cache); +static void target_complete_timeout_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + unsigned long flags; + + /* + * Reset cmd->t_se_count to allow transport_put_cmd() + * to allow last call to free memory resources. + */ + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_timeout) > 1) { + int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); + + atomic_sub(tmp, &cmd->t_se_count); + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + transport_put_cmd(cmd); +} + +static void target_complete_failure_work(struct work_struct *work) +{ + struct se_cmd *cmd = container_of(work, struct se_cmd, work); + + transport_generic_request_failure(cmd, 1, 1); +} + /* transport_complete_task(): * * Called from interrupt and non interrupt context depending @@ -698,7 +735,6 @@ void transport_complete_task(struct se_task *task, int success) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - int t_state; unsigned long flags; #if 0 pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, @@ -749,17 +785,12 @@ void transport_complete_task(struct se_task *task, int success) * the processing thread. */ if (task->task_flags & TF_TIMEOUT) { - if (!atomic_dec_and_test( - &cmd->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&cmd->t_state_lock, - flags); + if (!atomic_dec_and_test(&cmd->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - transport_add_cmd_to_queue(cmd, t_state, false); - return; + INIT_WORK(&cmd->work, target_complete_timeout_work); + goto out_queue; } atomic_dec(&cmd->t_task_cdbs_timeout_left); @@ -769,28 +800,29 @@ void transport_complete_task(struct se_task *task, int success) * device queue depending upon int success. 
*/ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { - if (!success) - cmd->t_tasks_failed = 1; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } if (!success || cmd->t_tasks_failed) { - t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; cmd->transport_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } + INIT_WORK(&cmd->work, target_complete_failure_work); } else { atomic_set(&cmd->t_transport_complete, 1); - t_state = TRANSPORT_COMPLETE_OK; + INIT_WORK(&cmd->work, target_complete_ok_work); } + +out_queue: + cmd->t_state = TRANSPORT_COMPLETE; + atomic_set(&cmd->t_transport_active, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_add_cmd_to_queue(cmd, t_state, false); + queue_work(target_completion_wq, &cmd->work); } EXPORT_SYMBOL(transport_complete_task); @@ -1642,8 +1674,6 @@ int transport_generic_allocate_tasks( } EXPORT_SYMBOL(transport_generic_allocate_tasks); -static void transport_generic_request_failure(struct se_cmd *, int, int); - /* * Used by fabric module frontends to queue tasks directly. * Many only be used from process context only @@ -1985,25 +2015,6 @@ static void transport_direct_request_timeout(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); } -static void transport_generic_request_timeout(struct se_cmd *cmd) -{ - unsigned long flags; - - /* - * Reset cmd->t_se_count to allow transport_put_cmd() - * to allow last call to free memory resources. - */ - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (atomic_read(&cmd->t_transport_timeout) > 1) { - int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - - atomic_sub(tmp, &cmd->t_se_count); - } - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - transport_put_cmd(cmd); -} - static inline u32 transport_lba_21(unsigned char *cdb) { return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; @@ -2094,10 +2105,12 @@ static void transport_task_timeout_handler(unsigned long data) pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); - cmd->t_state = TRANSPORT_COMPLETE_FAILURE; + INIT_WORK(&cmd->work, target_complete_failure_work); + cmd->t_state = TRANSPORT_COMPLETE; + atomic_set(&cmd->t_transport_active, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE, false); + queue_work(target_completion_wq, &cmd->work); } static void transport_start_task_timer(struct se_task *task) @@ -2879,7 +2892,7 @@ static int transport_generic_cmd_sequencer( if (passthrough) break; /* - * Setup BIDI XOR callback to be run during transport_generic_complete_ok() + * Setup BIDI XOR callback to be run after I/O completion. */ cmd->transport_complete_callback = &transport_xor_callback; cmd->t_tasks_fua = (cdb[1] & 0x8); @@ -2913,8 +2926,8 @@ static int transport_generic_cmd_sequencer( break; /* - * Setup BIDI XOR callback to be run during - * transport_generic_complete_ok() + * Setup BIDI XOR callback to be run during after I/O + * completion. */ cmd->transport_complete_callback = &transport_xor_callback; cmd->t_tasks_fua = (cdb[10] & 0x8); @@ -3311,8 +3324,7 @@ out_invalid_cdb_field: } /* - * Called from transport_generic_complete_ok() and - * transport_generic_request_failure() to determine which dormant/delayed + * Called from I/O completion to determine which dormant/delayed * and ordered cmds need to have their tasks added to the execution queue. 
*/ static void transport_complete_task_attr(struct se_cmd *cmd) @@ -3433,9 +3445,11 @@ static void transport_handle_queue_full( schedule_work(&cmd->se_dev->qf_work_queue); } -static void transport_generic_complete_ok(struct se_cmd *cmd) +static void target_complete_ok_work(struct work_struct *work) { + struct se_cmd *cmd = container_of(work, struct se_cmd, work); int reason = 0, ret; + /* * Check if we need to move delayed/dormant tasks from cmds on the * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task @@ -4778,21 +4792,12 @@ get_cmd: case TRANSPORT_PROCESS_WRITE: transport_generic_process_write(cmd); break; - case TRANSPORT_COMPLETE_OK: - transport_generic_complete_ok(cmd); - break; case TRANSPORT_FREE_CMD_INTR: transport_generic_free_cmd(cmd, 0); break; case TRANSPORT_PROCESS_TMR: transport_generic_do_tmr(cmd); break; - case TRANSPORT_COMPLETE_FAILURE: - transport_generic_request_failure(cmd, 1, 1); - break; - case TRANSPORT_COMPLETE_TIMEOUT: - transport_generic_request_timeout(cmd); - break; case TRANSPORT_COMPLETE_QF_WP: transport_write_pending_qf(cmd); break; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 07104bf0a9c8..8e2c83d4fbad 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -86,9 +86,7 @@ enum transport_state_table { TRANSPORT_WRITE_PENDING = 3, TRANSPORT_PROCESS_WRITE = 4, TRANSPORT_PROCESSING = 5, - TRANSPORT_COMPLETE_OK = 6, - TRANSPORT_COMPLETE_FAILURE = 7, - TRANSPORT_COMPLETE_TIMEOUT = 8, + TRANSPORT_COMPLETE = 6, TRANSPORT_PROCESS_TMR = 9, TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_NEW_CMD_MAP = 16, @@ -492,6 +490,8 @@ struct se_cmd { struct completion transport_lun_stop_comp; struct scatterlist *t_tasks_sg_chained; + struct work_struct work; + /* * Used for pre-registered fabric SGL passthrough WRITE and READ * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop From dbc5623eb2898f5b5dcdc0b16077bb3f58629c78 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Oct 2011 01:03:54 -0700 Subject: [PATCH 54/62] target: transport_subsystem_check_init cleanups Remove the now unnecessary extra call to transport_subsystem_check_init() in target_core_register_fabric(), and also merge transport_subsystem_reqmods() directly into transport_subsystem_check_init(). Reported-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 11 +---------- drivers/target/target_core_transport.c | 23 +++++------------------ include/target/target_core_transport.h | 2 +- 3 files changed, 7 insertions(+), 29 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index f37e2b9cbbd7..1511a2ff86d8 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -131,14 +131,6 @@ static struct config_group *target_core_register_fabric( pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); - /* - * Ensure that TCM subsystem plugins are loaded at this point for - * using the RAMDISK_DR virtual LUN 0 and all other struct se_port - * LUN symlinks. - */ - if (transport_subsystem_check_init() < 0) - return ERR_PTR(-EINVAL); - /* * Below are some hardcoded request_module() calls to automatically * local fabric modules when the following is called: @@ -3079,8 +3071,7 @@ static struct config_group *target_core_call_addhbatotarget( /* * Load up TCM subsystem plugins if they have not already been loaded. 
*/ - if (transport_subsystem_check_init() < 0) - return ERR_PTR(-EINVAL); + transport_subsystem_check_init(); hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0); if (IS_ERR(hba)) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 774ff00b1110..06305beb61dd 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -234,10 +234,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj) } EXPORT_SYMBOL(transport_init_queue_obj); -static int transport_subsystem_reqmods(void) +void transport_subsystem_check_init(void) { int ret; + if (sub_api_initialized) + return; + ret = request_module("target_core_iblock"); if (ret != 0) pr_err("Unable to load target_core_iblock\n"); @@ -254,24 +257,8 @@ static int transport_subsystem_reqmods(void) if (ret != 0) pr_err("Unable to load target_core_stgt\n"); - return 0; -} - -int transport_subsystem_check_init(void) -{ - int ret; - - if (sub_api_initialized) - return 0; - /* - * Request the loading of known TCM subsystem plugins.. - */ - ret = transport_subsystem_reqmods(); - if (ret < 0) - return ret; - sub_api_initialized = 1; - return 0; + return; } struct se_session *transport_init_session(void) diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c8538c5d3fbf..32c586346c0e 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -113,7 +113,7 @@ extern int init_se_kmem_caches(void); extern void release_se_kmem_caches(void); extern u32 scsi_get_new_index(scsi_index_t); extern void transport_init_queue_obj(struct se_queue_obj *); -extern int transport_subsystem_check_init(void); +extern void transport_subsystem_check_init(void); extern int transport_subsystem_register(struct se_subsystem_api *); extern void transport_subsystem_release(struct se_subsystem_api *); extern void transport_load_plugins(void); From 7c1c6af37af69a4ac4a6485c968496d257245b5d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Oct 2011 06:57:00 -0400 Subject: [PATCH 55/62] target: remove the task_sg_bidi field se_task and pSCSI BIDI support This field is never used given that BIDI handling happens at the command and not the task level. Remove it and the dead code in pscsi that tries to work on it. It also prevents pSCSI passthrough for the two currently enabled BIDI commands now that task->task_sg_bidi support has been removed. Signed-off-by: Christoph Hellwig Cc: Boaz Harrosh Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_pscsi.c | 32 -------------------------- drivers/target/target_core_transport.c | 18 ++++----------- include/target/target_core_base.h | 1 - 3 files changed, 5 insertions(+), 46 deletions(-) diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 936b9fec4cca..dad671dee9e9 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1095,32 +1095,6 @@ static int pscsi_do_task(struct se_task *task) pr_err("pSCSI: blk_make_request() failed\n"); goto fail; } - - if (task->task_sg_bidi) { - /* - * If present, set up the extra BIDI-COMMAND SCSI READ - * struct request and payload. - */ - ret = pscsi_map_sg(task, task->task_sg_bidi, &hbio); - if (ret < 0) { - /* XXX: free the main request? */ - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - - /* - * Setup the secondary pt->pscsi_req->next_rq used for the extra - * BIDI READ payload. 
- */ - req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, - hbio, GFP_KERNEL); - if (!req) { - pr_err("pSCSI: blk_make_request() failed for BIDI\n"); - /* XXX: free the main request? */ - goto fail; - } - - req->next_rq->cmd_type = REQ_TYPE_BLOCK_PC; - } } req->cmd_type = REQ_TYPE_BLOCK_PC; @@ -1240,12 +1214,6 @@ static void pscsi_req_done(struct request *req, int uptodate) pt->pscsi_resid = req->resid_len; pscsi_process_SAM_status(task, pt); - /* - * Release BIDI-READ if present - */ - if (req->next_rq != NULL) - __blk_put_request(req->q, req->next_rq); - __blk_put_request(req->q, req); } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 06305beb61dd..624d86ea083b 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2871,13 +2871,10 @@ static int transport_generic_cmd_sequencer( size = transport_get_size(sectors, cdb, cmd); cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - passthrough = (dev->transport->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - /* - * Skip the remaining assignments for TCM/PSCSI passthrough - */ - if (passthrough) - break; + + if (dev->transport->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV) + goto out_unsupported_cdb; /* * Setup BIDI XOR callback to be run after I/O completion. */ @@ -2906,12 +2903,8 @@ static int transport_generic_cmd_sequencer( cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; - /* - * Skip the remaining assignments for TCM/PSCSI passthrough - */ if (passthrough) - break; - + goto out_unsupported_cdb; /* * Setup BIDI XOR callback to be run during after I/O * completion. @@ -3562,7 +3555,6 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) */ del_timer_sync(&task->task_timer); - kfree(task->task_sg_bidi); kfree(task->task_sg); list_del(&task->t_list); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 8e2c83d4fbad..132266b15b7c 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -397,7 +397,6 @@ struct se_task { u32 task_size; struct se_cmd *task_se_cmd; struct scatterlist *task_sg; - struct scatterlist *task_sg_bidi; u32 task_sg_nents; u16 task_flags; u8 task_sense; From da0f7619913751d45fc3cda652789379f4f435fb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Oct 2011 06:57:01 -0400 Subject: [PATCH 56/62] target: merge transport_new_cmd_obj into transport_generic_new_cmd These are two fairly small functions, and merging them gives a much more readable control flow, and opportunities for more useful comments. It also moves all code related to resources allocation closer together and allows to remove a forward declaration for transport_allocate_tasks. 
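One concrete example of the more readable flow: both the BIDI task allocation failure and the data/control task allocation failure in the merged transport_generic_new_cmd() now unwind through a single goto out_fail label, instead of repeating the sense setup at each call site. Condensed from the hunk below:

out_fail:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -EINVAL;
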
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 129 ++++++++++--------------- 1 file changed, 53 insertions(+), 76 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 624d86ea083b..2fb6a2594429 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -77,10 +77,6 @@ static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_allocate_tasks(struct se_cmd *cmd, - unsigned long long starting_lba, - enum dma_data_direction data_direction, - struct scatterlist *sgl, unsigned int nents); static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd); @@ -3666,62 +3662,6 @@ int transport_generic_map_mem_to_cmd( } EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); -static int transport_new_cmd_obj(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - int set_counts = 1, rc, task_cdbs; - - /* - * Setup any BIDI READ tasks and memory from - * cmd->t_mem_bidi_list so the READ struct se_tasks - * are queued first for the non pSCSI passthrough case. - */ - if (cmd->t_bidi_data_sg && - (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { - rc = transport_allocate_tasks(cmd, - cmd->t_task_lba, - DMA_FROM_DEVICE, - cmd->t_bidi_data_sg, - cmd->t_bidi_data_nents); - if (rc <= 0) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return -EINVAL; - } - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); - set_counts = 0; - } - /* - * Setup the tasks and memory from cmd->t_mem_list - * Note for BIDI transfers this will contain the WRITE payload - */ - task_cdbs = transport_allocate_tasks(cmd, - cmd->t_task_lba, - cmd->data_direction, - cmd->t_data_sg, - cmd->t_data_nents); - if (task_cdbs <= 0) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return -EINVAL; - } - - if (set_counts) { - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); - } - - cmd->t_task_list_num = task_cdbs; - - atomic_set(&cmd->t_task_cdbs_left, task_cdbs); - atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); - atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); - return 0; -} - void *transport_kmap_first_data_page(struct se_cmd *cmd) { struct scatterlist *sg = cmd->t_data_sg; @@ -4000,17 +3940,16 @@ static u32 transport_allocate_tasks( } -/* transport_generic_new_cmd(): Called from transport_processing_thread() - * - * Allocate storage transport resources from a set of values predefined - * by transport_generic_cmd_sequencer() from the iSCSI Target RX process. - * Any non zero return here is treated as an "out of resource' op here. +/* + * Allocate any required ressources to execute the command, and either place + * it on the execution queue if possible. For writes we might not have the + * payload yet, thus notify the fabric via a call to ->write_pending instead. */ - /* - * Generate struct se_task(s) and/or their payloads for this CDB. 
- */ int transport_generic_new_cmd(struct se_cmd *cmd) { + struct se_device *dev = cmd->se_dev; + int task_cdbs; + int set_counts = 1; int ret = 0; /* @@ -4024,16 +3963,49 @@ int transport_generic_new_cmd(struct se_cmd *cmd) if (ret < 0) return ret; } + /* - * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for - * control or data CDB types, and perform the map to backend subsystem - * code from SGL memory allocated here by transport_generic_get_mem(), or - * via pre-existing SGL memory setup explictly by fabric module code with - * transport_generic_map_mem_to_cmd(). + * Setup any BIDI READ tasks and memory from + * cmd->t_mem_bidi_list so the READ struct se_tasks + * are queued first for the non pSCSI passthrough case. */ - ret = transport_new_cmd_obj(cmd); - if (ret < 0) - return ret; + if (cmd->t_bidi_data_sg && + (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { + ret = transport_allocate_tasks(cmd, + cmd->t_task_lba, + DMA_FROM_DEVICE, + cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + if (ret <= 0) + goto out_fail; + + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); + set_counts = 0; + } + /* + * Setup the tasks and memory from cmd->t_mem_list + * Note for BIDI transfers this will contain the WRITE payload + */ + task_cdbs = transport_allocate_tasks(cmd, + cmd->t_task_lba, + cmd->data_direction, + cmd->t_data_sg, + cmd->t_data_nents); + if (task_cdbs <= 0) + goto out_fail; + + if (set_counts) { + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); + } + + cmd->t_task_list_num = task_cdbs; + + atomic_set(&cmd->t_task_cdbs_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); + /* * For WRITEs, let the fabric know its buffer is ready.. * This WRITE struct se_cmd (and all of its associated struct se_task's) @@ -4051,6 +4023,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd) */ transport_execute_tasks(cmd); return 0; + +out_fail: + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; } EXPORT_SYMBOL(transport_generic_new_cmd); From 38b400676b5b33178a418bebc2b1af6c3fb0b380 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Oct 2011 06:57:02 -0400 Subject: [PATCH 57/62] target: remove transport_allocate_tasks There were only two callers, and one of them always wants the call to transport_allocate_data_tasks anyway. Also drop the constant lba argument to transport_allocate_data_tasks and move the variables inside it into the minimum required scope. 
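With the wrapper gone, the caller-side dispatch sits directly in transport_generic_new_cmd(), while the sector validity check moves into transport_allocate_data_tasks() itself. Condensed from the hunk below:

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		task_cdbs = transport_allocate_data_tasks(cmd,
				cmd->data_direction, cmd->t_data_sg,
				cmd->t_data_nents);
	} else {
		task_cdbs = transport_allocate_control_task(cmd);
	}
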
Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 81 ++++++++++---------------- 1 file changed, 32 insertions(+), 49 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 2fb6a2594429..8c8e62e2687f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3808,29 +3808,34 @@ EXPORT_SYMBOL(transport_do_task_sg_chain); /* * Break up cmd into chunks transport can handle */ -static int transport_allocate_data_tasks( - struct se_cmd *cmd, - unsigned long long lba, +static int +transport_allocate_data_tasks(struct se_cmd *cmd, enum dma_data_direction data_direction, - struct scatterlist *sgl, - unsigned int sgl_nents) + struct scatterlist *cmd_sg, unsigned int sgl_nents) { - struct se_task *task; struct se_device *dev = cmd->se_dev; - unsigned long flags; int task_count, i; - sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; - u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; - struct scatterlist *sg; - struct scatterlist *cmd_sg; + unsigned long long lba; + sector_t sectors, dev_max_sectors; + u32 sector_size; + + if (transport_cmd_get_valid_sectors(cmd) < 0) + return -EINVAL; + + dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; + sector_size = dev->se_sub_dev->se_dev_attrib.block_size; WARN_ON(cmd->data_length % sector_size); + + lba = cmd->t_task_lba; sectors = DIV_ROUND_UP(cmd->data_length, sector_size); task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); - cmd_sg = sgl; for (i = 0; i < task_count; i++) { + struct se_task *task; unsigned int task_size, task_sg_nents_padded; + struct scatterlist *sg; + unsigned long flags; int count; task = transport_generic_get_task(cmd, data_direction); @@ -3921,25 +3926,6 @@ transport_allocate_control_task(struct se_cmd *cmd) return 1; } -static u32 transport_allocate_tasks( - struct se_cmd *cmd, - unsigned long long lba, - enum dma_data_direction data_direction, - struct scatterlist *sgl, - unsigned int sgl_nents) -{ - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - if (transport_cmd_get_valid_sectors(cmd) < 0) - return -EINVAL; - - return transport_allocate_data_tasks(cmd, lba, data_direction, - sgl, sgl_nents); - } else - return transport_allocate_control_task(cmd); - -} - - /* * Allocate any required ressources to execute the command, and either place * it on the execution queue if possible. For writes we might not have the @@ -3965,17 +3951,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd) } /* - * Setup any BIDI READ tasks and memory from - * cmd->t_mem_bidi_list so the READ struct se_tasks - * are queued first for the non pSCSI passthrough case. + * For BIDI command set up the read tasks first. 
*/ if (cmd->t_bidi_data_sg && - (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { - ret = transport_allocate_tasks(cmd, - cmd->t_task_lba, - DMA_FROM_DEVICE, - cmd->t_bidi_data_sg, - cmd->t_bidi_data_nents); + dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { + BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); + + ret = transport_allocate_data_tasks(cmd, DMA_FROM_DEVICE, + cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); if (ret <= 0) goto out_fail; @@ -3983,15 +3966,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd) atomic_inc(&cmd->t_se_count); set_counts = 0; } - /* - * Setup the tasks and memory from cmd->t_mem_list - * Note for BIDI transfers this will contain the WRITE payload - */ - task_cdbs = transport_allocate_tasks(cmd, - cmd->t_task_lba, - cmd->data_direction, - cmd->t_data_sg, - cmd->t_data_nents); + + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + task_cdbs = transport_allocate_data_tasks(cmd, + cmd->data_direction, cmd->t_data_sg, + cmd->t_data_nents); + } else { + task_cdbs = transport_allocate_control_task(cmd); + } + if (task_cdbs <= 0) goto out_fail; From 9ac549873d35626cd6d7718691aaf4c55f2667a7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 22 Oct 2011 04:06:23 -0700 Subject: [PATCH 58/62] target: Fix BIDI t_task_cdb handling in transport_generic_new_cmd This patch fixes a bug for BIDI handling in transport_generic_new_cmd() where cmd->t_task_cdbs_left and Co. where not taking into account the extra task count generated during the first call to transport_allocate_data_tasks(). Cc: Christoph Hellwig Cc: Boaz Harrosh Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 8c8e62e2687f..ab4e3083d968 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3934,7 +3934,7 @@ transport_allocate_control_task(struct se_cmd *cmd) int transport_generic_new_cmd(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - int task_cdbs; + int task_cdbs, task_cdbs_bidi = 0; int set_counts = 1; int ret = 0; @@ -3957,9 +3957,10 @@ int transport_generic_new_cmd(struct se_cmd *cmd) dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); - ret = transport_allocate_data_tasks(cmd, DMA_FROM_DEVICE, - cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); - if (ret <= 0) + task_cdbs_bidi = transport_allocate_data_tasks(cmd, + DMA_FROM_DEVICE, cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + if (task_cdbs_bidi <= 0) goto out_fail; atomic_inc(&cmd->t_fe_count); @@ -3983,11 +3984,10 @@ int transport_generic_new_cmd(struct se_cmd *cmd) atomic_inc(&cmd->t_se_count); } - cmd->t_task_list_num = task_cdbs; - - atomic_set(&cmd->t_task_cdbs_left, task_cdbs); - atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); - atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); + cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); + atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); + atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); + atomic_set(&cmd->t_task_cdbs_timeout_left, cmd->t_task_list_num); /* * For WRITEs, let the fabric know its buffer is ready.. 
From af3f00c75949369d937f499f49118e879939724d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Oct 2011 06:57:03 -0400 Subject: [PATCH 59/62] target: re-use the command S/G list for single-task commands If we only have a single task per command (which at least in my testing is the by far most common case) we do not have to allocate a new per-task S/G list but can reuse the one from the command. (nab: Fix BIDI handling in transport_free_dev_tasks) Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 42 +++++++++++++++++++------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ab4e3083d968..ac048db21240 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3551,7 +3551,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) */ del_timer_sync(&task->task_timer); - kfree(task->task_sg); + if (task->task_sg != cmd->t_data_sg && + task->task_sg != cmd->t_bidi_data_sg) + kfree(task->task_sg); list_del(&task->t_list); @@ -3830,7 +3832,33 @@ transport_allocate_data_tasks(struct se_cmd *cmd, lba = cmd->t_task_lba; sectors = DIV_ROUND_UP(cmd->data_length, sector_size); task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); - + + /* + * If we need just a single task reuse the SG list in the command + * and avoid a lot of work. + */ + if (task_count == 1) { + struct se_task *task; + unsigned long flags; + + task = transport_generic_get_task(cmd, data_direction); + if (!task) + return -ENOMEM; + + task->task_sg = cmd_sg; + task->task_sg_nents = sgl_nents; + + task->task_lba = lba; + task->task_sectors = sectors; + task->task_size = task->task_sectors * sector_size; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + return task_count; + } + for (i = 0; i < task_count; i++) { struct se_task *task; unsigned int task_size, task_sg_nents_padded; @@ -3906,15 +3934,7 @@ transport_allocate_control_task(struct se_cmd *cmd) if (!task) return -ENOMEM; - task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, - GFP_KERNEL); - if (!task->task_sg) { - cmd->se_dev->transport->free_task(task); - return -ENOMEM; - } - - memcpy(task->task_sg, cmd->t_data_sg, - sizeof(struct scatterlist) * cmd->t_data_nents); + task->task_sg = cmd->t_data_sg; task->task_size = cmd->data_length; task->task_sg_nents = cmd->t_data_nents; From 415a090ade7e674018e3fa4255938e4c312339b3 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 23 Oct 2011 18:16:13 -0700 Subject: [PATCH 60/62] target: Fix incorrect transport_sent usage This patch converts target-core to use se_cmd->t_transport_sent instead of a duplicated se_cmd->transport_sent member in a handful of locations. It also updates iscsi_target to properly use ->t_transport_sent instead of it's own iscsi_cmd_t->transport_sent value that was not being assigned. 
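With the conversion applied, the iSCSI TMR reassign paths test the generic flag straight off the embedded se_cmd rather than a private counter that was never being set; condensed from the iscsi_target_tmr.c hunk below:

	if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {
		pr_debug("WRITE ITT: 0x%08x: t_state: %d"
			" never sent to transport\n",
			cmd->init_task_tag, cmd->se_cmd.t_state);
		/* remainder of the reassign handling is unchanged */
	}
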
Reported-by: Christoph Hellwig Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/iscsi/iscsi_target_core.h | 1 - drivers/target/iscsi/iscsi_target_tmr.c | 4 ++-- drivers/target/target_core_transport.c | 6 +++--- include/target/target_core_base.h | 1 - 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 84818d7a7057..3723d90d5ae5 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -428,7 +428,6 @@ struct iscsi_cmd { /* Number of times struct iscsi_cmd is present in immediate queue */ atomic_t immed_queue_count; atomic_t response_queue_count; - atomic_t transport_sent; spinlock_t datain_lock; spinlock_t dataout_timeout_lock; /* spinlock for protecting struct iscsi_cmd->i_state */ diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index c3617d8ff3ea..490207eacde9 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write( * so if we have received all DataOUT we can safety ignore Initiator. */ if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { - if (!atomic_read(&cmd->transport_sent)) { + if (!atomic_read(&cmd->se_cmd.t_transport_sent)) { pr_debug("WRITE ITT: 0x%08x: t_state: %d" " never sent to transport\n", cmd->init_task_tag, cmd->se_cmd.t_state); @@ -314,7 +314,7 @@ static int iscsit_task_reassign_complete_read( cmd->acked_data_sn = (tmr_req->exp_data_sn - 1); } - if (!atomic_read(&cmd->transport_sent)) { + if (!atomic_read(&cmd->se_cmd.t_transport_sent)) { pr_debug("READ ITT: 0x%08x: t_state: %d never sent to" " transport\n", cmd->init_task_tag, cmd->se_cmd.t_state); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ac048db21240..5027619552f0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2294,7 +2294,7 @@ check_depth: if (atomic_read(&cmd->t_task_cdbs_sent) == cmd->t_task_list_num) - atomic_set(&cmd->transport_sent, 1); + atomic_set(&cmd->t_transport_sent, 1); transport_start_task_timer(task); spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2311,7 +2311,7 @@ check_depth: task->task_flags &= ~TF_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); del_timer_sync(&task->task_timer); - atomic_set(&cmd->transport_sent, 0); + atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); atomic_inc(&dev->depth_left); transport_generic_request_failure(cmd, 0, 1); @@ -2351,7 +2351,7 @@ check_depth: task->task_flags &= ~TF_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); del_timer_sync(&task->task_timer); - atomic_set(&cmd->transport_sent, 0); + atomic_set(&cmd->t_transport_sent, 0); transport_stop_tasks_for_cmd(cmd); atomic_inc(&dev->depth_left); transport_generic_request_failure(cmd, 0, 1); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 132266b15b7c..d210f1fe9962 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -439,7 +439,6 @@ struct se_cmd { u32 orig_fe_lun; /* Persistent Reservation key */ u64 pr_res_key; - atomic_t transport_sent; /* Used for sense data */ void *sense_buffer; struct list_head se_delayed_node; From 2e982ab92dff057c639d4a43ccfa275be62f5e59 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 23 Oct 2011 18:46:36 -0700 Subject: [PATCH 
61/62] target: Remove legacy se_task->task_timer and associated logic This patch removes the legacy usage of se_task->task_timer and associated infrastructure that originally was used as a way to help manage buggy backend SCSI LLDs that in certain cases would never return back an outstanding task. This includes the removal of target_complete_timeout_work(), timeout logic from transport_complete_task(), transport_task_timeout_handler(), transport_start_task_timer(), the per device task_timeout configfs attribute, and all task_timeout associated structure members and defines in target_core_base.h This is being removed in preparation to make transport_complete_task() run in lock-less mode. Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_configfs.c | 4 - drivers/target/target_core_device.c | 15 --- drivers/target/target_core_transport.c | 156 ------------------------- include/target/target_core_base.h | 7 +- include/target/target_core_transport.h | 16 +-- 5 files changed, 4 insertions(+), 194 deletions(-) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 1511a2ff86d8..e0c1e8a8dd4e 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -716,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth); DEF_DEV_ATTRIB(queue_depth); SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); -DEF_DEV_ATTRIB(task_timeout); -SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR); - DEF_DEV_ATTRIB(max_unmap_lba_count); SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); @@ -752,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_optimal_sectors.attr, &target_core_dev_attrib_hw_queue_depth.attr, &target_core_dev_attrib_queue_depth.attr, - &target_core_dev_attrib_task_timeout.attr, &target_core_dev_attrib_max_unmap_lba_count.attr, &target_core_dev_attrib_max_unmap_block_desc_count.attr, &target_core_dev_attrib_unmap_granularity.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 81352b7f9130..f870c3bcfd82 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -914,21 +914,6 @@ void se_dev_set_default_attribs( dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; } -int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) -{ - if (task_timeout > DA_TASK_TIMEOUT_MAX) { - pr_err("dev[%p]: Passed task_timeout: %u larger then" - " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); - return -EINVAL; - } else { - dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; - pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", - dev, task_timeout); - } - - return 0; -} - int se_dev_set_max_unmap_lba_count( struct se_device *dev, u32 max_unmap_lba_count) diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 5027619552f0..d75255804481 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -75,7 +75,6 @@ static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev); -static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); static int transport_generic_get_mem(struct se_cmd *cmd); static void transport_put_cmd(struct se_cmd *cmd); @@ -682,26 +681,6 @@ void 
transport_complete_sync_cache(struct se_cmd *cmd, int good)
 }
 EXPORT_SYMBOL(transport_complete_sync_cache);
 
-static void target_complete_timeout_work(struct work_struct *work)
-{
-	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
-	unsigned long flags;
-
-	/*
-	 * Reset cmd->t_se_count to allow transport_put_cmd()
-	 * to allow last call to free memory resources.
-	 */
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (atomic_read(&cmd->t_transport_timeout) > 1) {
-		int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
-
-		atomic_sub(tmp, &cmd->t_se_count);
-	}
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	transport_put_cmd(cmd);
-}
-
 static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -726,8 +705,6 @@ void transport_complete_task(struct se_task *task, int success)
 	if (dev)
 		atomic_inc(&dev->depth_left);
 
-	del_timer(&task->task_timer);
-
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	task->task_flags &= ~TF_ACTIVE;
 
@@ -749,34 +726,10 @@ void transport_complete_task(struct se_task *task, int success)
 	 * to complete for an exception condition
 	 */
 	if (task->task_flags & TF_REQUEST_STOP) {
-		/*
-		 * Decrement cmd->t_se_count if this task had
-		 * previously thrown its timeout exception handler.
-		 */
-		if (task->task_flags & TF_TIMEOUT) {
-			atomic_dec(&cmd->t_se_count);
-			task->task_flags &= ~TF_TIMEOUT;
-		}
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
 		complete(&task->task_stop_comp);
 		return;
 	}
-	/*
-	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
-	 * left counter to determine when the struct se_cmd is ready to be queued to
-	 * the processing thread.
-	 */
-	if (task->task_flags & TF_TIMEOUT) {
-		if (!atomic_dec_and_test(&cmd->t_task_cdbs_timeout_left)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			return;
-		}
-		INIT_WORK(&cmd->work, target_complete_timeout_work);
-		goto out_queue;
-	}
-	atomic_dec(&cmd->t_task_cdbs_timeout_left);
-
 	/*
 	 * Decrement the outstanding t_task_cdbs_left count.  The last
 	 * struct se_task from struct se_cmd will complete itself into the
@@ -800,7 +753,6 @@ void transport_complete_task(struct se_task *task, int success)
 		INIT_WORK(&cmd->work, target_complete_ok_work);
 	}
 
-out_queue:
 	cmd->t_state = TRANSPORT_COMPLETE;
 	atomic_set(&cmd->t_transport_active, 1);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -1519,7 +1471,6 @@ transport_generic_get_task(struct se_cmd *cmd,
 	INIT_LIST_HEAD(&task->t_list);
 	INIT_LIST_HEAD(&task->t_execute_list);
 	INIT_LIST_HEAD(&task->t_state_list);
-	init_timer(&task->task_timer);
 	init_completion(&task->task_stop_comp);
 	task->task_se_cmd = cmd;
 	task->task_data_direction = data_direction;
@@ -1787,7 +1738,6 @@ bool target_stop_task(struct se_task *task, unsigned long *flags)
 	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 
 	pr_debug("Task %p waiting to complete\n", task);
-	del_timer_sync(&task->task_timer);
 	wait_for_completion(&task->task_stop_comp);
 	pr_debug("Task %p stopped successfully\n", task);
 
@@ -1876,7 +1826,6 @@ static void transport_generic_request_failure(
 		transport_complete_task_attr(cmd);
 
 	if (complete) {
-		transport_direct_request_timeout(cmd);
 		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 
@@ -1979,25 +1928,6 @@ queue_full:
 	transport_handle_queue_full(cmd, cmd->se_dev);
 }
 
-static void transport_direct_request_timeout(struct se_cmd *cmd)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!atomic_read(&cmd->t_transport_timeout)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-
-	atomic_sub(atomic_read(&cmd->t_transport_timeout),
-			&cmd->t_se_count);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_lba_21(unsigned char *cdb)
 {
 	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
@@ -2040,80 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-/*
- * Called from interrupt context.
- */
-static void transport_task_timeout_handler(unsigned long data)
-{
-	struct se_task *task = (struct se_task *)data;
-	struct se_cmd *cmd = task->task_se_cmd;
-	unsigned long flags;
-
-	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
-
-	spin_lock_irqsave(&cmd->t_state_lock, flags);
-
-	/*
-	 * Determine if transport_complete_task() has already been called.
-	 */
-	if (!(task->task_flags & TF_ACTIVE)) {
-		pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n",
-			task, cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-
-	atomic_inc(&cmd->t_se_count);
-	atomic_inc(&cmd->t_transport_timeout);
-	cmd->t_tasks_failed = 1;
-
-	task->task_flags |= TF_TIMEOUT;
-	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
-	task->task_scsi_status = 1;
-
-	if (task->task_flags & TF_REQUEST_STOP) {
-		pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP"
-			" == 1\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		complete(&task->task_stop_comp);
-		return;
-	}
-
-	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
-		pr_debug("transport task: %p cmd: %p timeout non zero"
-			" t_task_cdbs_left\n", task, cmd);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		return;
-	}
-	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
-		task, cmd);
-
-	INIT_WORK(&cmd->work, target_complete_failure_work);
-	cmd->t_state = TRANSPORT_COMPLETE;
-	atomic_set(&cmd->t_transport_active, 1);
-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-	queue_work(target_completion_wq, &cmd->work);
-}
-
-static void transport_start_task_timer(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	int timeout;
-
-	/*
-	 * If the task_timeout is disabled, exit now.
-	 */
-	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
-	if (!timeout)
-		return;
-
-	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
-	task->task_timer.data = (unsigned long) task;
-	task->task_timer.function = transport_task_timeout_handler;
-	add_timer(&task->task_timer);
-}
-
 static inline int transport_tcq_window_closed(struct se_device *dev)
 {
 	if (dev->dev_tcq_window_closed++ <
@@ -2296,7 +2152,6 @@ check_depth:
 	    cmd->t_task_list_num)
 		atomic_set(&cmd->t_transport_sent, 1);
 
-	transport_start_task_timer(task);
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 	/*
 	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
@@ -2310,7 +2165,6 @@ check_depth:
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		task->task_flags &= ~TF_ACTIVE;
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		del_timer_sync(&task->task_timer);
 		atomic_set(&cmd->t_transport_sent, 0);
 		transport_stop_tasks_for_cmd(cmd);
 		atomic_inc(&dev->depth_left);
@@ -2350,7 +2204,6 @@ check_depth:
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			task->task_flags &= ~TF_ACTIVE;
 			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			del_timer_sync(&task->task_timer);
 			atomic_set(&cmd->t_transport_sent, 0);
 			transport_stop_tasks_for_cmd(cmd);
 			atomic_inc(&dev->depth_left);
@@ -3543,14 +3396,6 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 	while (!list_empty(&dispose_list)) {
 		task = list_first_entry(&dispose_list, struct se_task, t_list);
 
-		/*
-		 * We already cancelled all pending timers in
-		 * transport_complete_task, but that was just a pure del_timer,
-		 * so do a full del_timer_sync here to make sure any handler
-		 * that was running at that point has finished execution.
-		 */
-		del_timer_sync(&task->task_timer);
-
 		if (task->task_sg != cmd->t_data_sg &&
 		    task->task_sg != cmd->t_bidi_data_sg)
 			kfree(task->task_sg);
@@ -4007,7 +3852,6 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
 	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi);
 	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num);
 	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num);
-	atomic_set(&cmd->t_task_cdbs_timeout_left, cmd->t_task_list_num);
 
 	/*
 	 * For WRITEs, let the fabric know its buffer is ready..
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index d210f1fe9962..35aa786f93da 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -75,8 +75,7 @@ enum transport_tpg_type_table {
 enum se_task_flags {
 	TF_ACTIVE		= (1 << 0),
 	TF_SENT			= (1 << 1),
-	TF_TIMEOUT		= (1 << 2),
-	TF_REQUEST_STOP		= (1 << 3),
+	TF_REQUEST_STOP		= (1 << 2),
 };
 
 /* Special transport agnostic struct se_cmd->t_states */
@@ -404,7 +403,6 @@ struct se_task {
 	int			task_error_status;
 	enum dma_data_direction	task_data_direction;
 	atomic_t		task_state_active;
-	struct timer_list	task_timer;
 	struct list_head	t_list;
 	struct list_head	t_execute_list;
 	struct list_head	t_state_list;
@@ -469,7 +467,6 @@ struct se_cmd {
 	atomic_t		t_se_count;
 	atomic_t		t_task_cdbs_left;
 	atomic_t		t_task_cdbs_ex_left;
-	atomic_t		t_task_cdbs_timeout_left;
 	atomic_t		t_task_cdbs_sent;
 	atomic_t		t_transport_aborted;
 	atomic_t		t_transport_active;
@@ -477,7 +474,6 @@ struct se_cmd {
 	atomic_t		t_transport_queue_active;
 	atomic_t		t_transport_sent;
 	atomic_t		t_transport_stop;
-	atomic_t		t_transport_timeout;
 	atomic_t		transport_dev_active;
 	atomic_t		transport_lun_active;
 	atomic_t		transport_lun_fe_stop;
@@ -646,7 +642,6 @@ struct se_dev_attrib {
 	u32		optimal_sectors;
 	u32		hw_queue_depth;
 	u32		queue_depth;
-	u32		task_timeout;
 	u32		max_unmap_lba_count;
 	u32		max_unmap_block_desc_count;
 	u32		unmap_granularity;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 32c586346c0e..a037a1a6fbba 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -22,10 +22,9 @@
 #define PYX_TRANSPORT_LU_COMM_FAILURE		-7
 #define PYX_TRANSPORT_UNKNOWN_MODE_PAGE		-8
 #define PYX_TRANSPORT_WRITE_PROTECTED		-9
-#define PYX_TRANSPORT_TASK_TIMEOUT		-10
-#define PYX_TRANSPORT_RESERVATION_CONFLICT	-11
-#define PYX_TRANSPORT_ILLEGAL_REQUEST		-12
-#define PYX_TRANSPORT_USE_SENSE_REASON		-13
+#define PYX_TRANSPORT_RESERVATION_CONFLICT	-10
+#define PYX_TRANSPORT_ILLEGAL_REQUEST		-11
+#define PYX_TRANSPORT_USE_SENSE_REASON		-12
 
 #ifndef SAM_STAT_RESERVATION_CONFLICT
 #define SAM_STAT_RESERVATION_CONFLICT		0x18
@@ -38,13 +37,6 @@
 #define TRANSPORT_PLUGIN_VHBA_PDEV		2
 #define TRANSPORT_PLUGIN_VHBA_VDEV		3
 
-/* For SE OBJ Plugins, in seconds */
-#define TRANSPORT_TIMEOUT_TUR			10
-#define TRANSPORT_TIMEOUT_TYPE_DISK		60
-#define TRANSPORT_TIMEOUT_TYPE_ROM		120
-#define TRANSPORT_TIMEOUT_TYPE_TAPE		600
-#define TRANSPORT_TIMEOUT_TYPE_OTHER		300
-
 /*
  * struct se_subsystem_dev->su_dev_flags
  */
@@ -61,8 +53,6 @@
 #define DF_SPC2_RESERVATIONS_WITH_ISID		0x00000004
 
 /* struct se_dev_attrib sanity values */
-/* 10 Minutes */
-#define DA_TASK_TIMEOUT_MAX			600
 /* Default max_unmap_lba_count */
 #define DA_MAX_UNMAP_LBA_COUNT			0
 /* Default max_unmap_block_desc_count */

From b91bf5bf7fb0f35a8119a662e8e6b71ed950f443 Mon Sep 17 00:00:00 2001
From: Nicholas Bellinger 
Date: Tue, 25 Oct 2011 06:43:29 +0000
Subject: [PATCH 62/62] target: Fix compile warning w/ missing module.h include
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch fixes the following compile warning in target_core_cdb.c in
recent linux-next code due to the new use of EXPORT_SYMBOL() for
target_get_task_cdb().

drivers/target/target_core_cdb.c:1316: warning: data definition has no type or storage class
drivers/target/target_core_cdb.c:1316: warning: type defaults to ‘int’ in declaration of ‘EXPORT_SYMBOL’
drivers/target/target_core_cdb.c:1316: warning: parameter names (without types) in function declaration

Cc: Christoph Hellwig 
Signed-off-by: Nicholas Bellinger 
---
 drivers/target/target_core_cdb.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 575cf7216f52..38535eb13929 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -24,6 +24,7 @@
  */
 
 #include 
+#include 
 #include 
 #include 
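
For readers who hit the same warning in other files: at this point in the kernel tree the
EXPORT_SYMBOL() macro comes in via linux/module.h, so a translation unit that exports a symbol
without that header leaves the macro as a bare identifier at file scope, which gcc reports as
"data definition has no type or storage class". A minimal sketch of the pattern, using an
illustrative helper name rather than the real target_get_task_cdb() signature:

#include <linux/module.h>	/* provides EXPORT_SYMBOL() */

/*
 * Illustrative helper only; it stands in for an exported symbol such as
 * target_get_task_cdb().  Dropping the linux/module.h include above makes
 * the EXPORT_SYMBOL() line below an undeclared identifier at file scope,
 * reproducing the three warnings quoted in the patch description.
 */
int example_exported_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(example_exported_helper);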