
Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 - fix tcm-user backend driver expired cmd time processing (agrover)
 - eliminate kref_put_spinlock_irqsave() for I/O completion (bart)
 - fix iscsi login kthread failure case hung task regression (nab)
 - fix COMPARE_AND_WRITE completion use-after-free race (nab)
 - fix COMPARE_AND_WRITE with SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC non-zero
   SGL offset data corruption (Jan + Doug)
 - fix >= v4.4-rc1 regression for tcm_qla2xxx enable configfs attribute
   (Himanshu + HCH)

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target/stat: print full t10_wwn.model buffer
  target: fix COMPARE_AND_WRITE non zero SGL offset data corruption
  qla2xxx: Fix regression introduced by target configFS changes
  kref: Remove kref_put_spinlock_irqsave()
  target: Invoke release_cmd() callback without holding a spinlock
  target: Fix race for SCF_COMPARE_AND_WRITE_POST checking
  iscsi-target: Fix rx_login_comp hang after login failure
  iscsi-target: return -ENOMEM instead of -1 in case of failed kmalloc()
  target/user: Do not set unused fields in tcmu_ops
  target/user: Fix time calc in expired cmd processing
Linus Torvalds 2015-11-29 09:03:57 -08:00
commit 36511e8607
11 changed files with 53 additions and 64 deletions

--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c

@@ -902,7 +902,7 @@ static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item,
 	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
 }
 
-CONFIGFS_ATTR_WO(tcm_qla2xxx_tpg_, enable);
+CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable);
 CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions);
 CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type);

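Note: the regression being fixed is that the v4.4-rc1 configfs conversion made
the qla2xxx "enable" attribute write-only, breaking userspace that reads it
back; CONFIGFS_ATTR() restores the show method. Roughly, per the v4.4
include/linux/configfs.h (simplified sketch, not quoted verbatim):

	/*
	 * CONFIGFS_ATTR() wires up both ->show() and ->store() (mode 0644);
	 * CONFIGFS_ATTR_WO() registers only ->store(), so reads fail.
	 */
	#define CONFIGFS_ATTR(_pfx, _name)				\
	static struct configfs_attribute _pfx##attr_##_name = {		\
		.ca_name	= __stringify(_name),			\
		.ca_mode	= S_IRUGO | S_IWUSR,			\
		.ca_owner	= THIS_MODULE,				\
		.show		= _pfx##_name##_show,			\
		.store		= _pfx##_name##_store,			\
	}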
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c

@@ -4074,6 +4074,17 @@ reject:
 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
 }
 
+static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
+{
+	bool ret;
+
+	spin_lock_bh(&conn->state_lock);
+	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
+	spin_unlock_bh(&conn->state_lock);
+
+	return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
 	int ret, rc;
@@ -4091,7 +4102,7 @@ int iscsi_target_rx_thread(void *arg)
 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
 	 */
 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-	if (rc < 0)
+	if (rc < 0 || iscsi_target_check_conn_state(conn))
 		return 0;
 
 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {

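Note: wait_for_completion_interruptible() returns 0 whenever rx_login_comp is
completed, and with the iscsi_target_nego.c fix below the completion now also
fires on the login failure path. Re-checking conn_state under state_lock lets
the rx thread tell the two cases apart and exit instead of servicing a
connection that never reached TARG_CONN_STATE_LOGGED_IN. Generic shape of the
handshake (names hypothetical):

	static int example_rx_thread(void *arg)
	{
		struct example_conn *conn = arg;

		/* The waker completes start_comp on success *and* failure... */
		if (wait_for_completion_interruptible(&conn->start_comp) < 0)
			return 0;
		/* ...so re-check shared state under its lock to learn which. */
		if (!example_conn_is_logged_in(conn))
			return 0;
		/* normal receive loop follows */
		return 0;
	}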
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c

@@ -388,6 +388,7 @@ err:
 	if (login->login_complete) {
 		if (conn->rx_thread && conn->rx_thread_active) {
 			send_sig(SIGINT, conn->rx_thread, 1);
+			complete(&conn->rx_login_comp);
 			kthread_stop(conn->rx_thread);
 		}
 		if (conn->tx_thread && conn->tx_thread_active) {

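Note: the ordering here is the whole fix. kthread_stop() returns only after
the target thread exits, and the rx thread parks on rx_login_comp before it
has allowed signals, so neither the SIGINT nor kthread_stop()'s wakeup gets it
past that wait; this is the hung-task regression named in the pull summary.
The added complete() wakes the thread for real, after which the conn_state
check above makes it exit:

	send_sig(SIGINT, conn->rx_thread, 1);	/* seen once the thread runs */
	complete(&conn->rx_login_comp);		/* unblock the startup wait  */
	kthread_stop(conn->rx_thread);		/* now guaranteed to return  */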
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c

@@ -208,7 +208,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
 	if (!pl) {
 		pr_err("Unable to allocate memory for"
 				" struct iscsi_param_list.\n");
-		return -1 ;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&pl->param_list);
 	INIT_LIST_HEAD(&pl->extra_response_list);
@@ -578,7 +578,7 @@ int iscsi_copy_param_list(
 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
 	if (!param_list) {
 		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&param_list->param_list);
 	INIT_LIST_HEAD(&param_list->extra_response_list);
@@ -629,7 +629,7 @@ int iscsi_copy_param_list(
 err_out:
 	iscsi_release_param_list(param_list);
-	return -1;
+	return -ENOMEM;
 }
 
 static void iscsi_release_extra_responses(struct iscsi_param_list *param_list)
@@ -729,7 +729,7 @@ static int iscsi_add_notunderstood_response(
 	if (!extra_response) {
 		pr_err("Unable to allocate memory for"
 			" struct iscsi_extra_response.\n");
-		return -1;
+		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&extra_response->er_list);
@@ -1370,7 +1370,7 @@ int iscsi_decode_text_input(
 	tmpbuf = kzalloc(length + 1, GFP_KERNEL);
 	if (!tmpbuf) {
 		pr_err("Unable to allocate %u + 1 bytes for tmpbuf.\n", length);
-		return -1;
+		return -ENOMEM;
 	}
 	memcpy(tmpbuf, textbuf, length);

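Note: the errno matters beyond style: -1 is -EPERM, so callers propagating
these returns toward userspace would report an allocation failure as a
permissions error. A hypothetical call site (caller shape assumed):

	struct iscsi_param_list *pl;
	int ret;

	ret = iscsi_create_default_params(&pl);
	if (ret < 0)
		return ret;	/* surfaces -ENOMEM instead of -EPERM */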
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c

@@ -371,7 +371,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
 	return 0;
 }
 
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
+					   int *post_ret)
 {
 	unsigned char *buf, *addr;
 	struct scatterlist *sg;
@@ -437,7 +438,8 @@ sbc_execute_rw(struct se_cmd *cmd)
 			       cmd->data_direction);
 }
 
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+					     int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
@@ -447,8 +449,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
 	 * sent to the backend driver.
 	 */
 	spin_lock_irq(&cmd->t_state_lock);
-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
+	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+		*post_ret = 1;
+	}
 	spin_unlock_irq(&cmd->t_state_lock);
 
 	/*
@@ -460,7 +464,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
 	return TCM_NO_SENSE;
 }
 
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+						 int *post_ret)
 {
 	struct se_device *dev = cmd->se_dev;
 	struct scatterlist *write_sg = NULL, *sg;
@@ -556,11 +561,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
 		if (block_size < PAGE_SIZE) {
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    block_size);
+				    m.piter.sg->offset + block_size);
 		} else {
 			sg_miter_next(&m);
 			sg_set_page(&write_sg[i], m.page, block_size,
-				    0);
+				    m.piter.sg->offset);
 		}
 		len -= block_size;
 		i++;

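Note: the corruption mechanism: with SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC the
fabric driver (e.g. loopback) hands target-core its own scatterlist, whose
entries need not start at byte offset 0 within their pages. Building write_sg
with hardcoded offsets of 0 and block_size therefore read and wrote the wrong
bytes. For reference, the helper's prototype (include/linux/scatterlist.h):

	/* offset is the byte offset of the buffer *within* the page */
	void sg_set_page(struct scatterlist *sg, struct page *page,
			 unsigned int len, unsigned int offset);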
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c

@@ -246,7 +246,7 @@ static ssize_t target_stat_lu_prod_show(struct config_item *item, char *page)
 	char str[sizeof(dev->t10_wwn.model)+1];
 
 	/* scsiLuProductId */
-	for (i = 0; i < sizeof(dev->t10_wwn.vendor); i++)
+	for (i = 0; i < sizeof(dev->t10_wwn.model); i++)
 		str[i] = ISPRINT(dev->t10_wwn.model[i]) ?
 			dev->t10_wwn.model[i] : ' ';
 	str[i] = '\0';

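Note: the truncation came from mixing up two fixed-width INQUIRY fields: in
struct t10_wwn of this era, vendor is 8 bytes while model is 16, so bounding
the loop by sizeof(vendor) printed only the first half of the model string.
Abridged sketch of the fields (from include/target/target_core_base.h):

	struct t10_wwn {
		char vendor[8];		/* T10 VENDOR IDENTIFICATION */
		char model[16];		/* PRODUCT IDENTIFICATION    */
		char revision[4];	/* PRODUCT REVISION LEVEL    */
		/* ... */
	};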
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c

@@ -130,6 +130,9 @@ void core_tmr_abort_task(
 		if (tmr->ref_task_tag != ref_tag)
 			continue;
 
+		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+			continue;
+
 		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
 
@@ -139,13 +142,15 @@ void core_tmr_abort_task(
 				" skipping\n", ref_tag);
 			spin_unlock(&se_cmd->t_state_lock);
 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+
+			target_put_sess_cmd(se_cmd);
+
 			goto out;
 		}
 		se_cmd->transport_state |= CMD_T_ABORTED;
 		spin_unlock(&se_cmd->t_state_lock);
 
 		list_del_init(&se_cmd->se_cmd_list);
-		kref_get(&se_cmd->cmd_kref);
 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 		cancel_work_sync(&se_cmd->work);

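Note: kref_get_unless_zero() only takes a reference while the count is still
non-zero, so ABORT_TASK can no longer resurrect a command whose final put is
already in flight, which the old unconditional kref_get() allowed. The
early-exit path correspondingly gains a target_put_sess_cmd() to balance the
reference it just took. The general lookup pattern (names hypothetical):

	spin_lock_irqsave(&sess->cmd_lock, flags);
	list_for_each_entry(cmd, &sess->cmd_list, entry) {
		if (cmd->tag != wanted_tag)
			continue;
		if (!kref_get_unless_zero(&cmd->ref))
			continue;	/* final put already ran; skip */
		/* cmd is pinned; drop it later with the matching put */
	}
	spin_unlock_irqrestore(&sess->cmd_lock, flags);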
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c

@@ -1658,7 +1658,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
 void transport_generic_request_failure(struct se_cmd *cmd,
 		sense_reason_t sense_reason)
 {
-	int ret = 0;
+	int ret = 0, post_ret = 0;
 
 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx"
 		" CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]);
@@ -1680,7 +1680,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	 */
 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
 	     cmd->transport_complete_callback)
-		cmd->transport_complete_callback(cmd, false);
+		cmd->transport_complete_callback(cmd, false, &post_ret);
 
 	switch (sense_reason) {
 	case TCM_NON_EXISTENT_LUN:
@@ -2068,11 +2068,13 @@ static void target_complete_ok_work(struct work_struct *work)
 	 */
 	if (cmd->transport_complete_callback) {
 		sense_reason_t rc;
+		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
+		bool zero_dl = !(cmd->data_length);
+		int post_ret = 0;
 
-		rc = cmd->transport_complete_callback(cmd, true);
-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
-			    !cmd->data_length)
+		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
+		if (!rc && !post_ret) {
+			if (caw && zero_dl)
 				goto queue_rsp;
 
 			return;
@@ -2507,23 +2509,24 @@ out:
 EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
-		__releases(&se_cmd->se_sess->sess_cmd_lock)
 {
 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
 	struct se_session *se_sess = se_cmd->se_sess;
+	unsigned long flags;
 
+	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
 	if (list_empty(&se_cmd->se_cmd_list)) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return;
 	}
 	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
-		spin_unlock(&se_sess->sess_cmd_lock);
+		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 		complete(&se_cmd->cmd_wait_comp);
 		return;
 	}
 	list_del(&se_cmd->se_cmd_list);
-	spin_unlock(&se_sess->sess_cmd_lock);
+	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
 	se_cmd->se_tfo->release_cmd(se_cmd);
 }
@@ -2539,8 +2542,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
 		se_cmd->se_tfo->release_cmd(se_cmd);
 		return 1;
 	}
-	return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
-			&se_sess->sess_cmd_lock);
+	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);

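Note: two fixes meet in this file. The post_ret out-parameter closes the
COMPARE_AND_WRITE use-after-free: target_complete_ok_work() used to re-read
cmd->se_cmd_flags after the callback returned, by which time the backend may
already have completed and freed the command; now the callback reports its
decision through *post_ret (written under t_state_lock), and caw/zero_dl are
cached before the call for the same reason. In sketch form:

	int post_ret = 0;

	/* decide inside the callback, under the command's lock... */
	rc = cmd->transport_complete_callback(cmd, true, &post_ret);
	/* ...and never dereference cmd again once work was handed off */
	if (!rc && !post_ret)
		return;

The target_release_cmd_kref() rework pairs with the kref_put_spinlock_irqsave()
removal below: the release callback now acquires sess_cmd_lock itself, with
balanced irqsave/irqrestore.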
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c

@@ -638,7 +638,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
 		return 0;
 
-	if (!time_after(cmd->deadline, jiffies))
+	if (!time_after(jiffies, cmd->deadline))
 		return 0;
 
 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
@@ -1101,8 +1101,6 @@ tcmu_parse_cdb(struct se_cmd *cmd)
 
 static const struct target_backend_ops tcmu_ops = {
 	.name			= "user",
-	.inquiry_prod		= "USER",
-	.inquiry_rev		= TCMU_VERSION,
 	.owner			= THIS_MODULE,
 	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
 	.attach_hba		= tcmu_attach_hba,

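Note: the expiry test was inverted. time_after(a, b) is true when a is
chronologically later than b, computed wrap-safely; simplified from
include/linux/jiffies.h (typecheck() elided):

	#define time_after(a, b)	((long)((b) - (a)) < 0)

Old: !time_after(cmd->deadline, jiffies) skipped exactly the commands whose
deadline had passed and "expired" those still within their deadline. New:
!time_after(jiffies, cmd->deadline) returns early until jiffies has actually
moved past the deadline.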
--- a/include/linux/kref.h
+++ b/include/linux/kref.h

@@ -19,7 +19,6 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
 
 struct kref {
 	atomic_t refcount;
@@ -99,38 +98,6 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
 	return kref_sub(kref, 1, release);
 }
 
-/**
- * kref_put_spinlock_irqsave - decrement refcount for object.
- * @kref: object.
- * @release: pointer to the function that will clean up the object when the
- *	     last reference to the object is released.
- *	     This pointer is required, and it is not acceptable to pass kfree
- *	     in as this function.
- * @lock: lock to take in release case
- *
- * Behaves identical to kref_put with one exception.  If the reference count
- * drops to zero, the lock will be taken atomically wrt dropping the reference
- * count.  The release function has to call spin_unlock() without _irqrestore.
- */
-static inline int kref_put_spinlock_irqsave(struct kref *kref,
-		void (*release)(struct kref *kref),
-		spinlock_t *lock)
-{
-	unsigned long flags;
-
-	WARN_ON(release == NULL);
-	if (atomic_add_unless(&kref->refcount, -1, 1))
-		return 0;
-	spin_lock_irqsave(lock, flags);
-	if (atomic_dec_and_test(&kref->refcount)) {
-		release(kref);
-		local_irq_restore(flags);
-		return 1;
-	}
-	spin_unlock_irqrestore(lock, flags);
-	return 0;
-}
-
 static inline int kref_put_mutex(struct kref *kref,
 				 void (*release)(struct kref *kref),
 				 struct mutex *lock)

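Note: with target_put_sess_cmd() converted above, the last user of
kref_put_spinlock_irqsave() is gone. Its asymmetric contract (the helper took
the lock with irqsave, but the release callback had to spin_unlock() without
restoring IRQ state) was easy to get wrong; the replacement is a plain
kref_put() whose release callback does its own balanced locking, e.g.
(generic sketch, names hypothetical):

	static void obj_release(struct kref *kref)
	{
		struct obj *o = container_of(kref, struct obj, ref);
		unsigned long flags;

		spin_lock_irqsave(&o->parent->lock, flags); /* lock outlives o */
		list_del(&o->entry);
		spin_unlock_irqrestore(&o->parent->lock, flags);
		kfree(o);
	}

	/* drop a reference; obj_release() runs if it was the last one */
	kref_put(&o->ref, obj_release);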
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h

@@ -474,7 +474,7 @@ struct se_cmd {
 	struct completion	cmd_wait_comp;
 	const struct target_core_fabric_ops *se_tfo;
 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
-	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
+	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
 	void			*protocol_data;
 
 	unsigned char		*t_task_cdb;