1
0
Fork 0

iucv: fix iucv_buffer_cpumask check when calling IUCV functions

Prior to calling IUCV functions, the DECLARE BUFFER function must have been
called for at least one CPU to receive IUCV interrupts.

With commit "iucv: establish reboot notifier" (6c005961), a check has been
introduced to avoid calling IUCV functions if the current CPU does not have
an interrupt buffer declared.
Because one interrupt buffer is sufficient, change the condition to ensure
that one interrupt buffer is available.

In addition, checking the buffer on the current CPU creates a race with
CPU up/down notifications: before checking the buffer, the IUCV function
might be interrupted by an smp_call_function() that retrieves the interrupt
buffer for the current CPU.
When the IUCV function continues, the check fails and -EIO is returned. If a
buffer is available on any other CPU, the IUCV function call must be invoked
(instead of failing with -EIO).

Signed-off-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
hifive-unleashed-5.1
Hendrik Brueckner 2009-09-16 04:37:23 +00:00 committed by David S. Miller
parent 4c89d86b4d
commit d28ecab0c4
1 changed file with 11 additions and 11 deletions

View File

@@ -864,7 +864,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -913,7 +913,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
spin_lock_bh(&iucv_table_lock);
iucv_cleanup_queue();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -973,7 +973,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1005,7 +1005,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1034,7 +1034,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
int rc;
preempt_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1068,7 +1068,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1160,7 +1160,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1233,7 +1233,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1272,7 +1272,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1322,7 +1322,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
union iucv_param *parm;
int rc;
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1409,7 +1409,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}