
drbd: Introduced tconn->cstate_mutex

In compatibility mode with old DRBDs, use that as the state_mutex
as well.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Philipp Reisner 2011-02-11 20:11:10 +01:00
parent dad2055481
commit 8410da8f0e
6 changed files with 26 additions and 17 deletions
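
The per-device state_mutex becomes a pointer that is either the device's own mutex or, for old peers, the new connection-wide tconn->cstate_mutex, so every existing lock site keeps working unchanged. Below is a minimal userspace sketch of that indirection pattern, not the DRBD code itself: pthread mutexes stand in for struct mutex, and struct conn / struct vol are simplified stand-ins for drbd_tconn / drbd_conf.

#include <pthread.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t cstate_mutex;	/* shared by all volumes of the connection */
	int agreed_pro_version;		/* stand-in for the negotiated protocol version */
};

struct vol {
	struct conn *conn;
	pthread_mutex_t own_state_mutex;
	pthread_mutex_t *state_mutex;	/* &own_state_mutex or &conn->cstate_mutex */
};

static void vol_init(struct vol *v, struct conn *c)
{
	v->conn = c;
	pthread_mutex_init(&v->own_state_mutex, NULL);
	v->state_mutex = &v->own_state_mutex;	/* default: private per-volume lock */
}

/* At connect time, old peers (protocol < 100) get the connection-wide lock instead. */
static void vol_connected(struct vol *v)
{
	v->state_mutex = v->conn->agreed_pro_version < 100 ?
		&v->conn->cstate_mutex : &v->own_state_mutex;
}

int main(void)
{
	struct conn c = { .cstate_mutex = PTHREAD_MUTEX_INITIALIZER,
			  .agreed_pro_version = 96 };	/* 96: an old peer */
	struct vol v;

	vol_init(&v, &c);
	vol_connected(&v);

	/* Every caller locks through the pointer, unaware of which mutex it is. */
	pthread_mutex_lock(v.state_mutex);
	printf("using shared connection lock: %s\n",
	       v.state_mutex == &c.cstate_mutex ? "yes" : "no");
	pthread_mutex_unlock(v.state_mutex);
	return 0;
}

Routing all lockers through the pointer means the compatibility decision is made once at connect time, while drbd_set_role(), drbd_req_state() and the other call sites in the diffs below stay oblivious to whether they serialize per device or per connection.
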


@@ -917,8 +917,9 @@ enum {
 struct drbd_tconn { /* is a resource from the config file */
 	char *name; /* Resource name */
 	struct list_head all_tconn; /* List of all drbd_tconn, prot by global_state_lock */
-	struct idr volumes; /* <tconn, vnr> to mdev mapping */
-	enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+	struct idr volumes; /* <tconn, vnr> to mdev mapping */
+	enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
+	struct mutex cstate_mutex; /* Protects graceful disconnects */
 	unsigned long flags;
 	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
@@ -1080,7 +1081,8 @@ struct drbd_conf {
 	unsigned long comm_bm_set; /* communicated number of set bits. */
 	struct bm_io_work bm_io_work;
 	u64 ed_uuid; /* UUID of the exposed data */
-	struct mutex state_mutex;
+	struct mutex own_state_mutex;
+	struct mutex *state_mutex; /* either own_state_mutex or mdev->tconn->cstate_mutex */
 	char congestion_reason; /* Why we where congested... */
 	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
 	atomic_t rs_sect_ev; /* for submitted resync data rate, both */


@@ -1801,7 +1801,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	atomic_set(&mdev->ap_in_flight, 0);
 	mutex_init(&mdev->md_io_mutex);
-	mutex_init(&mdev->state_mutex);
+	mutex_init(&mdev->own_state_mutex);
+	mdev->state_mutex = &mdev->own_state_mutex;
 	spin_lock_init(&mdev->al_lock);
 	spin_lock_init(&mdev->peer_seq_lock);
@@ -2189,6 +2190,7 @@ struct drbd_tconn *drbd_new_tconn(char *name)
 		goto fail;
 	tconn->cstate = C_STANDALONE;
+	mutex_init(&tconn->cstate_mutex);
 	spin_lock_init(&tconn->req_lock);
 	atomic_set(&tconn->net_cnt, 0);
 	init_waitqueue_head(&tconn->net_cnt_wait);


@@ -320,7 +320,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	if (new_role == R_PRIMARY)
 		request_ping(mdev->tconn); /* Detect a dead peer ASAP */
-	mutex_lock(&mdev->state_mutex);
+	mutex_lock(mdev->state_mutex);
 	mask.i = 0; mask.role = R_MASK;
 	val.i = 0; val.role = new_role;
@@ -439,7 +439,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 		kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
-	mutex_unlock(&mdev->state_mutex);
+	mutex_unlock(mdev->state_mutex);
 	return rv;
 }
@@ -2162,7 +2162,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 		return 0;
 	}
-	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */
+	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
 	if (!get_ldev(mdev)) {
 		retcode = ERR_NO_DISK;
@@ -2204,7 +2204,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 out_dec:
 	put_ldev(mdev);
 out:
-	mutex_unlock(&mdev->state_mutex);
+	mutex_unlock(mdev->state_mutex);
 	reply->ret_code = retcode;
 	return 0;


@@ -753,6 +753,10 @@ static int drbd_connected(int vnr, void *p, void *data)
 	atomic_set(&mdev->packet_seq, 0);
 	mdev->peer_seq = 0;
+	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
+		&mdev->tconn->cstate_mutex :
+		&mdev->own_state_mutex;
 	ok &= drbd_send_sync_param(mdev, &mdev->sync_conf);
 	ok &= drbd_send_sizes(mdev, 0, 0);
 	ok &= drbd_send_uuids(mdev);
@@ -760,6 +764,7 @@ static int drbd_connected(int vnr, void *p, void *data)
 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
 	clear_bit(RESIZE_PENDING, &mdev->flags);
 	return !ok;
 }
@@ -3167,8 +3172,8 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd,
 	   ongoing cluster wide state change is finished. That is important if
 	   we are primary and are detaching from our disk. We need to see the
 	   new disk state... */
-	mutex_lock(&mdev->state_mutex);
-	mutex_unlock(&mdev->state_mutex);
+	mutex_lock(mdev->state_mutex);
+	mutex_unlock(mdev->state_mutex);
 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
 		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
@@ -3219,7 +3224,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd,
 	val.i = be32_to_cpu(p->val);
 	if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
-	    mutex_is_locked(&mdev->state_mutex)) {
+	    mutex_is_locked(mdev->state_mutex)) {
 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
 		return true;
 	}


@@ -163,7 +163,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 	init_completion(&done);
 	if (f & CS_SERIALIZE)
-		mutex_lock(&mdev->state_mutex);
+		mutex_lock(mdev->state_mutex);
 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
 	os = mdev->state;
@@ -215,7 +215,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
 abort:
 	if (f & CS_SERIALIZE)
-		mutex_unlock(&mdev->state_mutex);
+		mutex_unlock(mdev->state_mutex);
 	return rv;
 }


@@ -1538,19 +1538,19 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 	if (current == mdev->tconn->worker.task) {
 		/* The worker should not sleep waiting for state_mutex,
 		   that can take long */
-		if (!mutex_trylock(&mdev->state_mutex)) {
+		if (!mutex_trylock(mdev->state_mutex)) {
 			set_bit(B_RS_H_DONE, &mdev->flags);
 			mdev->start_resync_timer.expires = jiffies + HZ/5;
 			add_timer(&mdev->start_resync_timer);
 			return;
 		}
 	} else {
-		mutex_lock(&mdev->state_mutex);
+		mutex_lock(mdev->state_mutex);
 	}
 	clear_bit(B_RS_H_DONE, &mdev->flags);
 	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
-		mutex_unlock(&mdev->state_mutex);
+		mutex_unlock(mdev->state_mutex);
 		return;
 	}
@@ -1639,7 +1639,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 		drbd_md_sync(mdev);
 	}
 	put_ldev(mdev);
-	mutex_unlock(&mdev->state_mutex);
+	mutex_unlock(mdev->state_mutex);
 }
 static int _worker_dying(int vnr, void *p, void *data)