
staging: lustre: fix coding style on long lines

Signed-off-by: Hugues Morisset <morisset.hugues@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Authored by Hugues Morisset 2014-09-09 16:36:25 +02:00, committed by Greg Kroah-Hartman
parent 187516688d
commit 498374f7b6
1 changed file with 18 additions and 16 deletions
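All of the changes below are whitespace and style cleanups in obd.h: comment blocks are pulled back so lines stay within 80 columns, and one pointer declaration loses the space between the '*' and the identifier. The snippet below is a minimal, hypothetical C sketch of those two patterns; none of its names come from obd.h.

struct demo_thread;			/* forward declaration, illustrative only */

struct demo_obd {
	unsigned long do_next_shrink;	/* jiffies; comment kept inside 80 columns */
	struct demo_thread *do_thread;	/* the '*' now attaches to the name, no space */
};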

@@ -336,10 +336,10 @@ struct client_obd {
* grant before trying to dirty a page and unreserve the rest.
* See osc_{reserve|unreserve}_grant for details. */
long cl_reserved_grant;
-struct list_head cl_cache_waiters; /* waiting for cache/grant */
-unsigned long cl_next_shrink_grant; /* jiffies */
-struct list_head cl_grant_shrink_list; /* Timeout event list */
-int cl_grant_shrink_interval; /* seconds */
+struct list_head cl_cache_waiters; /* waiting for cache/grant */
+unsigned long cl_next_shrink_grant; /* jiffies */
+struct list_head cl_grant_shrink_list; /* Timeout event list */
+int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
* the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */
@@ -389,12 +389,12 @@ struct client_obd {
/* lru for osc caching pages */
struct cl_client_cache *cl_cache;
-struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
+struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
atomic_t *cl_lru_left;
atomic_t cl_lru_busy;
atomic_t cl_lru_shrinkers;
atomic_t cl_lru_in_list;
-struct list_head cl_lru_list; /* lru page list */
+struct list_head cl_lru_list; /* lru page list */
client_obd_lock_t cl_lru_list_lock; /* page list protector */
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
@@ -500,9 +500,9 @@ struct lov_statfs_data {
};
/* Stripe placement optimization */
struct lov_qos {
-struct list_head lq_oss_list; /* list of OSSs that targets use */
+struct list_head lq_oss_list; /* list of OSSs that targets use */
struct rw_semaphore lq_rw_sem;
-__u32 lq_active_oss_count;
+__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
struct lov_qos_rr lq_rr; /* round robin qos data */
@@ -545,8 +545,8 @@ struct pool_desc {
struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list; /* serial access */
struct proc_dir_entry *pool_proc_entry; /* file in /proc */
-struct obd_device *pool_lobd; /* obd of the lov/lod to which
- * this pool belongs */
+struct obd_device *pool_lobd; /* obd of the lov/lod to which
+ * this pool belongs */
};
struct lov_obd {
@@ -564,7 +564,7 @@ struct lov_obd {
int lov_connects;
int lov_pool_count;
struct cfs_hash *lov_pools_hash_body; /* used for key access */
-struct list_head lov_pool_list; /* used for sequential access */
+struct list_head lov_pool_list; /* used for sequential access */
struct proc_dir_entry *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
@@ -679,7 +679,7 @@ struct obd_trans_info {
unsigned long oti_sync_write:1;
/* initial thread handling transaction */
-struct ptlrpc_thread * oti_thread;
+struct ptlrpc_thread *oti_thread;
__u32 oti_conn_cnt;
/** VBR: versions */
__u64 oti_pre_version;
@@ -704,6 +704,7 @@ static inline void oti_init(struct obd_trans_info *oti,
if (req->rq_reqmsg != NULL &&
lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
__u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg);
oti->oti_pre_version = pre_version ? pre_version[0] : 0;
oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg);
}
@@ -880,9 +881,9 @@ struct obd_device {
int obd_requests_queued_for_recovery;
wait_queue_head_t obd_next_transno_waitq;
/* protected by obd_recovery_task_lock */
-struct timer_list obd_recovery_timer;
-time_t obd_recovery_start; /* seconds */
-time_t obd_recovery_end; /* seconds, for lprocfs_status */
+struct timer_list obd_recovery_timer;
+time_t obd_recovery_start; /* seconds */
+time_t obd_recovery_end; /* seconds, for lprocfs_status */
int obd_recovery_time_hard;
int obd_recovery_timeout;
int obd_recovery_ir_factor;
@@ -918,7 +919,7 @@ struct obd_device {
struct lprocfs_stats *obd_svc_stats;
atomic_t obd_evict_inprogress;
wait_queue_head_t obd_evict_inprogress_waitq;
-struct list_head obd_evict_list; /* protected with pet_lock */
+struct list_head obd_evict_list; /* protected with pet_lock */
/**
* Ldlm pool part. Save last calculated SLV and Limit.
@@ -1405,6 +1406,7 @@ static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)
static inline struct md_open_data *obd_mod_alloc(void)
{
struct md_open_data *mod;
OBD_ALLOC_PTR(mod);
if (mod == NULL)
return NULL;
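The two hunks that grow by a single line (in oti_init() and obd_mod_alloc()) appear to gain only a blank line after the local variable declarations; a blank line does not survive this rendering, so it is not visible above. A hedged sketch of that pattern, using a stand-in type and calloc() rather than the Lustre OBD_ALLOC_PTR() macro:

#include <stdlib.h>

struct demo_open_data {			/* stand-in type, not the Lustre md_open_data */
	int dod_refcount;
};

static struct demo_open_data *demo_mod_alloc(void)
{
	struct demo_open_data *mod = calloc(1, sizeof(*mod));

	/* The blank line above the check is the style fix being illustrated. */
	if (mod == NULL)
		return NULL;
	return mod;
}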