Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw:
  [GFS2] Fix GFS2's use of do_div() in its quota calculations
  [GFS2] Remove unused declaration
  [GFS2] Remove support for unused and pointless flag
  [GFS2] Replace rgrp "recent list" with mru list
  [GFS2] Allow local DF locks when holding a cached EX glock
  [GFS2] Fix delayed demote race
  [GFS2] don't call permission()
  [GFS2] Fix module building
  [GFS2] Glock documentation
  [GFS2] Remove all_list from lock_dlm
  [GFS2] Remove obsolete conversion deadlock avoidance code
  [GFS2] Remove remote lock dropping code
  [GFS2] kernel panic mounting volume
  [GFS2] Revise readpage locking
  [GFS2] Fix ordering of args for list_add
  [GFS2] trivial sparse lock annotations
  [GFS2] No lock_nolock
  [GFS2] Fix ordering bug in lock_dlm
  [GFS2] Clean up the glock core
This commit is contained in:
Linus Torvalds
2008-07-15 10:38:46 -07:00
34 changed files with 1278 additions and 1955 deletions
+3 -15
View File
@@ -14,23 +14,11 @@ config GFS2_FS
GFS is perfect consistency -- changes made to the filesystem on one
machine show up immediately on all other machines in the cluster.
To use the GFS2 filesystem, you will need to enable one or more of
the below locking modules. Documentation and utilities for GFS2 can
To use the GFS2 filesystem in a cluster, you will need to enable
the locking module below. Documentation and utilities for GFS2 can
be found here: http://sources.redhat.com/cluster
config GFS2_FS_LOCKING_NOLOCK
tristate "GFS2 \"nolock\" locking module"
depends on GFS2_FS
help
Single node locking module for GFS2.
Use this module if you want to use GFS2 on a single node without
its clustering features. You can still take advantage of the
large file support, and upgrade to running a full cluster later on
if required.
If you will only be using GFS2 in cluster mode, you do not need this
module.
The "nolock" lock module is now built in to GFS2 by default.
config GFS2_FS_LOCKING_DLM
tristate "GFS2 DLM locking module"
-1
View File
@@ -5,6 +5,5 @@ gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
ops_fstype.o ops_inode.o ops_super.o quota.o \
recovery.o rgrp.o super.o sys.o trans.o util.o
obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/
-5
View File
@@ -15,11 +15,6 @@ enum {
CREATE = 1,
};
enum {
NO_WAIT = 0,
WAIT = 1,
};
enum {
NO_FORCE = 0,
FORCE = 1,
+665 -978
View File
File diff suppressed because it is too large Load Diff
+5 -6
View File
@@ -26,11 +26,8 @@
#define GL_SKIP 0x00000100
#define GL_ATIME 0x00000200
#define GL_NOCACHE 0x00000400
#define GL_FLOCK 0x00000800
#define GL_NOCANCEL 0x00001000
#define GLR_TRYFAILED 13
#define GLR_CANCELED 14
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
spin_lock(&gl->gl_spin);
pid = task_pid(current);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
break;
if (gh->gh_owner_pid == pid)
goto out;
}
@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
int ret;
spin_lock(&gl->gl_spin);
ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3);
ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
return ret;
}
@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
@@ -130,10 +130,9 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
void gfs2_lvb_unhold(struct gfs2_glock *gl);
void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);
+44 -26
View File
@@ -13,6 +13,7 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/bio.h>
#include "gfs2.h"
#include "incore.h"
@@ -171,26 +172,6 @@ static void inode_go_sync(struct gfs2_glock *gl)
}
}
/**
* inode_go_xmote_bh - After promoting/demoting a glock
* @gl: the glock
*
*/
static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
struct gfs2_holder *gh = gl->gl_req_gh;
struct buffer_head *bh;
int error;
if (gl->gl_state != LM_ST_UNLOCKED &&
(!gh || !(gh->gh_flags & GL_SKIP))) {
error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
if (!error)
brelse(bh);
}
}
/**
* inode_go_inval - prepare a inode glock to be released
* @gl: the glock
@@ -266,6 +247,26 @@ static int inode_go_lock(struct gfs2_holder *gh)
return error;
}
/**
* inode_go_dump - print information about an inode
* @seq: The iterator
* @ip: the inode
*
* Returns: 0 on success, -ENOBUFS when we run out of space
*/
static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
const struct gfs2_inode *ip = gl->gl_object;
if (ip == NULL)
return 0;
gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
IF2DT(ip->i_inode.i_mode), ip->i_flags);
return 0;
}
/**
* rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
* @gl: the glock
@@ -305,6 +306,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}
/**
* rgrp_go_dump - print out an rgrp
* @seq: The iterator
* @gl: The glock in question
*
*/
static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
const struct gfs2_rgrpd *rgd = gl->gl_object;
if (rgd == NULL)
return 0;
gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
return 0;
}
/**
* trans_go_sync - promote/demote the transaction glock
* @gl: the glock
@@ -330,7 +347,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
*
*/
static void trans_go_xmote_bh(struct gfs2_glock *gl)
static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -338,8 +355,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
struct gfs2_log_header_host head;
int error;
if (gl->gl_state != LM_ST_UNLOCKED &&
test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -354,6 +370,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
gfs2_log_pointers_init(sdp, head.lh_blkno);
}
}
return 0;
}
/**
@@ -375,12 +392,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
const struct gfs2_glock_operations gfs2_inode_glops = {
.go_xmote_th = inode_go_sync,
.go_xmote_bh = inode_go_xmote_bh,
.go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok,
.go_lock = inode_go_lock,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_min_hold_time = HZ / 10,
.go_min_hold_time = HZ / 5,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -389,8 +406,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_demote_ok = rgrp_go_demote_ok,
.go_lock = rgrp_go_lock,
.go_unlock = rgrp_go_unlock,
.go_dump = rgrp_go_dump,
.go_type = LM_TYPE_RGRP,
.go_min_hold_time = HZ / 10,
.go_min_hold_time = HZ / 5,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
+15 -23
View File
@@ -77,7 +77,6 @@ struct gfs2_rgrp_host {
struct gfs2_rgrpd {
struct list_head rd_list; /* Link with superblock */
struct list_head rd_list_mru;
struct list_head rd_recent; /* Recently used rgrps */
struct gfs2_glock *rd_gl; /* Glock for this rgrp */
u64 rd_addr; /* grp block disk address */
u64 rd_data0; /* first data location */
@@ -128,20 +127,20 @@ struct gfs2_bufdata {
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock *gl);
void (*go_xmote_bh) (struct gfs2_glock *gl);
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
};
enum {
/* States */
HIF_HOLDER = 6,
HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
HIF_FIRST = 7,
HIF_ABORTED = 9,
HIF_WAIT = 10,
};
@@ -154,20 +153,20 @@ struct gfs2_holder {
unsigned gh_flags;
int gh_error;
unsigned long gh_iflags;
unsigned long gh_iflags; /* HIF_... */
unsigned long gh_ip;
};
enum {
GLF_LOCK = 1,
GLF_STICKY = 2,
GLF_DEMOTE = 3,
GLF_PENDING_DEMOTE = 4,
GLF_DIRTY = 5,
GLF_DEMOTE_IN_PROGRESS = 6,
GLF_LFLUSH = 7,
GLF_WAITERS2 = 8,
GLF_CONV_DEADLK = 9,
GLF_LOCK = 1,
GLF_STICKY = 2,
GLF_DEMOTE = 3,
GLF_PENDING_DEMOTE = 4,
GLF_DEMOTE_IN_PROGRESS = 5,
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
GLF_INVALIDATE_IN_PROGRESS = 8,
GLF_REPLY_PENDING = 9,
};
struct gfs2_glock {
@@ -179,19 +178,14 @@ struct gfs2_glock {
spinlock_t gl_spin;
unsigned int gl_state;
unsigned int gl_target;
unsigned int gl_reply;
unsigned int gl_hash;
unsigned int gl_demote_state; /* state requested by remote node */
unsigned long gl_demote_time; /* time of first demote request */
struct pid *gl_owner_pid;
unsigned long gl_ip;
struct list_head gl_holders;
struct list_head gl_waiters1; /* HIF_MUTEX */
struct list_head gl_waiters3; /* HIF_PROMOTE */
const struct gfs2_glock_operations *gl_ops;
struct gfs2_holder *gl_req_gh;
void *gl_lock;
char *gl_lvb;
atomic_t gl_lvb_count;
@@ -427,7 +421,6 @@ struct gfs2_tune {
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_atime_quantum; /* Min secs between atime updates */
unsigned int gt_new_files_jdata;
unsigned int gt_new_files_directio;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
unsigned int gt_stall_secs; /* Detects trouble! */
unsigned int gt_complain_secs;
@@ -534,7 +527,6 @@ struct gfs2_sbd {
struct mutex sd_rindex_mutex;
struct list_head sd_rindex_list;
struct list_head sd_rindex_mru_list;
struct list_head sd_rindex_recent_list;
struct gfs2_rgrpd *sd_rindex_forward;
unsigned int sd_rgrps;
+3 -8
View File
@@ -504,7 +504,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
}
if (!is_root) {
error = permission(dir, MAY_EXEC, NULL);
error = gfs2_permission(dir, MAY_EXEC);
if (error)
goto out;
}
@@ -667,7 +667,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
{
int error;
error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
@@ -789,12 +789,7 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
gfs2_tune_get(sdp, gt_new_files_jdata))
di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
gfs2_tune_get(sdp, gt_new_files_directio))
di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
} else if (S_ISDIR(mode)) {
di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
GFS2_DIF_INHERIT_DIRECTIO);
di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
GFS2_DIF_INHERIT_JDATA);
}
@@ -1134,7 +1129,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
if (IS_APPEND(&dip->i_inode))
return -EPERM;
error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC);
if (error)
return error;
+1 -1
View File
@@ -72,7 +72,6 @@ static inline void gfs2_inum_out(const struct gfs2_inode *ip,
}
void gfs2_inode_attr_in(struct gfs2_inode *ip);
void gfs2_set_iop(struct inode *inode);
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
u64 no_addr, u64 no_formal_ino,
@@ -91,6 +90,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip);
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
const struct gfs2_inode *ip);
int gfs2_permission(struct inode *inode, int mask);
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
int gfs2_glock_nq_atime(struct gfs2_holder *gh);
+50 -2
View File
@@ -23,12 +23,54 @@ struct lmh_wrapper {
const struct lm_lockops *lw_ops;
};
static int nolock_mount(char *table_name, char *host_data,
lm_callback_t cb, void *cb_data,
unsigned int min_lvb_size, int flags,
struct lm_lockstruct *lockstruct,
struct kobject *fskobj);
/* List of registered low-level locking protocols. A file system selects one
of them by name at mount time, e.g. lock_nolock, lock_dlm. */
static const struct lm_lockops nolock_ops = {
.lm_proto_name = "lock_nolock",
.lm_mount = nolock_mount,
};
static struct lmh_wrapper nolock_proto = {
.lw_list = LIST_HEAD_INIT(nolock_proto.lw_list),
.lw_ops = &nolock_ops,
};
static LIST_HEAD(lmh_list);
static DEFINE_MUTEX(lmh_lock);
static int nolock_mount(char *table_name, char *host_data,
lm_callback_t cb, void *cb_data,
unsigned int min_lvb_size, int flags,
struct lm_lockstruct *lockstruct,
struct kobject *fskobj)
{
char *c;
unsigned int jid;
c = strstr(host_data, "jid=");
if (!c)
jid = 0;
else {
c += 4;
sscanf(c, "%u", &jid);
}
lockstruct->ls_jid = jid;
lockstruct->ls_first = 1;
lockstruct->ls_lvb_size = min_lvb_size;
lockstruct->ls_ops = &nolock_ops;
lockstruct->ls_flags = LM_LSFLAG_LOCAL;
return 0;
}
/**
* gfs2_register_lockproto - Register a low-level locking protocol
* @proto: the protocol definition
@@ -116,9 +158,13 @@ int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
int try = 0;
int error, found;
retry:
mutex_lock(&lmh_lock);
if (list_empty(&nolock_proto.lw_list))
list_add(&nolock_proto.lw_list, &lmh_list);
found = 0;
list_for_each_entry(lw, &lmh_list, lw_list) {
if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
@@ -139,7 +185,8 @@ retry:
goto out;
}
if (!try_module_get(lw->lw_ops->lm_owner)) {
if (lw->lw_ops->lm_owner &&
!try_module_get(lw->lw_ops->lm_owner)) {
try = 0;
mutex_unlock(&lmh_lock);
msleep(1000);
@@ -158,7 +205,8 @@ out:
void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
{
mutex_lock(&lmh_lock);
lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
if (lockstruct->ls_ops->lm_unmount)
lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
if (lockstruct->ls_ops->lm_owner)
module_put(lockstruct->ls_ops->lm_owner);
mutex_unlock(&lmh_lock);
+276 -96
View File
@@ -11,27 +11,287 @@
static char junk_lvb[GDLM_LVB_SIZE];
static void queue_complete(struct gdlm_lock *lp)
/* convert dlm lock-mode to gfs lock-state */
static s16 gdlm_make_lmstate(s16 dlmmode)
{
switch (dlmmode) {
case DLM_LOCK_IV:
case DLM_LOCK_NL:
return LM_ST_UNLOCKED;
case DLM_LOCK_EX:
return LM_ST_EXCLUSIVE;
case DLM_LOCK_CW:
return LM_ST_DEFERRED;
case DLM_LOCK_PR:
return LM_ST_SHARED;
}
gdlm_assert(0, "unknown DLM mode %d", dlmmode);
return -1;
}
/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
thread gets to it. */
static void queue_submit(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
clear_bit(LFL_ACTIVE, &lp->flags);
spin_lock(&ls->async_lock);
list_add_tail(&lp->clist, &ls->complete);
list_add_tail(&lp->delay_list, &ls->submit);
spin_unlock(&ls->async_lock);
wake_up(&ls->thread_wait);
}
static inline void gdlm_ast(void *astarg)
static void wake_up_ast(struct gdlm_lock *lp)
{
queue_complete(astarg);
clear_bit(LFL_AST_WAIT, &lp->flags);
smp_mb__after_clear_bit();
wake_up_bit(&lp->flags, LFL_AST_WAIT);
}
static inline void gdlm_bast(void *astarg, int mode)
static void gdlm_delete_lp(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
spin_lock(&ls->async_lock);
if (!list_empty(&lp->delay_list))
list_del_init(&lp->delay_list);
ls->all_locks_count--;
spin_unlock(&ls->async_lock);
kfree(lp);
}
static void gdlm_queue_delayed(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
spin_lock(&ls->async_lock);
list_add_tail(&lp->delay_list, &ls->delayed);
spin_unlock(&ls->async_lock);
}
static void process_complete(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
struct lm_async_cb acb;
memset(&acb, 0, sizeof(acb));
if (lp->lksb.sb_status == -DLM_ECANCEL) {
log_info("complete dlm cancel %x,%llx flags %lx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
lp->req = lp->cur;
acb.lc_ret |= LM_OUT_CANCELED;
if (lp->cur == DLM_LOCK_IV)
lp->lksb.sb_lkid = 0;
goto out;
}
if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
if (lp->lksb.sb_status != -DLM_EUNLOCK) {
log_info("unlock sb_status %d %x,%llx flags %lx",
lp->lksb.sb_status, lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
return;
}
lp->cur = DLM_LOCK_IV;
lp->req = DLM_LOCK_IV;
lp->lksb.sb_lkid = 0;
if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
gdlm_delete_lp(lp);
return;
}
goto out;
}
if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
if (lp->req == DLM_LOCK_PR)
lp->req = DLM_LOCK_CW;
else if (lp->req == DLM_LOCK_CW)
lp->req = DLM_LOCK_PR;
}
/*
* A canceled lock request. The lock was just taken off the delayed
* list and was never even submitted to dlm.
*/
if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
log_info("complete internal cancel %x,%llx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
lp->req = lp->cur;
acb.lc_ret |= LM_OUT_CANCELED;
goto out;
}
/*
 * An error occurred.
*/
if (lp->lksb.sb_status) {
/* a "normal" error */
if ((lp->lksb.sb_status == -EAGAIN) &&
(lp->lkf & DLM_LKF_NOQUEUE)) {
lp->req = lp->cur;
if (lp->cur == DLM_LOCK_IV)
lp->lksb.sb_lkid = 0;
goto out;
}
/* this could only happen with cancels I think */
log_info("ast sb_status %d %x,%llx flags %lx",
lp->lksb.sb_status, lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
return;
}
/*
* This is an AST for an EX->EX conversion for sync_lvb from GFS.
*/
if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
wake_up_ast(lp);
return;
}
/*
* A lock has been demoted to NL because it initially completed during
* BLOCK_LOCKS. Now it must be requested in the originally requested
* mode.
*/
if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
lp->cur = DLM_LOCK_NL;
lp->req = lp->prev_req;
lp->prev_req = DLM_LOCK_IV;
lp->lkf &= ~DLM_LKF_CONVDEADLK;
set_bit(LFL_NOCACHE, &lp->flags);
if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
!test_bit(LFL_NOBLOCK, &lp->flags))
gdlm_queue_delayed(lp);
else
queue_submit(lp);
return;
}
/*
* A request is granted during dlm recovery. It may be granted
* because the locks of a failed node were cleared. In that case,
* there may be inconsistent data beneath this lock and we must wait
* for recovery to complete to use it. When gfs recovery is done this
* granted lock will be converted to NL and then reacquired in this
* granted state.
*/
if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
!test_bit(LFL_NOBLOCK, &lp->flags) &&
lp->req != DLM_LOCK_NL) {
lp->cur = lp->req;
lp->prev_req = lp->req;
lp->req = DLM_LOCK_NL;
lp->lkf |= DLM_LKF_CONVERT;
lp->lkf &= ~DLM_LKF_CONVDEADLK;
log_debug("rereq %x,%llx id %x %d,%d",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->lksb.sb_lkid, lp->cur, lp->req);
set_bit(LFL_REREQUEST, &lp->flags);
queue_submit(lp);
return;
}
/*
* DLM demoted the lock to NL before it was granted so GFS must be
* told it cannot cache data for this lock.
*/
if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
set_bit(LFL_NOCACHE, &lp->flags);
out:
/*
* This is an internal lock_dlm lock
*/
if (test_bit(LFL_INLOCK, &lp->flags)) {
clear_bit(LFL_NOBLOCK, &lp->flags);
lp->cur = lp->req;
wake_up_ast(lp);
return;
}
/*
* Normal completion of a lock request. Tell GFS it now has the lock.
*/
clear_bit(LFL_NOBLOCK, &lp->flags);
lp->cur = lp->req;
acb.lc_name = lp->lockname;
acb.lc_ret |= gdlm_make_lmstate(lp->cur);
ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}
static void gdlm_ast(void *astarg)
{
struct gdlm_lock *lp = astarg;
clear_bit(LFL_ACTIVE, &lp->flags);
process_complete(lp);
}
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
struct gdlm_ls *ls = lp->ls;
unsigned int cb = 0;
switch (gdlm_make_lmstate(bast_mode)) {
case LM_ST_EXCLUSIVE:
cb = LM_CB_NEED_E;
break;
case LM_ST_DEFERRED:
cb = LM_CB_NEED_D;
break;
case LM_ST_SHARED:
cb = LM_CB_NEED_S;
break;
default:
gdlm_assert(0, "unknown bast mode %u", bast_mode);
}
ls->fscb(ls->sdp, cb, &lp->lockname);
}
static void gdlm_bast(void *astarg, int mode)
{
struct gdlm_lock *lp = astarg;
if (!mode) {
printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
@@ -40,23 +300,7 @@ static inline void gdlm_bast(void *astarg, int mode)
return;
}
spin_lock(&ls->async_lock);
if (!lp->bast_mode) {
list_add_tail(&lp->blist, &ls->blocking);
lp->bast_mode = mode;
} else if (lp->bast_mode < mode)
lp->bast_mode = mode;
spin_unlock(&ls->async_lock);
wake_up(&ls->thread_wait);
}
void gdlm_queue_delayed(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
spin_lock(&ls->async_lock);
list_add_tail(&lp->delay_list, &ls->delayed);
spin_unlock(&ls->async_lock);
process_blocking(lp, mode);
}
/* convert gfs lock-state to dlm lock-mode */
@@ -77,24 +321,6 @@ static s16 make_mode(s16 lmstate)
return -1;
}
/* convert dlm lock-mode to gfs lock-state */
s16 gdlm_make_lmstate(s16 dlmmode)
{
switch (dlmmode) {
case DLM_LOCK_IV:
case DLM_LOCK_NL:
return LM_ST_UNLOCKED;
case DLM_LOCK_EX:
return LM_ST_EXCLUSIVE;
case DLM_LOCK_CW:
return LM_ST_DEFERRED;
case DLM_LOCK_PR:
return LM_ST_SHARED;
}
gdlm_assert(0, "unknown DLM mode %d", dlmmode);
return -1;
}
/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
@@ -134,14 +360,6 @@ static inline unsigned int make_flags(struct gdlm_lock *lp,
if (lp->lksb.sb_lkid != 0) {
lkf |= DLM_LKF_CONVERT;
/* Conversion deadlock avoidance by DLM */
if (!(lp->ls->fsflags & LM_MFLAG_CONV_NODROP) &&
!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
!(lkf & DLM_LKF_NOQUEUE) &&
cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
lkf |= DLM_LKF_CONVDEADLK;
}
if (lp->lvb)
@@ -173,14 +391,9 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
make_strname(name, &lp->strname);
lp->ls = ls;
lp->cur = DLM_LOCK_IV;
lp->lvb = NULL;
lp->hold_null = NULL;
INIT_LIST_HEAD(&lp->clist);
INIT_LIST_HEAD(&lp->blist);
INIT_LIST_HEAD(&lp->delay_list);
spin_lock(&ls->async_lock);
list_add(&lp->all_list, &ls->all_locks);
ls->all_locks_count++;
spin_unlock(&ls->async_lock);
@@ -188,26 +401,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
return 0;
}
void gdlm_delete_lp(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
spin_lock(&ls->async_lock);
if (!list_empty(&lp->clist))
list_del_init(&lp->clist);
if (!list_empty(&lp->blist))
list_del_init(&lp->blist);
if (!list_empty(&lp->delay_list))
list_del_init(&lp->delay_list);
gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
list_del_init(&lp->all_list);
ls->all_locks_count--;
spin_unlock(&ls->async_lock);
kfree(lp);
}
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
void **lockp)
{
@@ -261,7 +454,7 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp)
if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
lp->lksb.sb_status = -EAGAIN;
queue_complete(lp);
gdlm_ast(lp);
error = 0;
}
@@ -308,6 +501,12 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state,
{
struct gdlm_lock *lp = lock;
if (req_state == LM_ST_UNLOCKED)
return gdlm_unlock(lock, cur_state);
if (req_state == LM_ST_UNLOCKED)
return gdlm_unlock(lock, cur_state);
clear_bit(LFL_DLM_CANCEL, &lp->flags);
if (flags & LM_FLAG_NOEXP)
set_bit(LFL_NOBLOCK, &lp->flags);
@@ -351,7 +550,7 @@ void gdlm_cancel(void *lock)
if (delay_list) {
set_bit(LFL_CANCEL, &lp->flags);
set_bit(LFL_ACTIVE, &lp->flags);
queue_complete(lp);
gdlm_ast(lp);
return;
}
@@ -507,22 +706,3 @@ void gdlm_submit_delayed(struct gdlm_ls *ls)
wake_up(&ls->thread_wait);
}
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
struct gdlm_lock *lp, *safe;
int count = 0;
spin_lock(&ls->async_lock);
list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
list_del_init(&lp->all_list);
if (lp->lvb && lp->lvb != junk_lvb)
kfree(lp->lvb);
kfree(lp);
count++;
}
spin_unlock(&ls->async_lock);
return count;
}
+1 -17
View File
@@ -72,19 +72,12 @@ struct gdlm_ls {
int recover_jid_done;
int recover_jid_status;
spinlock_t async_lock;
struct list_head complete;
struct list_head blocking;
struct list_head delayed;
struct list_head submit;
struct list_head all_locks;
u32 all_locks_count;
wait_queue_head_t wait_control;
struct task_struct *thread1;
struct task_struct *thread2;
struct task_struct *thread;
wait_queue_head_t thread_wait;
unsigned long drop_time;
int drop_locks_count;
int drop_locks_period;
};
enum {
@@ -117,12 +110,7 @@ struct gdlm_lock {
u32 lkf; /* dlm flags DLM_LKF_ */
unsigned long flags; /* lock_dlm flags LFL_ */
int bast_mode; /* protected by async_lock */
struct list_head clist; /* complete */
struct list_head blist; /* blocking */
struct list_head delay_list; /* delayed */
struct list_head all_list; /* all locks for the fs */
struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
};
@@ -159,11 +147,7 @@ void gdlm_release_threads(struct gdlm_ls *);
/* lock.c */
s16 gdlm_make_lmstate(s16);
void gdlm_queue_delayed(struct gdlm_lock *);
void gdlm_submit_delayed(struct gdlm_ls *);
int gdlm_release_all_locks(struct gdlm_ls *);
void gdlm_delete_lp(struct gdlm_lock *);
unsigned int gdlm_do_lock(struct gdlm_lock *);
int gdlm_get_lock(void *, struct lm_lockname *, void **);
+1 -13
View File
@@ -22,22 +22,14 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
if (!ls)
return NULL;
ls->drop_locks_count = GDLM_DROP_COUNT;
ls->drop_locks_period = GDLM_DROP_PERIOD;
ls->fscb = cb;
ls->sdp = sdp;
ls->fsflags = flags;
spin_lock_init(&ls->async_lock);
INIT_LIST_HEAD(&ls->complete);
INIT_LIST_HEAD(&ls->blocking);
INIT_LIST_HEAD(&ls->delayed);
INIT_LIST_HEAD(&ls->submit);
INIT_LIST_HEAD(&ls->all_locks);
init_waitqueue_head(&ls->thread_wait);
init_waitqueue_head(&ls->wait_control);
ls->thread1 = NULL;
ls->thread2 = NULL;
ls->drop_time = jiffies;
ls->jid = -1;
strncpy(buf, table_name, 256);
@@ -180,7 +172,6 @@ out:
static void gdlm_unmount(void *lockspace)
{
struct gdlm_ls *ls = lockspace;
int rv;
log_debug("unmount flags %lx", ls->flags);
@@ -194,9 +185,7 @@ static void gdlm_unmount(void *lockspace)
gdlm_kobject_release(ls);
dlm_release_lockspace(ls->dlm_lockspace, 2);
gdlm_release_threads(ls);
rv = gdlm_release_all_locks(ls);
if (rv)
log_info("gdlm_unmount: %d stray locks freed", rv);
BUG_ON(ls->all_locks_count);
out:
kfree(ls);
}
@@ -232,7 +221,6 @@ static void gdlm_withdraw(void *lockspace)
dlm_release_lockspace(ls->dlm_lockspace, 2);
gdlm_release_threads(ls);
gdlm_release_all_locks(ls);
gdlm_kobject_release(ls);
}
-13
View File
@@ -114,17 +114,6 @@ static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
return sprintf(buf, "%d\n", ls->recover_jid_status);
}
static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf)
{
return sprintf(buf, "%d\n", ls->drop_locks_count);
}
static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len)
{
ls->drop_locks_count = simple_strtol(buf, NULL, 0);
return len;
}
struct gdlm_attr {
struct attribute attr;
ssize_t (*show)(struct gdlm_ls *, char *);
@@ -144,7 +133,6 @@ GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0644, recover_show, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store);
static struct attribute *gdlm_attrs[] = {
&gdlm_attr_proto_name.attr,
@@ -157,7 +145,6 @@ static struct attribute *gdlm_attrs[] = {
&gdlm_attr_recover.attr,
&gdlm_attr_recover_done.attr,
&gdlm_attr_recover_status.attr,
&gdlm_attr_drop_count.attr,
NULL,
};
+13 -320
View File
@@ -9,367 +9,60 @@
#include "lock_dlm.h"
/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
thread gets to it. */
static void queue_submit(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
spin_lock(&ls->async_lock);
list_add_tail(&lp->delay_list, &ls->submit);
spin_unlock(&ls->async_lock);
wake_up(&ls->thread_wait);
}
static void process_blocking(struct gdlm_lock *lp, int bast_mode)
{
struct gdlm_ls *ls = lp->ls;
unsigned int cb = 0;
switch (gdlm_make_lmstate(bast_mode)) {
case LM_ST_EXCLUSIVE:
cb = LM_CB_NEED_E;
break;
case LM_ST_DEFERRED:
cb = LM_CB_NEED_D;
break;
case LM_ST_SHARED:
cb = LM_CB_NEED_S;
break;
default:
gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
}
ls->fscb(ls->sdp, cb, &lp->lockname);
}
static void wake_up_ast(struct gdlm_lock *lp)
{
clear_bit(LFL_AST_WAIT, &lp->flags);
smp_mb__after_clear_bit();
wake_up_bit(&lp->flags, LFL_AST_WAIT);
}
static void process_complete(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
struct lm_async_cb acb;
s16 prev_mode = lp->cur;
memset(&acb, 0, sizeof(acb));
if (lp->lksb.sb_status == -DLM_ECANCEL) {
log_info("complete dlm cancel %x,%llx flags %lx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
lp->req = lp->cur;
acb.lc_ret |= LM_OUT_CANCELED;
if (lp->cur == DLM_LOCK_IV)
lp->lksb.sb_lkid = 0;
goto out;
}
if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
if (lp->lksb.sb_status != -DLM_EUNLOCK) {
log_info("unlock sb_status %d %x,%llx flags %lx",
lp->lksb.sb_status, lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
return;
}
lp->cur = DLM_LOCK_IV;
lp->req = DLM_LOCK_IV;
lp->lksb.sb_lkid = 0;
if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
gdlm_delete_lp(lp);
return;
}
goto out;
}
if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
if (lp->req == DLM_LOCK_PR)
lp->req = DLM_LOCK_CW;
else if (lp->req == DLM_LOCK_CW)
lp->req = DLM_LOCK_PR;
}
/*
* A canceled lock request. The lock was just taken off the delayed
* list and was never even submitted to dlm.
*/
if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
log_info("complete internal cancel %x,%llx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
lp->req = lp->cur;
acb.lc_ret |= LM_OUT_CANCELED;
goto out;
}
/*
 * An error occurred.
*/
if (lp->lksb.sb_status) {
/* a "normal" error */
if ((lp->lksb.sb_status == -EAGAIN) &&
(lp->lkf & DLM_LKF_NOQUEUE)) {
lp->req = lp->cur;
if (lp->cur == DLM_LOCK_IV)
lp->lksb.sb_lkid = 0;
goto out;
}
/* this could only happen with cancels I think */
log_info("ast sb_status %d %x,%llx flags %lx",
lp->lksb.sb_status, lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->flags);
if (lp->lksb.sb_status == -EDEADLOCK &&
lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
lp->req = lp->cur;
acb.lc_ret |= LM_OUT_CONV_DEADLK;
if (lp->cur == DLM_LOCK_IV)
lp->lksb.sb_lkid = 0;
goto out;
} else
return;
}
/*
* This is an AST for an EX->EX conversion for sync_lvb from GFS.
*/
if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
wake_up_ast(lp);
return;
}
/*
* A lock has been demoted to NL because it initially completed during
* BLOCK_LOCKS. Now it must be requested in the originally requested
* mode.
*/
if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number);
lp->cur = DLM_LOCK_NL;
lp->req = lp->prev_req;
lp->prev_req = DLM_LOCK_IV;
lp->lkf &= ~DLM_LKF_CONVDEADLK;
set_bit(LFL_NOCACHE, &lp->flags);
if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
!test_bit(LFL_NOBLOCK, &lp->flags))
gdlm_queue_delayed(lp);
else
queue_submit(lp);
return;
}
/*
* A request is granted during dlm recovery. It may be granted
* because the locks of a failed node were cleared. In that case,
* there may be inconsistent data beneath this lock and we must wait
* for recovery to complete to use it. When gfs recovery is done this
* granted lock will be converted to NL and then reacquired in this
* granted state.
*/
if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
!test_bit(LFL_NOBLOCK, &lp->flags) &&
lp->req != DLM_LOCK_NL) {
lp->cur = lp->req;
lp->prev_req = lp->req;
lp->req = DLM_LOCK_NL;
lp->lkf |= DLM_LKF_CONVERT;
lp->lkf &= ~DLM_LKF_CONVDEADLK;
log_debug("rereq %x,%llx id %x %d,%d",
lp->lockname.ln_type,
(unsigned long long)lp->lockname.ln_number,
lp->lksb.sb_lkid, lp->cur, lp->req);
set_bit(LFL_REREQUEST, &lp->flags);
queue_submit(lp);
return;
}
/*
* DLM demoted the lock to NL before it was granted so GFS must be
* told it cannot cache data for this lock.
*/
if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
set_bit(LFL_NOCACHE, &lp->flags);
out:
/*
* This is an internal lock_dlm lock
*/
if (test_bit(LFL_INLOCK, &lp->flags)) {
clear_bit(LFL_NOBLOCK, &lp->flags);
lp->cur = lp->req;
wake_up_ast(lp);
return;
}
/*
* Normal completion of a lock request. Tell GFS it now has the lock.
*/
clear_bit(LFL_NOBLOCK, &lp->flags);
lp->cur = lp->req;
acb.lc_name = lp->lockname;
acb.lc_ret |= gdlm_make_lmstate(lp->cur);
if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
(lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
acb.lc_ret |= LM_OUT_CACHEABLE;
ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}
static inline int no_work(struct gdlm_ls *ls, int blocking)
static inline int no_work(struct gdlm_ls *ls)
{
int ret;
spin_lock(&ls->async_lock);
ret = list_empty(&ls->complete) && list_empty(&ls->submit);
if (ret && blocking)
ret = list_empty(&ls->blocking);
ret = list_empty(&ls->submit);
spin_unlock(&ls->async_lock);
return ret;
}
/*
 * Decide whether to ask GFS to drop cached locks: at most once per
 * drop_locks_period seconds, and only when the total lock count has
 * reached the configured drop_locks_count threshold (0 disables it).
 * Returns 1 when a drop callback should be issued, 0 otherwise.
 */
static inline int check_drop(struct gdlm_ls *ls)
{
	if (ls->drop_locks_count == 0)
		return 0;

	/* Rate-limit: only consider dropping once the period has elapsed. */
	if (!time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ))
		return 0;

	ls->drop_time = jiffies;
	return ls->all_locks_count >= ls->drop_locks_count ? 1 : 0;
}
static int gdlm_thread(void *data, int blist)
static int gdlm_thread(void *data)
{
struct gdlm_ls *ls = (struct gdlm_ls *) data;
struct gdlm_lock *lp = NULL;
uint8_t complete, blocking, submit, drop;
/* Only thread1 is allowed to do blocking callbacks since gfs
may wait for a completion callback within a blocking cb. */
while (!kthread_should_stop()) {
wait_event_interruptible(ls->thread_wait,
!no_work(ls, blist) || kthread_should_stop());
complete = blocking = submit = drop = 0;
!no_work(ls) || kthread_should_stop());
spin_lock(&ls->async_lock);
if (blist && !list_empty(&ls->blocking)) {
lp = list_entry(ls->blocking.next, struct gdlm_lock,
blist);
list_del_init(&lp->blist);
blocking = lp->bast_mode;
lp->bast_mode = 0;
} else if (!list_empty(&ls->complete)) {
lp = list_entry(ls->complete.next, struct gdlm_lock,
clist);
list_del_init(&lp->clist);
complete = 1;
} else if (!list_empty(&ls->submit)) {
if (!list_empty(&ls->submit)) {
lp = list_entry(ls->submit.next, struct gdlm_lock,
delay_list);
list_del_init(&lp->delay_list);
submit = 1;
}
drop = check_drop(ls);
spin_unlock(&ls->async_lock);
if (complete)
process_complete(lp);
else if (blocking)
process_blocking(lp, blocking);
else if (submit)
spin_unlock(&ls->async_lock);
gdlm_do_lock(lp);
if (drop)
ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);
schedule();
spin_lock(&ls->async_lock);
}
spin_unlock(&ls->async_lock);
}
return 0;
}
/*
 * NOTE(review): diff residue — obsolete wrapper from the old two-thread
 * scheme; it passes a second argument that the retained single-argument
 * gdlm_thread() elsewhere in this residue does not take.  Should be
 * deleted along with gdlm_thread2() — confirm against upstream.
 */
static int gdlm_thread1(void *data)
{
	return gdlm_thread(data, 1);
}
/*
 * NOTE(review): diff residue — obsolete non-blocking-callback wrapper
 * from the old two-thread scheme; see note on gdlm_thread1() — confirm
 * against upstream before keeping.
 */
static int gdlm_thread2(void *data)
{
	return gdlm_thread(data, 0);
}
int gdlm_init_threads(struct gdlm_ls *ls)
{
struct task_struct *p;
int error;
p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
p = kthread_run(gdlm_thread, ls, "lock_dlm");
error = IS_ERR(p);
if (error) {
log_error("can't start lock_dlm1 thread %d", error);
log_error("can't start lock_dlm thread %d", error);
return error;
}
ls->thread1 = p;
p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
error = IS_ERR(p);
if (error) {
log_error("can't start lock_dlm2 thread %d", error);
kthread_stop(ls->thread1);
return error;
}
ls->thread2 = p;
ls->thread = p;
return 0;
}
/*
 * Stop the lockspace's worker thread started by gdlm_init_threads().
 *
 * NOTE(review): diff residue stopped thread1, thread2 and thread;
 * reconstructed to stop only the single ls->thread that
 * gdlm_init_threads() creates.
 */
void gdlm_release_threads(struct gdlm_ls *ls)
{
	kthread_stop(ls->thread);
}
-3
View File
@@ -1,3 +0,0 @@
obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += lock_nolock.o
lock_nolock-y := main.o
-238
View File
@@ -1,238 +0,0 @@
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/lm_interface.h>
/*
 * Per-mount state for the nolock "lockspace": only the LVB size
 * requested by the filesystem is needed.
 */
struct nolock_lockspace {
	unsigned int nl_lvb_size;	/* size of lock value blocks handed out */
};

static const struct lm_lockops nolock_ops;
/*
 * nolock_mount - set up a single-node "lockspace"
 * @table_name: unused by nolock
 * @host_data: mount options; may contain "jid=<n>" selecting the journal
 * @cb/@cb_data: callback interface (unused by nolock)
 * @min_lvb_size: LVB size the filesystem requires
 * @flags: mount flags (unused)
 * @lockstruct: filled in with the lockspace description for GFS2
 * @fskobj: sysfs object (unused)
 *
 * Returns: 0 on success, -ENOMEM on allocation failure
 */
static int nolock_mount(char *table_name, char *host_data,
			lm_callback_t cb, void *cb_data,
			unsigned int min_lvb_size, int flags,
			struct lm_lockstruct *lockstruct,
			struct kobject *fskobj)
{
	char *c;
	unsigned int jid = 0;	/* default journal id when none supplied */
	struct nolock_lockspace *nl;

	/* Parse an optional "jid=<n>" token from the mount host data. */
	c = strstr(host_data, "jid=");
	if (c) {
		c += 4;
		/* Fix: if the value fails to parse, fall back to jid 0
		   instead of reading an uninitialized variable. */
		if (sscanf(c, "%u", &jid) != 1)
			jid = 0;
	}

	nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
	if (!nl)
		return -ENOMEM;

	nl->nl_lvb_size = min_lvb_size;

	lockstruct->ls_jid = jid;
	lockstruct->ls_first = 1;	/* single node is always "first" */
	lockstruct->ls_lvb_size = min_lvb_size;
	lockstruct->ls_lockspace = nl;
	lockstruct->ls_ops = &nolock_ops;
	lockstruct->ls_flags = LM_LSFLAG_LOCAL;

	return 0;
}
/* No-op: there are no other nodes on a single-node mount. */
static void nolock_others_may_mount(void *lockspace)
{
}
/* Tear down the lockspace: free the nolock_lockspace allocated at mount. */
static void nolock_unmount(void *lockspace)
{
	kfree(lockspace);
}
/* No-op: nothing to notify on withdraw for a single node. */
static void nolock_withdraw(void *lockspace)
{
}
/**
 * nolock_get_lock - get a lm_lock_t given a description of the lock
 * @lockspace: the lockspace the lock lives in
 * @name: the name of the lock
 * @lockp: return the lm_lock_t here
 *
 * Every "lock" is simply the lockspace pointer itself; no per-lock
 * state is needed on a single node.
 *
 * Returns: 0 on success, -EXXX on failure
 */

static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
			   void **lockp)
{
	*lockp = lockspace;
	return 0;
}
/**
 * nolock_put_lock - get rid of a lock structure
 * @lock: the lock to throw away
 *
 * No-op: nolock_get_lock() allocated nothing, so there is nothing
 * to free here.
 */

static void nolock_put_lock(void *lock)
{
}
/**
 * nolock_lock - acquire a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 * @req_state: the requested state
 * @flags: modifier flags
 *
 * Single node: every request is granted immediately in the requested
 * state, and data may always be cached.
 *
 * Returns: A bitmap of LM_OUT_*
 */

static unsigned int nolock_lock(void *lock, unsigned int cur_state,
				unsigned int req_state, unsigned int flags)
{
	return req_state | LM_OUT_CACHEABLE;
}
/**
 * nolock_unlock - unlock a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 *
 * Always succeeds immediately; no remote state to release.
 *
 * Returns: 0
 */

static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
{
	return 0;
}
/* No-op: requests are granted synchronously, so nothing can be cancelled. */
static void nolock_cancel(void *lock)
{
}
/**
* nolock_hold_lvb - hold on to a lock value block
* @lock: the lock the LVB is associated with
* @lvbp: return the lm_lvb_t here
*
* Returns: 0 on success, -EXXX on failure
*/
static int nolock_hold_lvb(void *lock, char **lvbp)
{
struct nolock_lockspace *nl = lock;
int error = 0;
*lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
if (!*lvbp)
error = -ENOMEM;
return error;
}
/**
 * nolock_unhold_lvb - release a LVB
 * @lock: the lock the LVB is associated with
 * @lvb: the lock value block
 *
 * Frees the buffer allocated by nolock_hold_lvb().
 */

static void nolock_unhold_lvb(void *lock, char *lvb)
{
	kfree(lvb);
}
/* Query a posix lock: delegate straight to the local VFS lock table. */
static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
			    struct file *file, struct file_lock *fl)
{
	posix_test_lock(file, fl);
	return 0;
}
/* Set a posix lock: on a single node the local VFS lock code suffices. */
static int nolock_plock(void *lockspace, struct lm_lockname *name,
			struct file *file, int cmd, struct file_lock *fl)
{
	return posix_lock_file_wait(file, fl);
}
/* Remove a posix lock: @fl carries F_UNLCK, so the same VFS call applies. */
static int nolock_punlock(void *lockspace, struct lm_lockname *name,
			  struct file *file, struct file_lock *fl)
{
	return posix_lock_file_wait(file, fl);
}
/* No-op: there is no journal recovery coordination on a single node. */
static void nolock_recovery_done(void *lockspace, unsigned int jid,
				 unsigned int message)
{
}
/* Lock-protocol operations table registered with the GFS2 lock harness. */
static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_mount = nolock_mount,
	.lm_others_may_mount = nolock_others_may_mount,
	.lm_unmount = nolock_unmount,
	.lm_withdraw = nolock_withdraw,
	.lm_get_lock = nolock_get_lock,
	.lm_put_lock = nolock_put_lock,
	.lm_lock = nolock_lock,
	.lm_unlock = nolock_unlock,
	.lm_cancel = nolock_cancel,
	.lm_hold_lvb = nolock_hold_lvb,
	.lm_unhold_lvb = nolock_unhold_lvb,
	.lm_plock_get = nolock_plock_get,
	.lm_plock = nolock_plock,
	.lm_punlock = nolock_punlock,
	.lm_recovery_done = nolock_recovery_done,
	.lm_owner = THIS_MODULE,
};
/*
 * Module init: register the nolock protocol with the GFS2 lock harness.
 * Returns 0 on success or the registration error code.
 */
static int __init init_nolock(void)
{
	int error = gfs2_register_lockproto(&nolock_ops);

	if (error) {
		printk(KERN_WARNING
		       "lock_nolock: can't register protocol: %d\n", error);
		return error;
	}

	printk(KERN_INFO
	       "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
	return 0;
}
/* Module exit: unregister the nolock protocol. */
static void __exit exit_nolock(void)
{
	gfs2_unregister_lockproto(&nolock_ops);
}
/* Module entry points and metadata. */
module_init(init_nolock);
module_exit(exit_nolock);

MODULE_DESCRIPTION("GFS Nolock Locking Module");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
+2
View File
@@ -87,6 +87,8 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
*/
static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
__releases(&sdp->sd_log_lock)
__acquires(&sdp->sd_log_lock)
{
struct gfs2_bufdata *bd, *s;
struct buffer_head *bh;
+2
View File
@@ -21,6 +21,7 @@
*/
/* Take the log spinlock; the sparse annotation documents the acquisition. */
static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
__acquires(&sdp->sd_log_lock)
{
	spin_lock(&sdp->sd_log_lock);
}
@@ -32,6 +33,7 @@ static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
*/
/* Release the log spinlock; the sparse annotation documents the release. */
static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
__releases(&sdp->sd_log_lock)
{
	spin_unlock(&sdp->sd_log_lock);
}
-2
View File
@@ -40,8 +40,6 @@ static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo)
INIT_HLIST_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_waiters1);
INIT_LIST_HEAD(&gl->gl_waiters3);
gl->gl_lvb = NULL;
atomic_set(&gl->gl_lvb_count, 0);
INIT_LIST_HEAD(&gl->gl_reclaim);

Some files were not shown because too many files have changed in this diff Show More