/*
* fs/nfs/nfs4state.c
*
* Client-side XDR for NFSv4.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Kendrick Smith <kmsmith@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Implementation of the NFSv4 state model. For the time being,
* this is minimal, but will be made much more complex in a
* subsequent patch.
*/
#include <linux/kernel.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include "pnfs.h"
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
struct nfs4_setclientid_res clid = {
.clientid = clp->cl_clientid,
.confirm = clp->cl_confirm,
};
unsigned short port;
int status;
if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
goto do_confirm;
port = nfs_callback_tcpport;
if (clp->cl_addr.ss_family == AF_INET6)
port = nfs_callback_tcpport6;
status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
if (status != 0)
goto out;
clp->cl_clientid = clid.clientid;
clp->cl_confirm = clid.confirm;
set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
if (status != 0)
goto out;
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
nfs4_schedule_state_renewal(clp);
out:
	return status;
}

struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
if (clp->cl_machine_cred != NULL)
cred = get_rpccred(clp->cl_machine_cred);
return cred;
}
static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
cred = clp->cl_machine_cred;
clp->cl_machine_cred = NULL;
spin_unlock(&clp->cl_lock);
if (cred != NULL)
put_rpccred(cred);
}
static struct rpc_cred *
nfs4_get_renew_cred_server_locked(struct nfs_server *server)
{
struct rpc_cred *cred = NULL;
struct nfs4_state_owner *sp;
struct rb_node *pos;
for (pos = rb_first(&server->state_owners);
pos != NULL;
pos = rb_next(pos)) {
sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
if (list_empty(&sp->so_states))
continue;
cred = get_rpccred(sp->so_cred);
break;
}
return cred;
}
/**
* nfs4_get_renew_cred_locked - Acquire credential for a renew operation
* @clp: client state handle
*
* Returns an rpc_cred with reference count bumped, or NULL.
* Caller must hold clp->cl_lock.
*/
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
struct nfs_server *server;
/* Use machine credentials if available */
cred = nfs4_get_machine_cred_locked(clp);
if (cred != NULL)
goto out;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
cred = nfs4_get_renew_cred_server_locked(server);
if (cred != NULL)
break;
}
rcu_read_unlock();
return cred;
}
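
/*
 * Illustrative usage sketch, not part of the original file: a caller of
 * nfs4_get_renew_cred_locked() holds clp->cl_lock across the call and
 * drops the credential reference itself when finished:
 *
 *	struct rpc_cred *cred;
 *
 *	spin_lock(&clp->cl_lock);
 *	cred = nfs4_get_renew_cred_locked(clp);
 *	spin_unlock(&clp->cl_lock);
 *	if (cred != NULL) {
 *		... send the RENEW using this credential ...
 *		put_rpccred(cred);
 *	}
 */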
#if defined(CONFIG_NFS_V4_1)

static int nfs41_setup_state_renewal(struct nfs_client *clp)
{
int status;
struct nfs_fsinfo fsinfo;
if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
nfs4_schedule_state_renewal(clp);
return 0;
}
status = nfs4_proc_get_lease_time(clp, &fsinfo);
if (status == 0) {
/* Update lease time and schedule renewal */
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
clp->cl_last_renewal = jiffies;
spin_unlock(&clp->cl_lock);
nfs4_schedule_state_renewal(clp);
}
return status;
}
/*
* Back channel returns NFS4ERR_DELAY for new requests when
* NFS4_SESSION_DRAINING is set so there is no work to be done when draining
* is ended.
*/
static void nfs4_end_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
struct nfs4_slot_table *tbl;
int max_slots;
if (ses == NULL)
return;
tbl = &ses->fc_slot_table;
if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
spin_lock(&tbl->slot_tbl_lock);
max_slots = tbl->max_slots;
while (max_slots--) {
if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
nfs4_set_task_privileged,
NULL) == NULL)
break;
}
		spin_unlock(&tbl->slot_tbl_lock);
	}
}

static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
{
spin_lock(&tbl->slot_tbl_lock);
	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
		INIT_COMPLETION(tbl->complete);
		spin_unlock(&tbl->slot_tbl_lock);
return wait_for_completion_interruptible(&tbl->complete);
}
spin_unlock(&tbl->slot_tbl_lock);
return 0;
}
static int nfs4_begin_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
int ret = 0;
set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
/* back channel */
ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
if (ret)
return ret;
/* fore channel */
return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
}
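
/*
 * Illustrative sketch, not part of the original file: the drain helpers
 * bracket recovery work that must run with no session slots in use. The
 * nfs4_reset_session_sketch() name is an assumption for illustration:
 *
 *	nfs4_begin_drain_session(clp);	-- block new users, wait for slots
 *	status = nfs4_reset_session_sketch(clp);
 *	nfs4_end_drain_session(clp);	-- clear DRAINING, wake waiters
 */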
int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
int status;
if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
goto do_confirm;
nfs4_begin_drain_session(clp);
	status = nfs4_proc_exchange_id(clp, cred);
	if (status != 0)
		goto out;
	set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
do_confirm:
status = nfs4_proc_create_session(clp);
if (status != 0)
goto out;
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
nfs41_setup_state_renewal(clp);
nfs_mark_client_ready(clp, NFS_CS_READY);
out:
	return status;
}

struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
cred = nfs4_get_machine_cred_locked(clp);
spin_unlock(&clp->cl_lock);
return cred;
}
#endif /* CONFIG_NFS_V4_1 */
static struct rpc_cred *
nfs4_get_setclientid_cred_server(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
	struct rpc_cred *cred = NULL;
struct nfs4_state_owner *sp;
struct rb_node *pos;
spin_lock(&clp->cl_lock);
pos = rb_first(&server->state_owners);
if (pos != NULL) {
sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
cred = get_rpccred(sp->so_cred);
}
spin_unlock(&clp->cl_lock);
return cred;
}
/**
* nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
* @clp: client state handle
*
* Returns an rpc_cred with reference count bumped, or NULL.
*/
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs_server *server;
	struct rpc_cred *cred;

spin_lock(&clp->cl_lock);
cred = nfs4_get_machine_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred != NULL)
goto out;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
cred = nfs4_get_setclientid_cred_server(server);
if (cred != NULL)
			break;
}
rcu_read_unlock();
out:
	return cred;
}
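
/*
 * Illustrative usage sketch, not part of the original file: unlike the
 * _locked helpers above, this one takes cl_lock itself, so a caller just
 * pairs it with put_rpccred():
 *
 *	cred = nfs4_get_setclientid_cred(clp);
 *	status = nfs4_init_clientid(clp, cred);
 *	if (cred != NULL)
 *		put_rpccred(cred);
 */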
static struct nfs4_state_owner *
nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
{
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp;
while (*p != NULL) {
parent = *p;
sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
if (cred < sp->so_cred)
p = &parent->rb_left;
else if (cred > sp->so_cred)
p = &parent->rb_right;
else {
if (!list_empty(&sp->so_lru))
list_del_init(&sp->so_lru);
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	return NULL;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
{
struct nfs_server *server = new->so_server;
struct rb_node **p = &server->state_owners.rb_node,
*parent = NULL;
struct nfs4_state_owner *sp;
int err;
while (*p != NULL) {
parent = *p;
sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
if (new->so_cred < sp->so_cred)
p = &parent->rb_left;
else if (new->so_cred > sp->so_cred)
p = &parent->rb_right;
else {
if (!list_empty(&sp->so_lru))
list_del_init(&sp->so_lru);
atomic_inc(&sp->so_count);
return sp;
}
}
err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
if (err)
return ERR_PTR(err);
rb_link_node(&new->so_server_node, parent, p);
rb_insert_color(&new->so_server_node, &server->state_owners);
return new;
}
static void
nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
{
struct nfs_server *server = sp->so_server;
if (!RB_EMPTY_NODE(&sp->so_server_node))
rb_erase(&sp->so_server_node, &server->state_owners);
	ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
}

static void
nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
{
sc->create_time = ktime_get();
sc->flags = 0;
sc->counter = 0;
spin_lock_init(&sc->lock);
INIT_LIST_HEAD(&sc->list);
rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
}
static void
nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
{
rpc_destroy_wait_queue(&sc->wait);
}
/*
* nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
* create a new state_owner.
*
*/
static struct nfs4_state_owner *
nfs4_alloc_state_owner(struct nfs_server *server,
		struct rpc_cred *cred,
		gfp_t gfp_flags)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), gfp_flags);
	if (!sp)
		return NULL;
	sp->so_server = server;
	sp->so_cred = get_rpccred(cred);
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	nfs4_init_seqid_counter(&sp->so_seqid);
	atomic_set(&sp->so_count, 1);
	INIT_LIST_HEAD(&sp->so_lru);
	return sp;
}

void nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
if (!RB_EMPTY_NODE(&sp->so_server_node)) {
struct nfs_server *server = sp->so_server;
struct nfs_client *clp = server->nfs_client;
spin_lock(&clp->cl_lock);
rb_erase(&sp->so_server_node, &server->state_owners);
RB_CLEAR_NODE(&sp->so_server_node);
spin_unlock(&clp->cl_lock);
	}
}

static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
{
nfs4_destroy_seqid_counter(&sp->so_seqid);
put_rpccred(sp->so_cred);
kfree(sp);
}
static void nfs4_gc_state_owners(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
unsigned long time_min, time_max;
LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
time_max = jiffies;
time_min = (long)time_max - (long)clp->cl_lease_time;
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
/* NB: LRU is sorted so that oldest is at the head */
if (time_in_range(sp->so_expires, time_min, time_max))
break;
list_move(&sp->so_lru, &doomed);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
}
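
/*
 * Worked example, added for illustration: with a 90 second lease,
 * cl_lease_time is 90*HZ, so time_min is jiffies - 90*HZ. Any LRU entry
 * whose so_expires falls outside [time_min, time_max] has been unused
 * for a full lease period and is freed; since the LRU is oldest-first,
 * the scan stops at the first entry still inside the window.
 */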
/**
* nfs4_get_state_owner - Look up a state owner given a credential
* @server: nfs_server to search
* @cred: RPC credential to match
*
* Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
*/
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
struct rpc_cred *cred,
		gfp_t gfp_flags)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner_locked(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		goto out;
	new = nfs4_alloc_state_owner(server, cred, gfp_flags);
	if (new == NULL)
		goto out;
	do {
if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
break;
spin_lock(&clp->cl_lock);
sp = nfs4_insert_state_owner_locked(new);
spin_unlock(&clp->cl_lock);
} while (sp == ERR_PTR(-EAGAIN));
if (sp != new)
nfs4_free_state_owner(new);
out:
nfs4_gc_state_owners(server);
	return sp;
}

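/*
 * Illustrative usage sketch, not part of the original file: the OPEN
 * path pairs this lookup with nfs4_put_state_owner():
 *
 *	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
 *	if (sp == NULL)
 *		return -ENOMEM;
 *	... perform the OPEN using sp->so_seqid ...
 *	nfs4_put_state_owner(sp);
 */
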
/**
* nfs4_put_state_owner - Release a nfs4_state_owner
* @sp: state owner data to release
*/
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
struct nfs_server *server = sp->so_server;
struct nfs_client *clp = server->nfs_client;
if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
return;
if (!RB_EMPTY_NODE(&sp->so_server_node)) {
sp->so_expires = jiffies;
list_add_tail(&sp->so_lru, &server->state_owners_lru);
spin_unlock(&clp->cl_lock);
} else {
nfs4_remove_state_owner_locked(sp);
spin_unlock(&clp->cl_lock);
nfs4_free_state_owner(sp);
}
}
/**
* nfs4_purge_state_owners - Release all cached state owners
* @server: nfs_server with cached state owners to release
*
* Called at umount time. Remaining state owners will be on
* the LRU with ref count of zero.
*/
void nfs4_purge_state_owners(struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
list_move(&sp->so_lru, &doomed);
nfs4_remove_state_owner_locked(sp);
	}
	spin_unlock(&clp->cl_lock);

list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
}
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
struct nfs4_state *state;
state = kzalloc(sizeof(*state), GFP_NOFS);
if (!state)
return NULL;
atomic_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}

static void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
if (state->state == fmode)
return;
/* NB! List reordering - see the reclaim code for why. */
if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
if (fmode & FMODE_WRITE)
list_move(&state->open_states, &state->owner->so_states);
else
list_move_tail(&state->open_states, &state->owner->so_states);
}
	state->state = fmode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_state *state;
list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
if (atomic_inc_not_zero(&state->count))
return state;
}
return NULL;
}
static void
nfs4_free_open_state(struct nfs4_state *state)
{
kfree(state);
}
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
struct nfs4_state *state, *new;
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
state = __nfs4_find_state_byowner(inode, owner);
spin_unlock(&inode->i_lock);
if (state)
goto out;
new = nfs4_alloc_open_state();
spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
state = __nfs4_find_state_byowner(inode, owner);
if (state == NULL && new != NULL) {
state = new;
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
ihold(inode);
		state->inode = inode;
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
return state;
}
void nfs4_put_open_state(struct nfs4_state *state)
{
struct inode *inode = state->inode;
struct nfs4_state_owner *owner = state->owner;
	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
spin_unlock(&inode->i_lock);
spin_unlock(&owner->so_lock);
iput(inode);
nfs4_free_open_state(state);
nfs4_put_state_owner(owner);
}
/*
 * Close the current file.
 */
static void __nfs4_close(struct nfs4_state *state,
		fmode_t fmode, gfp_t gfp_mask, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	atomic_inc(&owner->so_count);
/* Protect against nfs4_find_state() */
spin_lock(&owner->so_lock);
switch (fmode & (FMODE_READ | FMODE_WRITE)) {
case FMODE_READ:
state->n_rdonly--;
break;
case FMODE_WRITE:
state->n_wronly--;
break;
case FMODE_READ|FMODE_WRITE:
state->n_rdwr--;
}
newstate = FMODE_READ|FMODE_WRITE;
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
newstate &= ~FMODE_READ;
call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
}
if (state->n_wronly == 0) {
newstate &= ~FMODE_WRITE;
call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
}
if (newstate == 0)
clear_bit(NFS_DELEGATED_STATE, &state->flags);
}
nfs4_state_set_mode_locked(state, newstate);
spin_unlock(&owner->so_lock);
	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else {
		bool roc = pnfs_roc(state->inode);

		nfs4_do_close(state, gfp_mask, wait, roc);
	}
}

void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(state, fmode, GFP_NOFS, 0);
}

void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(state, fmode, GFP_KERNEL, 1);
}
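
/*
 * Worked example, added for illustration: a file opened once read-only
 * and once read-write, where the read-write user closes first. n_rdwr
 * drops to 0 and n_rdonly stays 1, so newstate is reduced to FMODE_READ
 * and call_close picks up the WRONLY/RDWR stateid bits, resulting in an
 * OPEN_DOWNGRADE to read-only rather than a full CLOSE.
 */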
/*
* Search the state->lock_states for an existing lock_owner
* that is compatible with current->files
*/
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
			continue;
		switch (pos->ls_owner.lo_type) {
case NFS4_POSIX_LOCK_TYPE:
if (pos->ls_owner.lo_u.posix_owner != fl_owner)
continue;
break;
case NFS4_FLOCK_LOCK_TYPE:
if (pos->ls_owner.lo_u.flock_owner != fl_pid)
continue;
}
atomic_inc(&pos->ls_count);
return pos;
}
return NULL;
}
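
/*
 * Illustrative note, added: POSIX locks are keyed by fl_owner (the
 * files_struct) while flock() locks are keyed by pid, so lookups differ
 * only in which owner argument matters:
 *
 *	posix = __nfs4_find_lock_state(state, fl->fl_owner, 0,
 *				       NFS4_POSIX_LOCK_TYPE);
 *	flock = __nfs4_find_lock_state(state, NULL, fl->fl_pid,
 *				       NFS4_FLOCK_LOCK_TYPE);
 *
 * Each successful lookup bumps ls_count and must be balanced by
 * nfs4_put_lock_state().
 */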
/*
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
*/

static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
{
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = state->owner->so_server;

	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
	if (lsp == NULL)
		return NULL;
	nfs4_init_seqid_counter(&lsp->ls_seqid);
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_state = state;
	lsp->ls_owner.lo_type = type;
switch (lsp->ls_owner.lo_type) {
case NFS4_FLOCK_LOCK_TYPE:
lsp->ls_owner.lo_u.flock_owner = fl_pid;
break;
case NFS4_POSIX_LOCK_TYPE:
lsp->ls_owner.lo_u.posix_owner = fl_owner;
break;
default:
		goto out_free;
	}
lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
if (lsp->ls_seqid.owner_id < 0)
goto out_free;
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
out_free:
	kfree(lsp);
	return NULL;
}

void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
	ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
nfs4_destroy_seqid_counter(&lsp->ls_seqid);
kfree(lsp);
}
/*
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
*/

static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
spin_lock(&state->state_lock);

		lsp = __nfs4_find_lock_state(state, owner, pid, type);
if (lsp != NULL)
break;
if (new != NULL) {
list_add(&new->ls_locks, &state->lock_states);
set_bit(LK_STATE_IN_USE, &state->flags);
lsp = new;
new = NULL;
break;
}
spin_unlock(&state->state_lock);

		new = nfs4_alloc_lock_state(state, owner, pid, type);
if (new == NULL)
return NULL;
}
spin_unlock(&state->state_lock);
if (new != NULL)
		nfs4_free_lock_state(state->owner->so_server, new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
struct nfs4_state *state;
if (lsp == NULL)
return;
state = lsp->ls_state;
if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
return;
list_del(&lsp->ls_locks);
if (list_empty(&state->lock_states))
clear_bit(LK_STATE_IN_USE, &state->flags);
spin_unlock(&state->state_lock);
if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
if (nfs4_release_lockowner(lsp) == 0)
return;
}
	nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
dst->fl_u.nfs4_fl.owner = lsp;
atomic_inc(&lsp->ls_count);
}
static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static const struct file_lock_operations nfs4_fl_lock_ops = {
.fl_copy_lock = nfs4_fl_copy_lock,
.fl_release_private = nfs4_fl_release_lock,
};
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

if (fl->fl_ops != NULL)
return 0;

	if (fl->fl_flags & FL_POSIX)
lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
else if (fl->fl_flags & FL_FLOCK)
lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid,
NFS4_FLOCK_LOCK_TYPE);

	else
return -EINVAL;
if (lsp == NULL)
return -ENOMEM;
fl->fl_u.nfs4_fl.owner = lsp;
fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state,
		fl_owner_t fl_owner, pid_t fl_pid)
{
	struct nfs4_lock_state *lsp;
	bool ret = false;

	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		goto out;
	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
		nfs4_stateid_copy(dst, &lsp->ls_stateid);
		ret = true;
	}
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
out:
return ret;
}
static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
int seq;
do {
seq = read_seqbegin(&state->seqlock);
nfs4_stateid_copy(dst, &state->stateid);
} while (read_seqretry(&state->seqlock, seq));
}
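
/*
 * Illustrative note, added: the read_seqbegin/read_seqretry loop above
 * pairs with writers that update the open stateid under the seqlock,
 * along the lines of:
 *
 *	write_seqlock(&state->seqlock);
 *	nfs4_stateid_copy(&state->stateid, new_stateid);
 *	write_sequnlock(&state->seqlock);
 */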
/*
* Byte-range lock aware utility to initialize the stateid of read/write
* requests.
*/
void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
fmode_t fmode, fl_owner_t fl_owner, pid_t fl_pid)
{
if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
return;
if (nfs4_copy_lock_stateid(dst, state, fl_owner, fl_pid))
return;
	nfs4_copy_open_stateid(dst, state);
}

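/*
 * Illustrative note, added: the precedence above is delegation stateid
 * first, then a lock stateid belonging to this lock owner, then the
 * open stateid. For example, a read issued while holding a write
 * delegation is tagged with the delegation stateid even if byte-range
 * locks are also held.
 */
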
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
{
struct nfs_seqid *new;
new = kmalloc(sizeof(*new), gfp_mask);
if (new != NULL) {
new->sequence = counter;
INIT_LIST_HEAD(&new->list);
		new->task = NULL;
	}
	return new;
}

void nfs_release_seqid(struct nfs_seqid *seqid)
{
	struct nfs_seqid_counter *sequence;

if (list_empty(&seqid->list))
return;
sequence = seqid->sequence;
spin_lock(&sequence->lock);
list_del_init(&seqid->list);
if (!list_empty(&sequence->list)) {
struct nfs_seqid *next;
next = list_first_entry(&sequence->list,
struct nfs_seqid, list);
		rpc_wake_up_queued_task(&sequence->wait, next->task);
	}
	spin_unlock(&sequence->lock);
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	nfs_release_seqid(seqid);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
* failed with a seqid incrementing error -
* see comments nfs_fs.h:seqid_mutating_error()
*/
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid);

switch (status) {
case 0:
break;
case -NFS4ERR_BAD_SEQID:
if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
return;
pr_warn_ratelimited("NFS: v4 server returned a bad"
" sequence-id error on an"
" unconfirmed sequence %p!\n",
seqid->sequence);
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_BADXDR: