Commit 43df74e7 authored by Avi Kivity

vfs: relax dependency of close() performance on rcu grace periods

Currently, we perform the final fdrop() in an rcu callback, which means it
can have considerable latency.  This in turn places restrictions on
further optimizations we can do to rcu, as we need prompt close() execution.

Relax this by performing the fdrop() immediately, and only deferring the
final free().  To do this, we harden fget() so it now expects files with
positive refcounts, and make sure that after the final fdrop() refcounts
are either zero or negative.

This also makes close() take effect immediately, which fixes tst-pipe.so.
parent 0873adf5
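
The key property the patch relies on is that fget() must never resurrect a file whose final fdrop() has already begun. The sketch below shows that "take a reference only while the count is still positive" pattern in isolation; it is a minimal standalone example using std::atomic, not the OSv code itself (the patch operates on f_count with GCC's __atomic builtins, as shown in the diff below).

    #include <atomic>

    struct object {
        std::atomic<int> refcount{1};   // > 0 while the object is live
    };

    // Take a new reference only if the object is still live (refcount > 0).
    // Returns false if a concurrent release already started tearing it down.
    static bool acquire_if_live(object* o)
    {
        int c = o->refcount.load(std::memory_order_relaxed);
        while (c > 0 &&
               !o->refcount.compare_exchange_weak(c, c + 1,
                                                  std::memory_order_relaxed)) {
            // compare_exchange_weak reloaded c on failure; retry while positive
        }
        return c > 0;
    }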
@@ -77,7 +77,7 @@ int fdclose(int fd)
         gfdt[fd].assign(nullptr);
     }
-    rcu_defer(fdrop, fp);
+    fdrop(fp);
     return 0;
 }
@@ -107,6 +107,18 @@ int fdset(int fd, struct file *fp)
     return 0;
 }

+static bool fhold_if_positive(file* f)
+{
+    auto c = f->f_count;
+    // zero or negative f_count means that the file is being closed; don't
+    // increment
+    while (c > 0 && !__atomic_compare_exchange_n(&f->f_count, &c, c + 1, true,
+                __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
+        // nothing to do
+    }
+    return c > 0;
+}
+
 /*
  * Retrieves a file structure from the gfdt and increases its refcount in a
  * synchronized way, this ensures that a concurrent close will not interfere.
@@ -124,7 +136,9 @@ int fget(int fd, struct file **out_fp)
             return EBADF;
         }

-        fhold(fp);
+        if (!fhold_if_positive(fp)) {
+            return EBADF;
+        }
     }

     *out_fp = fp;
@@ -196,19 +210,20 @@ void fhold(struct file* fp)
 int fdrop(struct file *fp)
 {
-    if (__sync_fetch_and_sub(&fp->f_count, 1) > 1)
+    if (__sync_fetch_and_sub(&fp->f_count, 1) != 1)
         return 0;

     /* We are about to free this file structure, but we still do things with it
-     * so we increase the refcount by one, fdrop may get called again
+     * so set the refcount to INT_MIN, fhold/fdrop may get called again
      * and we don't want to reach this point more than once.
+     * INT_MIN is also safe against fget() seeing this file.
      */
-    fhold(fp);
+    fp->f_count = INT_MIN;
     fo_close(fp);
     poll_drain(fp);
     mutex_destroy(&fp->f_lock);
-    free(fp);
+    rcu_dispose(fp);
     return 1;
 }
@@ -61,7 +61,7 @@ struct fileops;
  */
 struct file {
     int f_flags;            /* open flags */
-    int f_count;            /* reference count */
+    int f_count;            /* reference count, see below */
     off_t f_offset;         /* current position in file */
     struct vnode *f_vnode;  /* vnode */
     struct fileops *f_ops;  /* file ops abstraction */
@@ -71,6 +71,14 @@ struct file {
     mutex_t f_lock;         /* lock */
 };

+// f_count rules:
+//
+// > 0: file is live and open, normal reference counting applies
+// = 0: file is open but being removed from file table, may not
+//      acquire new references
+// < 0: file is being closed, may not acquire new references (but
+//      close path may still call fhold()/fdrop())
+
 #define FD_LOCK(fp)     mutex_lock(&(fp->f_lock))
 #define FD_UNLOCK(fp)   mutex_unlock(&(fp->f_lock))
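
For illustration, the release side of the same pattern (again a standalone sketch continuing the acquire_if_live() example above, not the OSv code; destroy() here is a hypothetical stand-in for the fo_close()/poll_drain()/rcu_dispose() sequence in the patch) parks the count at INT_MIN on the final drop, so a late acquire attempt sees a non-positive count and fails, while nested acquire/release pairs during teardown can never bring the count back above zero.

    #include <atomic>
    #include <climits>

    void destroy(object* o);   // hypothetical teardown + (possibly deferred) free

    // Drop one reference; on the final drop, park the count at INT_MIN so that
    // acquire_if_live() can no longer succeed, then run the teardown.
    static bool release(object* o)
    {
        if (o->refcount.fetch_sub(1, std::memory_order_acq_rel) != 1) {
            return false;              // other references remain
        }
        // Final reference: force the count strongly negative before teardown so
        // further acquire/release pairs during teardown never make it positive.
        o->refcount.store(INT_MIN, std::memory_order_relaxed);
        destroy(o);
        return true;
    }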