cgofuse: use fd table shrinker and change limits
Since the file table can now reclaim memory, we can raise the hardcoded
open file limit.
These limits are still arbitrary and should become configurable at some point.

The file table grows by some factor as needed, and it can now shrink
when descriptors are freed (down to a limit derived from the growth
scaling factor).

There are no metrics yet on how this impacts performance. For now, we just
want to prevent the table from growing large and staying large after many
open+close calls have been made.
This could also speed up handle lookups slightly, since we no longer range
over as many `nil` entries at the tail.
But without measurements it's hard to judge whether the checks and
reallocations cost more than they save in the average use case.
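To illustrate the lookup point, here is a rough, hypothetical sketch (the table's lookup code is not part of this commit): any operation that ranges over the whole table still visits the `nil` tail, so trimming the capacity after a burst of closes shortens that walk.

```go
package main

import "fmt"

// countOpen is a hypothetical stand-in for any operation that ranges over
// every slot of the handle table; the real lookup code is not in this diff.
func countOpen(files []*struct{}) (open, visited int) {
	for _, f := range files {
		visited++
		if f != nil {
			open++
		}
	}
	return open, visited
}

func main() {
	// After a burst of open+close calls the table is long but mostly nil.
	grown := make([]*struct{}, 64)
	grown[0], grown[1] = &struct{}{}, &struct{}{}
	fmt.Println(countOpen(grown)) // 2 64: every trailing nil slot is visited

	// Once the shrinker trims the table, the same walk is much shorter.
	shrunk := make([]*struct{}, 8)
	copy(shrunk, grown)
	fmt.Println(countOpen(shrunk)) // 2 8
}
```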
djdv committed Dec 2, 2022
1 parent 7165c8d commit 5101a8a
53 changes: 27 additions & 26 deletions internal/filesystem/cgofuse/table.go
@@ -24,9 +24,14 @@ type (

 const (
 	errorHandle = math.MaxUint64
-	// TODO: handleMax needs to be configurable.
-	// This value is arbitrary.
-	handleMax = 2048
+	// TODO: handleMax needs to be configurable like `ulimit` allows.
+	// NOTE: file table sizes and bounds were chosen arbitrarily.
+	// Suggestions for better averages or ways to tune are welcome.
+	handleMax = 4096
+	tableStartingSize = 8
+	tableGrowthfactor = 2
+	tableShrinkLimitFactor = tableGrowthfactor * 2
+	tableShrinkBound = tableStartingSize * tableShrinkLimitFactor
 )

 var (
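For reference, a standalone sketch of what these constants work out to (values copied from the hunk above; not part of the commit): the shrink limit factor is twice the growth factor, and the shrink bound is the starting size scaled by that factor.

```go
package main

import "fmt"

// Constants copied from the hunk above, for illustration only.
const (
	handleMax              = 4096
	tableStartingSize      = 8
	tableGrowthfactor      = 2
	tableShrinkLimitFactor = tableGrowthfactor * 2                      // 4
	tableShrinkBound       = tableStartingSize * tableShrinkLimitFactor // 32
)

func main() {
	// Shrinking is only considered once capacity exceeds 32 slots,
	// and the table can never grow past 4096 handles.
	fmt.Println(tableShrinkLimitFactor, tableShrinkBound, handleMax) // 4 32 4096
}
```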
@@ -54,13 +59,9 @@ func (files handleSlice) extend() (handleSlice, error) {
 	if filesLen < filesCap {
 		return files[:filesEnd], nil
 	}
-	const (
-		initialSize = 8 // NOTE: Initial size is chosen arbitrarily.
-		factor = 2 // If a better average is known, replace this.
-	)
 	var (
-		scaledCap = filesCap * factor
-		newCap = generic.Max(scaledCap, initialSize)
+		scaledCap = filesCap * tableGrowthfactor
+		newCap = generic.Max(scaledCap, tableStartingSize)
 	)
 	if newCap > handleMax {
 		return nil, errFull
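A minimal sketch of the growth schedule `extend` now follows, assuming the same constants (the real code also checks the current length and returns `errFull` past the cap; this only walks the capacities):

```go
package main

import "fmt"

const (
	handleMax         = 4096
	tableStartingSize = 8
	tableGrowthfactor = 2
)

func main() {
	// Capacities the table passes through as it fills:
	// 8, 16, 32, ..., 4096. The next doubling would exceed handleMax,
	// which is when extend reports errFull instead.
	for c := tableStartingSize; c <= handleMax; c *= tableGrowthfactor {
		fmt.Println(c)
	}
}
```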
@@ -74,7 +75,6 @@ func (files handleSlice) shrink(lowerBound int) handleSlice {
 	var (
 		emptySlots int
 		filesLen = len(files)
-		filesCap = cap(files)
 	)
 	for i := filesLen - 1; i != -1; i-- {
 		if files[i] != nil {
@@ -83,23 +83,23 @@ func (files handleSlice) shrink(lowerBound int) handleSlice {
 		emptySlots++
 	}
 	var (
-		newLen = filesLen - emptySlots
-		bound = boundCheck(lowerBound, newLen)
+		newLen = filesLen - emptySlots
+		newCap = lowestAlignment(newLen, tableStartingSize)
+		tooSmall = newCap < lowerBound
+		sameSize = newCap == cap(files)
+		lessOrEqualToBound = tooSmall || sameSize
 	)
-	if newLen == bound || filesCap == bound {
+	if lessOrEqualToBound {
 		return nil
 	}
-	newTable := make(handleSlice, newLen, bound)
+	newTable := make(handleSlice, newLen, newCap)
 	copy(newTable, files)
 	return newTable
 }

-func boundCheck(lowerBound, oldCap int) int {
-	newCap := lowerBound
-	for newCap < oldCap {
-		newCap *= 2
-	}
-	return newCap
+func lowestAlignment(size, alignment int) int {
+	remainder := (size - 1) % alignment
+	return (size - 1) + (alignment - remainder)
 }

 func (ft *fileTable) add(f fs.File) (fileDescriptor, error) {
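A standalone copy of `lowestAlignment` with a few sample inputs, to show that it rounds a length up to the next multiple of the alignment (here the table's starting size):

```go
package main

import "fmt"

// lowestAlignment copied from the hunk above, for illustration.
func lowestAlignment(size, alignment int) int {
	remainder := (size - 1) % alignment
	return (size - 1) + (alignment - remainder)
}

func main() {
	for _, n := range []int{1, 8, 9, 16, 33} {
		// Prints: 1 8, 8 8, 9 16, 16 16, 33 40 - the shrunken capacity
		// always lands on a multiple of tableStartingSize (8).
		fmt.Println(n, lowestAlignment(n, 8))
	}
}
```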
@@ -154,12 +154,13 @@ func (ft *fileTable) remove(fh fileDescriptor) error {
 	if err := ft.validLocked(fh); err != nil {
 		return err
 	}
-	ft.files[fh] = nil
-	// TODO: We could trim the slice here so that it's not wasting memory.
-	// Need metrics on this though. May not be worth the cost.
-	// And not sure what capacity we should trim to as a maximum.
-	// If it's too low we're going to constantly thrash.
-	// Too high and we'll be wasting memory.
+	files := ft.files
+	files[fh] = nil
+	if cap(files) > tableShrinkBound {
+		if newTable := files.shrink(tableShrinkBound); newTable != nil {
+			ft.files = newTable
+		}
+	}
 	return nil
 }

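Finally, a rough standalone sketch of the policy `remove` now applies (simplified: no `fileTable` type, no locking; the bound and helper are copied from the hunks above): a shrink is only attempted once capacity exceeds `tableShrinkBound`, and `shrink` itself keeps the resulting capacity at or above that bound.

```go
package main

import "fmt"

const tableShrinkBound = 32 // tableStartingSize * tableShrinkLimitFactor

// lowestAlignment copied from the diff, for illustration.
func lowestAlignment(size, alignment int) int {
	remainder := (size - 1) % alignment
	return (size - 1) + (alignment - remainder)
}

func main() {
	// Suppose the table grew to 64 slots during a burst of opens and
	// 30 descriptors are still live after some closes.
	const capacity, live = 64, 30
	// remove only considers shrinking once capacity exceeds the bound...
	if capacity > tableShrinkBound {
		newCap := lowestAlignment(live, 8)
		// ...and shrink refuses to reallocate below that bound or to the same size.
		if newCap >= tableShrinkBound && newCap != capacity {
			fmt.Println("shrink from", capacity, "to", newCap) // shrink from 64 to 32
		}
	}
}
```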
