Commit

Ensure published copy is never zapped
ties committed Dec 8, 2023
1 parent 3679fc9 commit 3aaa625
Showing 2 changed files with 66 additions and 13 deletions.
14 changes: 13 additions & 1 deletion src/main/java/net/ripe/rpki/rsyncit/rsync/RsyncWriter.java
@@ -10,6 +10,7 @@
import java.io.UncheckedIOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.LinkOption;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;
@@ -156,7 +157,7 @@ static List<RpkiObject> filterOutBadUrls(Path hostBasedPath, Collection<RpkiObje
var objectRelativePath = Paths.get(relativePath(object.url().getPath()));
// Check that the resulting path of the object stays within `hostBasedPath`
// to prevent URLs like rsync://bla.net/path/../../../../../PATH_INJECTION.txt
// writing data outside of the controlled path.
// writing data outside the controlled path.
final String normalizedPath = hostBasedPath.resolve(objectRelativePath).normalize().toString();
if (normalizedPath.startsWith(normalizedHostPath)) {
return Stream.of(object);
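
The filter above keeps an object only when its normalized path still starts with the host directory, which is what rejects "../" traversal. A minimal standalone sketch of the same idea (class name and paths below are made up for illustration):

    // Standalone illustration, not project code.
    import java.nio.file.Path;
    import java.nio.file.Paths;

    class PathTraversalCheckDemo {
        public static void main(String[] args) {
            Path hostBasedPath = Paths.get("/srv/rsync/bla.net");
            String normalizedHostPath = hostBasedPath.normalize().toString();

            Path good = hostBasedPath.resolve("path/0.cer").normalize();
            Path evil = hostBasedPath.resolve("path/../../../../PATH_INJECTION.txt").normalize();

            // kept: still under the host directory
            System.out.println(good.toString().startsWith(normalizedHostPath)); // true
            // filtered out: escapes the host directory after normalization
            System.out.println(evil.toString().startsWith(normalizedHostPath)); // false
        }
    }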
@@ -188,13 +189,24 @@ private void atomicallyReplacePublishedSymlink(Path baseDirectory, Path targetDi
void cleanupOldTargetDirectories(Instant now, Path baseDirectory) throws IOException {
long cutoff = now.toEpochMilli() - config.targetDirectoryRetentionPeriodMs();

// resolve the published symlink - because we definitely want to keep that copy.
// TODO: published dir should be built without string concat, but this is where we are ¯\_(ツ)_/¯
var actualPublishedDir = config.rsyncPath().resolve("published").toRealPath();

try (
Stream<Path> oldDirectories = Files.list(baseDirectory)
.filter(path -> PUBLICATION_DIRECTORY_PATTERN.matcher(path.getFileName().toString()).matches())
.filter(Files::isDirectory)
.sorted(Comparator.comparing(this::getLastModifiedTime).reversed())
.skip(config.targetDirectoryRetentionCopiesCount())
.filter((directory) -> getLastModifiedTime(directory).toMillis() < cutoff)
.filter(dir -> {
try {
return !dir.toRealPath().equals(actualPublishedDir);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
});
) {
fileWriterPool.submit(() -> oldDirectories.parallel().forEach(directory -> {
log.info("Removing old publication directory {}", directory);
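
The cleanup above resolves the published symlink to its real path first and then skips the directory it points at, so the copy currently served to clients is never removed even if it looks old enough. A minimal standalone sketch of why toRealPath() is needed for that comparison (names are made up; a symlink and its target are different Path values until both are resolved):

    // Standalone illustration, not project code.
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    class PublishedSymlinkDemo {
        public static void main(String[] args) throws IOException {
            Path base = Files.createTempDirectory("rsync-demo");
            Path target = Files.createDirectory(base.resolve("published-2023-12-08T10:15:30.000Z"));
            // relative symlink "published" -> "published-2023-12-08T10:15:30.000Z"
            Path link = Files.createSymbolicLink(base.resolve("published"), target.getFileName());

            System.out.println(link.equals(target));                            // false: different Path values
            System.out.println(link.toRealPath().equals(target.toRealPath()));  // true: same directory on disk
        }
    }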
65 changes: 53 additions & 12 deletions src/test/java/net/ripe/rpki/rsyncit/rsync/RsyncWriterTest.java
@@ -11,6 +11,7 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
@@ -98,6 +99,37 @@ public void testIgnoreBadUrls(@TempDir Path tmpPath) throws Exception {
});
}

@Test
public void testDoNotDeleteLinkedDirAndLinksMostRecentlyWritten(@TempDir Path tmpPath) throws Exception {
final Function<RsyncWriter, Path> writeSomeObjects = rsyncWriter ->
rsyncWriter.writeObjects(IntStream.range(0, 10).mapToObj(i ->
new RpkiObject(URI.create("rsync://bla.net/path1/" + i + ".cer"), someBytes(), Instant.now())
).collect(Collectors.toList()));

withRsyncWriter(
tmpPath,
config -> config.withTargetDirectoryRetentionPeriodMs(100).withTargetDirectoryRetentionCopiesCount(0),
rsyncWriter -> {
var path1 = writeSomeObjects.apply(rsyncWriter);
var path2 = writeSomeObjects.apply(rsyncWriter);
Files.setLastModifiedTime(path1, FileTime.from(Instant.now().plus(1, ChronoUnit.DAYS)));
sleep(1100);
var path3 = writeSomeObjects.apply(rsyncWriter);

// path1 has the mangled (future) timestamp, so it survives the cutoff
assertThat(path1.toFile()).exists();
// path2 is too old
assertThat(path2.toFile()).doesNotExist();
assertThat(path3.toFile()).exists();

// The published path MUST exist and MUST link to the last write (not the last timestamp).
//
// The race condition that we check for here cannot be triggered easily.
// Idea: time-of-check/time-of-use gap: continuously update the timestamp of path3 to be before the cutoff.
assertThat(tmpPath.resolve("published").toRealPath().toFile()).exists();
});
}
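
The comment above only records the idea; a hypothetical harness for it might look roughly like the following (a sketch only, not part of this commit: it assumes the same writeSomeObjects helper and rsyncWriter as this test, plus imports for Executors, TimeUnit, ChronoUnit and IOException, and it does not reliably force the race):

    // Hypothetical sketch of the time-of-check/time-of-use idea from the comment above.
    var executor = Executors.newSingleThreadScheduledExecutor();
    var mangler = executor.scheduleAtFixedRate(() -> {
        try {
            // keep pushing path3's mtime before the retention cutoff while cleanup may be running
            Files.setLastModifiedTime(path3, FileTime.from(Instant.now().minus(1, ChronoUnit.DAYS)));
        } catch (IOException e) {
            // the directory may already have been replaced or removed; ignore in this sketch
        }
    }, 0, 1, TimeUnit.MILLISECONDS);
    try {
        writeSomeObjects.apply(rsyncWriter); // a new write triggers another cleanup pass
        // whatever the cleanup decides, the published copy must still exist
        assertThat(tmpPath.resolve("published").toRealPath().toFile()).exists();
    } finally {
        mangler.cancel(true);
        executor.shutdownNow();
    }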

@Test
public void testRemoveOldDirectoriesWhenTheyAreOld(@TempDir Path tmpPath) throws Exception {
final Function<RsyncWriter, Path> writeSomeObjects = rsyncWriter ->
@@ -111,16 +143,20 @@ public void testRemoveOldDirectoriesWhenTheyAreOld(@TempDir Path tmpPath) throws
config -> config.withTargetDirectoryRetentionPeriodMs(100).withTargetDirectoryRetentionCopiesCount(0),
rsyncWriter -> {
var path1 = writeSomeObjects.apply(rsyncWriter);
sleep(200);
var path2 = writeSomeObjects.apply(rsyncWriter);
// path1 should be deleted as old
assertThat(Files.exists(path1)).isFalse();
assertThat(Files.exists(path2)).isTrue();
sleep(200);
// base case: Both are present because they are not too old
assertThat(path1.toFile()).exists();
assertThat(path2.toFile()).exists();
assertThat(tmpPath.resolve("published").toRealPath()).isEqualTo(path2.toRealPath());
sleep(1100);
var path3 = writeSomeObjects.apply(rsyncWriter);
// path2 should also be deleted as old
assertThat(Files.exists(path2)).isFalse();
assertThat(Files.exists(path3)).isTrue();
// inductive case: it cleans the older dirs
assertThat(path1.toFile()).doesNotExist();
assertThat(path2.toFile()).doesNotExist();
assertThat(path3.toFile()).exists();

// published path points to most recent copy
assertThat(tmpPath.resolve("published").toRealPath()).isEqualTo(path3.toRealPath());
});
}

@@ -137,11 +173,16 @@ public void testRemoveOldDirectoriesButKeepSomeNumberOfThem(@TempDir Path tmpDir
config -> config.withTargetDirectoryRetentionPeriodMs(100).withTargetDirectoryRetentionCopiesCount(2),
rsyncWriter -> {
var path1 = writeSomeObjects.apply(rsyncWriter);
sleep(200);
var path2 = writeSomeObjects.apply(rsyncWriter);
// Nothing should be deleted: the retention count of 2 keeps both copies
assertThat(Files.exists(path1)).isTrue();
assertThat(Files.exists(path2)).isTrue();
sleep(1100);
var path3 = writeSomeObjects.apply(rsyncWriter);
// It should delete the oldest directory
assertThat(path1.toFile()).doesNotExist();
assertThat(path2.toFile()).exists();
assertThat(path3.toFile()).exists();

// published path points to most recent copy
assertThat(tmpDir.resolve("published").toRealPath()).isEqualTo(path3.toRealPath());
});
}
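
The two retention settings combine as in the pipeline above: the newest targetDirectoryRetentionCopiesCount directories are kept unconditionally, and only the remaining ones that are also older than the retention period get deleted. A standalone sketch of that selection logic on plain data (names and timestamps are made up):

    // Standalone illustration, not project code.
    import java.time.Instant;
    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    class RetentionDemo {
        record Dir(String name, Instant lastModified) {}

        public static void main(String[] args) {
            Instant now = Instant.parse("2023-12-08T12:00:00Z");
            Instant cutoff = now.minusMillis(100);
            int copiesToKeep = 2;

            List<Dir> dirs = List.of(
                new Dir("published-newest", now),
                new Dir("published-old", now.minusSeconds(3600)),
                new Dir("published-older", now.minusSeconds(7200)));

            List<String> toDelete = dirs.stream()
                .sorted(Comparator.comparing(Dir::lastModified).reversed())
                .skip(copiesToKeep)                              // keep the newest N unconditionally
                .filter(d -> d.lastModified().isBefore(cutoff))  // delete only if also past the cutoff
                .map(Dir::name)
                .collect(Collectors.toList());

            System.out.println(toDelete); // [published-older]
        }
    }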
