diff --git a/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/consts.go b/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/consts.go index 0dbab25c2b..8e22a16d33 100644 --- a/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/consts.go +++ b/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/consts.go @@ -21,12 +21,11 @@ const ( fileSinkIdSuffix = "file" fileTypeId = "\"file\"" - // We instruct vector to store log files per-year, per-week (00-53), per-enclave, per-service // To construct the filepath, we utilize vectors template syntax that allows us to reference fields in log events // https://vector.dev/docs/reference/configuration/template-syntax/ - baseLogsFilepath = "\"" + logsStorageDirpath + "%%Y/%%V/" + baseLogsFilepath = "\"" + logsStorageDirpath + "%%Y/%%V/%%u/%%H/" - uuidLogsFilepath = baseLogsFilepath + "{{ enclave_uuid }}/{{ service_uuid }}.json\"" + VectorLogsFilepathFormat = baseLogsFilepath + "{{ enclave_uuid }}/{{ service_uuid }}.json\"" sourceConfigFileTemplateName = "srcVectorConfigFileTemplate" sinkConfigFileTemplateName = "sinkVectorConfigFileTemplate" diff --git a/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/vector_config.go b/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/vector_config.go index a6022b5b85..f16b0f3955 100644 --- a/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/vector_config.go +++ b/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector/vector_config.go @@ -38,7 +38,7 @@ func 
newDefaultVectorConfig(listeningPortNumber uint16) *VectorConfig { Id: "uuid_" + fileSinkIdSuffix, Type: fileTypeId, Inputs: []string{fluentBitSourceId}, - Filepath: uuidLogsFilepath, + Filepath: VectorLogsFilepathFormat, }, }, } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/file_layout.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/file_layout.go index 40ca4369f6..2e2720bb9d 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/file_layout.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/file_layout.go @@ -9,8 +9,8 @@ type LogFileLayout interface { // GetLogFileLayoutFormat returns a string representation the "format" that files are laid out in // Formats are composed: // - "/" - representing a nested directory - // - "" - representing where an enclave uuid is inserted - // - "" - representing where a service uuid is inserted + // - "{{ enclave_uuid }}" - representing where an enclave uuid is inserted + // - "{{ service_uuid }}" - representing where a service uuid is inserted // - time formats specified by strftime https://cplusplus.com/reference/ctime/strftime/ // - any other ascii text GetLogFileLayoutFormat() string @@ -21,6 +21,6 @@ type LogFileLayout interface { // GetLogFilePaths retrieves a list of filepaths [filesystem] for [serviceUuid] in [enclaveUuid] // If [retentionPeriodIntervals] is set to -1, retrieves all filepaths from the currentTime till [retentionPeriod] in order // If [retentionPeriodIntervals] is positive, retrieves all filepaths within the range [currentTime - retentionPeriod] and [currentTime - (retentionPeriodIntervals) * retentionPeriod] - // Returned filepaths sorted from most recent to least recent + // Returned filepaths sorted from oldest to most recent GetLogFilePaths(filesystem volume_filesystem.VolumeFilesystem, retentionPeriod 
time.Duration, retentionPeriodIntervals int, enclaveUuid, serviceUuid string) ([]string, error) } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_hour_file_layout.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_hour_file_layout.go new file mode 100644 index 0000000000..0c6d6cda08 --- /dev/null +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_hour_file_layout.go @@ -0,0 +1,139 @@ +package file_layout + +import ( + "fmt" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" + "golang.org/x/exp/slices" + "math" + "os" + "strconv" + "time" +) + +const ( + // basepath year/week/day/hour/ + perHourDirPathFmtStr = "%s%s/%s/%s/%s/" + + // ... 
enclave-uuid/service-uuid + perHourFilePathFmtSt = perHourDirPathFmtStr + "%s/%s%s" +) + +type PerHourFileLayout struct { + time logs_clock.LogsClock + baseLogsFilePath string +} + +func NewPerHourFileLayout(time logs_clock.LogsClock, baseLogsFilePath string) *PerHourFileLayout { + return &PerHourFileLayout{ + time: time, + baseLogsFilePath: baseLogsFilePath, + } +} + +func (phf *PerHourFileLayout) GetLogFileLayoutFormat() string { + // Right now this format is specifically made for Vector Logs Aggregators format + // This will be used by the Vector LogsAggregator to determine the path to output to + return fmt.Sprintf("\"%s%%%%Y/%%%%V/%%%%u/%%%%H/{{ enclave_uuid }}/{{ service_uuid }}.json\"", volume_consts.LogsStorageDirpath) +} + +func (phf *PerHourFileLayout) GetLogFilePath(time time.Time, enclaveUuid, serviceUuid string) string { + year, week, day, hour := TimeToWeekDayHour(time) + return phf.getHourlyLogFilePath(year, week, day, hour, enclaveUuid, serviceUuid) +} + +func (phf *PerHourFileLayout) GetLogFilePaths( + filesystem volume_filesystem.VolumeFilesystem, + retentionPeriod time.Duration, + retentionPeriodIntervals int, + enclaveUuid, serviceUuid string, +) ([]string, error) { + var paths []string + retentionPeriodInHours := DurationToHours(retentionPeriod) + + if retentionPeriodIntervals < 0 { + return phf.getLogFilePathsFromNowTillRetentionPeriod(filesystem, retentionPeriodInHours, enclaveUuid, serviceUuid) + } else { + paths = phf.getLogFilePathsBeyondRetentionPeriod(filesystem, retentionPeriodInHours, retentionPeriodIntervals, enclaveUuid, serviceUuid) + } + + return paths, nil +} + +func (phf *PerHourFileLayout) getLogFilePathsFromNowTillRetentionPeriod(fs volume_filesystem.VolumeFilesystem, retentionPeriodInHours int, enclaveUuid, serviceUuid string) ([]string, error) { + var paths []string + currentTime := phf.time.Now() + + // scan for first existing log file + firstHourWithLogs := 0 + for i := 0; i < retentionPeriodInHours; i++ { + year, week, day, 
hour := TimeToWeekDayHour(currentTime.Add(time.Duration(-i) * time.Hour)) + filePathStr := phf.getHourlyLogFilePath(year, week, day, hour, enclaveUuid, serviceUuid) + if _, err := fs.Stat(filePathStr); err == nil { + paths = append(paths, filePathStr) + firstHourWithLogs = i + break + } else { + // return if error is not due to nonexistent file path + if !os.IsNotExist(err) { + return paths, err + } + } + } + + // scan for remaining files as far back as they exist before the retention period + for i := firstHourWithLogs + 1; i < retentionPeriodInHours; i++ { + year, week, day, hour := TimeToWeekDayHour(currentTime.Add(time.Duration(-i) * time.Hour)) + filePathStr := phf.getHourlyLogFilePath(year, week, day, hour, enclaveUuid, serviceUuid) + if _, err := fs.Stat(filePathStr); err != nil { + break + } + paths = append(paths, filePathStr) + } + + // reverse for oldest to most recent + slices.Reverse(paths) + + return paths, nil +} + +func (phf *PerHourFileLayout) getLogFilePathsBeyondRetentionPeriod(fs volume_filesystem.VolumeFilesystem, retentionPeriodInHours int, retentionPeriodIntervals int, enclaveUuid, serviceUuid string) []string { + var paths []string + currentTime := phf.time.Now() + + // scan for log files just beyond the retention period + for i := 0; i < retentionPeriodIntervals; i++ { + numHoursToGoBack := retentionPeriodInHours + i + year, week, day, hour := TimeToWeekDayHour(currentTime.Add(time.Duration(-numHoursToGoBack) * time.Hour)) + filePathStr := phf.getHourlyLogFilePath(year, week, day, hour, enclaveUuid, serviceUuid) + if _, err := fs.Stat(filePathStr); err != nil { + continue + } + paths = append(paths, filePathStr) + } + + return paths +} + +func (phf *PerHourFileLayout) getHourlyLogFilePath(year, week, day, hour int, enclaveUuid, serviceUuid string) string { + // match the format in which Vector outputs week, hours, days + formattedWeekNum := fmt.Sprintf("%02d", week) + formattedHourNum := fmt.Sprintf("%02d", hour) + return 
fmt.Sprintf(perHourFilePathFmtSt, phf.baseLogsFilePath, strconv.Itoa(year), formattedWeekNum, strconv.Itoa(day), formattedHourNum, enclaveUuid, serviceUuid, volume_consts.Filetype) +} + +func TimeToWeekDayHour(time time.Time) (int, int, int, int) { + year, week := time.ISOWeek() + hour := time.Hour() + day := int(time.Weekday()) + // convert sunday in golang's time(0) to sunday (0) in strftime/Vector log aggregator time(7) + if day == 0 { + day = 7 + } + return year, week, day, hour +} + +func DurationToHours(duration time.Duration) int { + return int(math.Ceil(duration.Hours())) +} diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_hour_file_layout_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_hour_file_layout_test.go new file mode 100644 index 0000000000..7fcf1fa96a --- /dev/null +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_hour_file_layout_test.go @@ -0,0 +1,326 @@ +package file_layout + +import ( + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +func TestGetLogFilePath(t *testing.T) { + currentTime := logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 1) + fileLayout := NewPerHourFileLayout(currentTime, volume_consts.LogsStorageDirpath) + + expectedFilepath := "/var/log/kurtosis/2024/01/1/01/test-enclave/test-user-service-1.json" + now := currentTime.Now() + actualFilePath := fileLayout.GetLogFilePath(now, testEnclaveUuid, testUserService1Uuid) + require.Equal(t, expectedFilepath, 
actualFilePath) +} + +func TestGetLogFilePathsWithHourlyRetention(t *testing.T) { + filesystem := volume_filesystem.NewMockedVolumeFilesystem() + + currentTime := logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 5) + fileLayout := NewPerHourFileLayout(currentTime, volume_consts.LogsStorageDirpath) + + hourZeroFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 0).Now(), testEnclaveUuid, testUserService1Uuid) + hourOneFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 1).Now(), testEnclaveUuid, testUserService1Uuid) + hourTwoFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 2).Now(), testEnclaveUuid, testUserService1Uuid) + hourThreeFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 3).Now(), testEnclaveUuid, testUserService1Uuid) + hourFourFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 4).Now(), testEnclaveUuid, testUserService1Uuid) + hourFiveFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, defaultDay, 5).Now(), testEnclaveUuid, testUserService1Uuid) + + createFilepaths(t, filesystem, []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + }) + + expectedLogFilePaths := []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + } + + retentionPeriod := 6 * time.Hour // retention period of 6 hours should return all the file paths + logFilePaths, err := fileLayout.GetLogFilePaths(filesystem, retentionPeriod, -1, testEnclaveUuid, testUserService1Uuid) + + require.NoError(t, err) + require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) + for i, filePath := range expectedLogFilePaths { + require.Equal(t, filePath, logFilePaths[i]) + } +} + +func 
TestGetLogFilePathsWithHourlyRetentionAcrossDays(t *testing.T) { + filesystem := volume_filesystem.NewMockedVolumeFilesystem() + + currentTime := logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 2, 2) + fileLayout := NewPerHourFileLayout(currentTime, volume_consts.LogsStorageDirpath) + + hourZeroFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 1, 21).Now(), testEnclaveUuid, testUserService1Uuid) + hourOneFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 1, 22).Now(), testEnclaveUuid, testUserService1Uuid) + hourTwoFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 1, 23).Now(), testEnclaveUuid, testUserService1Uuid) + hourThreeFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + hourFourFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 2, 1).Now(), testEnclaveUuid, testUserService1Uuid) + hourFiveFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, defaultWeek, 2, 2).Now(), testEnclaveUuid, testUserService1Uuid) + + createFilepaths(t, filesystem, []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + }) + + expectedLogFilePaths := []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + } + + retentionPeriod := 6 * time.Hour // retention period of 6 hours should return all the file paths + logFilePaths, err := fileLayout.GetLogFilePaths(filesystem, retentionPeriod, -1, testEnclaveUuid, testUserService1Uuid) + + require.NoError(t, err) + require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) + for i, filePath := range expectedLogFilePaths { + require.Equal(t, filePath, logFilePaths[i]) + } +} + +func TestGetLogFilePathsWithHourlyRetentionAcrossWeeks(t *testing.T) { + filesystem := 
volume_filesystem.NewMockedVolumeFilesystem() + + currentTime := logs_clock.NewMockLogsClockPerHour(defaultYear, 18, 1, 2) + fileLayout := NewPerHourFileLayout(currentTime, volume_consts.LogsStorageDirpath) + + hourZeroFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, 17, 0, 21).Now(), testEnclaveUuid, testUserService1Uuid) + hourOneFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, 17, 0, 22).Now(), testEnclaveUuid, testUserService1Uuid) + hourTwoFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, 17, 0, 23).Now(), testEnclaveUuid, testUserService1Uuid) + hourThreeFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, 18, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + hourFourFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, 18, 1, 1).Now(), testEnclaveUuid, testUserService1Uuid) + hourFiveFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(defaultYear, 18, 1, 2).Now(), testEnclaveUuid, testUserService1Uuid) + + createFilepaths(t, filesystem, []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + }) + + expectedLogFilePaths := []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + } + + retentionPeriod := 6 * time.Hour // retention period of 6 hours should return all the file paths + logFilePaths, err := fileLayout.GetLogFilePaths(filesystem, retentionPeriod, -1, testEnclaveUuid, testUserService1Uuid) + + require.NoError(t, err) + require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) + for i, filePath := range expectedLogFilePaths { + require.Equal(t, filePath, logFilePaths[i]) + } +} + +func TestGetLogFilePathsWithHourlyRetentionAcrossYears(t *testing.T) { + filesystem := volume_filesystem.NewMockedVolumeFilesystem() + + currentTime := logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 2) + fileLayout := 
NewPerHourFileLayout(currentTime, volume_consts.LogsStorageDirpath) + + hourZeroFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 52, 0, 21).Now(), testEnclaveUuid, testUserService1Uuid) + hourOneFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 52, 0, 22).Now(), testEnclaveUuid, testUserService1Uuid) + hourTwoFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 52, 0, 23).Now(), testEnclaveUuid, testUserService1Uuid) + hourThreeFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + hourFourFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 1).Now(), testEnclaveUuid, testUserService1Uuid) + hourFiveFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 2).Now(), testEnclaveUuid, testUserService1Uuid) + + createFilepaths(t, filesystem, []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + }) + + expectedLogFilePaths := []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFourFp, + hourFiveFp, + } + + retentionPeriod := 6 * time.Hour // retention period of 6 hours should return all the file paths + logFilePaths, err := fileLayout.GetLogFilePaths(filesystem, retentionPeriod, -1, testEnclaveUuid, testUserService1Uuid) + + require.NoError(t, err) + require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) + for i, filePath := range expectedLogFilePaths { + require.Equal(t, filePath, logFilePaths[i]) + } + +} + +func TestSundayIsConvertedFromStrftimeToGolangTime(t *testing.T) { + expectedFilepath := "/var/log/kurtosis/2024/02/7/05/test-enclave/test-user-service-1.json" + + mockTime := logs_clock.NewMockLogsClockPerHour(2024, 2, 0, 5) + fileLayout := NewPerHourFileLayout(mockTime, volume_consts.LogsStorageDirpath) + + actualFilePath := fileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) + 
require.Equal(t, expectedFilepath, actualFilePath) +} + +func TestGetLogFilePathsWithHourlyRetentionReturnsCorrectPathsIfHoursMissingInBetween(t *testing.T) { + filesystem := volume_filesystem.NewMockedVolumeFilesystem() + + currentTime := logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 2) + fileLayout := NewPerHourFileLayout(currentTime, volume_consts.LogsStorageDirpath) + + hourZeroFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 52, 0, 21).Now(), testEnclaveUuid, testUserService1Uuid) + hourOneFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 52, 0, 22).Now(), testEnclaveUuid, testUserService1Uuid) + hourTwoFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 52, 0, 23).Now(), testEnclaveUuid, testUserService1Uuid) + hourThreeFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2023, 1, 1, 3).Now(), testEnclaveUuid, testUserService1Uuid) + hourFiveFp := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerHour(2024, 1, 1, 2).Now(), testEnclaveUuid, testUserService1Uuid) + + createFilepaths(t, filesystem, []string{ + hourZeroFp, + hourOneFp, + hourTwoFp, + hourThreeFp, + hourFiveFp, + }) + + retentionPeriod := 6 * time.Hour // this would return all filepaths, but hour three is missing + logFilePaths, err := fileLayout.GetLogFilePaths(filesystem, retentionPeriod, -1, testEnclaveUuid, testUserService1Uuid) + require.NoError(t, err) + require.Len(t, logFilePaths, 1) + require.Equal(t, hourFiveFp, logFilePaths[0]) // should only return hour 5 because hour 4 is missing +} + +func TestTimeToWeekDayHour(t *testing.T) { + tests := []struct { + name string + inputTime time.Time + expectedYear int + expectedWeek int + expectedDay int + expectedHour int + }{ + { + name: "Midweek Wednesday 14:00", + inputTime: time.Date(2023, 10, 18, 14, 0, 0, 0, time.UTC), + expectedYear: 2023, + expectedWeek: 42, + expectedDay: 3, + expectedHour: 14, + }, + { + name: "Sunday midnight", + inputTime: 
time.Date(2023, 10, 15, 0, 0, 0, 0, time.UTC), + expectedYear: 2023, + expectedWeek: 41, + expectedDay: 7, // Sunday should be converted to 7 + expectedHour: 0, + }, + { + name: "Monday 9:30", + inputTime: time.Date(2024, 1, 1, 9, 30, 0, 0, time.UTC), + expectedYear: 2024, + expectedWeek: 1, + expectedDay: 1, + expectedHour: 9, + }, + { + name: "Saturday afternoon", + inputTime: time.Date(2024, 10, 19, 15, 0, 0, 0, time.UTC), + expectedYear: 2024, + expectedWeek: 42, + expectedDay: 6, + expectedHour: 15, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + year, week, day, hour := TimeToWeekDayHour(tt.inputTime) + if year != tt.expectedYear || week != tt.expectedWeek || day != tt.expectedDay || hour != tt.expectedHour { + t.Errorf("TimeToWeekDayHour(%v) = (%d, %d, %d, %d); expected (%d, %d, %d, %d)", + tt.inputTime, year, week, day, hour, tt.expectedYear, tt.expectedWeek, tt.expectedDay, tt.expectedHour) + } + }) + } +} + +func TestDurationToHours(t *testing.T) { + tests := []struct { + name string + inputDuration time.Duration + expectedHours int + }{ + { + name: "Zero duration", + inputDuration: 0, + expectedHours: 0, + }, + { + name: "One hour duration", + inputDuration: time.Hour, + expectedHours: 1, + }, + { + name: "Fractional hour duration", + inputDuration: 90 * time.Minute, // 1.5 hours + expectedHours: 2, // should round up + }, + { + name: "More than one day", + inputDuration: 25 * time.Hour, + expectedHours: 25, + }, + { + name: "Negative duration", + inputDuration: -time.Hour, + expectedHours: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := DurationToHours(tt.inputDuration) + if result != tt.expectedHours { + t.Errorf("DurationToHours(%v) = %d; expected %d", tt.inputDuration, result, tt.expectedHours) + } + }) + } +} + +func createFilepaths(t *testing.T, filesystem volume_filesystem.VolumeFilesystem, filepaths []string) { + for _, path := range filepaths { + _, err := 
filesystem.Create(path) + require.NoError(t, err) + } +} diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout.go index c4fcb7fd8f..b03112ce70 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout.go @@ -16,30 +16,32 @@ const ( oneWeekInHours = 7 * 24 oneWeekDuration = oneWeekInHours * time.Hour - // basepath /year/week - PerWeekDirPathStr = "%s%s/%s/" + // basepath year/week + perWeekDirPathFmtStr = "%s%s/%s/" // ... enclave uuid/service uuid - PerWeekFilePathFmtStr = PerWeekDirPathStr + "%s/%s%s" + PerWeekFilePathFmtStr = perWeekDirPathFmtStr + "%s/%s%s" ) type PerWeekFileLayout struct { - time logs_clock.LogsClock + time logs_clock.LogsClock + baseLogsFilePath string } -func NewPerWeekFileLayout(time logs_clock.LogsClock) *PerWeekFileLayout { - return &PerWeekFileLayout{time: time} +func NewPerWeekFileLayout(time logs_clock.LogsClock, baseLogsFilePath string) *PerWeekFileLayout { + return &PerWeekFileLayout{time: time, baseLogsFilePath: baseLogsFilePath} } func (pwf *PerWeekFileLayout) GetLogFileLayoutFormat() string { // Right now this format is specifically made for Vector Logs Aggregators format // This wil be used my Vector LogsAggregator to determine the path to output to - return "/var/log/kurtosis/%%Y/%%V/{{ enclave_uuid }}/{{ service_uuid }}.json" + // is there a way to get rid of the /var/log/kurtosis? 
+ return fmt.Sprintf("\"%s%%%%Y/%%%%V/{{ enclave_uuid }}/{{ service_uuid }}.json\"", pwf.baseLogsFilePath) } func (pwf *PerWeekFileLayout) GetLogFilePath(time time.Time, enclaveUuid, serviceUuid string) string { year, week := time.ISOWeek() - return getLogFilePath(year, week, enclaveUuid, serviceUuid) + return pwf.getWeeklyFilePath(year, week, enclaveUuid, serviceUuid) } func (pwf *PerWeekFileLayout) GetLogFilePaths( @@ -67,7 +69,7 @@ func (pwf *PerWeekFileLayout) getLogFilePathsFromNowTillRetentionPeriod(fs volum firstWeekWithLogs := 0 for i := 0; i < retentionPeriodInWeeks; i++ { year, week := currentTime.Add(time.Duration(-i) * oneWeekDuration).ISOWeek() - filePathStr := getLogFilePath(year, week, enclaveUuid, serviceUuid) + filePathStr := pwf.getWeeklyFilePath(year, week, enclaveUuid, serviceUuid) if _, err := fs.Stat(filePathStr); err == nil { paths = append(paths, filePathStr) firstWeekWithLogs = i @@ -83,7 +85,7 @@ func (pwf *PerWeekFileLayout) getLogFilePathsFromNowTillRetentionPeriod(fs volum // scan for remaining files as far back as they exist before the retention period for i := firstWeekWithLogs + 1; i < retentionPeriodInWeeks; i++ { year, week := currentTime.Add(time.Duration(-i) * oneWeekDuration).ISOWeek() - filePathStr := getLogFilePath(year, week, enclaveUuid, serviceUuid) + filePathStr := pwf.getWeeklyFilePath(year, week, enclaveUuid, serviceUuid) if _, err := fs.Stat(filePathStr); err != nil { break } @@ -104,7 +106,7 @@ func (pwf *PerWeekFileLayout) getLogFilePathsBeyondRetentionPeriod(fs volume_fil for i := 0; i < retentionPeriodIntervals; i++ { numWeeksToGoBack := retentionPeriodInWeeks + i year, weekToRemove := currentTime.Add(time.Duration(-numWeeksToGoBack) * oneWeekDuration).ISOWeek() - filePathStr := getLogFilePath(year, weekToRemove, enclaveUuid, serviceUuid) + filePathStr := pwf.getWeeklyFilePath(year, weekToRemove, enclaveUuid, serviceUuid) if _, err := fs.Stat(filePathStr); err != nil { continue } @@ -114,11 +116,11 @@ func (pwf 
*PerWeekFileLayout) getLogFilePathsBeyondRetentionPeriod(fs volume_fil return paths } -func DurationToWeeks(d time.Duration) int { - return int(math.Round(d.Hours() / float64(oneWeekInHours))) +func (pwf *PerWeekFileLayout) getWeeklyFilePath(year, week int, enclaveUuid, serviceUuid string) string { + formattedWeekNum := fmt.Sprintf("%02d", week) + return fmt.Sprintf(PerWeekFilePathFmtStr, pwf.baseLogsFilePath, strconv.Itoa(year), formattedWeekNum, enclaveUuid, serviceUuid, volume_consts.Filetype) } -func getLogFilePath(year, week int, enclaveUuid, serviceUuid string) string { - formattedWeekNum := fmt.Sprintf("%02d", week) - return fmt.Sprintf(PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(year), formattedWeekNum, enclaveUuid, serviceUuid, volume_consts.Filetype) +func DurationToWeeks(d time.Duration) int { + return int(math.Round(d.Hours() / float64(oneWeekInHours))) } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout_test.go index 3c4731ed1b..312884f2f9 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout_test.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout/per_week_file_layout_test.go @@ -2,6 +2,7 @@ package file_layout import ( "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" "github.com/stretchr/testify/require" "testing" @@ -14,6 +15,7 @@ const ( retentionPeriodInWeeksForTesting = 5 
defaultYear = 2023 + defaultWeek = 17 defaultDay = 0 // sunday ) @@ -21,15 +23,15 @@ func TestGetLogFilePaths(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 17 - currentTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - fileLayout := NewPerWeekFileLayout(currentTime) + currentTime := logs_clock.NewMockLogsClockPerDay(defaultYear, currentWeek, defaultDay) + fileLayout := NewPerWeekFileLayout(currentTime, volume_consts.LogsStorageDirpath) - week12filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 12, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week13filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 13, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week14filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 14, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week15filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 15, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week16filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 16, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week17filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 17, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week12filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 12, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week13filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 13, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week14filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 14, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week15filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 15, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week16filepath := 
fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 16, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week17filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 17, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week12filepath) _, _ = filesystem.Create(week13filepath) @@ -60,15 +62,15 @@ func TestGetLogFilePathsAcrossNewYear(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 2 - currentTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - fileLayout := NewPerWeekFileLayout(currentTime) + currentTime := logs_clock.NewMockLogsClockPerDay(defaultYear, currentWeek, defaultDay) + fileLayout := NewPerWeekFileLayout(currentTime, volume_consts.LogsStorageDirpath) // ../week/enclave uuid/service uuid.json - week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear-1, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear-1, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear-1, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear-1, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear-1, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear-1, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) 
+ week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week50filepath) _, _ = filesystem.Create(week51filepath) @@ -98,15 +100,15 @@ func TestGetLogFilePathsAcrossNewYearWith53Weeks(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 3 - currentTime := logs_clock.NewMockLogsClock(2016, currentWeek, 1) - fileLayout := NewPerWeekFileLayout(currentTime) + currentTime := logs_clock.NewMockLogsClockPerDay(2016, currentWeek, 1) + fileLayout := NewPerWeekFileLayout(currentTime, volume_consts.LogsStorageDirpath) // According to ISOWeek, 2015 has 53 weeks - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2015, 51, 3).Now(), testEnclaveUuid, testUserService1Uuid) - week53filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2015, 52, 3).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2016, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2016, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week3filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2016, 3, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2015, 51, 3).Now(), testEnclaveUuid, testUserService1Uuid) + week53filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2015, 52, 3).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2016, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := 
fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2016, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week3filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2016, 3, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week52filepath) _, _ = filesystem.Create(week53filepath) @@ -136,13 +138,13 @@ func TestGetLogFilePathsWithDiffRetentionPeriod(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 2 - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - fileLayout := NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, currentWeek, defaultDay) + fileLayout := NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) // ../week/enclave uuid/service uuid.json - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear-1, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear-1, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week52filepath) _, _ = filesystem.Create(week1filepath) @@ -167,13 +169,13 @@ func TestGetLogFilePathsReturnsAllAvailableWeeks(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 2 - currentTime := logs_clock.NewMockLogsClock(defaultYear, 
currentWeek, defaultDay) - fileLayout := NewPerWeekFileLayout(currentTime) + currentTime := logs_clock.NewMockLogsClockPerDay(defaultYear, currentWeek, defaultDay) + fileLayout := NewPerWeekFileLayout(currentTime, volume_consts.LogsStorageDirpath) // ../week/enclave uuid/service uuid.json - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear-1, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear-1, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week52filepath) _, _ = filesystem.Create(week1filepath) @@ -199,13 +201,13 @@ func TestGetLogFilePathsReturnsCorrectPathsIfWeeksMissingInBetween(t *testing.T) filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 3 - currentTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - fileLayout := NewPerWeekFileLayout(currentTime) + currentTime := logs_clock.NewMockLogsClockPerDay(defaultYear, currentWeek, defaultDay) + fileLayout := NewPerWeekFileLayout(currentTime, volume_consts.LogsStorageDirpath) // ../week/enclave uuid/service uuid.json - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 0, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 1, 0).Now(), 
testEnclaveUuid, testUserService1Uuid) - week3filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 3, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 0, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week3filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 3, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week52filepath) _, _ = filesystem.Create(week1filepath) @@ -222,12 +224,12 @@ func TestGetLogFilePathsReturnsCorrectPathsIfCurrentWeekHasNoLogsYet(t *testing. filesystem := volume_filesystem.NewMockedVolumeFilesystem() currentWeek := 3 - currentTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - fileLayout := NewPerWeekFileLayout(currentTime) + currentTime := logs_clock.NewMockLogsClockPerDay(defaultYear, currentWeek, defaultDay) + fileLayout := NewPerWeekFileLayout(currentTime, volume_consts.LogsStorageDirpath) // ../week/enclave uuid/service uuid.json - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(defaultYear, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) // no logs for week current week exist yet _, _ = filesystem.Create(week1filepath) @@ -252,15 +254,15 @@ func TestGetLogFilePathsReturnsCorrectPathsIfCurrentWeekHasNoLogsYet(t *testing. 
func TestGetLogFilePathsOneIntervalBeyondRetentionPeriod(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() - mockTime := logs_clock.NewMockLogsClock(2023, 2, defaultDay) - fileLayout := NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2023, 2, defaultDay) + fileLayout := NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) - week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 2, 0).Now(), testEnclaveUuid, 
testUserService1Uuid) _, _ = filesystem.Create(week49filepath) _, _ = filesystem.Create(week50filepath) @@ -279,16 +281,16 @@ func TestGetLogFilePathsOneIntervalBeyondRetentionPeriod(t *testing.T) { func TestGetLogFilePathsTwoIntervalBeyondRetentionPeriod(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() - mockTime := logs_clock.NewMockLogsClock(2023, 2, defaultDay) - fileLayout := NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2023, 2, defaultDay) + fileLayout := NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) - week48filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 48, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week48filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 48, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week51filepath := 
fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week48filepath) _, _ = filesystem.Create(week49filepath) @@ -317,14 +319,14 @@ func TestGetLogFilePathsTwoIntervalBeyondRetentionPeriod(t *testing.T) { func TestGetLogFilePathsWithNoPathsBeyondRetentionPeriod(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() - mockTime := logs_clock.NewMockLogsClock(2023, 2, defaultDay) - fileLayout := NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2023, 2, defaultDay) + fileLayout := NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) - week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 
0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week50filepath) _, _ = filesystem.Create(week51filepath) @@ -342,16 +344,16 @@ func TestGetLogFilePathsWithNoPathsBeyondRetentionPeriod(t *testing.T) { func TestGetLogFilePathsWithMissingPathBetweenIntervals(t *testing.T) { filesystem := volume_filesystem.NewMockedVolumeFilesystem() - mockTime := logs_clock.NewMockLogsClock(2023, 2, defaultDay) - fileLayout := NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2023, 2, defaultDay) + fileLayout := NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) - week47filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 48, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week47filepath := 
fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 48, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = filesystem.Create(week47filepath) // 48 is missing diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager.go index 9338df5d31..67293a3d7f 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager.go @@ -2,25 +2,30 @@ package log_file_manager import ( "context" - "fmt" "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface" "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/enclave" "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_interface/objects/service" "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/uuid_generator" 
"github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" - "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" "github.com/kurtosis-tech/stacktrace" "github.com/sirupsen/logrus" + "io/fs" "k8s.io/apimachinery/pkg/api/errors" "os" - "strconv" + "strings" "time" ) const ( - oneWeek = 7 * 24 * time.Hour + removeLogsWaitHours = 5 * time.Minute + + createLogsWaitMinutes = 1 * time.Minute + + emptyEnclaveUuid = "" + + retentionPeriodIntervals = 1 ) // LogFileManager is responsible for creating and removing log files from filesystem. @@ -33,21 +38,19 @@ type LogFileManager struct { time logs_clock.LogsClock - logRetentionPeriodInWeeks int + logRetentionPeriod time.Duration + + baseFilePath string } -func NewLogFileManager( - kurtosisBackend backend_interface.KurtosisBackend, - filesystem volume_filesystem.VolumeFilesystem, - fileLayout file_layout.LogFileLayout, - time logs_clock.LogsClock, - logRetentionPeriodInWeeks int) *LogFileManager { +func NewLogFileManager(kurtosisBackend backend_interface.KurtosisBackend, filesystem volume_filesystem.VolumeFilesystem, fileLayout file_layout.LogFileLayout, time logs_clock.LogsClock, logRetentionPeriod time.Duration, baseFilePath string) *LogFileManager { return &LogFileManager{ - kurtosisBackend: kurtosisBackend, - filesystem: filesystem, - fileLayout: fileLayout, - time: time, - logRetentionPeriodInWeeks: logRetentionPeriodInWeeks, + kurtosisBackend: kurtosisBackend, + filesystem: filesystem, + fileLayout: fileLayout, + time: time, + logRetentionPeriod: logRetentionPeriod, + baseFilePath: baseFilePath, } } @@ -55,10 +58,10 @@ func 
NewLogFileManager( func (manager *LogFileManager) StartLogFileManagement(ctx context.Context) { // Schedule thread for removing log files beyond retention period go func() { - logrus.Debugf("Scheduling log removal for log retention every '%v' hours...", volume_consts.RemoveLogsWaitHours) + logrus.Debugf("Scheduling log removal for log retention every '%v' hours...", removeLogsWaitHours) manager.RemoveLogsBeyondRetentionPeriod(ctx) - logRemovalTicker := time.NewTicker(volume_consts.RemoveLogsWaitHours) + logRemovalTicker := time.NewTicker(removeLogsWaitHours) for range logRemovalTicker.C { logrus.Debug("Attempting to remove old log file paths...") manager.RemoveLogsBeyondRetentionPeriod(ctx) @@ -72,9 +75,9 @@ func (manager *LogFileManager) StartLogFileManagement(ctx context.Context) { // The LogsAggregator is configured to write logs to three different log file paths, one for uuid, service name, and shortened uuid // This is so that the logs are retrievable by each identifier even when enclaves are stopped. 
More context on this here: https://github.com/kurtosis-tech/kurtosis/pull/1213 // To prevent storing duplicate logs, the CreateLogFiles will ensure that the service name and short uuid log files are just symlinks to the uuid log file path - logFileCreatorTicker := time.NewTicker(volume_consts.CreateLogsWaitMinutes) + logFileCreatorTicker := time.NewTicker(createLogsWaitMinutes) - logrus.Debugf("Scheduling log file path creation every '%v' minutes...", volume_consts.CreateLogsWaitMinutes) + logrus.Debugf("Scheduling log file path creation every '%v' minutes...", createLogsWaitMinutes) for range logFileCreatorTicker.C { logrus.Trace("Creating log file paths...") err := manager.CreateLogFiles(ctx) @@ -143,22 +146,21 @@ func (manager *LogFileManager) RemoveLogsBeyondRetentionPeriod(ctx context.Conte serviceNameStr := string(serviceRegistration.GetName()) serviceShortUuidStr := uuid_generator.ShortenedUUIDString(serviceUuidStr) - retentionPeriod := time.Duration(manager.logRetentionPeriodInWeeks) * oneWeek - oldServiceLogFilesByUuid, err := manager.fileLayout.GetLogFilePaths(manager.filesystem, retentionPeriod, 1, string(enclaveUuid), serviceUuidStr) + oldServiceLogFilesByUuid, err := manager.fileLayout.GetLogFilePaths(manager.filesystem, manager.logRetentionPeriod, retentionPeriodIntervals, string(enclaveUuid), serviceUuidStr) if err != nil { logrus.Errorf("An error occurred getting log file paths for service '%v' in enclave '%v' logs beyond retention: %v", serviceUuidStr, enclaveUuid, err) } else { pathsToRemove = append(pathsToRemove, oldServiceLogFilesByUuid...) 
} - oldServiceLogFilesByName, err := manager.fileLayout.GetLogFilePaths(manager.filesystem, retentionPeriod, 1, string(enclaveUuid), serviceNameStr) + oldServiceLogFilesByName, err := manager.fileLayout.GetLogFilePaths(manager.filesystem, manager.logRetentionPeriod, retentionPeriodIntervals, string(enclaveUuid), serviceNameStr) if err != nil { logrus.Errorf("An error occurred getting log file paths for service '%v' in enclave '%v' logs beyond retention: %v", serviceNameStr, enclaveUuid, err) } else { pathsToRemove = append(pathsToRemove, oldServiceLogFilesByName...) } - oldServiceLogFilesByShortUuid, err := manager.fileLayout.GetLogFilePaths(manager.filesystem, retentionPeriod, 1, string(enclaveUuid), serviceShortUuidStr) + oldServiceLogFilesByShortUuid, err := manager.fileLayout.GetLogFilePaths(manager.filesystem, manager.logRetentionPeriod, retentionPeriodIntervals, string(enclaveUuid), serviceShortUuidStr) if err != nil { logrus.Errorf("An error occurred getting log file paths for service '%v' in enclave '%v' logs beyond retention: %v", serviceShortUuidStr, enclaveUuid, err) } else { @@ -183,26 +185,35 @@ func (manager *LogFileManager) RemoveLogsBeyondRetentionPeriod(ctx context.Conte } func (manager *LogFileManager) RemoveAllLogs() error { - // only removes logs for this year because Docker prevents all logs from base logs storage file path - year, _ := manager.time.Now().ISOWeek() - if err := manager.filesystem.RemoveAll(getLogsDirPathForYear(year)); err != nil { - return stacktrace.Propagate(err, "An error occurred attempting to remove all logs.") + logFilePaths, err := manager.getAllLogFilePaths(emptyEnclaveUuid) + if err != nil { + return stacktrace.Propagate(err, "An error occurred getting all log file paths.") + } + for _, filePath := range logFilePaths { + if err := manager.filesystem.Remove(filePath); err != nil { + return stacktrace.Propagate(err, "An error occurred removing log file path '%v'.", filePath) + } } return nil } func (manager 
*LogFileManager) RemoveEnclaveLogs(enclaveUuid string) error { - currentTime := manager.time.Now() - for i := 0; i < manager.logRetentionPeriodInWeeks; i++ { - year, week := currentTime.Add(time.Duration(-i) * oneWeek).ISOWeek() - enclaveLogsDirPathForWeek := getEnclaveLogsDirPath(year, week, enclaveUuid) - if err := manager.filesystem.RemoveAll(enclaveLogsDirPathForWeek); err != nil { - return stacktrace.Propagate(err, "An error occurred attempting to remove logs for enclave '%v' logs at the following path: %v", enclaveUuid, enclaveLogsDirPathForWeek) + enclaveLogFilePaths, err := manager.getAllLogFilePaths(enclaveUuid) + if err != nil { + return stacktrace.Propagate(err, "An error occurred getting all log file paths for '%v'.", enclaveUuid) + } + for _, filePath := range enclaveLogFilePaths { + if err := manager.filesystem.Remove(filePath); err != nil { + return stacktrace.Propagate(err, "An error occurred removing enclave '%v' log file path '%v'.", enclaveUuid, filePath) } } return nil } +func (manager *LogFileManager) GetLogFileLayoutFormat() string { + return manager.fileLayout.GetLogFileLayoutFormat() +} + func (manager *LogFileManager) getEnclaveAndServiceInfo(ctx context.Context) (map[enclave.EnclaveUUID][]*service.ServiceRegistration, error) { enclaveToServicesMap := map[enclave.EnclaveUUID][]*service.ServiceRegistration{} @@ -253,20 +264,23 @@ func (manager *LogFileManager) createSymlinkLogFile(targetLogFilePath, symlinkLo return nil } -// creates a directory path of format //year/week// -func getEnclaveLogsDirPath(year, week int, enclaveUuid string) string { - logsDirPathForYearAndWeek := getLogsDirPathForWeek(year, week) - return fmt.Sprintf("%s%s/", logsDirPathForYearAndWeek, enclaveUuid) -} - -// creates a directory path of format //year/week/ -func getLogsDirPathForWeek(year, week int) string { - logsDirPathForYear := getLogsDirPathForYear(year) - formattedWeekNum := fmt.Sprintf("%02d", week) - return fmt.Sprintf("%s%s/", logsDirPathForYear, 
formattedWeekNum) -} - -// creates a directory path of format //year/ -func getLogsDirPathForYear(year int) string { - return fmt.Sprintf("%s%s/", volume_consts.LogsStorageDirpath, strconv.Itoa(year)) +// if [enclaveUuid] is empty, gets log file paths from all enclaves +func (manager *LogFileManager) getAllLogFilePaths(enclaveUuid string) ([]string, error) { + var paths []string + walkFunc := func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + if enclaveUuid == emptyEnclaveUuid || strings.Contains(path, enclaveUuid) { + paths = append(paths, path) + } + return nil + } + if err := manager.filesystem.Walk(manager.baseFilePath, walkFunc); err != nil { + return []string{}, stacktrace.Propagate(err, "An error occurred walking file path.") + } + return paths, nil } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager_test.go index 5d502597c5..355fe09874 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager_test.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager/log_file_manager_test.go @@ -10,6 +10,8 @@ import ( "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/uuid_generator" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers" + 
"github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" "github.com/stretchr/testify/require" "net" @@ -27,19 +29,19 @@ const ( func TestRemoveLogsBeyondRetentionPeriod(t *testing.T) { ctx := context.Background() - mockTime := logs_clock.NewMockLogsClock(2023, 2, defaultDay) - fileLayout := file_layout.NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2023, 2, defaultDay) + fileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) mockKurtosisBackend := getMockedKurtosisBackendWithEnclavesAndServices(ctx, t, mockTime) // setup filesystem mockFs := volume_filesystem.NewMockedVolumeFilesystem() - week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week49filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 49, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week50filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 50, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week51filepath := 
fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week1filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 1, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week2filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2023, 2, 0).Now(), testEnclaveUuid, testUserService1Uuid) _, _ = mockFs.Create(week49filepath) _, _ = mockFs.Create(week50filepath) @@ -48,7 +50,7 @@ func TestRemoveLogsBeyondRetentionPeriod(t *testing.T) { _, _ = mockFs.Create(week1filepath) _, _ = mockFs.Create(week2filepath) - logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, 5) + logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, persistent_volume_helpers.ConvertWeeksToDuration(5), volume_consts.LogsStorageDirpath) logFileManager.RemoveLogsBeyondRetentionPeriod(ctx) // should remove week 49 logs _, err := mockFs.Stat(week49filepath) @@ -58,23 +60,23 @@ func TestRemoveLogsBeyondRetentionPeriod(t *testing.T) { func TestRemoveEnclaveLogs(t *testing.T) { mockKurtosisBackend := backend_interface.NewMockKurtosisBackend(t) - mockTime := logs_clock.NewMockLogsClock(2022, 52, defaultDay) - fileLayout := file_layout.NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2022, 52, defaultDay) + fileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) // setup filesystem mockFs := volume_filesystem.NewMockedVolumeFilesystem() - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepathDiffEnclave := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), "enclaveOne", "serviceTwo") - week52filepath := 
fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepathDiffService := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, "serviceThree") + week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepathDiffEnclave := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), "enclaveOne", "serviceTwo") + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepathDiffService := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, "serviceThree") _, _ = mockFs.Create(week51filepath) _, _ = mockFs.Create(week52filepathDiffEnclave) _, _ = mockFs.Create(week52filepath) _, _ = mockFs.Create(week52filepathDiffService) - logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, 5) + logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, persistent_volume_helpers.ConvertWeeksToDuration(5), volume_consts.LogsStorageDirpath) err := logFileManager.RemoveEnclaveLogs(testEnclaveUuid) // should remove only all log files for enclave one require.NoError(t, err) @@ -97,23 +99,23 @@ func TestRemoveEnclaveLogs(t *testing.T) { func TestRemoveAllLogs(t *testing.T) { mockKurtosisBackend := backend_interface.NewMockKurtosisBackend(t) - mockTime := logs_clock.NewMockLogsClock(2022, 52, defaultDay) - fileLayout := file_layout.NewPerWeekFileLayout(mockTime) + mockTime := logs_clock.NewMockLogsClockPerDay(2022, 52, defaultDay) + fileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) // setup filesystem mockFs := volume_filesystem.NewMockedVolumeFilesystem() - week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 51, 
0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepathDiffEnclave := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), "enclaveOne", "serviceTwo") - week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - week52filepathDiffService := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, "serviceThree") + week51filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 51, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepathDiffEnclave := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), "enclaveOne", "serviceTwo") + week52filepath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + week52filepathDiffService := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, "serviceThree") _, _ = mockFs.Create(week51filepath) _, _ = mockFs.Create(week52filepathDiffEnclave) _, _ = mockFs.Create(week52filepath) _, _ = mockFs.Create(week52filepathDiffService) - logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, 5) + logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, persistent_volume_helpers.ConvertWeeksToDuration(5), volume_consts.LogsStorageDirpath) err := logFileManager.RemoveAllLogs() require.NoError(t, err) @@ -136,19 +138,19 @@ func TestRemoveAllLogs(t *testing.T) { } func TestCreateLogFiles(t *testing.T) { - mockTime := logs_clock.NewMockLogsClock(2022, 52, defaultDay) + mockTime := logs_clock.NewMockLogsClockPerDay(2022, 52, defaultDay) mockFs := volume_filesystem.NewMockedVolumeFilesystem() - fileLayout := file_layout.NewPerWeekFileLayout(mockTime) + fileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) // setup kurtosis backend ctx := 
context.Background() mockKurtosisBackend := getMockedKurtosisBackendWithEnclavesAndServices(ctx, t, mockTime) - expectedServiceUuidFilePath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) - expectedServiceNameFilePath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Name) - expectedServiceShortUuidFilePath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClock(2022, 52, 0).Now(), testEnclaveUuid, uuid_generator.ShortenedUUIDString(testUserService1Uuid)) + expectedServiceUuidFilePath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Uuid) + expectedServiceNameFilePath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, testUserService1Name) + expectedServiceShortUuidFilePath := fileLayout.GetLogFilePath(logs_clock.NewMockLogsClockPerDay(2022, 52, 0).Now(), testEnclaveUuid, uuid_generator.ShortenedUUIDString(testUserService1Uuid)) - logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, 5) + logFileManager := NewLogFileManager(mockKurtosisBackend, mockFs, fileLayout, mockTime, persistent_volume_helpers.ConvertWeeksToDuration(5), volume_consts.LogsStorageDirpath) err := logFileManager.CreateLogFiles(ctx) require.NoError(t, err) diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock.go index f2a199589f..710a4b3d13 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock.go @@ -1,6 +1,8 @@ package logs_clock -import "time" +import ( + "time" +) const ( daysInWeek = 7 @@ 
-23,32 +25,53 @@ func (clock *RealLogsClock) Now() time.Time { return time.Now() } +// week 00-52 +// day 0-7 +// hour 0-23 type MockLogsClock struct { - year, week, day int + year, week, day, hour int } -// week 00-52 -// day 0-7 -func NewMockLogsClock(year, week, day int) *MockLogsClock { +func NewMockLogsClockPerDay(year, week, day int) *MockLogsClock { return &MockLogsClock{ year: year, week: week, day: day, + hour: 0, + } +} + +func NewMockLogsClockPerHour(year, week, day, hour int) *MockLogsClock { + return &MockLogsClock{ + year: year, + week: week, + day: day, + hour: hour, } } -// The mocked Now() function returns a time object representing the start of date specified by the year, week, and day func (clock *MockLogsClock) Now() time.Time { - // Create a time.Time object for January 1st of the given year - startOfYear := time.Date(clock.year, time.January, 1, 0, 0, 0, 0, time.UTC) + // Create a time object for January 4th of the given year (ISO week 1 always includes January 4th). + startOfYear := time.Date(clock.year, time.January, 4, clock.hour, 0, 0, 0, time.UTC) + + // Get the Monday of the first ISO week of the year + isoYearStart := startOfYear.AddDate(0, 0, int(time.Monday-startOfYear.Weekday())) + + // Adjust for Sunday as day 0 in the tests (Go uses Sunday as the first day of the week, but ISO uses Monday). + var dayToAdd int + if clock.day == 0 { + // If the test input day is 0 (Sunday), we need to handle it as the 7th day of the week. + dayToAdd = 6 + } else { + // Otherwise, shift the day back by 1 to align with ISO (Monday as 1, etc.). + dayToAdd = clock.day - 1 + } - // Calculate the number of days to add to reach the start of the desired week. - daysToAdd := time.Duration(clock.week * daysInWeek) + // Calculate the number of days to add based on the week and adjusted day. + daysToAdd := (clock.week-1)*daysInWeek + dayToAdd - // Calculate the start of the desired week by adding days to the start of the year. 
- startOfWeek := startOfYear.Add(daysToAdd * 24 * time.Hour) + // Add the calculated days to the ISO week start and return the result. + mockTime := isoYearStart.AddDate(0, 0, daysToAdd) - // Adjust the start of the week to the beginning of the week (usually Sunday or Monday). - startOfWeek = startOfWeek.Add(time.Duration(clock.day) * 24 * time.Hour) - return startOfWeek + return mockTime } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock_test.go new file mode 100644 index 0000000000..c79e7bd6af --- /dev/null +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock/logs_clock_test.go @@ -0,0 +1,86 @@ +package logs_clock + +import ( + "testing" +) + +func TestMockLogsClockPerDay(t *testing.T) { + tests := []struct { + year int + week int + day int + description string + }{ + {2024, 1, 0, "First day of ISO week 1 (Sunday)"}, + {2024, 1, 1, "Second day of ISO week 1 (Monday)"}, + {2024, 1, 2, "Third day of ISO week 1 (Tuesday)"}, + {2024, 1, 3, "Fourth day of ISO week 1 (Wednesday)"}, + {2024, 1, 4, "Fifth day of ISO week 1 (Thursday)"}, + {2024, 1, 5, "Sixth day of ISO week 1 (Friday)"}, + {2024, 1, 6, "Last day of ISO week 1 (Saturday)"}, + {2024, 52, 0, "First day of ISO week 52 (Sunday)"}, + {2024, 52, 1, "First day of ISO week 52 (Monday)"}, + {2024, 52, 5, "Fifth day of ISO week 52 (Friday)"}, + {2024, 52, 6, "Last day of ISO week 52 (Saturday)"}, + {2024, 48, 0, "First day of ISO week 48 (Sunday)"}, + {2024, 48, 4, "Fifth day of ISO week 48 (Thursday)"}, + {2024, 24, 2, "Third day of ISO week 24 (Tuesday)"}, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + clock := NewMockLogsClockPerDay(test.year, test.week, test.day) + result := clock.Now() + + // Get the ISO week and day from the result + year, week := 
result.ISOWeek() + day := int(result.Weekday()) + + if year != test.year || week != test.week || day != test.day { + t.Errorf("Expected (year: %d, week: %d, day: %d) but got (year: %d, week: %d, day: %d)", + test.year, test.week, test.day, year, week, day) + } + }) + } +} + +func TestMockLogsClockPerHour(t *testing.T) { + tests := []struct { + year int + week int + day int + hour int + description string + }{ + {2024, 1, 0, 0, "First day of ISO week 1, hour 0 (Sunday)"}, + {2024, 1, 0, 12, "First day of ISO week 1, hour 12 (Sunday)"}, + {2024, 1, 1, 0, "Second day of ISO week 1, hour 0 (Monday)"}, + {2024, 1, 1, 6, "Second day of ISO week 1, hour 6 (Monday)"}, + {2024, 1, 2, 0, "Third day of ISO week 1, hour 0 (Tuesday)"}, + {2024, 1, 6, 23, "Last day of ISO week 1, hour 23 (Saturday)"}, + {2024, 52, 0, 0, "First day of ISO week 52, hour 0 (Sunday)"}, + {2024, 52, 1, 0, "First day of ISO week 52, hour 0 (Monday)"}, + {2024, 52, 5, 12, "Fifth day of ISO week 52, hour 12 (Friday)"}, + {2024, 52, 6, 0, "Last day of ISO week 52, hour 0 (Saturday)"}, + {2024, 48, 0, 0, "First day of ISO week 48, hour 0 (Sunday)"}, + {2024, 48, 4, 18, "Fifth day of ISO week 48, hour 18 (Thursday)"}, + {2024, 24, 2, 15, "Third day of ISO week 24, hour 15 (Tuesday)"}, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + clock := NewMockLogsClockPerHour(test.year, test.week, test.day, test.hour) + result := clock.Now() + + // Get the ISO week, day, and hour from the result + year, week := result.ISOWeek() + day := int(result.Weekday()) + hour := result.Hour() + + if year != test.year || week != test.week || day != test.day || hour != test.hour { + t.Errorf("Expected (year: %d, week: %d, day: %d, hour: %d) but got (year: %d, week: %d, day: %d, hour: %d)", + test.year, test.week, test.day, test.hour, year, week, day, hour) + } + }) + } +} diff --git 
a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers/persistent_volume_helpers.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers/persistent_volume_helpers.go new file mode 100644 index 0000000000..c6b9d483fb --- /dev/null +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers/persistent_volume_helpers.go @@ -0,0 +1,11 @@ +package persistent_volume_helpers + +import "time" + +const ( + hoursInWeek = 7 * 24 +) + +func ConvertWeeksToDuration(weeks int) time.Duration { + return time.Duration(weeks*hoursInWeek) * time.Hour +} diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers/persistent_volume_helpers_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers/persistent_volume_helpers_test.go new file mode 100644 index 0000000000..dde63db192 --- /dev/null +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers/persistent_volume_helpers_test.go @@ -0,0 +1,44 @@ +package persistent_volume_helpers + +import ( + "testing" + "time" +) + +func TestConvertWeeksToDuration(t *testing.T) { + tests := []struct { + name string + weeks int + expected time.Duration + }{ + { + name: "Zero weeks", + weeks: 0, + expected: 0, + }, + { + name: "One week", + weeks: 1, + expected: 7 * 24 * time.Hour, + }, + { + name: "Two weeks", + weeks: 2, + expected: 2 * 7 * 24 * time.Hour, + }, + { + name: "Negative weeks", + weeks: -1, + expected: -7 * 24 * time.Hour, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ConvertWeeksToDuration(tt.weeks) + if result != tt.expected { + t.Errorf("ConvertWeeksToDuration(%d) = %v; want %v", tt.weeks, result, tt.expected) + } + }) + } +} diff --git 
a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go index d245581bde..d6011e1bb0 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_logs_database_client_test.go @@ -9,6 +9,7 @@ import ( "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/log_file_manager" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" @@ -52,6 +53,7 @@ const ( defaultYear = 2023 defaultDay = 0 // sunday + defaultHour = 5 startingWeek = 4 defaultShouldReturnAllLogs = true @@ -130,9 +132,10 @@ func TestStreamUserServiceLogsPerWeek_WithFilters(t *testing.T) { testUserService3Uuid: true, } - underlyingFs := createFilledPerWeekFilesystem(startingWeek) - mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, 
retentionPeriodInWeeksForTesting) + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, startingWeek, defaultDay) + perWeekFileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) + underlyingFs := createFilledFilesystem(perWeekFileLayout, mockTime.Now()) + perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perWeekFileLayout) receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( t, @@ -154,6 +157,56 @@ func TestStreamUserServiceLogsPerWeek_WithFilters(t *testing.T) { require.NoError(t, testEvaluationErr) } +func TestStreamUserServiceLogsPerHour_WithFilters(t *testing.T) { + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: 2, + testUserService2Uuid: 2, + testUserService3Uuid: 2, + } + + firstTextFilter := logline.NewDoesContainTextLogLineFilter(firstFilterText) + secondTextFilter := logline.NewDoesNotContainTextLogLineFilter(secondFilterText) + regexFilter := logline.NewDoesContainMatchRegexLogLineFilter(firstMatchRegexFilterStr) + + logLinesFilters := []logline.LogLineFilter{ + *firstTextFilter, + *secondTextFilter, + *regexFilter, + } + + expectedFirstLogLine := "Starting feature 'runs idempotently'" + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + testUserService2Uuid: true, + testUserService3Uuid: true, + } + + mockTime := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, defaultHour) + perHourFileLayout := file_layout.NewPerHourFileLayout(mockTime, volume_consts.LogsStorageDirpath) + underlyingFs := createFilledFilesystem(perHourFileLayout, mockTime.Now()) + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + 
receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } + + require.NoError(t, testEvaluationErr) +} + func TestStreamUserServiceLogs_NoLogsFromPersistentVolume(t *testing.T) { expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ testUserService1Uuid: 0, @@ -213,9 +266,10 @@ func TestStreamUserServiceLogsPerWeek_NoLogsFromPersistentVolume(t *testing.T) { testUserService3Uuid: true, } - underlyingFs := createEmptyPerWeekFilesystem(startingWeek) - mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, startingWeek, defaultDay) + perWeekFileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) + underlyingFs := createEmptyFilesystem(perWeekFileLayout, mockTime.Now()) + perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perWeekFileLayout) receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( t, @@ -235,6 +289,48 @@ func TestStreamUserServiceLogsPerWeek_NoLogsFromPersistentVolume(t *testing.T) { } } +func TestStreamUserServiceLogsPerHour_NoLogsFromPersistentVolume(t *testing.T) { + expectedServiceAmountLogLinesByServiceUuid := 
map[service.ServiceUUID]int{ + testUserService1Uuid: 0, + testUserService2Uuid: 0, + testUserService3Uuid: 0, + } + + firstTextFilter := logline.NewDoesContainTextLogLineFilter(notFoundedFilterText) + + logLinesFilters := []logline.LogLineFilter{ + *firstTextFilter, + } + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + testUserService2Uuid: true, + testUserService3Uuid: true, + } + + mockTime := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, defaultHour) + perHourFileLayout := file_layout.NewPerHourFileLayout(mockTime, volume_consts.LogsStorageDirpath) + underlyingFs := createEmptyFilesystem(perHourFileLayout, mockTime.Now()) + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + } +} + func TestStreamUserServiceLogs_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) { expectedAmountLogLines := 10_000 @@ -311,16 +407,15 @@ func TestStreamUserServiceLogsPerWeek_ThousandsOfLogLinesSuccessfulExecution(t * } underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() - // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format - formattedWeekNum := fmt.Sprintf("%02d", startingWeek) - file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, 
strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, startingWeek, defaultDay) + perWeekFileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) + file1PathStr := perWeekFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) file1, err := underlyingFs.Create(file1PathStr) require.NoError(t, err) _, err = file1.WriteString(logLinesStr) require.NoError(t, err) - mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perWeekFileLayout) receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( t, @@ -341,6 +436,59 @@ func TestStreamUserServiceLogsPerWeek_ThousandsOfLogLinesSuccessfulExecution(t * } } +func TestStreamUserServiceLogsPerHour_ThousandsOfLogLinesSuccessfulExecution(t *testing.T) { + expectedAmountLogLines := 10_000 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var emptyFilters []logline.LogLineFilter + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + var logLines []string + + for i := 0; i <= expectedAmountLogLines; i++ { + logLines = append(logLines, logLine1) + } + + logLinesStr := strings.Join(logLines, "\n") + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + mockTime := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, defaultHour) + perHourFileLayout := file_layout.NewPerHourFileLayout(mockTime, 
volume_consts.LogsStorageDirpath) + file1PathStr := perHourFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) + file1, err := underlyingFs.Create(file1PathStr) + require.NoError(t, err) + _, err = file1.WriteString(logLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + emptyFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + func TestStreamUserServiceLogs_EmptyLogLines(t *testing.T) { expectedAmountLogLines := 0 @@ -399,15 +547,15 @@ func TestStreamUserServiceLogsPerWeek_EmptyLogLines(t *testing.T) { logLinesStr := "" underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() - formattedWeekNum := fmt.Sprintf("%02d", startingWeek) - file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, string(enclaveUuid), testUserService1Uuid, volume_consts.Filetype) + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, startingWeek, defaultDay) + perWeekFileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) + file1PathStr := perWeekFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) file1, err := underlyingFs.Create(file1PathStr) require.NoError(t, 
err) _, err = file1.WriteString(logLinesStr) require.NoError(t, err) - mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perWeekFileLayout) receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( t, @@ -427,6 +575,50 @@ func TestStreamUserServiceLogsPerWeek_EmptyLogLines(t *testing.T) { } } +func TestStreamUserServiceLogsPerHour_EmptyLogLines(t *testing.T) { + expectedAmountLogLines := 0 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var emptyFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + logLinesStr := "" + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + mockTime := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, defaultHour) + perHourFileLayout := file_layout.NewPerHourFileLayout(mockTime, volume_consts.LogsStorageDirpath) + file1PathStr := perHourFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) + file1, err := underlyingFs.Create(file1PathStr) + require.NoError(t, err) + _, err = file1.WriteString(logLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + emptyFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) 
+ require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + } +} + func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) { expectedAmountLogLines := 8 @@ -455,26 +647,26 @@ func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) { logLine4} underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, 4, defaultDay) + perWeekFileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) week3logLinesStr := strings.Join(week3logLines, "\n") + "\n" week4logLinesStr := strings.Join(week4logLines, "\n") - formattedWeekFour := fmt.Sprintf("%02d", 4) - week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week4filepath := perWeekFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) week4, err := underlyingFs.Create(week4filepath) require.NoError(t, err) _, err = week4.WriteString(week4logLinesStr) require.NoError(t, err) - formattedWeekThree := fmt.Sprintf("%02d", 3) - week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week3Time := logs_clock.NewMockLogsClockPerDay(defaultYear, 3, defaultDay) + week3filepath := perWeekFileLayout.GetLogFilePath(week3Time.Now(), testEnclaveUuid, testUserService1Uuid) week3, err := underlyingFs.Create(week3filepath) require.NoError(t, err) _, err = week3.WriteString(week3logLinesStr) require.NoError(t, err) - mockTime := 
logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perWeekFileLayout) receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( t, @@ -493,7 +685,74 @@ func TestStreamUserServiceLogsPerWeek_WithLogsAcrossWeeks(t *testing.T) { require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) } +} + +func TestStreamUserServiceLogsPerHour_WithLogsAcrossHours(t *testing.T) { + expectedAmountLogLines := 8 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + hour4logLines := []string{ + logLine5, + logLine6, + logLine7, + logLine8} + hour3logLines := []string{ + logLine1, + logLine2, + logLine3a, + logLine3b, + logLine4} + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + hour4Time := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, 4) + perHourFileLayout := file_layout.NewPerHourFileLayout(hour4Time, volume_consts.LogsStorageDirpath) + + hour3logLinesStr := strings.Join(hour3logLines, "\n") + "\n" + hour4logLinesStr := strings.Join(hour4logLines, "\n") + + hour4filepath := perHourFileLayout.GetLogFilePath(hour4Time.Now(), testEnclaveUuid, testUserService1Uuid) + hour4, err := underlyingFs.Create(hour4filepath) + require.NoError(t, err) + _, err = hour4.WriteString(hour4logLinesStr) + require.NoError(t, err) + + hour3Time := 
logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, 3) + hour3filepath := perHourFileLayout.GetLogFilePath(hour3Time.Now(), testEnclaveUuid, testUserService1Uuid) + hour3, err := underlyingFs.Create(hour3filepath) + require.NoError(t, err) + _, err = hour3.WriteString(hour3logLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(hour4Time, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } } func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) { @@ -529,21 +788,21 @@ func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) { week4logLinesStr := strings.Join(week4logLines, "\n") + "\n" formattedWeekFour := fmt.Sprintf("%02d", 4) - week4filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week4filepath := fmt.Sprintf(file_layout.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekFour, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) week4, err := underlyingFs.Create(week4filepath) require.NoError(t, err) _, err = week4.WriteString(week4logLinesStr) require.NoError(t, 
err) formattedWeekThree := fmt.Sprintf("%02d", 3) - week3filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + week3filepath := fmt.Sprintf(file_layout.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekThree, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) week3, err := underlyingFs.Create(week3filepath) require.NoError(t, err) _, err = week3.WriteString(week3logLinesStr) require.NoError(t, err) - mockTime := logs_clock.NewMockLogsClock(defaultYear, 4, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, 4, defaultDay) + perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath)) receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( t, @@ -564,6 +823,211 @@ func TestStreamUserServiceLogsPerWeek_WithLogLineAcrossWeeks(t *testing.T) { } } +func TestStreamUserServiceLogsPerHour_WithLogLineAcrossHours(t *testing.T) { + expectedAmountLogLines := 8 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + hour4logLines := []string{ + logLine3b, + logLine4, + logLine5, + logLine6, + logLine7, + logLine8} + hour3logLines := []string{ + logLine1, + logLine2, + logLine3a} + + underlyingFs := 
volume_filesystem.NewMockedVolumeFilesystem() + hour4Time := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, 4) + perHourFileLayout := file_layout.NewPerHourFileLayout(hour4Time, volume_consts.LogsStorageDirpath) + + hour3logLinesStr := strings.Join(hour3logLines, "\n") + "\n" + hour4logLinesStr := strings.Join(hour4logLines, "\n") + + hour4filepath := perHourFileLayout.GetLogFilePath(hour4Time.Now(), testEnclaveUuid, testUserService1Uuid) + hour4, err := underlyingFs.Create(hour4filepath) + require.NoError(t, err) + _, err = hour4.WriteString(hour4logLinesStr) + require.NoError(t, err) + + hour3Time := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, 3) + hour3filepath := perHourFileLayout.GetLogFilePath(hour3Time.Now(), testEnclaveUuid, testUserService1Uuid) + hour3, err := underlyingFs.Create(hour3filepath) + require.NoError(t, err) + _, err = hour3.WriteString(hour3logLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(hour4Time, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + +func TestStreamUserServiceLogsPerHour_WithLogLineAcrossWeekDayHour(t *testing.T) { + expectedAmountLogLines := 8 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + 
testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + day1Hour0logLines := []string{ + logLine3b, + logLine4, + logLine5, + logLine6, + logLine7, + logLine8} + day0Hour24logLines := []string{ + logLine1, + logLine2, + logLine3a} + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + day1Hour0Time := logs_clock.NewMockLogsClockPerHour(defaultYear, 4, 1, 0) + day0Hour24Time := logs_clock.NewMockLogsClockPerHour(defaultYear, 3, 0, 23) + + perHourFileLayout := file_layout.NewPerHourFileLayout(day1Hour0Time, volume_consts.LogsStorageDirpath) + + day1Hour0logLinesStr := strings.Join(day1Hour0logLines, "\n") + "\n" + day0Hour24logLinesStr := strings.Join(day0Hour24logLines, "\n") + + day1Hour0filepath := perHourFileLayout.GetLogFilePath(day1Hour0Time.Now(), testEnclaveUuid, testUserService1Uuid) + day1Hour0, err := underlyingFs.Create(day1Hour0filepath) + require.NoError(t, err) + _, err = day1Hour0.WriteString(day1Hour0logLinesStr) + require.NoError(t, err) + + day0Hour24filepath := perHourFileLayout.GetLogFilePath(day0Hour24Time.Now(), testEnclaveUuid, testUserService1Uuid) + day0Hour24, err := underlyingFs.Create(day0Hour24filepath) + require.NoError(t, err) + _, err = day0Hour24.WriteString(day0Hour24logLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(day1Hour0Time, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines 
:= range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + +func TestStreamUserServiceLogsPerHour_WithLogLineAcrossWeeks(t *testing.T) { + expectedAmountLogLines := 8 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + expectedFirstLogLine := "Starting feature 'centralized logs'" + + hour4logLines := []string{ + logLine3b, + logLine4, + logLine5, + logLine6, + logLine7, + logLine8} + hour3logLines := []string{ + logLine1, + logLine2, + logLine3a} + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + hour4Time := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, 4) + perHourFileLayout := file_layout.NewPerHourFileLayout(hour4Time, volume_consts.LogsStorageDirpath) + + hour3logLinesStr := strings.Join(hour3logLines, "\n") + "\n" + hour4logLinesStr := strings.Join(hour4logLines, "\n") + + hour4filepath := perHourFileLayout.GetLogFilePath(hour4Time.Now(), testEnclaveUuid, testUserService1Uuid) + hour4, err := underlyingFs.Create(hour4filepath) + require.NoError(t, err) + _, err = hour4.WriteString(hour4logLinesStr) + require.NoError(t, err) + + hour3Time := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, 3) + hour3filepath := perHourFileLayout.GetLogFilePath(hour3Time.Now(), testEnclaveUuid, testUserService1Uuid) + hour3, err := underlyingFs.Create(hour3filepath) + require.NoError(t, err) + _, err = hour3.WriteString(hour3logLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(hour4Time, 
persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perHourFileLayout) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + require.Equal(t, expectedFirstLogLine, serviceLogLines[0].GetContent()) + } +} + func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) { expectedAmountLogLines := 3 @@ -585,16 +1049,16 @@ func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) { timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n" underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + mockTime := logs_clock.NewMockLogsClockPerDay(defaultYear, startingWeek, defaultDay) + perWeekFileLayout := file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath) - formattedWeekNum := fmt.Sprintf("%02d", startingWeek) - filepath := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + filepath := perWeekFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) file, err := underlyingFs.Create(filepath) require.NoError(t, err) _, err = file.WriteString(timestampedLogLinesStr) require.NoError(t, err) - mockTime := logs_clock.NewMockLogsClock(defaultYear, startingWeek, defaultDay) - perWeekStreamStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) + 
perWeekStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), perWeekFileLayout) expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr) require.NoError(t, err) @@ -620,6 +1084,62 @@ func TestStreamUserServiceLogsPerWeekReturnsTimestampedLogLines(t *testing.T) { } } +func TestStreamUserServiceLogsPerHourReturnsTimestampedLogLines(t *testing.T) { + expectedAmountLogLines := 3 + + expectedServiceAmountLogLinesByServiceUuid := map[service.ServiceUUID]int{ + testUserService1Uuid: expectedAmountLogLines, + } + + var logLinesFilters []logline.LogLineFilter + + userServiceUuids := map[service.ServiceUUID]bool{ + testUserService1Uuid: true, + } + + timedLogLine1 := fmt.Sprintf("{\"log\":\"Starting feature 'centralized logs'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + timedLogLine2 := fmt.Sprintf("{\"log\":\"Starting feature 'runs idempotently'\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + timedLogLine3 := fmt.Sprintf("{\"log\":\"The enclave was created\", \"timestamp\":\"%v\"}", defaultUTCTimestampStr) + + timestampedLogLines := []string{timedLogLine1, timedLogLine2, timedLogLine3} + timestampedLogLinesStr := strings.Join(timestampedLogLines, "\n") + "\n" + + underlyingFs := volume_filesystem.NewMockedVolumeFilesystem() + mockTime := logs_clock.NewMockLogsClockPerHour(defaultYear, startingWeek, defaultDay, defaultHour) + perHourFileLayout := file_layout.NewPerHourFileLayout(mockTime, volume_consts.LogsStorageDirpath) + + filepath := perHourFileLayout.GetLogFilePath(mockTime.Now(), testEnclaveUuid, testUserService1Uuid) + file, err := underlyingFs.Create(filepath) + require.NoError(t, err) + _, err = file.WriteString(timestampedLogLinesStr) + require.NoError(t, err) + + perHourStreamStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), 
perHourFileLayout) + + expectedTime, err := time.Parse(utcFormat, defaultUTCTimestampStr) + require.NoError(t, err) + + receivedUserServiceLogsByUuid, testEvaluationErr := executeStreamCallAndGetReceivedServiceLogLines( + t, + logLinesFilters, + userServiceUuids, + expectedServiceAmountLogLinesByServiceUuid, + doNotFollowLogs, + underlyingFs, + perHourStreamStrategy, + ) + require.NoError(t, testEvaluationErr) + + for serviceUuid, serviceLogLines := range receivedUserServiceLogsByUuid { + expectedAmountLogLines, found := expectedServiceAmountLogLinesByServiceUuid[serviceUuid] + require.True(t, found) + require.Equal(t, expectedAmountLogLines, len(serviceLogLines)) + for _, logLine := range serviceLogLines { + require.Equal(t, expectedTime, logLine.GetTimestamp()) + } + } +} + func TestStreamUserServiceLogsPerFileReturnsTimestampedLogLines(t *testing.T) { expectedAmountLogLines := 3 @@ -696,10 +1216,8 @@ func executeStreamCallAndGetReceivedServiceLogLines( kurtosisBackend := backend_interface.NewMockKurtosisBackend(t) - // no log file management is done in these tests so values for logFileManager aren't important - mockTime := logs_clock.NewMockLogsClock(0, 0, 0) - fileLayout := file_layout.NewPerWeekFileLayout(mockTime) - logFileManager := log_file_manager.NewLogFileManager(kurtosisBackend, underlyingFs, fileLayout, mockTime, 0) + // no log file management is done in these tests so values provided to logFileManager aren't important + logFileManager := log_file_manager.NewLogFileManager(nil, nil, nil, nil, time.Duration(0), "") logsDatabaseClient := NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, underlyingFs, logFileManager, streamStrategy) userServiceLogsByUuidChan, errChan, receivedCancelCtxFunc, err := logsDatabaseClient.StreamUserServiceLogs(ctx, enclaveUuid, userServiceUuids, logLinesFilters, shouldFollowLogs, defaultShouldReturnAllLogs, defaultNumLogLines) @@ -755,14 +1273,13 @@ func executeStreamCallAndGetReceivedServiceLogLines( return 
receivedServiceLogsByUuid, nil } -func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem { +func createFilledFilesystem(fileLayout file_layout.LogFileLayout, time time.Time) volume_filesystem.VolumeFilesystem { logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} - logLinesStr := strings.Join(logLines, "\n") - file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) - file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) - file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) + file1PathStr := fileLayout.GetLogFilePath(time, testEnclaveUuid, testUserService1Uuid) + file2PathStr := fileLayout.GetLogFilePath(time, testEnclaveUuid, testUserService2Uuid) + file3PathStr := fileLayout.GetLogFilePath(time, testEnclaveUuid, testUserService3Uuid) mapFs := volume_filesystem.NewMockedVolumeFilesystem() @@ -778,26 +1295,16 @@ func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem { return mapFs } -func createFilledPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem { - logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} - - logLinesStr := strings.Join(logLines, "\n") - // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format - formattedWeekNum := fmt.Sprintf("%02d", week) - file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) - file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, 
strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) - file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) +func createEmptyFilesystem(fileLayout file_layout.LogFileLayout, time time.Time) volume_filesystem.VolumeFilesystem { + file1PathStr := fileLayout.GetLogFilePath(time, testEnclaveUuid, testUserService1Uuid) + file2PathStr := fileLayout.GetLogFilePath(time, testEnclaveUuid, testUserService2Uuid) + file3PathStr := fileLayout.GetLogFilePath(time, testEnclaveUuid, testUserService3Uuid) mapFs := volume_filesystem.NewMockedVolumeFilesystem() - file1, _ := mapFs.Create(file1PathStr) - _, _ = file1.WriteString(logLinesStr) - - file2, _ := mapFs.Create(file2PathStr) - _, _ = file2.WriteString(logLinesStr) - - file3, _ := mapFs.Create(file3PathStr) - _, _ = file3.WriteString(logLinesStr) + _, _ = mapFs.Create(file1PathStr) + _, _ = mapFs.Create(file2PathStr) + _, _ = mapFs.Create(file3PathStr) return mapFs } @@ -816,18 +1323,25 @@ func createEmptyPerFileFilesystem() volume_filesystem.VolumeFilesystem { return mapFs } -func createEmptyPerWeekFilesystem(week int) volume_filesystem.VolumeFilesystem { - // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format - formattedWeekNum := fmt.Sprintf("%02d", week) - file1PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) - file2PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) - file3PathStr := fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, 
strconv.Itoa(defaultYear), formattedWeekNum, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) +func createFilledPerFileFilesystem() volume_filesystem.VolumeFilesystem { + logLines := []string{logLine1, logLine2, logLine3a, logLine3b, logLine4, logLine5, logLine6, logLine7, logLine8} + + logLinesStr := strings.Join(logLines, "\n") + + file1PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) + file2PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService2Uuid, volume_consts.Filetype) + file3PathStr := fmt.Sprintf(volume_consts.PerFileFmtStr, volume_consts.LogsStorageDirpath, testEnclaveUuid, testUserService3Uuid, volume_consts.Filetype) mapFs := volume_filesystem.NewMockedVolumeFilesystem() - _, _ = mapFs.Create(file1PathStr) - _, _ = mapFs.Create(file2PathStr) - _, _ = mapFs.Create(file3PathStr) + file1, _ := mapFs.Create(file1PathStr) + _, _ = file1.WriteString(logLinesStr) + + file2, _ := mapFs.Create(file2PathStr) + _, _ = file2.WriteString(logLinesStr) + + file3, _ := mapFs.Create(file3PathStr) + _, _ = file3.WriteString(logLinesStr) return mapFs } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go index b322e8c214..98830c002e 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_file_stream_logs_strategy.go @@ -17,6 +17,7 @@ import ( "strings" ) +// TODO: Remove after FileLayout and new StreamLogStrategyImpl are merged // This strategy pulls logs from filesytsem where 
there is a log file per enclave, per service type PerFileStreamLogsStrategy struct { } diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy_test.go deleted file mode 100644 index 6339688daa..0000000000 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy_test.go +++ /dev/null @@ -1,518 +0,0 @@ -package stream_logs_strategy - -import ( - "bufio" - "fmt" - "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" - "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" - "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem" - "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/logline" - "github.com/stretchr/testify/require" - "io" - "strconv" - "strings" - "testing" - "time" -) - -const ( - testEnclaveUuid = "test-enclave" - testUserService1Uuid = "test-user-service-1" - - retentionPeriodInWeeksForTesting = 5 - - defaultYear = 2023 - defaultDay = 0 // sunday -) - -// TODO: migrate GetLogFilePaths tests to FileLayout interface when it is fully merged -// for now, leave them duplicated so there's an extra layer of testing as the migration happens -func TestGetLogFilePaths(t *testing.T) { - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // ../week/enclave uuid/service uuid.json - week12filepath := getWeekFilepathStr(defaultYear, 12) - week13filepath := getWeekFilepathStr(defaultYear, 13) - week14filepath := getWeekFilepathStr(defaultYear, 14) - week15filepath := getWeekFilepathStr(defaultYear, 15) - 
week16filepath := getWeekFilepathStr(defaultYear, 16) - week17filepath := getWeekFilepathStr(defaultYear, 17) - - _, _ = filesystem.Create(week12filepath) - _, _ = filesystem.Create(week13filepath) - _, _ = filesystem.Create(week14filepath) - _, _ = filesystem.Create(week15filepath) - _, _ = filesystem.Create(week16filepath) - _, _ = filesystem.Create(week17filepath) - - currentWeek := 17 - - expectedLogFilePaths := []string{ - week13filepath, - week14filepath, - week15filepath, - week16filepath, - week17filepath, - } - - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriodInWeeksForTesting, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) - for i, filePath := range expectedLogFilePaths { - require.Equal(t, filePath, logFilePaths[i]) - } -} - -func TestGetLogFilePathsAcrossNewYear(t *testing.T) { - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // ../week/enclave uuid/service uuid.json - week50filepath := getWeekFilepathStr(defaultYear-1, 50) - week51filepath := getWeekFilepathStr(defaultYear-1, 51) - week52filepath := getWeekFilepathStr(defaultYear-1, 52) - week1filepath := getWeekFilepathStr(defaultYear, 1) - week2filepath := getWeekFilepathStr(defaultYear, 2) - - _, _ = filesystem.Create(week50filepath) - _, _ = filesystem.Create(week51filepath) - _, _ = filesystem.Create(week52filepath) - _, _ = filesystem.Create(week1filepath) - _, _ = filesystem.Create(week2filepath) - - currentWeek := 2 - - expectedLogFilePaths := []string{ - week50filepath, - week51filepath, - week52filepath, - week1filepath, - week2filepath, - } - - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - strategy := NewPerWeekStreamLogsStrategy(mockTime, 
retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriodInWeeksForTesting, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) - for i, filePath := range expectedLogFilePaths { - require.Equal(t, filePath, logFilePaths[i]) - } -} - -func TestGetLogFilePathsAcrossNewYearWith53Weeks(t *testing.T) { - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // According to ISOWeek, 2015 has 53 weeks - week52filepath := getWeekFilepathStr(2015, 52) - week53filepath := getWeekFilepathStr(2015, 53) - week1filepath := getWeekFilepathStr(2016, 1) - week2filepath := getWeekFilepathStr(2016, 2) - week3filepath := getWeekFilepathStr(2016, 3) - - _, _ = filesystem.Create(week52filepath) - _, _ = filesystem.Create(week53filepath) - _, _ = filesystem.Create(week1filepath) - _, _ = filesystem.Create(week2filepath) - _, _ = filesystem.Create(week3filepath) - - currentWeek := 3 - - expectedLogFilePaths := []string{ - week52filepath, - week53filepath, - week1filepath, - week2filepath, - week3filepath, - } - - mockTime := logs_clock.NewMockLogsClock(2016, currentWeek, 1) - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriodInWeeksForTesting, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) - for i, filePath := range expectedLogFilePaths { - require.Equal(t, filePath, logFilePaths[i]) - } -} - -func TestGetLogFilePathsWithDiffRetentionPeriod(t *testing.T) { - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // ../week/enclave uuid/service uuid.json - week52filepath := getWeekFilepathStr(defaultYear-1, 52) - week1filepath := getWeekFilepathStr(defaultYear, 1) - week2filepath := getWeekFilepathStr(defaultYear, 2) - - _, _ = 
filesystem.Create(week52filepath) - _, _ = filesystem.Create(week1filepath) - _, _ = filesystem.Create(week2filepath) - - currentWeek := 2 - retentionPeriod := 3 - - expectedLogFilePaths := []string{ - week52filepath, - week1filepath, - week2filepath, - } - - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriod, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Equal(t, len(expectedLogFilePaths), len(logFilePaths)) - for i, filePath := range expectedLogFilePaths { - require.Equal(t, filePath, logFilePaths[i]) - } -} - -func TestGetLogFilePathsReturnsAllAvailableWeeks(t *testing.T) { - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // ../week/enclave uuid/service uuid.json - week52filepath := getWeekFilepathStr(defaultYear-1, 52) - week1filepath := getWeekFilepathStr(defaultYear, 1) - week2filepath := getWeekFilepathStr(defaultYear, 2) - - _, _ = filesystem.Create(week52filepath) - _, _ = filesystem.Create(week1filepath) - _, _ = filesystem.Create(week2filepath) - - // should return existing file paths even though log files going all the back to retention period don't exist - expectedLogFilePaths := []string{ - week52filepath, - week1filepath, - week2filepath, - } - - currentWeek := 2 - - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriodInWeeksForTesting, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Less(t, len(logFilePaths), retentionPeriodInWeeksForTesting) - for i, filePath := range expectedLogFilePaths { - require.Equal(t, filePath, logFilePaths[i]) - } -} - -func 
TestGetLogFilePathsReturnsCorrectPathsIfWeeksMissingInBetween(t *testing.T) { - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // ../week/enclave uuid/service uuid.json - week52filepath := getWeekFilepathStr(defaultYear, 0) - week1filepath := getWeekFilepathStr(defaultYear, 1) - week3filepath := getWeekFilepathStr(defaultYear, 3) - - _, _ = filesystem.Create(week52filepath) - _, _ = filesystem.Create(week1filepath) - _, _ = filesystem.Create(week3filepath) - - currentWeek := 3 - - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriodInWeeksForTesting, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Len(t, logFilePaths, 1) - require.Equal(t, week3filepath, logFilePaths[0]) // should only return week 3 because week 2 is missing -} - -func TestGetLogFilePathsReturnsCorrectPathsIfCurrentWeekHasNoLogsYet(t *testing.T) { - // currently in week 3 - currentWeek := 3 - mockTime := logs_clock.NewMockLogsClock(defaultYear, currentWeek, defaultDay) - - filesystem := volume_filesystem.NewMockedVolumeFilesystem() - - // ../week/enclave uuid/service uuid.json - week1filepath := getWeekFilepathStr(defaultYear, 1) - week2filepath := getWeekFilepathStr(defaultYear, 2) - - // no logs for week current week exist yet - _, _ = filesystem.Create(week1filepath) - _, _ = filesystem.Create(week2filepath) - - // should return week 1 and 2 logs, even though no logs for current week yet - expectedLogFilePaths := []string{ - week1filepath, - week2filepath, - } - - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - logFilePaths, err := strategy.getLogFilePaths(filesystem, retentionPeriodInWeeksForTesting, testEnclaveUuid, testUserService1Uuid) - - require.NoError(t, err) - require.Equal(t, len(expectedLogFilePaths), 
len(logFilePaths)) - for i, filePath := range expectedLogFilePaths { - require.Equal(t, filePath, logFilePaths[i]) - } -} - -func TestIsWithinRetentionPeriod(t *testing.T) { - // this is the 36th week of the year - jsonLogLine := map[string]string{ - "timestamp": "2023-09-06T00:35:15-04:00", - } - - // week 41 would put the log line outside the retention period - mockTime := logs_clock.NewMockLogsClock(2023, 41, 0) - strategy := NewPerWeekStreamLogsStrategy(mockTime, retentionPeriodInWeeksForTesting) - - timestamp, err := parseTimestampFromJsonLogLine(jsonLogLine) - require.NoError(t, err) - logLine := logline.NewLogLine("", *timestamp) - - isWithinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine) - - require.NoError(t, err) - require.False(t, isWithinRetentionPeriod) -} - -func getWeekFilepathStr(year, week int) string { - // %02d to format week num with leading zeros so 1-9 are converted to 01-09 for %V format - formattedWeekNum := fmt.Sprintf("%02d", week) - return fmt.Sprintf(volume_consts.PerWeekFilePathFmtStr, volume_consts.LogsStorageDirpath, strconv.Itoa(year), formattedWeekNum, testEnclaveUuid, testUserService1Uuid, volume_consts.Filetype) -} - -func TestGetCompleteJsonLogString(t *testing.T) { - logLine1 := "{\"log\":\"Starting feature 'runs idempotently'\"}" - logLine2a := "{\"log\":\"Starting feature 'apic " - logLine2b := "idempotently'\"}" - - logs := strings.Join([]string{logLine1, logLine2a, logLine2b}, string(volume_consts.NewLineRune)) - logsReader := bufio.NewReader(strings.NewReader(logs)) - - var jsonLogStr string - var err error - - // First read - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.NoError(t, err) - require.Equal(t, logLine1, jsonLogStr) - - // Second read - logLine2 := "{\"log\":\"Starting feature 'apic idempotently'\"}" - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.Equal(t, logLine2, jsonLogStr) -} - -func 
TestGetCompleteJsonLogStringAcrossManyCompleteLines(t *testing.T) { - logLine1 := "{\"log\":\"Starting feature 'files manager'\"}" - logLine2 := "{\"log\":\"The enclave was created\"}" - logLine3 := "{\"log\":\"User service started\"}" - logLine4 := "{\"log\":\"The data have being loaded\"}" - - logs := strings.Join([]string{logLine1, logLine2, logLine3, logLine4}, string(volume_consts.NewLineRune)) - logsReader := bufio.NewReader(strings.NewReader(logs)) - - var jsonLogStr string - var err error - - // First read - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.NoError(t, err) - require.Equal(t, logLine1, jsonLogStr) - - // Second read - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.NoError(t, err) - require.Equal(t, logLine2, jsonLogStr) - - // Fourth read - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.NoError(t, err) - require.Equal(t, logLine3, jsonLogStr) - - // Last read - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.Equal(t, logLine4, jsonLogStr) -} - -func TestGetCompleteJsonLogStringAcrossManyBrokenLines(t *testing.T) { - logLine1a := "{\"log\":\"Starting" - logLine1b := " feature " - logLine1c := "'runs " - logLine1d := "idempotently'\"}" - - logs := strings.Join([]string{logLine1a, logLine1b, logLine1c, logLine1d}, string(volume_consts.NewLineRune)) - logsReader := bufio.NewReader(strings.NewReader(logs)) - - var jsonLogStr string - var err error - - logLine1 := "{\"log\":\"Starting feature 'runs idempotently'\"}" - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.Equal(t, logLine1, jsonLogStr) -} - -func TestGetCompleteJsonLogStringWithNoValidJsonEnding(t *testing.T) { - logLine1 := "{\"log\":\"Starting idempotently'\"" - - logsReader := bufio.NewReader(strings.NewReader(logLine1)) - - var jsonLogStr string - var err error - - // this will end up 
in an infinite loop, bc [getCompleteJsonLogString] keeps looping till it finds EOF or complete json - jsonLogStr, err = getCompleteJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.Equal(t, logLine1, jsonLogStr) -} - -func TestGetJsonLogString(t *testing.T) { - logLine1 := "{\"log\":\"Starting feature 'centralized logs'\"}" - logLine2 := "{\"log\":\"Starting feature 'runs idempotently'\"}" - logLine3a := "{\"log\":\"Starting feature 'apic " - logLine3b := "idempotently'\"}" - - logs := strings.Join([]string{logLine1, logLine2, logLine3a, logLine3b}, string(volume_consts.NewLineRune)) - logsReader := bufio.NewReader(strings.NewReader(logs)) - - var jsonLogStr string - var isComplete bool - var err error - - // First read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.NoError(t, err) - require.True(t, isComplete) - require.Equal(t, logLine1, jsonLogStr) - - // Second read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.NoError(t, err) - require.True(t, isComplete) - require.Equal(t, logLine2, jsonLogStr) - - // Third read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.NoError(t, err) - require.False(t, isComplete) - require.Equal(t, logLine3a, jsonLogStr) - - // Last read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.True(t, isComplete) - require.Equal(t, logLine3b, jsonLogStr) -} - -func TestGetJsonLogStringWithEOFAndNoNewLine(t *testing.T) { - logLine1a := "{\"log\":\"Starting feature 'apic " - logLine1b := "idempotently'\"}" - - logs := logLine1a + "\n" + logLine1b - logsReader := bufio.NewReader(strings.NewReader(logs)) - - var jsonLogStr string - var isComplete bool - var err error - - // First read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.NoError(t, err) - require.False(t, isComplete) - require.Equal(t, logLine1a, jsonLogStr) - - // 
Second read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.True(t, isComplete) - require.Equal(t, logLine1b, jsonLogStr) -} - -func TestGetJsonLogStringWithEOFAndNoValidJsonEnding(t *testing.T) { - logLine1 := "{\"log\":\"Starting feature 'centralized logs'\"" - - logsReader := bufio.NewReader(strings.NewReader(logLine1)) - - var jsonLogStr string - var isComplete bool - var err error - - // First read - jsonLogStr, isComplete, err = getJsonLogString(logsReader) - require.Error(t, err) - require.ErrorIs(t, io.EOF, err) - require.False(t, isComplete) - require.Equal(t, logLine1, jsonLogStr) -} - -func TestParseTimestampFromJsonLogLineReturnsTime(t *testing.T) { - timestampStr := "2023-09-06T00:35:15Z" // utc timestamp - jsonLogLine := map[string]string{ - "timestamp": timestampStr, - } - - expectedTime, err := time.Parse(time.RFC3339, timestampStr) - require.NoError(t, err) - - actualTime, err := parseTimestampFromJsonLogLine(jsonLogLine) - - require.NoError(t, err) - require.Equal(t, expectedTime, *actualTime) -} - -func TestParseTimestampFromJsonLogLineWithOffsetReturnsTime(t *testing.T) { - timestampStr := "2023-09-06T00:35:15-04:00" // utc timestamp with offset '-4:00' - jsonLogLine := map[string]string{ - "timestamp": timestampStr, - } - - expectedTime, err := time.Parse(time.RFC3339, timestampStr) - require.NoError(t, err) - - actualTime, err := parseTimestampFromJsonLogLine(jsonLogLine) - - require.NoError(t, err) - require.Equal(t, expectedTime, *actualTime) -} - -func TestParseTimestampFromJsonLogLineWithIncorrectlyFormattedTimeReturnsError(t *testing.T) { - timestampStr := "2023-09-06" // not UTC formatted timestamp str - jsonLogLine := map[string]string{ - "timestamp": timestampStr, - } - - _, err := parseTimestampFromJsonLogLine(jsonLogLine) - - require.Error(t, err) -} - -func TestParseTimestampFromJsonLogLineWithoutTimezoneReturnsError(t *testing.T) { - timestampStr := 
"2023-09-06T00:35:15" // no utc timezone indicator or offset to indicate timezone - jsonLogLine := map[string]string{ - "timestamp": timestampStr, - } - - _, err := parseTimestampFromJsonLogLine(jsonLogLine) - - require.Error(t, err) -} - -func TestParseTimestampFromJsonLogLineWithNoTimestampFieldReturnsError(t *testing.T) { - jsonLogLine := map[string]string{} - - _, err := parseTimestampFromJsonLogLine(jsonLogLine) - - require.Error(t, err) -} diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy_impl.go similarity index 82% rename from engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go rename to engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy_impl.go index 26d7be80ae..70351b6318 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/per_week_stream_logs_strategy.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy_impl.go @@ -22,27 +22,29 @@ import ( ) const ( - oneWeek = 7 * 24 * time.Hour + nowToRetentionPeriod = -1 ) -// PerWeekStreamLogsStrategy pulls logs from filesystem where there is a log file per year, per week, per enclave, per service +// StreamLogsStrategyImpl pulls logs from filesystem where there is a log file per year, per week, per enclave, per service // Weeks are denoted 01-52 // e.g. 
// [.../28/d3e8832d671f/61830789f03a.json] is the file containing logs from service with uuid 61830789f03a, in enclave with uuid d3e8832d671f, // in the 28th week of the current year -type PerWeekStreamLogsStrategy struct { - time logs_clock.LogsClock - logRetentionPeriodInWeeks int +type StreamLogsStrategyImpl struct { + time logs_clock.LogsClock + logRetentionPeriod time.Duration + fileLayout file_layout.LogFileLayout } -func NewPerWeekStreamLogsStrategy(time logs_clock.LogsClock, logRetentionPeriodInWeeks int) *PerWeekStreamLogsStrategy { - return &PerWeekStreamLogsStrategy{ - time: time, - logRetentionPeriodInWeeks: logRetentionPeriodInWeeks, +func NewStreamLogsStrategyImpl(time logs_clock.LogsClock, logRetentionPeriod time.Duration, fileLayout file_layout.LogFileLayout) *StreamLogsStrategyImpl { + return &StreamLogsStrategyImpl{ + time: time, + logRetentionPeriod: logRetentionPeriod, + fileLayout: fileLayout, } } -func (strategy *PerWeekStreamLogsStrategy) StreamLogs( +func (strategy *StreamLogsStrategyImpl) StreamLogs( ctx context.Context, fs volume_filesystem.VolumeFilesystem, logLineSender *logline.LogLineSender, @@ -54,7 +56,7 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs( shouldReturnAllLogs bool, numLogLines uint32, ) { - paths, err := strategy.getLogFilePaths(fs, strategy.logRetentionPeriodInWeeks, string(enclaveUuid), string(serviceUuid)) + paths, err := strategy.fileLayout.GetLogFilePaths(fs, strategy.logRetentionPeriod, nowToRetentionPeriod, string(enclaveUuid), string(serviceUuid)) if err != nil { streamErrChan <- stacktrace.Propagate(err, "An error occurred retrieving log file paths for service '%v' in enclave '%v'.", serviceUuid, enclaveUuid) return @@ -67,12 +69,6 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs( serviceUuid, enclaveUuid) return } - if len(paths) > strategy.logRetentionPeriodInWeeks { - logrus.Warnf( - `We expected to retrieve logs going back '%v' weeks, but instead retrieved logs going back '%v' weeks. 
- This means logs past the retention period are being returned, likely a bug in Kurtosis.`, - strategy.logRetentionPeriodInWeeks, len(paths)) - } logsReader, files, err := getLogsReader(fs, paths) if err != nil { @@ -109,19 +105,6 @@ func (strategy *PerWeekStreamLogsStrategy) StreamLogs( } } -// [getLogFilePaths] returns a list of log file paths containing logs for [serviceUuid] in [enclaveUuid] -// going [retentionPeriodInWeeks] back from the [currentWeek]. -// Notes: -// - File paths are of the format '/week/enclave uuid/service uuid.json' where 'week' is %V strftime specifier -// - The list of file paths is returned in order of oldest logs to most recent logs e.g. [ 03/80124/1234.json, /04/801234/1234.json, ...] -// - If a file path does not exist, the function with exits and returns whatever file paths were found -func (strategy *PerWeekStreamLogsStrategy) getLogFilePaths(filesystem volume_filesystem.VolumeFilesystem, retentionPeriodInWeeks int, enclaveUuid, serviceUuid string) ([]string, error) { - // TODO: embed FileLayout into StreamLogsStrategy interface - perWeekFileLayout := file_layout.NewPerWeekFileLayout(strategy.time) - retentionPeriod := time.Duration(retentionPeriodInWeeks) * oneWeek - return perWeekFileLayout.GetLogFilePaths(filesystem, retentionPeriod, -1, enclaveUuid, serviceUuid) -} - // Returns a Reader over all logs in [logFilePaths] and the open file descriptors of the associated [logFilePaths] func getLogsReader(filesystem volume_filesystem.VolumeFilesystem, logFilePaths []string) (*bufio.Reader, []volume_filesystem.VolumeFile, error) { var fileReaders []io.Reader @@ -143,7 +126,7 @@ func getLogsReader(filesystem volume_filesystem.VolumeFilesystem, logFilePaths [ return bufio.NewReader(combinedLogsReader), files, nil } -func (strategy *PerWeekStreamLogsStrategy) streamAllLogs( +func (strategy *StreamLogsStrategyImpl) streamAllLogs( ctx context.Context, logsReader *bufio.Reader, logLineSender *logline.LogLineSender, @@ -181,7 +164,7 @@ func 
(strategy *PerWeekStreamLogsStrategy) streamAllLogs( } // tail -n X -func (strategy *PerWeekStreamLogsStrategy) streamTailLogs( +func (strategy *StreamLogsStrategyImpl) streamTailLogs( ctx context.Context, logsReader *bufio.Reader, numLogLines uint32, @@ -266,7 +249,7 @@ func isValidJsonEnding(line string) bool { return endOfLine == volume_consts.EndOfJsonLine } -func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(jsonLog JsonLog, conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex, logLineSender *logline.LogLineSender, serviceUuid service.ServiceUUID) error { +func (strategy *StreamLogsStrategyImpl) sendJsonLogLine(jsonLog JsonLog, conjunctiveLogLinesFiltersWithRegex []logline.LogLineFilterWithRegex, logLineSender *logline.LogLineSender, serviceUuid service.ServiceUUID) error { // each logLineStr is of the following structure: {"enclave_uuid": "...", "service_uuid":"...", "log": "...",.. "timestamp":"..."} // eg. {"container_type":"api-container", "container_id":"8f8558ba", "container_name":"/kurtosis-api--ffd", // "log":"hi","timestamp":"2023-08-14T14:57:49Z"} @@ -307,14 +290,14 @@ func (strategy *PerWeekStreamLogsStrategy) sendJsonLogLine(jsonLog JsonLog, conj } // Returns true if [logLine] has no timestamp -func (strategy *PerWeekStreamLogsStrategy) isWithinRetentionPeriod(logLine *logline.LogLine) (bool, error) { - retentionPeriod := strategy.time.Now().Add(time.Duration(-strategy.logRetentionPeriodInWeeks) * oneWeek) +func (strategy *StreamLogsStrategyImpl) isWithinRetentionPeriod(logLine *logline.LogLine) (bool, error) { + oldestTimeWithinRetentionPeriod := strategy.time.Now().Add(-strategy.logRetentionPeriod) timestamp := logLine.GetTimestamp() - return timestamp.After(retentionPeriod), nil + return timestamp.After(oldestTimeWithinRetentionPeriod), nil } // Continue streaming log lines as they are written to log file (tail -f [filepath]) -func (strategy *PerWeekStreamLogsStrategy) followLogs( +func (strategy *StreamLogsStrategyImpl) 
followLogs( ctx context.Context, filepath string, logLineSender *logline.LogLineSender, diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy_impl_test.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy_impl_test.go new file mode 100644 index 0000000000..3fae808e3d --- /dev/null +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/stream_logs_strategy/stream_logs_strategy_impl_test.go @@ -0,0 +1,272 @@ +package stream_logs_strategy + +import ( + "bufio" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/logs_clock" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/persistent_volume_helpers" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/logline" + "github.com/stretchr/testify/require" + "io" + "strings" + "testing" + "time" +) + +const ( + retentionPeriodInWeeksForTesting = 5 +) + +func TestIsWithinRetentionPeriod(t *testing.T) { + // this is the 36th week of the year + jsonLogLine := map[string]string{ + "timestamp": "2023-09-06T00:35:15-04:00", + } + + // week 41 would put the log line outside the retention period + mockTime := logs_clock.NewMockLogsClockPerDay(2023, 41, 0) + strategy := NewStreamLogsStrategyImpl(mockTime, persistent_volume_helpers.ConvertWeeksToDuration(retentionPeriodInWeeksForTesting), file_layout.NewPerWeekFileLayout(mockTime, volume_consts.LogsStorageDirpath)) + + timestamp, err := parseTimestampFromJsonLogLine(jsonLogLine) + require.NoError(t, 
err) + logLine := logline.NewLogLine("", *timestamp) + + isWithinRetentionPeriod, err := strategy.isWithinRetentionPeriod(logLine) + + require.NoError(t, err) + require.False(t, isWithinRetentionPeriod) +} + +func TestGetCompleteJsonLogString(t *testing.T) { + logLine1 := "{\"log\":\"Starting feature 'runs idempotently'\"}" + logLine2a := "{\"log\":\"Starting feature 'apic " + logLine2b := "idempotently'\"}" + + logs := strings.Join([]string{logLine1, logLine2a, logLine2b}, string(volume_consts.NewLineRune)) + logsReader := bufio.NewReader(strings.NewReader(logs)) + + var jsonLogStr string + var err error + + // First read + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.NoError(t, err) + require.Equal(t, logLine1, jsonLogStr) + + // Second read + logLine2 := "{\"log\":\"Starting feature 'apic idempotently'\"}" + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.Equal(t, logLine2, jsonLogStr) +} + +func TestGetCompleteJsonLogStringAcrossManyCompleteLines(t *testing.T) { + logLine1 := "{\"log\":\"Starting feature 'files manager'\"}" + logLine2 := "{\"log\":\"The enclave was created\"}" + logLine3 := "{\"log\":\"User service started\"}" + logLine4 := "{\"log\":\"The data have being loaded\"}" + + logs := strings.Join([]string{logLine1, logLine2, logLine3, logLine4}, string(volume_consts.NewLineRune)) + logsReader := bufio.NewReader(strings.NewReader(logs)) + + var jsonLogStr string + var err error + + // First read + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.NoError(t, err) + require.Equal(t, logLine1, jsonLogStr) + + // Second read + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.NoError(t, err) + require.Equal(t, logLine2, jsonLogStr) + + // Third read + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.NoError(t, err) + require.Equal(t, logLine3, jsonLogStr) + + // Last read + jsonLogStr, err = 
getCompleteJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.Equal(t, logLine4, jsonLogStr) +} + +func TestGetCompleteJsonLogStringAcrossManyBrokenLines(t *testing.T) { + logLine1a := "{\"log\":\"Starting" + logLine1b := " feature " + logLine1c := "'runs " + logLine1d := "idempotently'\"}" + + logs := strings.Join([]string{logLine1a, logLine1b, logLine1c, logLine1d}, string(volume_consts.NewLineRune)) + logsReader := bufio.NewReader(strings.NewReader(logs)) + + var jsonLogStr string + var err error + + logLine1 := "{\"log\":\"Starting feature 'runs idempotently'\"}" + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.Equal(t, logLine1, jsonLogStr) +} + +func TestGetCompleteJsonLogStringWithNoValidJsonEnding(t *testing.T) { + logLine1 := "{\"log\":\"Starting idempotently'\"" + + logsReader := bufio.NewReader(strings.NewReader(logLine1)) + + var jsonLogStr string + var err error + + // this will end up in an infinite loop, bc [getCompleteJsonLogString] keeps looping till it finds EOF or complete json + jsonLogStr, err = getCompleteJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.Equal(t, logLine1, jsonLogStr) +} + +func TestGetJsonLogString(t *testing.T) { + logLine1 := "{\"log\":\"Starting feature 'centralized logs'\"}" + logLine2 := "{\"log\":\"Starting feature 'runs idempotently'\"}" + logLine3a := "{\"log\":\"Starting feature 'apic " + logLine3b := "idempotently'\"}" + + logs := strings.Join([]string{logLine1, logLine2, logLine3a, logLine3b}, string(volume_consts.NewLineRune)) + logsReader := bufio.NewReader(strings.NewReader(logs)) + + var jsonLogStr string + var isComplete bool + var err error + + // First read + jsonLogStr, isComplete, err = getJsonLogString(logsReader) + require.NoError(t, err) + require.True(t, isComplete) + require.Equal(t, logLine1, jsonLogStr) + + // Second read + jsonLogStr, 
isComplete, err = getJsonLogString(logsReader) + require.NoError(t, err) + require.True(t, isComplete) + require.Equal(t, logLine2, jsonLogStr) + + // Third read + jsonLogStr, isComplete, err = getJsonLogString(logsReader) + require.NoError(t, err) + require.False(t, isComplete) + require.Equal(t, logLine3a, jsonLogStr) + + // Last read + jsonLogStr, isComplete, err = getJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.True(t, isComplete) + require.Equal(t, logLine3b, jsonLogStr) +} + +func TestGetJsonLogStringWithEOFAndNoNewLine(t *testing.T) { + logLine1a := "{\"log\":\"Starting feature 'apic " + logLine1b := "idempotently'\"}" + + logs := logLine1a + "\n" + logLine1b + logsReader := bufio.NewReader(strings.NewReader(logs)) + + var jsonLogStr string + var isComplete bool + var err error + + // First read + jsonLogStr, isComplete, err = getJsonLogString(logsReader) + require.NoError(t, err) + require.False(t, isComplete) + require.Equal(t, logLine1a, jsonLogStr) + + // Second read + jsonLogStr, isComplete, err = getJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.True(t, isComplete) + require.Equal(t, logLine1b, jsonLogStr) +} + +func TestGetJsonLogStringWithEOFAndNoValidJsonEnding(t *testing.T) { + logLine1 := "{\"log\":\"Starting feature 'centralized logs'\"" + + logsReader := bufio.NewReader(strings.NewReader(logLine1)) + + var jsonLogStr string + var isComplete bool + var err error + + // First read + jsonLogStr, isComplete, err = getJsonLogString(logsReader) + require.Error(t, err) + require.ErrorIs(t, io.EOF, err) + require.False(t, isComplete) + require.Equal(t, logLine1, jsonLogStr) +} + +func TestParseTimestampFromJsonLogLineReturnsTime(t *testing.T) { + timestampStr := "2023-09-06T00:35:15Z" // utc timestamp + jsonLogLine := map[string]string{ + "timestamp": timestampStr, + } + + expectedTime, err := time.Parse(time.RFC3339, timestampStr) + require.NoError(t, err) + 
+ actualTime, err := parseTimestampFromJsonLogLine(jsonLogLine) + + require.NoError(t, err) + require.Equal(t, expectedTime, *actualTime) +} + +func TestParseTimestampFromJsonLogLineWithOffsetReturnsTime(t *testing.T) { + timestampStr := "2023-09-06T00:35:15-04:00" // utc timestamp with offset '-4:00' + jsonLogLine := map[string]string{ + "timestamp": timestampStr, + } + + expectedTime, err := time.Parse(time.RFC3339, timestampStr) + require.NoError(t, err) + + actualTime, err := parseTimestampFromJsonLogLine(jsonLogLine) + + require.NoError(t, err) + require.Equal(t, expectedTime, *actualTime) +} + +func TestParseTimestampFromJsonLogLineWithIncorrectlyFormattedTimeReturnsError(t *testing.T) { + timestampStr := "2023-09-06" // not UTC formatted timestamp str + jsonLogLine := map[string]string{ + "timestamp": timestampStr, + } + + _, err := parseTimestampFromJsonLogLine(jsonLogLine) + + require.Error(t, err) +} + +func TestParseTimestampFromJsonLogLineWithoutTimezoneReturnsError(t *testing.T) { + timestampStr := "2023-09-06T00:35:15" // no utc timezone indicator or offset to indicate timezone + jsonLogLine := map[string]string{ + "timestamp": timestampStr, + } + + _, err := parseTimestampFromJsonLogLine(jsonLogLine) + + require.Error(t, err) +} + +func TestParseTimestampFromJsonLogLineWithNoTimestampFieldReturnsError(t *testing.T) { + jsonLogLine := map[string]string{} + + _, err := parseTimestampFromJsonLogLine(jsonLogLine) + + require.Error(t, err) +} diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts/consts.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts/consts.go index a2f53c054c..c0582c807b 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts/consts.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts/consts.go @@ -1,9 +1,5 @@ package volume_consts -import ( 
- "time" -) - const ( // Location of logs on the filesystem of the engine LogsStorageDirpath = "/var/log/kurtosis/" @@ -17,20 +13,6 @@ const ( EndOfJsonLine = "}" - // promise to keep 1 weeks of logs for users - LogRetentionPeriodInWeeks = 1 - - RemoveLogsWaitHours = 6 * time.Hour - - CreateLogsWaitMinutes = 1 * time.Minute - // basepath/enclave uuid/service uuid PerFileFmtStr = "%s%s/%s%s" - - // TODO: remove these from consts once PerWeekFileLayout is fully merged - // basepath /year/week - PerWeekDirPathStr = "%s%s/%s/" - - // ... enclave uuid/service uuid - PerWeekFilePathFmtStr = PerWeekDirPathStr + "%s/%s%s" ) diff --git a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem/volume_filesystem.go b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem/volume_filesystem.go index 72f2520ac0..0d03848883 100644 --- a/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem/volume_filesystem.go +++ b/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_filesystem/volume_filesystem.go @@ -4,6 +4,7 @@ import ( "github.com/spf13/afero" "io" "os" + "path/filepath" ) // VolumeFilesystem interface is an abstraction of the disk filesystem @@ -15,6 +16,7 @@ type VolumeFilesystem interface { RemoveAll(path string) error Remove(filepath string) error Symlink(target, link string) error + Walk(root string, walkFn filepath.WalkFunc) error } type VolumeFile interface { @@ -58,6 +60,10 @@ func (fs *OsVolumeFilesystem) Symlink(target, link string) error { return os.Symlink(target, link) } +func (fs *OsVolumeFilesystem) Walk(root string, fn filepath.WalkFunc) error { + return filepath.Walk(root, fn) +} + // MockedVolumeFilesystem is an implementation used for unit testing type MockedVolumeFilesystem struct { // uses an underlying map filesystem that's easy to mock file data with @@ -93,3 +99,7 @@ func (fs 
*MockedVolumeFilesystem) Symlink(target, link string) error { _, err := fs.mapFS.Create(link) return err } + +func (fs *MockedVolumeFilesystem) Walk(root string, fn filepath.WalkFunc) error { + return afero.Walk(fs.mapFS, root, fn) +} diff --git a/engine/server/engine/main.go b/engine/server/engine/main.go index 4670968c54..2e30563804 100644 --- a/engine/server/engine/main.go +++ b/engine/server/engine/main.go @@ -8,9 +8,10 @@ package main import ( "context" "fmt" + vector_consts "github.com/kurtosis-tech/kurtosis/container-engine-lib/lib/backend_impls/docker/docker_kurtosis_backend/logs_aggregator_functions/implementations/vector" "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/file_layout" + "github.com/kurtosis-tech/kurtosis/engine/server/engine/centralized_logs/client_implementations/persistent_volume/volume_consts" "io/fs" - "math" "net" "net/http" "os" @@ -180,7 +181,11 @@ func runMain() error { if err != nil { return stacktrace.Propagate(err, "An error occurred parsing a duration from provided log retention period string: %v", serverArgs.LogRetentionPeriod) } - logsDatabaseClient := getLogsDatabaseClient(serverArgs.KurtosisBackendType, kurtosisBackend, logRetentionPeriodDuration) + logsDatabaseClient, err := getLogsDatabaseClient(serverArgs.KurtosisBackendType, kurtosisBackend, logRetentionPeriodDuration) + if err != nil { + // already wrapped + return err + } logsDatabaseClient.StartLogFileManagement(ctx) enclaveManager, err := getEnclaveManager( @@ -405,27 +410,25 @@ func getKurtosisBackend(ctx context.Context, kurtosisBackendType args.KurtosisBa } // if cluster is docker, return logs client for centralized logging, otherwise use logs db of kurtosis backend which uses k8s logs under the hood -func getLogsDatabaseClient(kurtosisBackendType args.KurtosisBackendType, kurtosisBackend backend_interface.KurtosisBackend, logRetentionPeriod time.Duration) centralized_logs.LogsDatabaseClient { +func 
getLogsDatabaseClient(kurtosisBackendType args.KurtosisBackendType, kurtosisBackend backend_interface.KurtosisBackend, logRetentionPeriod time.Duration) (centralized_logs.LogsDatabaseClient, error) { var logsDatabaseClient centralized_logs.LogsDatabaseClient switch kurtosisBackendType { case args.KurtosisBackendType_Docker: realTime := logs_clock.NewRealClock() - logRetentionPeriodInWeeks := int(math.Ceil(logRetentionPeriod.Hours() / float64(numHoursInAWeek))) - if logRetentionPeriodInWeeks < 1 { - logRetentionPeriodInWeeks = 1 - } - logrus.Infof("Setting log retention period to '%v' week(s).", logRetentionPeriodInWeeks) + logrus.Infof("Setting log retention period to '%v' hour(s).", logRetentionPeriod.Hours()) osFs := volume_filesystem.NewOsVolumeFilesystem() - perWeekFileLayout := file_layout.NewPerWeekFileLayout(realTime) - logFileManager := log_file_manager.NewLogFileManager(kurtosisBackend, osFs, perWeekFileLayout, realTime, logRetentionPeriodInWeeks) - perWeekStreamLogsStrategy := stream_logs_strategy.NewPerWeekStreamLogsStrategy(realTime, logRetentionPeriodInWeeks) - - logsDatabaseClient = persistent_volume.NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, osFs, logFileManager, perWeekStreamLogsStrategy) + perHourFileLayout := file_layout.NewPerHourFileLayout(realTime, volume_consts.LogsStorageDirpath) + logFileManager := log_file_manager.NewLogFileManager(kurtosisBackend, osFs, perHourFileLayout, realTime, logRetentionPeriod, volume_consts.LogsStorageDirpath) + if logFileManager.GetLogFileLayoutFormat() != vector_consts.VectorLogsFilepathFormat { + return nil, stacktrace.NewError("Log file format for this logs database client '%v' does not match the format output by Vector logs aggregator '%v'. 
This is a Kurtosis bug.", logFileManager.GetLogFileLayoutFormat(), vector_consts.VectorLogsFilepathFormat) + } + streamLogsStrategy := stream_logs_strategy.NewStreamLogsStrategyImpl(realTime, logRetentionPeriod, perHourFileLayout) + logsDatabaseClient = persistent_volume.NewPersistentVolumeLogsDatabaseClient(kurtosisBackend, osFs, logFileManager, streamLogsStrategy) case args.KurtosisBackendType_Kubernetes: logsDatabaseClient = kurtosis_backend.NewKurtosisBackendLogsDatabaseClient(kurtosisBackend) } - return logsDatabaseClient + return logsDatabaseClient, nil } func formatFilenameFunctionForLogs(filename string, functionName string) string { diff --git a/internal_testsuites/golang/testsuite/startlark_user_passing_test/starlark_user_passing_test.go b/internal_testsuites/golang/testsuite/startlark_user_passing_test/starlark_user_passing_test.go index b41104f7e7..2611de036f 100644 --- a/internal_testsuites/golang/testsuite/startlark_user_passing_test/starlark_user_passing_test.go +++ b/internal_testsuites/golang/testsuite/startlark_user_passing_test/starlark_user_passing_test.go @@ -14,7 +14,7 @@ const ( userOverrideServiceName = "user-override" starlarkScriptWithUserIdPassed = ` -IMAGE = "hyperledger/besu:latest" +IMAGE = "hyperledger/besu:24.3" def run(plan, args): no_override = plan.add_service( name = "` + noOverrideServiceName + `",