I have updated the wiper.pl script (/opt/rtcds/caltech/c1/target/fb/wiper.pl) that runs on the framebuilder (via crontab) to delete old frames and prevent the file system from overloading. The script frees space by deleting the oldest frames first. As it was, it sorted frame file names lexically rather than numerically by GPS time, which would have caused it to delete post-GPS-1000000000 frames first (since, e.g., "1000000000" sorts before "999999999" as a string). This issue was identified at LHO, and below is the patch that I applied to the script.
--- wiper.pl.orig 2011-04-11 13:54:40.000000000 -0700
+++ wiper.pl 2011-09-14 19:48:36.000000000 -0700
@@ -1,5 +1,7 @@
#!/usr/bin/perl
+use File::Basename;
+
print "\n" . `date` . "\n";
# Dry run, do not delete anything
$dry_run = 1;
@@ -126,14 +128,23 @@
if ($du{$minute_trend_frames_dir} > $minute_frames_keep) { $do_min = 1; };
+# sort files by GPS time split into prefixL-T-GPS-sec.gwf
+# numerically sort on 3rd field
+sub byGPSTime {
+ my $c = basename $a;
+ $c =~ s/\D+(\d+)\D+(\d+)\D+/$1/g;
+ my $d = basename $b;
+ $d =~ s/\D+(\d+)\D+(\d+)\D+/$1/g;
+ $c <=> $d;
+}
+
# Delete frame files in $dir to free $ktofree Kbytes of space
# This one reads file names in $dir/*/*.gwf sorts them by file names
# and progressively deletes them up to $ktofree limit
sub delete_frames {
($dir, $ktofree) = @_;
# Read file names; Could this be inefficient?
- @a= <$dir/*/*.gwf>;
- sort @a;
+ @a = sort byGPSTime <$dir/*/*.gwf>;
$dacc = 0; # How many kilobytes we deleted
$fnum = @a;
$dnum = 0;
@@ -145,6 +156,7 @@
if ($dacc >= $ktofree) { last; }
$dnum ++;
# Delete $file here
+ print "- " . $file . "\n";
if (!$dry_run) {
unlink($file);
}
|