use Julian;
use IO::File;
use DXDebug;
use DXDupe;

use strict;

# Module version bookkeeping, derived from the VCS revision keyword.
use vars qw($VERSION $BRANCH);
$VERSION = sprintf( "%d.%03d", q$Revision$ =~ /(\d+)\.(\d+)/ );
$BRANCH = sprintf( "%d.%03d", q$Revision$ =~ /\d+\.\d+\.(\d+)\.(\d+)/ || (0,0));
$main::build += $VERSION;
$main::branch += $BRANCH;

use vars qw($date $sfi $k $a $r $forecast @allowed @denied $fp $node $from
			$dirprefix $param
			$duplth $dupage $filterdef);

$fp = 0;						# the DXLog fcb
$date = 0;						# the unix time of the WWV (notional)
$from = "";						# who this came from
@allowed = ();					# if present only these callsigns are regarded as valid WWV updators
@denied = ();					# if present ignore any wwv from these callsigns
$duplth = 20;					# the length of text to use in the deduping
$dupage = 12*3600;				# the length of time to hold spot dups
$dirprefix = "$main::data/wwv";
$param = "$dirprefix/param";

# Filter command definition table consumed by Filter::Cmd:
# each row is [tag, sort, field, priv, special parser].
$filterdef = bless ([
			  # tag, sort, field, priv, special parser
			  ['by', 'c', 0],
			  ['origin', 'c', 1],
			  ['channel', 'c', 2],
			  ['by_dxcc', 'nc', 3],
			  ['by_itu', 'ni', 4],
			  ['by_zone', 'nz', 5],
			  ['origin_dxcc', 'nc', 6],
			  ['origin_itu', 'ni', 7],
			  ['origin_zone', 'nz', 8],
			 ], 'Filter::Cmd');
sub init
{
$fp = DXLog::new('wwv', 'dat', 'm');
sub update
{
my ($mydate, $mytime, $mysfi, $mya, $myk, $myforecast, $myfrom, $mynode, $myr) = @_;
- if ((@allowed && grep {$_ eq $from} @allowed) ||
- (@denied && !grep {$_ eq $from} @denied) ||
+ $myfrom =~ s/-\d+$//;
+ if ((@allowed && grep {$_ eq $myfrom} @allowed) ||
+ (@denied && !grep {$_ eq $myfrom} @denied) ||
(@allowed == 0 && @denied == 0)) {
# my $trydate = cltounix($mydate, sprintf("%02d18Z", $mytime));
- if ($mydate >= $date) {
+ if ($mydate > $date) {
if ($myr) {
$r = 0 + $myr;
} else {
{
my $from = shift;
my $to = shift;
- my @date = $fp->unixtoj(shift);
+ my $date = $fp->unixtoj(shift);
my $pattern = shift;
my $search;
my @out;
$fp->close; # close any open files
- my $fh = $fp->open(@date);
+ my $fh = $fp->open($date);
for ($count = 0; $count < $to; ) {
my @in = ();
if ($fh) {
#
sub readfile
{
- my @date = $fp->unixtoj(shift);
- my $fh = $fp->open(@date);
+ my $date = $fp->unixtoj(shift);
+ my $fh = $fp->open($date);
my @spots = ();
my @in;
# enter the spot for dup checking and return true if it is already a dup
# Enter a WWV record into the dup cache and return true if it was already
# there. Returns 2 when the record is too old to bother with, otherwise
# whatever DXDupe::check returns (true => duplicate).
#
# NOTE(review): $text is kept in the argument list for caller compatibility
# but no longer contributes to the dup key; the key is built from
# date/sfi/k/a/call. The 'W' prefix appears to namespace WWV entries in the
# shared DXDupe store (listdups uses the same prefix) — confirm against DXDupe.
sub dup
{
	my ($d, $sfi, $k, $a, $text, $call) = @_;

	# dump if too old
	return 2 if $d < $main::systime - $dupage;

	my $dupkey = "W$d|$sfi|$k|$a|$call";
	return DXDupe::check($dupkey, $main::systime+$dupage);
}
# List the current WWV entries held in the shared DXDupe cache.
# Delegates entirely to DXDupe::listdups; 'W' selects the WWV key
# namespace and $dupage bounds how old an entry may be.
sub listdups
{
	return DXDupe::listdups('W', $dupage, @_);
}
1;
__END__;