X-Git-Url: http://dxcluster.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=perl%2FDXUser.pm;h=d7c6a1ae73aac90d476f36bcf6bd7416bbbd6fe7;hb=e67d75717f0625225632cfd12a7a2d899fb692ea;hp=5dd1ced21ccbe93648ed138d25ae4a3b67d59fda;hpb=4216a8c68e18bc33c24092d1dba73bfce41b59ff;p=spider.git diff --git a/perl/DXUser.pm b/perl/DXUser.pm index 5dd1ced2..d7c6a1ae 100644 --- a/perl/DXUser.pm +++ b/perl/DXUser.pm @@ -1,9 +1,60 @@ # # DX cluster user routines # -# Copyright (c) 1998 - Dirk Koopman G1TLH +# Copyright (c) 1998-2020 - Dirk Koopman G1TLH # +# The new internal structure of the users system looks like this: # +# The users.v4 file formatted as a file of lines containing: \t{json serialised version of user record}\n +# +# You can look at it with any text tools or your favourite editor :-) +# +# In terms of internal structure, the main user hash remains as %u, keyed on callsign as before. +# +# The value is a one or two element array [position] or [position, ref], depending on whether the record has been "get()ed" +# [i.e. got from disk] or not. The 'position' is simply the start of each line in the file. The function "get()" simply returns +# the stored reference in array[1], if present, or seeks to the position from array[0], reads a line, json_decodes it, +# stores that reference into array[1] and returns that. That reference will be used from that time onwards. +# +# The routine writeoutjson() will (very) lazily write out a copy of %u WITHOUT STORING ANY EXTRA CURRENTLY UNREFERENCED CALLSIGN +# records to users.v4.n. It, in effect, does a sort of random accessed merge of the current user file and any "in memory" +# versions of any user record. This can be done with a spawned command because it will just be reading %u and merging +# loaded records, not altering the current users.v4 file in any way. +# +# %u -> $u{call} -> [position of json line in users.v4 (, reference -> {call=>'G1TLH', ...} if this record is in use)]. 
+#
+# On my machine, it takes about 250mS to read the entire users.v4 file of 190,000 records and to create a
+# $u{callsign}->[record position in users.v4] for every callsign in the users.v4 file. Loading ~19,000 records
+# (read from disk, decode json, store reference) takes about 110mS (or 580nS/record).
+#
+# A periodic dump of users.v4.n, with said ~19,000 records in memory takes about 750mS to write (this can be speeded up,
+# by at least a half, if it becomes a problem!). As this periodic dump will be spawned off, it will not interrupt the data
+# stream.
+#
+# This is the first rewrite of DXUsers since inception. In the mojo branch we will no longer use Storable but use JSON instead.
+# We will now be storing all the keys in memory and will use opportunistic loading of actual records in "get()". So out of
+# say 200,000 known users it is unlikely that we will have more than 10% (more likely less) of the user records in memory.
+# This will mean that there will be an increase in memory requirement, but it is modest. I estimate it's unlikely to be more
+# than 30 or so MB.
+#
+# At the moment that means that the working users.v4 is "immutable".
+#
+# In normal operation, when first calling 'init()', the keys and positions will be read from the newer of users.v4.n and
+# users.v4. If there is no users.v4.n, then users.v4 will be used. As time wears on, %u will then accrete active user records.
+# Once an hour the current %u will be saved to users.v4.n.
+#
+# If it becomes too much of a problem then we are likely to chuck off "close()d" users onto the end of the current users.v4
+# leaving existing users intact, but updating the pointer to the (now cleared out) user ref to the new location. This will
+# be a sort of write behind log file. The users.v4 file is still immutable for the starting positions, but any chucked off
+# records (or even "updates") will be written to the end of that file. 
If this has to be reread at any time, then the last +# entry for any callsign "wins". But this will only happen if I think the memory requirements over time become too much. +# +# As there is no functional difference between the users.v4 and export_user generated "user_json" file(s), other than the latter +# will be in sorted order with the record elements in "canonical" order. There will now longer be any code to execute to +# "restore the users file". Simply copy one of the "user_json" files to users.v4, remove users.v4.n and restart. +# +# Hopefully though, this will put to rest the need to do all that messing about ever again... Pigs may well be seen flying over +# your node as well :-) # package DXUser; @@ -19,7 +70,8 @@ use File::Copy; use JSON; use DXDebug; use Data::Structure::Util qw(unbless); - +use Time::HiRes qw(gettimeofday tv_interval); +use IO::File; use strict; @@ -31,13 +83,21 @@ $filename = undef; $lastoperinterval = 60*24*60*60; $lasttime = 0; $lrusize = 2000; -$tooold = 86400 * 365; # this marks an old user who hasn't given enough info to be useful +$tooold = 86400 * 365 + 31; # this marks an old user who hasn't given enough info to be useful $v3 = 0; $v4 = 0; my $json; our $maxconnlist = 3; # remember this many connection time (duration) [start, end] pairs +our $newusers; # per execution stats +our $modusers; +our $totusers; +our $delusers; + +my $ifh; # the input file, initialised by readinjson() + + # hash of valid elements and a simple prompt %valid = ( call => '0,Callsign', @@ -132,54 +192,48 @@ sub init my $fn = "users"; - if ($mode == 4 || -e localdata("users.v4")) { - $ufn = localdata("users.v4"); + $json = JSON->new()->canonical(1); + $filename = $ufn = localdata("$fn.json"); + + if (-e localdata("$fn.json")) { $v4 = 1; - $json = JSON->new(); - $json->canonical(1); } else { eval { require Storable; }; + if ($@) { - $ufn = localdata("users.v2"); - $v3 = $convert = 0; - dbg("the module Storable appears to be missing!!"); - dbg("trying to 
continue in compatibility mode (this may fail)"); - dbg("please install Storable from CPAN as soon as possible"); - } - else { + if ( ! -e localdata("users.v3") && -e localdata("users.v2") ) { + $convert = 2; + } + LogDbg('',"the module Storable appears to be missing!!"); + LogDbg('',"trying to continue in compatibility mode (this may fail)"); + LogDbg('',"please install Storable from CPAN as soon as possible"); + } else { import Storable qw(nfreeze thaw); - $ufn = localdata("users.v3"); - $v3 = 1; - $convert++ if -e localdata("users.v2") && !-e $ufn; + $convert = 3 if -e localdata("users.v3") && !-e $ufn; } } - - if ($mode) { - $dbm = tie (%u, 'DB_File', $ufn, O_CREAT|O_RDWR, 0666, $DB_BTREE) or confess "can't open user file: $fn ($!) [rebuild it from user_asc?]"; - } else { - $dbm = tie (%u, 'DB_File', $ufn, O_RDONLY, 0666, $DB_BTREE) or confess "can't open user file: $fn ($!) [rebuild it from user_asc?]"; - } - - die "Cannot open $ufn ($!)\n" unless $dbm; - $lru = LRU->newbase("DXUser", $lrusize); - # do a conversion if required - if ($dbm && $convert) { + if ($convert) { my ($key, $val, $action, $count, $err) = ('','',0,0,0); + my $ta = [gettimeofday]; my %oldu; - dbg("Converting the User File to V$convert "); - dbg("This will take a while, I suggest you go and have cup of strong tea"); - my $odbm = tie (%oldu, 'DB_File', localdata("users.v2"), O_RDONLY, 0666, $DB_BTREE) or confess "can't open user file: $fn.v2 ($!) [rebuild it from user_asc?]"; + LogDbg('',"Converting the User File from V$convert to $fn.json "); + LogDbg('',"This will take a while, I suggest you go and have cup of strong tea"); + my $odbm = tie (%oldu, 'DB_File', localdata("users.v$convert"), O_RDONLY, 0666, $DB_BTREE) or confess "can't open user file: $fn.v$convert ($!) 
[rebuild it from user_asc?]"; for ($action = R_FIRST; !$odbm->seq($key, $val, $action); $action = R_NEXT) { my $ref; - eval { $ref = asc_decode($val) }; + if ($convert == 3) { + eval { $ref = storable_decode($val) }; + } else { + eval { $ref = asc_decode($val) }; + } unless ($@) { if ($ref) { - $ref->put; + $u{$key} = $ref; $count++; } else { $err++ @@ -190,7 +244,20 @@ sub init } undef $odbm; untie %oldu; - dbg("Conversion completed $count records $err errors"); + my $t = _diffms($ta); + LogDbg('',"Conversion from users.v$convert to users.json completed $count records $err errors $t mS"); + + # now write it away for future use + $ta = [gettimeofday]; + $err = 0; + $count = writeoutjson(); + $t = _diffms($ta); + LogDbg('',"New Userfile users.json write completed $count records $err errors $t mS"); + LogDbg('',"Now restarting.."); + $main::ending = 10; + } else { + # otherwise (i.e normally) slurp it in + readinjson(); } $filename = $ufn; } @@ -213,10 +280,10 @@ sub del_file # sub process { - if ($main::systime > $lasttime + 15) { - $dbm->sync if $dbm; - $lasttime = $main::systime; - } +# if ($main::systime > $lasttime + 15) { +# #$dbm->sync if $dbm; +# $lasttime = $main::systime; +# } } # @@ -251,6 +318,8 @@ sub new my $self = $pkg->alloc($call); $self->put; + ++$newusers; + ++$totusers; return $self; } @@ -262,32 +331,48 @@ sub new sub get { my $call = uc shift; - my $data; - - # is it in the LRU cache? - my $ref = $lru->get($call); - return $ref if $ref && ref $ref eq 'DXUser'; + my $nodecode = shift; + my $ref = $u{$call}; + return undef unless $ref; - # search for it - unless ($dbm->get($call, $data)) { - eval { $ref = decode($data); }; - - if ($ref) { - if (!UNIVERSAL::isa($ref, 'DXUser')) { - dbg("DXUser::get: got strange answer from decode of $call". ref $ref. 
" ignoring"); - return undef; + unless ($ref->[1]) { + $ifh->seek($ref->[0], 0); + my $l = $ifh->getline; + if ($l) { + my ($k,$s) = split /\t/, $l; + return $s if $nodecode; + my $j = json_decode($s); + if ($j) { + $ref->[1] = $j; } - # we have a reference and it *is* a DXUser - } else { - if ($@) { - LogDbg('err', "DXUser::get decode error on $call '$@'"); - } else { - dbg("DXUser::get: no reference returned from decode of $call $!"); - } - return undef; } - $lru->put($call, $ref); - return $ref; + } elsif ($nodecode) { + return json_encode($ref->[1]); + } + return $ref->[1]; +} + +# +# get an "ephemeral" reference - i.e. this will give you new temporary copy of +# the call's user record, but without storing it (if it isn't already there) +# +# This is not as quick as get()! But it will allow safe querying of the +# user file. Probably in conjunction with get_some_calls feeding it. +# +# Probably need to create a new copy of any existing records WIP + +sub get_tmp +{ + my $call = uc shift; + my $ref = $u{call}; + if ($ref) { + $ifh->seek($ref->[0], 0); + my $l = $ifh->getline; + if ($l) { + my ($k,$s) = split /\t/, $l; + my $j = json_decode($s); + return $; + } } return undef; } @@ -323,6 +408,16 @@ sub get_all_calls return (sort keys %u); } +# +# get some calls - provide a qr// style selector string as a partial key +# + +sub get_some_calls +{ + my $pattern = shift || qr/.*/; + return sort grep {$pattern} keys %u; +} + # # put - put a user # @@ -331,15 +426,8 @@ sub put { my $self = shift; confess "Trying to put nothing!" 
unless $self && ref $self; - my $call = $self->{call}; - - $dbm->del($call); - delete $self->{annok} if $self->{annok}; - delete $self->{dxok} if $self->{dxok}; - - $lru->put($call, $self); - my $ref = $self->encode; - $dbm->put($call, $ref); + $self->{lastin} = $main::systime; + ++$modusers; # new or existing, it's still been modified } # freeze the user @@ -355,33 +443,18 @@ sub encode sub decode { goto &json_decode if $v4; - goto &asc_decode unless $v3; + goto &storable_decode if $v3; + goto &asc_decode; +} + +# should now be obsolete for mojo branch build 238 and above +sub storable_decode +{ my $ref; $ref = thaw(shift); return $ref; } -# -# create a string from a user reference (in_ascii) -# -sub asc_encode -{ - my $self = shift; - my $strip = shift; - my $p; - - if ($strip) { - my $ref = bless {}, ref $self; - foreach my $k (qw(qth lat long qra sort call homenode node lastoper lastin)) { - $ref->{$k} = $self->{$k} if exists $self->{$k}; - } - $ref->{name} = $self->{name} if exists $self->{name} && $self->{name} !~ /selfspot/i; - $p = dd($ref); - } else { - $p = dd($self); - } - return $p; -} # # create a hash from a string (in ascii) @@ -429,8 +502,11 @@ sub del { my $self = shift; my $call = $self->{call}; - $lru->remove($call); - $dbm->del($call); +# $lru->remove($call); + # $dbm->del($call); + ++$delusers; + --$totusers; + delete $u{$call}; } # @@ -448,7 +524,7 @@ sub close push @$ref, $ip if $ip; push @{$self->{connlist}}, $ref; shift @{$self->{connlist}} if @{$self->{connlist}} > $maxconnlist; - $self->put(); +# $self->put(); } # @@ -457,7 +533,7 @@ sub close sub sync { - $dbm->sync; +# $dbm->sync; } # @@ -476,141 +552,9 @@ sub fields sub export { - my $name = shift || 'user_asc'; - my $basic_info_only = shift; - - my $fn = $name ne 'user_asc' ? 
$name : "$main::local_data/$name"; # force use of local - - # save old ones - move "$fn.oooo", "$fn.ooooo" if -e "$fn.oooo"; - move "$fn.ooo", "$fn.oooo" if -e "$fn.ooo"; - move "$fn.oo", "$fn.ooo" if -e "$fn.oo"; - move "$fn.o", "$fn.oo" if -e "$fn.o"; - move "$fn", "$fn.o" if -e "$fn"; - - my $count = 0; - my $err = 0; - my $del = 0; - my $fh = new IO::File ">$fn" or return "cannot open $fn ($!)"; - if ($fh) { - my $key = 0; - my $val = undef; - my $action; - my $t = scalar localtime; - print $fh q{#!/usr/bin/perl -# -# The exported userfile for a DXSpider System -# -# Input file: $filename -# Time: $t -# - -package main; - -# search local then perl directories -BEGIN { - umask 002; - - # root of directory tree for this system - $root = "/spider"; - $root = $ENV{'DXSPIDER_ROOT'} if $ENV{'DXSPIDER_ROOT'}; - - unshift @INC, "$root/perl"; # this IS the right way round! - unshift @INC, "$root/local"; - - # try to detect a lockfile (this isn't atomic but - # should do for now - $lockfn = "$root/local_data/cluster.lck"; # lock file name - if (-e $lockfn) { - open(CLLOCK, "$lockfn") or die "Can't open Lockfile ($lockfn) $!"; - my $pid = ; - chomp $pid; - die "Lockfile ($lockfn) and process $pid exists - cluster must be stopped first\n" if kill 0, $pid; - close CLLOCK; - } -} - -use SysVar; -use DXUser; - -if (@ARGV) { - $main::userfn = shift @ARGV; - print "user filename now $userfn\n"; -} - -package DXUser; - -del_file(); -init(1); -%u = (); -my $count = 0; -my $err = 0; -while () { - chomp; - my @f = split /\t/; - my $ref = asc_decode($f[1]); - if ($ref) { - $ref->put(); - $count++; - DXUser::sync() unless $count % 10000; - } else { - print "# Error: $f[0]\t$f[1]\n"; - $err++ - } -} -DXUser::sync(); DXUser::finish(); -print "There are $count user records and $err errors\n"; -}; - print $fh "__DATA__\n"; - - for ($action = R_FIRST; !$dbm->seq($key, $val, $action); $action = R_NEXT) { - if (!is_callsign($key) || $key =~ /^0/) { - my $eval = $val; - my $ekey = $key; - 
$eval =~ s/([\%\x00-\x1f\x7f-\xff])/sprintf("%%%02X", ord($1))/eg; - $ekey =~ s/([\%\x00-\x1f\x7f-\xff])/sprintf("%%%02X", ord($1))/eg; - LogDbg('DXCommand', "Export Error1: $ekey\t$eval"); - eval {$dbm->del($key)}; - dbg(carp("Export Error1: $ekey\t$eval\n$@")) if $@; - ++$err; - next; - } - my $ref; - eval {$ref = decode($val); }; - if ($ref) { - my $t = $ref->{lastin} || 0; - if ($ref->is_user && !$ref->{priv} && $main::systime > $t + $tooold) { - unless ($ref->{lat} && $ref->{long} || $ref->{qth} || $ref->{qra}) { - eval {$dbm->del($key)}; - dbg(carp("Export Error2: $key\t$val\n$@")) if $@; - LogDbg('DXCommand', "$ref->{call} deleted, too old"); - $del++; - next; - } - } - # only store users that are reasonably active or have useful information - print $fh "$key\t" . $ref->asc_encode($basic_info_only) . "\n"; - ++$count; - } else { - LogDbg('DXCommand', "Export Error3: $key\t" . carp($val) ."\n$@"); - eval {$dbm->del($key)}; - dbg(carp("Export Error3: $key\t$val\n$@")) if $@; - ++$err; - } - } - $fh->close; - } - my $s = qq{Exported users to $fn - $count Users $del Deleted $err Errors ('sh/log Export' for details)}; - LogDbg('command', $s); - return $s; -} - -sub export_json -{ - my $name = shift || 'user_json'; - my $basic_info_only = shift; + my $name = shift; - my $fn = $name ne 'user_json' ? 
$name : "$main::local_data/$name"; # force use of local + my $fn = $name || localdata("user_json"); # force use of local_data # save old ones move "$fn.oooo", "$fn.ooooo" if -e "$fn.oooo"; @@ -621,7 +565,6 @@ sub export_json my $json = JSON->new; $json->canonical(1); - $json->allow_blessed(1); my $count = 0; my $err = 0; @@ -630,115 +573,25 @@ sub export_json if ($fh) { my $key = 0; my $val = undef; - my $action; - my $t = scalar localtime; - print $fh q{#!/usr/bin/perl -# -# The exported userfile for a DXSpider System -# -# Input file: $filename -# Time: $t -# - -package main; - -# search local then perl directories -BEGIN { - umask 002; - - # root of directory tree for this system - $root = "/spider"; - $root = $ENV{'DXSPIDER_ROOT'} if $ENV{'DXSPIDER_ROOT'}; - - unshift @INC, "$root/perl"; # this IS the right way round! - unshift @INC, "$root/local"; - - # try to detect a lockfile (this isn't atomic but - # should do for now - $lockfn = "$root/local_data/cluster.lck"; # lock file name - if (-e $lockfn) { - open(CLLOCK, "$lockfn") or die "Can't open Lockfile ($lockfn) $!"; - my $pid = ; - chomp $pid; - die "Lockfile ($lockfn) and process $pid exists - cluster must be stopped first\n" if kill 0, $pid; - close CLLOCK; - } -} - -use SysVar; -use DXUser; - -if (@ARGV) { - $main::userfn = shift @ARGV; - print "user filename now $userfn\n"; -} - -package DXUser; - -use JSON; -my $json = JSON->new; - -del_file(); -init(4); -%u = (); -my $count = 0; -my $err = 0; -while () { - chomp; - my @f = split /\t/; - my $ref; - eval { $ref = $json->decode($f[1]); }; - if ($ref && !$@) { - $ref = bless $ref, 'DXUser'; - $ref->put(); - $count++; - DXUser::sync() unless $count % 10000; - } else { - print "# Error: $f[0]\t$f[1]\n"; - $err++ - } -} -DXUser::sync(); DXUser::finish(); -print "There are $count user records and $err errors\n"; -}; - print $fh "__DATA__\n"; - - for ($action = R_FIRST; !$dbm->seq($key, $val, $action); $action = R_NEXT) { - if (!is_callsign($key) || $key =~ 
/^0/) {
-				my $eval = $val;
-				my $ekey = $key;
-				$eval =~ s/([\%\x00-\x1f\x7f-\xff])/sprintf("%%%02X", ord($1))/eg;
-				$ekey =~ s/([\%\x00-\x1f\x7f-\xff])/sprintf("%%%02X", ord($1))/eg;
-				LogDbg('DXCommand', "Export Error1: $ekey\t$eval");
-				eval {$dbm->del($key)};
-				dbg(carp("Export Error1: $ekey\t$eval\n$@")) if $@;
-				++$err;
-				next;
-			}
-			my $ref;
-			eval {$ref = decode($val); };
-			if ($ref) {
-				my $t = $ref->{lastin} || 0;
-				if ($ref->is_user && !$ref->{priv} && $main::systime > $t + $tooold) {
-					unless ($ref->{lat} && $ref->{long} || $ref->{qth} || $ref->{qra}) {
-						eval {$dbm->del($key)};
-						dbg(carp("Export Error2: $key\t$val\n$@")) if $@;
-						LogDbg('DXCommand', "$ref->{call} deleted, too old");
-						$del++;
-						next;
-					}
+		foreach my $k (sort keys %u) {
+			my $r = get($k);	# NB: must use get() here - $u{$k} is the [position, ref] slot, not the user record
+			next unless $r;
+			if ($r->{sort} eq 'U' && !$r->{priv} && $main::systime > $r->{lastin}+$tooold ) {
+				unless ($r->{lat} || $r->{long} || $r->{qra} || $r->{qth} || $r->{name}) {
+					LogDbg('err', "DXUser::export deleting $k - too old, last in " . cldatetime($r->lastin) . " " . difft([$r->lastin, $main::systime]));
+					delete $u{$k};
+					++$del;
+					next;
 				}
-				# only store users that are reasonably active or have useful information
-				unbless($ref);
-				print $fh "$key\t" . $json->encode($ref) . "\n";
-				++$count;
-			} else {
-				LogDbg('DXCommand', "Export Error3: $key\t" . carp($val) ."\n$@");
-				eval {$dbm->del($key)};
-				dbg(carp("Export Error3: $key\t$val\n$@")) if $@;
-				++$err;
 			}
-		}
+			eval {$val = json_encode($r);};
+			if ($@) {
+				LogDbg('err', "DXUser::export error encoding call: $k $@");
+				++$err;
+				next;
+			}
+			$fh->print("$k\t$val\n");
+			++$count;
+		}
 		$fh->close;
 	}
 	my $s = qq{Exported users to $fn - $count Users $del Deleted $err Errors ('sh/log Export' for details)};
@@ -1064,6 +917,86 @@ sub lastping
 	$b->{$call} = shift if @_;
 	return $b->{$call};
 }
+
+#
+# read in the latest version of the user file. As this file is immutable, the file one really wants is
+# a later (generated) copy. 
But, if the plain users.v4 file is all we have, we'll use that. +# + +use File::Copy; + +sub readinjson +{ + my $fn = $filename; + my $nfn = "$fn.n"; + my $ofn = "$fn.o"; + + my $ta = [gettimeofday]; + my $count = 0; + my $s; + my $err = 0; + + unless (-r $fn) { + dbg("DXUser $fn not found - probably about to convert"); + return; + } + + if (-e $nfn && -e $fn && (stat($nfn))[9] > (stat($fn))[9]) { + # move the old file to .o + unlink $ofn; + move($fn, $ofn); + move($nfn, $fn); + }; + + if ($ifh) { + $ifh->seek(0, 0); + } else { + $ifh = IO::File->new("+<$fn") or die "$fn read error $!"; + } + my $pos = $ifh->tell; + while (<$ifh>) { + chomp; + my @f = split /\t/; + $u{$f[0]} = [$pos]; + $count++; + $pos = $ifh->tell; + } + $ifh->seek(0, 0); + + # $ifh is "global" and should not be closed + + dbg("DXUser::readinjson $count record headers read from $fn in ". _diffms($ta) . " mS"); + return $totusers = $count; +} + +# +# Write a newer copy of the users.v4 file to users.v4.n, which is what will be read in. +# This means that the existing users.v4 is not touched during a run of dxspider, or at least +# not yet. + +sub writeoutjson +{ + my $ofn = shift || "$filename.n"; + my $ta = [gettimeofday]; + + my $ofh = IO::File->new(">$ofn") or die "$ofn write error $!"; + my $count = 0; + $ifh->seek(0, 0); + for my $k (sort keys %u) { + my $l = get($k, 1); + if ($l) { + chomp $l; + print $ofh "$k\t$l\n"; + ++$count; + } else { + LogDbg('DXCommand', "DXUser::writeoutjson callsign $k not found") + } + } + + $ofh->close; + dbg("DXUser::writeoutjson $count records written to $ofn in ". _diffms($ta) . " mS"); + return $count; +} 1; __END__