code
stringlengths
2
1.05M
repo_name
stringlengths
5
101
path
stringlengths
4
991
language
stringclasses
3 values
license
stringclasses
5 values
size
int64
2
1.05M
# -*- perl -*-
# !!! DO NOT EDIT !!!
# This file was automatically generated.

package Net::Amazon::Validate::ItemSearch::fr::State;

use 5.006;
use strict;
use warnings;

# Validator for the ItemSearch "State" index (fr locale): knows the legal
# values for this search index and supplies 'Music' as the default.

sub new {
    my ( $class, %options ) = @_;

    my $self = {
        '_default' => 'Music',
        %options,
    };

    # Legal values in canonical capitalisation; matching is case-insensitive.
    push @{ $self->{_options} }, $_
        for qw( Classical DVD Kitchen Music VHS Video );

    return bless $self, $class;
}

# Return the canonical option matching $user when one was supplied,
# otherwise fall back to the default.
sub user_or_default {
    my ( $self, $user ) = @_;

    return $self->find_match($user)
        if defined $user && length($user) > 0;

    return $self->default();
}

# Accessor for the default value.
sub default {
    my ($self) = @_;
    return $self->{_default};
}

# Case-insensitive lookup of $value in the option list; returns the
# canonical spelling or dies when the value is not a known option.
sub find_match {
    my ( $self, $value ) = @_;

    for my $option ( @{ $self->{_options} } ) {
        return $option if lc($option) eq lc($value);
    }

    die "$value is not a valid value for fr::State!\n";
}

1;

__END__

=head1 NAME

Net::Amazon::Validate::ItemSearch::fr::State;

=head1 DESCRIPTION

The default value is Music, unless mode is specified.

The list of available values are:

    Classical
    DVD
    Kitchen
    Music
    VHS
    Video

=cut
carlgao/lenga
images/lenny64-peon/usr/share/perl5/Net/Amazon/Validate/ItemSearch/fr/State.pm
Perl
mit
1,245
package MIP::Program::Allele_frequency;

use 5.026;
use Carp;
use charnames qw{ :full :short };
use English qw{ -no_match_vars };
use open qw{ :encoding(UTF-8) :std };
use Params::Check qw{ allow check last_error };
use utf8;
use warnings;
use warnings qw{ FATAL utf8 };

## CPANM
use autodie qw{ :all };
use Readonly;

## MIPs lib/
use MIP::Constants qw{ $SPACE };
use MIP::Unix::Standard_streams qw{ unix_standard_streams };
use MIP::Unix::Write_to_file qw{ unix_write_to_file };

BEGIN {

    require Exporter;
    use base qw{ Exporter };

    # Functions and variables which can be optionally exported
    our @EXPORT_OK = qw{ calculate_af max_af };
}

sub calculate_af {

## Function : Perl wrapper for writing calculate_af recipe to already open $filehandle or return commands array. Based on calculate_af 0.0.2.
## Returns  : @commands
## Arguments: $filehandle             => Filehandle to write to
##          : $infile_path            => Infile path
##          : $stderrfile_path        => Stderrfile path
##          : $stderrfile_path_append => Append stderr info to file path
##          : $stdoutfile_path        => Stdoutfile path

    my ($arg_href) = @_;

    # calculate_af and max_af take the identical argument set; share the
    # validation and command assembly in one private helper.
    return _af_command( q{calculate_af}, $arg_href );
}

sub max_af {

## Function : Perl wrapper for writing max_af recipe to already open $filehandle or return commands array. Based on max_af 0.0.2.
## Returns  : @commands
## Arguments: $filehandle             => Filehandle to write to
##          : $infile_path            => Infile path
##          : $stderrfile_path        => Stderrfile path
##          : $stderrfile_path_append => Append stderr info to file path
##          : $stdoutfile_path        => Stdoutfile path

    my ($arg_href) = @_;

    return _af_command( q{max_af}, $arg_href );
}

sub _af_command {

## Function : Shared implementation for calculate_af and max_af: validates the
##            common argument set, assembles the command line, appends the
##            standard-stream redirections and writes it via $filehandle when
##            one was supplied.
## Returns  : @commands
## Arguments: $program  => Program name to invoke
##          : $arg_href => Hash ref with the caller's named arguments

    my ( $program, $arg_href ) = @_;

    ## Flatten argument(s)
    my $filehandle;
    my $infile_path;
    my $stderrfile_path;
    my $stderrfile_path_append;
    my $stdoutfile_path;

    my $tmpl = {
        filehandle  => { store => \$filehandle, },
        infile_path => { store => \$infile_path, strict_type => 1, },
        stderrfile_path =>
          { store => \$stderrfile_path, strict_type => 1, },
        stderrfile_path_append =>
          { store => \$stderrfile_path_append, strict_type => 1, },
        stdoutfile_path =>
          { store => \$stdoutfile_path, strict_type => 1, },
    };

    check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};

    my @commands = ($program);

    if ($infile_path) {
        push @commands, $infile_path;
    }

    push @commands,
      unix_standard_streams(
        {
            stderrfile_path        => $stderrfile_path,
            stderrfile_path_append => $stderrfile_path_append,
            stdoutfile_path        => $stdoutfile_path,
        }
      );

    unix_write_to_file(
        {
            commands_ref => \@commands,
            filehandle   => $filehandle,
            separator    => $SPACE,
        }
    );

    return @commands;
}

1;
henrikstranneheim/MIP
lib/MIP/Program/Allele_frequency.pm
Perl
mit
4,298
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/Q713JNUf8G/australasia.  Olson data version 2016a
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Pacific::Guadalcanal;
$DateTime::TimeZone::Pacific::Guadalcanal::VERSION = '1.95';

use strict;

use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;

@DateTime::TimeZone::Pacific::Guadalcanal::ISA =
    ( 'Class::Singleton', 'DateTime::TimeZone' );

# Observance spans: [ utc_start, utc_end, local_start, local_end,
# offset-in-seconds, is_dst, abbreviation ].
my $spans = [
    [
        DateTime::TimeZone::NEG_INFINITY,    # utc_start
        60328934412,                         # utc_end 1912-09-30 13:20:12 (Mon)
        DateTime::TimeZone::NEG_INFINITY,    # local_start
        60328972800,                         # local_end 1912-10-01 00:00:00 (Tue)
        38388,                               # LMT: UTC+10:39:48
        0,
        'LMT',
    ],
    [
        60328934412,                         # utc_start 1912-09-30 13:20:12 (Mon)
        DateTime::TimeZone::INFINITY,        # utc_end
        60328974012,                         # local_start 1912-10-01 00:20:12 (Tue)
        DateTime::TimeZone::INFINITY,        # local_end
        39600,                               # SBT: UTC+11:00
        0,
        'SBT',
    ],
];

sub olson_version {'2016a'}

sub has_dst_changes {0}

sub _max_year {2026}

sub _new_instance {
    return shift->_init( @_, spans => $spans );
}

1;
jkb78/extrajnm
local/lib/perl5/DateTime/TimeZone/Pacific/Guadalcanal.pm
Perl
mit
1,244
package Google::Ads::AdWords::v201809::AdCustomizerFeedService::mutateResponse;

use strict;
use warnings;

{    # BLOCK to scope variables

    sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' }

    __PACKAGE__->__set_name('mutateResponse');
    __PACKAGE__->__set_nillable();
    __PACKAGE__->__set_minOccurs();
    __PACKAGE__->__set_maxOccurs();
    __PACKAGE__->__set_ref();

    use base qw(
      SOAP::WSDL::XSD::Typelib::Element
      Google::Ads::SOAP::Typelib::ComplexType
    );

    our $XML_ATTRIBUTE_CLASS;
    undef $XML_ATTRIBUTE_CLASS;

    sub __get_attr_class {
        return $XML_ATTRIBUTE_CLASS;
    }

    use Class::Std::Fast::Storable constructor => 'none';
    use base qw(Google::Ads::SOAP::Typelib::ComplexType);

    {    # BLOCK to scope variables

        # Single element property: the mutate return value.
        my %rval_of :ATTR(:get<rval>);

        __PACKAGE__->_factory(
            [ qw( rval ) ],
            { 'rval' => \%rval_of, },
            {
                'rval' =>
                  'Google::Ads::AdWords::v201809::AdCustomizerFeedReturnValue',
            },
            { 'rval' => 'rval', }
        );

    }    # end BLOCK

}    # end of BLOCK

1;

=pod

=head1 NAME

Google::Ads::AdWords::v201809::AdCustomizerFeedService::mutateResponse

=head1 DESCRIPTION

Perl data type class for the XML Schema defined element
mutateResponse from the namespace
https://adwords.google.com/api/adwords/cm/v201809.

=head1 PROPERTIES

The following properties may be accessed using get_PROPERTY /
set_PROPERTY methods:

=over

=item * rval

 $element->set_rval($data);
 $element->get_rval();

=back

=head1 METHODS

=head2 new

 my $element = Google::Ads::AdWords::v201809::AdCustomizerFeedService::mutateResponse->new($data);

Constructor. The following data structure may be passed to new():

 { rval =>  $a_reference_to, # see Google::Ads::AdWords::v201809::AdCustomizerFeedReturnValue
 },

=head1 AUTHOR

Generated by SOAP::WSDL

=cut
googleads/googleads-perl-lib
lib/Google/Ads/AdWords/v201809/AdCustomizerFeedService/mutateResponse.pm
Perl
apache-2.0
1,827
package NIC1;

# Parser/builder for NIC (New Instance Creator) v1 template files.  A
# template describes a project skeleton: name, directories, files with
# inline payloads, prompts, symlinks, and per-file constraints.  load()
# parses a template from a filehandle; build() materialises it into a
# directory after variables/constraints have been set.

use strict;
use warnings;

# Constructor.  $fh is accepted for interface compatibility but is not
# read here; call load($fh) to parse a template.
sub new {
    my $proto = shift;
    my $fh    = shift;
    my $class = ref($proto) || $proto;    # was an undeclared package global

    my $self = {
        NAME        => undef,
        DIRECTORIES => [],    # was "= ()" which left the slot undef
        FILES       => {},
        SYMLINKS    => {},
        VARIABLES   => {},
        CONSTRAINTS => {},
        PROMPTS     => [],    # was "= ()" — prompts() on a fresh object died
    };
    bless( $self, $class );
    return $self;
}

# Parse one directive line of a NIC template.  "file" directives consume
# their payload lines directly from $fh.
sub _processLine {
    my $self = shift;
    my $fh   = shift;
    local $_ = shift;

    if (/^name \"(.*)\"$/) {
        $self->{NAME} = $1;
    }
    elsif (/^dir (.+)$/) {
        push( @{ $self->{DIRECTORIES} }, $1 );
    }
    elsif (/^file (\d+) (.+)$/) {
        my $lines    = $1;
        my $filename = $2;
        my $filedata = "";
        while ( $lines > 0 ) {
            $filedata .= <$fh>;
            $lines--;
        }
        $self->{FILES}->{$filename} = {}
            if !defined $self->{FILES}->{$filename};
        $self->{FILES}->{$filename}->{data} = $filedata;
    }
    elsif (/^prompt (\w+) \"(.*?)\"( \"(.*?)\")?$/) {
        my $key     = $1;
        my $prompt  = $2;
        my $default = $4 || undef;
        $self->_addPrompt( $key, $prompt, $default );
    }
    elsif (/^symlink \"(.+)\" \"(.+)\"$/) {
        my $name = $1;
        my $dest = $2;
        $self->{SYMLINKS}->{$name} = $dest;
    }
    elsif (/^constrain file \"(.+)\" to (.+)$/) {
        my $constraint = $2;
        my $filename   = $1;
        $self->{FILES}->{$filename} = {}
            if !defined $self->{FILES}->{$filename};
        $self->{FILES}->{$filename}->{constraints} = []
            if !defined $self->{FILES}->{$filename}->{constraints};
        push( @{ $self->{FILES}->{$filename}->{constraints} }, $constraint );
    }
}

# Read and parse an entire template from $fh.
sub load {
    my $self = shift;
    my $fh   = shift;
    while (<$fh>) {
        $self->_processLine( $fh, $_ );
    }
}

# Set a substitution variable (used by @@KEY@@ expansion).
sub set {
    my $self  = shift;
    my $key   = shift;
    my $value = shift;
    $self->{VARIABLES}->{$key} = $value;
}

# Get a substitution variable.
sub get {
    my $self = shift;
    my $key  = shift;
    return $self->{VARIABLES}->{$key};
}

# Template name (from the "name" directive), or undef when unparsed.
sub name {
    my $self = shift;
    return $self->{NAME};
}

# List of prompt hashrefs: { name, prompt, default }.
sub prompts {
    my $self = shift;
    return @{ $self->{PROMPTS} };
}

# Mark a named constraint as satisfied.
sub addConstraint {
    my $self       = shift;
    my $constraint = shift;
    $self->{CONSTRAINTS}->{$constraint} = 1;
}

# Clear a named constraint.
sub removeConstraint {
    my $self       = shift;
    my $constraint = shift;
    delete $self->{CONSTRAINTS}->{$constraint};
}

# Record a user prompt parsed from the template.
sub _addPrompt {
    my ( $self, $key, $prompt, $default ) = @_;
    push(
        @{ $self->{PROMPTS} },
        { name => $key, prompt => $prompt, default => $default }
    );
}

# True when $constraint is satisfied; a leading "!" negates the test.
sub _constraintMatch {
    my $self       = shift;
    my $constraint = shift;
    my $negated    = 0;
    if ( substr( $constraint, 0, 1 ) eq "!" ) {
        $negated = 1;
        substr( $constraint, 0, 1, "" );
    }
    # Constraints only ever hold the value 1 (see addConstraint); the
    # original negated branch compared an undef value numerically.
    my $set = defined $self->{CONSTRAINTS}->{$constraint}
        && $self->{CONSTRAINTS}->{$constraint} == 1;
    return 0 if !$negated && !$set;
    return 0 if $negated  && $set;
    return 1;
}

# True when every constraint attached to $file is satisfied.
sub _fileMeetsConstraints {
    my $self = shift;
    my $file = shift;
    foreach ( @{ $file->{constraints} } ) {
        return 0 if !$self->_constraintMatch($_);
    }
    return 1;
}

# Expand every @@KEY@@ occurrence in $line from the variable table.
sub _substituteVariables {
    my $self = shift;
    my $line = shift;
    foreach my $key ( keys %{ $self->{VARIABLES} } ) {
        my $value = $self->{VARIABLES}->{$key};
        $line =~ s/\@\@$key\@\@/$value/g;
    }
    return $line;
}

# Materialise the template into $dir: directories, constrained files with
# variable expansion, then symlinks.
sub build {
    my $self = shift;
    my $dir  = shift;
    mkdir($dir);
    chdir($dir) or die $!;
    foreach my $subdir ( @{ $self->{DIRECTORIES} } ) {
        mkdir $self->_substituteVariables($subdir);
    }
    foreach my $filename ( keys %{ $self->{FILES} } ) {
        my $file = $self->{FILES}->{$filename};
        if ( defined $file->{constraints} ) {
            next if !$self->_fileMeetsConstraints($file);
        }
        open( my $nicfile, ">", $self->_substituteVariables($filename) )
            or die $!;    # was silently ignored on failure
        print $nicfile $self->_substituteVariables( $file->{data} );
        close($nicfile);
    }
    foreach my $symlink ( keys %{ $self->{SYMLINKS} } ) {
        my $name = $self->_substituteVariables($symlink);
        my $dest =
            $self->_substituteVariables( $self->{SYMLINKS}->{$symlink} );
        symlink( $dest, $name );
    }
}

# Write the parseable preamble (name, prompts, constraints) to a file.
sub dumpPreamble {
    my $self       = shift;
    my $preamblefn = shift;
    open( my $pfh, ">", $preamblefn ) or die $!;
    print $pfh "name \"" . $self->{NAME} . "\"", $/;
    foreach my $prompt ( @{ $self->{PROMPTS} } ) {
        print $pfh "prompt " . $prompt->{name} . " \"" . $prompt->{prompt} . "\"";
        print $pfh " \"" . $prompt->{default} . "\"" if defined $prompt->{default};
        print $pfh $/;
    }
    foreach my $filename ( keys %{ $self->{FILES} } ) {
        my $file = $self->{FILES}->{$filename};
        if ( !defined $file->{constraints} ) {
            next;
        }
        foreach ( @{ $file->{constraints} } ) {
            print $pfh "constrain file \"" . $filename . "\" to " . $_, $/;
        }
    }
    close($pfh);
}

1;
bboyle18/AppleTV-LiveTV
theos/bin/lib/NIC/Formats/NIC1.pm
Perl
apache-2.0
4,352
package Paws::Batch::ComputeResource;

  use Moose;
  has BidPercentage => (is => 'ro', isa => 'Int', request_name => 'bidPercentage', traits => ['NameInRequest']);
  has DesiredvCpus => (is => 'ro', isa => 'Int', request_name => 'desiredvCpus', traits => ['NameInRequest']);
  has Ec2KeyPair => (is => 'ro', isa => 'Str', request_name => 'ec2KeyPair', traits => ['NameInRequest']);
  has ImageId => (is => 'ro', isa => 'Str', request_name => 'imageId', traits => ['NameInRequest']);
  has InstanceRole => (is => 'ro', isa => 'Str', request_name => 'instanceRole', traits => ['NameInRequest'], required => 1);
  has InstanceTypes => (is => 'ro', isa => 'ArrayRef[Str|Undef]', request_name => 'instanceTypes', traits => ['NameInRequest'], required => 1);
  has MaxvCpus => (is => 'ro', isa => 'Int', request_name => 'maxvCpus', traits => ['NameInRequest'], required => 1);
  has MinvCpus => (is => 'ro', isa => 'Int', request_name => 'minvCpus', traits => ['NameInRequest'], required => 1);
  has SecurityGroupIds => (is => 'ro', isa => 'ArrayRef[Str|Undef]', request_name => 'securityGroupIds', traits => ['NameInRequest'], required => 1);
  has SpotIamFleetRole => (is => 'ro', isa => 'Str', request_name => 'spotIamFleetRole', traits => ['NameInRequest']);
  has Subnets => (is => 'ro', isa => 'ArrayRef[Str|Undef]', request_name => 'subnets', traits => ['NameInRequest'], required => 1);
  has Tags => (is => 'ro', isa => 'Paws::Batch::TagsMap', request_name => 'tags', traits => ['NameInRequest']);
  has Type => (is => 'ro', isa => 'Str', request_name => 'type', traits => ['NameInRequest'], required => 1);

1;

### main pod documentation begin ###

=head1 NAME

Paws::Batch::ComputeResource

=head1 USAGE

This class represents one of two things:

=head3 Arguments in a call to a service

Use the attributes of this class as arguments to methods. You shouldn't
make instances of this class. Each attribute should be used as a named
argument in the calls that expect this type of object.

As an example, if Att1 is expected to be a Paws::Batch::ComputeResource
object:

  $service_obj->Method(Att1 => { BidPercentage => $value, ..., Type => $value });

=head3 Results returned from an API call

Use accessors for each attribute. If Att1 is expected to be an
Paws::Batch::ComputeResource object:

  $result = $service_obj->Method(...);
  $result->Att1->BidPercentage

=head1 DESCRIPTION

An object representing an AWS Batch compute resource.

=head1 ATTRIBUTES

=head2 BidPercentage => Int

The minimum percentage that a Spot Instance price must be when compared
with the On-Demand price for that instance type before instances are
launched. For example, if your bid percentage is 20%, then the Spot
price must be below 20% of the current On-Demand price for that EC2
instance.

=head2 DesiredvCpus => Int

The desired number of EC2 vCPUS in the compute environment.

=head2 Ec2KeyPair => Str

The EC2 key pair that is used for instances launched in the compute
environment.

=head2 ImageId => Str

The Amazon Machine Image (AMI) ID used for instances launched in the
compute environment.

=head2 B<REQUIRED> InstanceRole => Str

The Amazon ECS instance profile applied to Amazon EC2 instances in a
compute environment. You can specify the short name or full Amazon
Resource Name (ARN) of an instance profile. For example,
C<ecsInstanceRole> or
C<arn:aws:iam::E<lt>aws_account_idE<gt>:instance-profile/ecsInstanceRole>.
For more information, see Amazon ECS Instance Role in the I<AWS Batch
User Guide>.

=head2 B<REQUIRED> InstanceTypes => ArrayRef[Str|Undef]

The instances types that may launched.

=head2 B<REQUIRED> MaxvCpus => Int

The maximum number of EC2 vCPUs that an environment can reach.

=head2 B<REQUIRED> MinvCpus => Int

The minimum number of EC2 vCPUs that an environment should maintain.

=head2 B<REQUIRED> SecurityGroupIds => ArrayRef[Str|Undef]

The EC2 security group that is associated with instances launched in
the compute environment.

=head2 SpotIamFleetRole => Str

The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role
applied to a C<SPOT> compute environment.

=head2 B<REQUIRED> Subnets => ArrayRef[Str|Undef]

The VPC subnets into which the compute resources are launched.

=head2 Tags => L<Paws::Batch::TagsMap>

Key-value pair tags to be applied to resources that are launched in the
compute environment.

=head2 B<REQUIRED> Type => Str

The type of compute environment.

=head1 SEE ALSO

This class forms part of L<Paws>, describing an object used in L<Paws::Batch>

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
ioanrogers/aws-sdk-perl
auto-lib/Paws/Batch/ComputeResource.pm
Perl
apache-2.0
4,739
x1(W,Z) :- W is 1, Z is 2.
%FAIL x1(X,X)
%?- x1(X,Y)
% X=1
% Y=2

x2(W,Z) :- W is 1, Z == 1.
%?- x2(X,X)
% X=1
%FAIL x2(X,Y)

x3(X, Y) :- X = Y, x1(X,Y).
%FAIL x3(X,X)
%FAIL x3(X,Y)

x4(X, Y) :- X = Y, x2(X,Y).
%?- x4(X,Y)
% X=1
% Y=1
%?- x4(X,X)
% X=1
%?- X=Y, x2(X,Y)
% X=1
% Y=1
%?- Y=X, x2(X,Y)
% X=1
% Y=1

q(1).
q(2).
q(3).

x5(X, Y) :- X = Y, q(X).
%?- x5(X,Y)
% X=1
% Y=1
% X=2
% Y=2
% X=3
% Y=3
%?- x5(X,X)
% X=1
% X=2
% X=3

x6(X, Y) :- X = Y, q(Y).
%?- x6(X,Y)
% X=1
% Y=1
% X=2
% Y=2
% X=3
% Y=3
%?- x6(X,X)
% X=1
% X=2
% X=3
s-webber/projog
src/test/prolog/miscellaneous/12.pl
Perl
apache-2.0
546
#!/usr/bin/perl

# Input parameters from loxberryupdate.pl:
# release: the final version update is going to (not the version of the script)
# logfilename: The filename of LoxBerry::Log where the script can append
# updatedir: The directory where the update resides
# cron: If 1, the update was triggered automatically by cron

use LoxBerry::Update;
use LoxBerry::System;

init();

# Comment out the Raspbian Buster mirror entries that have gone offline so
# apt no longer tries to reach them.
LOGINF "Repairing apt sources - one of the rasbian mirrors are not active anymore...";
system("sed -i -e '/^deb http:\\/\\/raspbian.raspberrypi.org\\/raspbian\\/ buster/s/^/#/g' /etc/apt/sources.list");
system("sed -i -e '/^deb http:\\/\\/ftp.gwdg.de\\/pub\\/linux\\/debian\\/raspbian\\/raspbian\\/ buster/s/^/#/g' /etc/apt/sources.list");
system("sed -i -e '/^deb http:\\/\\/ftp.agdsn.de\\/pub\\/mirrors\\/raspbian\\/raspbian\\ buster/s/^/#/g' /etc/apt/sources.list");
system("sed -i -e '/^deb http:\\/\\/ftp.halifax.rwth-aachen.de\\/raspbian\\/raspbian\\/ buster/s/^/#/g' /etc/apt/sources.list");

# Install the LoxBerry apt source list, link it into sources.list.d and
# drop the stale pilight list, then refresh the package index.
mkdir("$lbhomedir/system/apt");
&copy_to_loxberry('/system/apt/loxberry.list');
system("chown root:root $lbhomedir/system/apt/loxberry.list");
system("ln -s $lbhomedir/system/apt/loxberry.list /etc/apt/sources.list.d/loxberry.list");
system ("rm /etc/apt/sources.list.d/pilight.list");
apt_update("update");

LOGOK "Update script $0 finished." if ($errors == 0);
LOGERR "Update script $0 finished with errors." if ($errors != 0);

# End of script
exit($errors);
mschlenstedt/Loxberry
sbin/loxberryupdate/update_v2.2.1.2.pl
Perl
apache-2.0
1,449
package scripts::syllabifier;

# Rule-based syllabifier for an Indic phone set.  Phonification() converts a
# letter string into space-separated phones via greedy longest match (up to
# four characters) against the phone inventory; Syllabification() groups the
# phones into vowel-terminated syllables.  The remaining subs are feature
# helpers (onset/coda/vowel lookup, positions, counts, context windows).

use strict;
use warnings;

require Exporter;

our @ISA    = qw(Exporter);
our @EXPORT = qw(Phonification getSyllables getCurSyl isOnsetCodaVowel
    getPosinSyl isStart isMiddle isFinal getNphones syllableCount phoneCount
    get2Syllabs get2LastSyllabs getOnset getCoda getNSyllabs IsVowel
    getPhonePosition);

# Phone inventory (multi-character phones are matched longest-first).
my $Phoneset   = "tra t:ra a a: ax h: aa i ii u uu rx rx~ e ei ai o oo au k kh g gh ng~ ch chh j jh nj~ t th d dh n t: t:h d: d:h nd~ p ph b bh m y r l l: v sh shh s h qs~ r: n: n~ e~ o~ k~ kh~ g~ j~ d~ dh~ ph~ y~ lx lx~ nx l~ om:";
my $Vowels     = "tra t:ra a ax h: aa i ii u uu rx rx~ e ei ai o oo au e~ o~ lx lx~ om:";
my $Consonants = "au k kh g gh ng~ ch chh j jh nj~ t th d dh n t: t:h d: d:h nd~ p ph b bh m y r l l: v sh shh s h qs~ r: n~ e~ k~ kh~ g~ j~ d~ dh~ ph~ y~ nx l~";

our @ps  = split( /\s+/, $Phoneset );
our @vow = split( /\s+/, $Vowels );
our @con = split( /\s+/, $Consonants );

# Precomputed membership sets (the originals rebuilt these on every call).
my %IS_PHONE = map { $_ => 1 } @ps;
my %IS_VOWEL = map { $_ => 1 } @vow;

# Letter string -> syllable string (phones within a syllable concatenated,
# syllables separated by single spaces).
sub getSyllables {
    my $in     = shift;
    my $phones = &Phonification( $in, \@ps );
    return &Syllabification( \@vow, \@con, $phones );
}

# Split a letter string into space-separated phones by greedy longest match
# (4, 3, 2, then 1 character).  Returns the input unchanged when a character
# cannot be matched; otherwise the result carries a trailing space.
sub Phonification {
    my ($in) = @_;    # an optional \@ps second argument is accepted, unused
    my @chars  = split( //, $in );
    my $phones = "";
    for ( my $i = 0 ; $i < @chars ; $i++ ) {
        my $cur  = $chars[$i];
        my $nex1 = $chars[ $i + 1 ] // "";    # "//" keeps the original
        my $nex2 = $chars[ $i + 2 ] // "";    # undef-concat semantics
        my $nex3 = $chars[ $i + 3 ] // "";    # without the warnings
        my $four = $cur . $nex1 . $nex2 . $nex3;
        my $tri  = $cur . $nex1 . $nex2;
        my $di   = $cur . $nex1;
        if ( exists $IS_PHONE{$four} )   { $phones .= $four . " "; $i += 3; }
        elsif ( exists $IS_PHONE{$tri} ) { $phones .= $tri . " ";  $i += 2; }
        elsif ( exists $IS_PHONE{$di} )  { $phones .= $di . " ";   $i += 1; }
        elsif ( exists $IS_PHONE{$cur} ) { $phones .= $cur . " "; }
        else                             { return $in; }    # unknown character
    }
    return $phones;
}

# Group a phone string into syllables: a boundary is placed after every
# vowel; "n:" attaches to the preceding syllable.  Returns the phone string
# unchanged when an unknown phone is seen; a trailing vowel-less cluster is
# merged into the last syllable.
sub Syllabification {
    my ( $vow_ref, $con_ref, $phone ) = @_;
    my %is_vowel = map { $_ => 1 } @{$vow_ref};
    my @phones   = split( / /, $phone );
    my $syll     = "";
    for ( my $p = 0 ; $p < @phones ; $p++ ) {
        if ( exists $is_vowel{ $phones[$p] } ) {
            $syll .= $phones[$p] . " ";
        }
        elsif ( $phones[$p] eq "n:" ) {
            $syll =~ s/ $//g;    # pull "n:" into the previous syllable
            $syll .= $phones[$p] . " ";
        }
        elsif ( $p == @phones - 1 ) {
            $syll =~ s/ $//g;
            $syll .= $phones[$p];
        }
        elsif ( exists $IS_PHONE{ $phones[$p] } ) {
            $syll .= $phones[$p];
        }
        else {
            return $phone;
        }
    }
    my @words = split( /\s+/, $syll );
    return $syll if ( @words == 0 );
    # Merge a final chunk with no vowel into the penultimate syllable.
    if ( &getVowelPosition( $words[ @words - 1 ] ) == -1 && @words >= 2 ) {
        $words[ @words - 2 ] = $words[ @words - 2 ] . $words[ @words - 1 ];
        pop(@words);
    }
    return join( " ", @words );
}

# Syllable containing the phone at index $pos of the phone sequence.
# BUG FIX: the original returned $sylls[@sylls] (one past the end), i.e.
# always undef; return the last syllable of the prefix instead.
sub getCurSyl {
    my $phones = shift;
    my $pos    = shift;    # index in the phones
    my $sstr   = &getsubstr( $phones, $pos );
    my @sylls  = split( /\s+/, &getSyllables($sstr) );
    return $sylls[-1];
}

# First vowel phone of $syl, or -1 when the syllable has no vowel.
sub getVowelFromSyllable {
    my $syl    = shift;
    my $vpos   = &getVowelPosition($syl);
    my @phones = split( /\s+/, &Phonification($syl) );
    return $phones[$vpos] if ( $vpos != -1 );
    return -1;
}

# Classify phone $ph within syllable $syl: "onset", "vowel", "coda",
# or "0" when it does not occur.
sub isOnsetCodaVowel {
    my $ph  = shift;
    my $syl = shift;

    my $onset = &getOnset($syl);
    if ( $onset !~ /\$/ ) {
        foreach my $part ( split( /#/, $onset ) ) {
            return "onset" if ( $part eq $ph );
        }
    }

    my $vowel = &getVowel($syl);
    return "vowel" if ( $vowel !~ /\$/ && $ph eq $vowel );

    my $coda = &getCoda($syl);
    if ( $coda !~ /\$/ ) {
        foreach my $part ( split( /#/, $coda ) ) {
            return "coda" if ( $part eq $ph );
        }
    }
    return "0";
}

# First vowel phone of a word/syllable, or "$" when none exists.
# BUG FIX: the "$" guard originally ran before the argument was shifted
# and therefore tested a stale package variable.
sub getVowel {
    my $word = shift;
    return "\$" if ( $word eq "\$" );
    my @phs = split( /\s+/, &Phonification($word) );
    for ( my $i = 0 ; $i < @phs ; $i++ ) {
        return $phs[$i] if ( &IsVowel( $phs[$i] ) );
    }
    return "\$";
}

# Onset consonant cluster as "c1#c2...", or "$" when empty.
# BUG FIX: guard moved after the shift (see getVowel).
sub getOnset {
    my $word = shift;
    return "\$" if ( $word eq "\$" );
    my @phs   = split( /\s+/, &Phonification($word) );
    my $onset = "";
    for ( my $i = 0 ; $i < @phs ; $i++ ) {
        if ( &IsVowel( $phs[$i] ) ) {
            return "\$" if ( $onset eq "" );
            $onset =~ s/#$//g;
            return $onset;
        }
        $onset .= $phs[$i] . "#";
    }
    return "\$";
}

# Coda consonant cluster as "c1#c2...", or "$" when empty.
# BUG FIXES: guard moved after the shift (see getVowel), and the trailing
# "#" is now stripped from $coda (the original stripped a typo'd $code,
# so codas were returned with a dangling separator).
sub getCoda {
    my $word = shift;
    return "\$" if ( $word eq "\$" );
    my @phs        = split( /\s+/, &Phonification($word) );
    my $seen_vowel = 0;
    my $coda       = "";
    for ( my $i = 0 ; $i < @phs ; $i++ ) {
        if ( &IsVowel( $phs[$i] ) ) {
            $seen_vowel = 1;
            next;
        }
        $coda .= $phs[$i] . "#" if ($seen_vowel);
    }
    return "\$" if ( $coda eq "" );
    $coda =~ s/#$//g;
    return $coda;
}

# Index of the first vowel phone in $syl, or -1.
sub getVowelPosition {
    my $syl = shift;
    my @phs = split( /\s+/, &Phonification($syl) );
    for ( my $i = 0 ; $i < @phs ; $i++ ) {
        return $i if ( &IsVowel( $phs[$i] ) );
    }
    return -1;
}

# 0-based index of phone $ph within syllable $syl (0 when absent / no syl).
sub getPosinSyl {
    my $ph  = shift;
    my $syl = shift;
    return 0 if ( $syl eq 0 );
    my @phs = split( /\s+/, &Phonification( $syl, \@ps ) );
    for ( my $i = 0 ; $i < @phs ; $i++ ) {
        return $i if ( $ph eq $phs[$i] );
    }
    return 0;
}

# Coarse position of $ph in $syl: 1 = first, 3 = last, 2 = medial, 0 = absent.
sub getPhonePosition {
    my $ph  = shift;
    my $syl = shift;
    # \Q...\E quotes the phone so "t:" / "rx~" cannot act as regex syntax.
    return 0 if ( $syl eq "0" || $syl eq "" || $syl !~ /\Q$ph\E/ );
    my @phones = split( /\s+/, &Phonification($syl) );
    return 1 if ( $ph eq $phones[0] );
    return 3 if ( $ph eq $phones[ @phones - 1 ] );
    return 2 if ( @phones > 2 );
    return 0;
}

# 1 when $ph is in the vowel inventory, else 0.
sub IsVowel {
    my $ph = shift;
    return exists $IS_VOWEL{$ph} ? 1 : 0;
}

# 1 when $ph is neither the first nor the last phone of $syl.
sub isMiddle {
    my $ph  = shift;
    my $syl = shift;
    my @phs = split( /\s+/, &Phonification($syl) );
    return 1 if ( @phs > 0 && $ph ne $phs[0] && $ph ne $phs[ @phs - 1 ] );
    return 0;
}

# 1 when $ph is the first phone of $syl.
sub isStart {
    my $ph  = shift;
    my $syl = shift;
    my @phs = split( /\s+/, &Phonification($syl) );
    return 1 if ( @phs > 0 && $ph eq $phs[0] );
    return 0;
}

# 1 when $ph is the last phone of $syl.
sub isFinal {
    my $ph  = shift;
    my $syl = shift;
    my @phs = split( /\s+/, &Phonification($syl) );
    return 1 if ( @phs > 0 && $ph eq $phs[ @phs - 1 ] );
    return 0;
}

# Concatenation of phones[0..$pos] of a space-separated phone string.
sub getsubstr {
    my $phones = shift;
    my $pos    = shift;
    my @words  = split( /\s+/, $phones );
    my $sstr   = "";
    for ( my $i = 0 ; $i <= $pos ; $i++ ) {
        $sstr .= $words[$i] // "";    # tolerate $pos past the end
    }
    return $sstr;
}

# Number of phones in $syl.
sub getNphones {
    my $syl    = shift;
    my @phones = split( /\s+/, &Phonification($syl) );
    return scalar(@phones);
}

# Last syllable of a syllable string with a final schwa ("a") removed,
# unless it is followed by further vowel material.
sub finalSchwa {
    my $word  = shift;
    my @words = split( / /, $word );
    my $lsyl  = pop(@words);
    return $lsyl if ( $lsyl =~ /a[a-z:~]+$/ );
    my $phones = &Phonification($lsyl);
    if ( $phones =~ / a / or $phones =~ / a$/ ) {
        $lsyl =~ s/a//g;
        return $lsyl;
    }
    return $lsyl;
}

# Number of syllables (count in scalar context; list of them in list
# context, preserved for compatibility).
sub syllablecount {
    my $sylls = shift;
    my @words = split( " ", $sylls );
    return @words;
}

# Number of phones in $word.
sub phoneCount {
    my $word = shift;
    my @phs = split( /\s+/, &Phonification($word) );
    return scalar(@phs);
}

# First $n syllables joined by spaces, right-padded with "$" when fewer exist.
sub getNSyllabs {
    my $sylls = shift;
    my $n     = shift;
    my @words = split( " ", $sylls );
    my $ret   = "";
    if ( @words >= $n ) {
        chomp( $words[ $n - 1 ] );
        for ( my $i = 0 ; $i < $n ; $i++ ) {
            $ret .= $words[$i] . " ";
        }
    }
    else {
        for ( my $i = 0 ; $i < @words ; $i++ ) {
            $ret .= $words[$i] . " ";
        }
        for ( my $i = @words ; $i < $n ; $i++ ) {
            $ret .= "\$" . " ";
        }
    }
    $ret =~ s/\s+/ /g;
    $ret =~ s/\s+$//g;
    return $ret;
}

# First two syllables, "$"-padded.
sub get2Syllabs {
    my $sylls = shift;
    my @words = split( " ", $sylls );
    if ( @words >= 2 ) {
        chomp( $words[1] );
        return $words[0] . " " . $words[1];
    }
    elsif ( @words == 1 ) {
        chomp( $words[0] );
        $words[0] =~ s/,$//g;
        return $words[0] . " \$";
    }
    return "\$ \$";
}

# Last two syllables, "$"-padded.
sub get2LastSyllabs {
    my $sylls = shift;
    chomp($sylls);
    my @words = split( " ", $sylls );
    if ( @words >= 2 ) {
        $words[ @words - 2 ] =~ s/,$//g;
        return $words[ @words - 2 ] . " " . $words[ @words - 1 ];
    }
    elsif ( @words == 1 ) {
        return $words[ @words - 1 ] . " \$";
    }
    return "\$ \$";
}

# First syllable, or "$" when empty.
# BUG FIX: the original chomp()ed $words[1] (possibly undef); split(" ")
# already strips trailing whitespace, so chomp the element we return.
sub get1Syllabs {
    my $sylls = shift;
    my @words = split( " ", $sylls );
    if ( @words >= 1 ) {
        chomp( $words[0] );
        $words[0] =~ s/,$//g;
        return $words[0];
    }
    return "\$";
}

# Duplicate of syllablecount(), retained for interface compatibility.
sub syllableCount {
    my $sylls = shift;
    my @words = split( " ", $sylls );
    return @words;
}

# Reverse the token order of a space-separated string.
sub revWord {
    my $word  = shift;
    my @words = split( " ", $word );
    return join( " ", reverse @words );
}

1;    # the original file lacked a true return value, breaking require()
saikrishnarallabandi/Festival-Speech-Synthesis-System
festvox/src/interslice/scripts/syllabifier.pm
Perl
apache-2.0
10,211
package Google::Ads::AdWords::v201402::CriterionError;

# NOTE(review): this file is machine-generated by SOAP::WSDL (see POD below).
# Prefer regenerating from the WSDL over hand-editing.

use strict;
use warnings;

__PACKAGE__->_set_element_form_qualified(1);

# XML namespace of the complexType this class maps.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201402' };

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201402::ApiError);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out attribute storage (Class::Std::Fast::Storable style);
# read accessors are generated via :get<...>.
my %fieldPath_of :ATTR(:get<fieldPath>);
my %trigger_of :ATTR(:get<trigger>);
my %errorString_of :ATTR(:get<errorString>);
my %ApiError__Type_of :ATTR(:get<ApiError__Type>);
my %reason_of :ATTR(:get<reason>);

# _factory args: [field order], {storage}, {field XSD/typelib types}, {XML names}.
__PACKAGE__->_factory(
    [ qw(        fieldPath
        trigger
        errorString
        ApiError__Type
        reason

    ) ],
    {
        'fieldPath' => \%fieldPath_of,
        'trigger' => \%trigger_of,
        'errorString' => \%errorString_of,
        'ApiError__Type' => \%ApiError__Type_of,
        'reason' => \%reason_of,
    },
    {
        'fieldPath' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'trigger' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'errorString' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'ApiError__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'reason' => 'Google::Ads::AdWords::v201402::CriterionError::Reason',
    },
    {
        'fieldPath' => 'fieldPath',
        'trigger' => 'trigger',
        'errorString' => 'errorString',
        'ApiError__Type' => 'ApiError.Type',
        'reason' => 'reason',
    }
);

} # end BLOCK

1;

=pod

=head1 NAME

Google::Ads::AdWords::v201402::CriterionError

=head1 DESCRIPTION

Perl data type class for the XML Schema defined complexType
CriterionError from the namespace https://adwords.google.com/api/adwords/cm/v201402.

Error class used for reporting criteria related errors.

=head2 PROPERTIES

The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:

=over

=item * reason

=back

=head1 METHODS

=head2 new

Constructor. The following data structure may be passed to new():

=head1 AUTHOR

Generated by SOAP::WSDL

=cut
gitpan/GOOGLE-ADWORDS-PERL-CLIENT
lib/Google/Ads/AdWords/v201402/CriterionError.pm
Perl
apache-2.0
2,255
=head1 LICENSE

Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2017] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=cut

=head1 NAME

Bio::EnsEMBL::Compara::Production::EPOanchors::ExonerateAnchors

=head1 SYNOPSIS

$exonate_anchors->fetch_input();
$exonate_anchors->run();
$exonate_anchors->write_output(); writes to database

=head1 DESCRIPTION

Given a database with anchor sequences and a target genome. This modules exonerates
the anchors against the target genome. The required information (anchor batch size,
target genome file, exonerate parameters are provided by the analysis, analysis_job
and analysis_data tables

=head1 AUTHOR

Stephen Fitzgerald

=head1 CONTACT

Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.

Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.

=head1 APPENDIX

The rest of the documentation details each of the object methods.
Internal methods are usually preceded with a _

=cut

package Bio::EnsEMBL::Compara::Production::EPOanchors::ExonerateAnchors;

use strict;
use warnings;
use Bio::EnsEMBL::Utils::Exception qw(throw);

use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');

sub param_defaults {
	return {
		'mapping_exe' => "/usr/local/ensembl/bin/exonerate-1.0.0",
	};
}

# Gather exonerate options, target genome file and the anchor batch, and
# write the anchors to a FASTA query file in the worker temp directory.
sub fetch_input {
	my ($self) = @_;

	# NOTE(review): analysis->parameters is passed through string eval — it is
	# trusted, DB-stored configuration; do not feed it untrusted input.
	$self->param('exonerate_options', $self->input_job->analysis->parameters ?
		eval($self->input_job->analysis->parameters) :
		{ bestn=>11, gappedextension=>'no', softmasktarget=>'no', percent=>75,
		  showalignment=>'no', model=>'affine:local' } );
	$self->compara_dba()->dbc->disconnect_if_idle();
	my $anchor_seq_adaptor = $self->compara_dba->get_AnchorSeqAdaptor();
	my $analysis_data_adaptor = $self->db->get_AnalysisDataAdaptor();
	my $target_genome_files = eval $analysis_data_adaptor->fetch_by_dbID($self->param('analysis_data_id'));
	$self->param('target_file', $target_genome_files->{target_genomes}->{ $self->param('target_genome') } );
	my $anchors = $anchor_seq_adaptor->get_anchor_sequences($self->param('ancs_from_to'), $self->param('anchor_sequences_mlssid'));
	my $query_file = $self->worker_temp_directory . "anchors." . join ("-", @{$self->param('ancs_from_to')});
	# BUGFIX: original used «open F, ">$query_file" || throw(...)» — the high
	# precedence of || bound it to the filename string, so failures were never
	# reported.  Use a lexical handle, three-arg open and low-precedence "or".
	open( my $query_fh, '>', $query_file ) or throw("Couldn't open $query_file");
	foreach my $anchor_seq( @{ $anchors } ) {
		# FASTA header carries the first six anchor fields, colon-separated;
		# the last element is the sequence itself.
		print $query_fh ">", join(":", @{$anchor_seq}[0..5]), "\n", $anchor_seq->[-1], "\n";
	}
	# BUGFIX: the handle was never closed, so buffered output could be missing
	# when exonerate reads the file in run().
	close($query_fh) or throw("Couldn't close $query_file");
	$self->param('query_file', $query_file);
}

# Build the exonerate command line from the configured options and start it,
# keeping the pipe filehandle in the 'exo_file' param for write_output().
sub run {
	my ($self) = @_;
	my $program = $self->param('mapping_exe');
	my $query_file = $self->param('query_file');
	my $target_file = $self->param('target_file');
	my $option_st = '';    # initialised to avoid an uninitialized-value warning on first concat
	foreach my $option (sort keys %{ $self->param('exonerate_options') }) {
		$option_st .= " --" . $option . " " . $self->param('exonerate_options')->{$option};
	}
	my $command = join(" ", $program, $option_st, $query_file, $target_file);
	print $command, "\n";
	my $exo_fh;
	# NOTE(review): command is interpolated into a shell pipe; inputs come from
	# trusted pipeline configuration, not from end users.
	open( $exo_fh, "$command |" ) or throw("Error opening exonerate command: $? $!"); #run exonerate
	$self->param('exo_file', $exo_fh);
}

# Parse the exonerate "vulgar" lines, merge overlapping hits, and store the
# resulting anchor_align records.
sub write_output {
	my ($self) = @_;
	my $anchor_align_adaptor = $self->compara_dba->get_AnchorAlignAdaptor();
	my $exo_fh = $self->param('exo_file');
	my ($hits, $target2dnafrag);
	while(my $mapping = <$exo_fh>){
		next unless $mapping =~/^vulgar:/;
		my($anchor_info, $targ_strand, $targ_info, $targ_from, $targ_to, $score) = (split(" ",$mapping))[1,8,5,6,7,9];
		($targ_from, $targ_to) = ($targ_to, $targ_from) if ($targ_from > $targ_to); #exonerate can switch these around
		$targ_strand = $targ_strand eq "+" ? "1" : "-1";
		$targ_from++; #modify the exonerate start position (exonerate is 0-based, half-open)
		my($anchor_name, $anc_org) = split(":", $anchor_info);
		push(@{$hits->{$anchor_name}{$targ_info}}, [ $targ_from, $targ_to, $targ_strand, $score, $anc_org ]);
		$target2dnafrag->{$targ_info}++;
	}
	# Resolve each target sequence name to a dnafrag_id.
	foreach my $target_info (sort keys %{$target2dnafrag}) {
		my($coord_sys, $dnafrag_name) = (split(":", $target_info))[0,2];
		$target2dnafrag->{$target_info} = $anchor_align_adaptor->fetch_dnafrag_id(
			$coord_sys, $dnafrag_name, $self->param('target_genome') );
		die "no dnafrag_id found\n" unless($target2dnafrag->{$target_info});
	}
	my $hit_numbers = $self->merge_overlapping_target_regions($hits);
	my $records = $self->process_exonerate_hits($hits, $target2dnafrag, $hit_numbers);
	$anchor_align_adaptor->store_exonerate_hits($records);
}

# Flatten the merged hit structure into colon-joined record strings for
# store_exonerate_hits().
sub process_exonerate_hits {
	my $self = shift;
	my($hits, $target2dnafrag, $hit_numbers) = @_;
	my($records_to_load);
	foreach my $anchor_id (sort keys %{$hits}) {
		foreach my $targ_dnafrag_info (sort keys %{$hits->{$anchor_id}}) {
			my $dnafrag_id = $target2dnafrag->{$targ_dnafrag_info};
			foreach my $hit_position (@{$hits->{$anchor_id}->{$targ_dnafrag_info}}) {
				my $index = join(":", $anchor_id, $targ_dnafrag_info, $hit_position->[0]);
				my $number_of_org_hits = keys %{$hit_numbers->{$index}->{anc_orgs}};
				my $number_of_seq_hits = $hit_numbers->{$index}->{seq_nums};
				push(@{$records_to_load}, join(":", $self->param('exonerate_mlssid'), $anchor_id,
					$dnafrag_id, @{$hit_position}[0..3], $number_of_org_hits, $number_of_seq_hits));
			}
		}
	}
	return $records_to_load;
}

sub merge_overlapping_target_regions { #merge overlapping target regions hit by different seqs in the same anchor
	my $self = shift;
	my $mapped_anchors = shift;
	my $HIT_NUMS;
	foreach my $anchor(sort {$a <=> $b} keys %{$mapped_anchors}) {
		foreach my $targ_info(sort keys %{$mapped_anchors->{$anchor}}) {
			# Sort hits by start position, then collapse neighbours that overlap.
			@{$mapped_anchors->{$anchor}{$targ_info}} =
				sort {$a->[0] <=> $b->[0]} @{$mapped_anchors->{$anchor}{$targ_info}};
			for(my $i=0; $i<@{$mapped_anchors->{$anchor}{$targ_info}}; $i++) {
				my $anc_look_up_name = join(":", $anchor, $targ_info, $mapped_anchors->{$anchor}{$targ_info}->[$i]->[0]);
				if($i < @{$mapped_anchors->{$anchor}{$targ_info}} - 1) {
					if($mapped_anchors->{$anchor}{$targ_info}->[$i]->[1] >= $mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[0]) {
						# Overlapping hits on opposite strands: flag as possibly
						# palindromic and neutralise the strand.
						unless($mapped_anchors->{$anchor}{$targ_info}->[$i]->[2] eq $mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[2]) {
							print STDERR "possible palindromic sequences: $anchor ",
								"$mapped_anchors->{$anchor}{$targ_info}->[$i]->[2] ",
								$mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[2], "\n";
							$mapped_anchors->{$anchor}{$targ_info}->[$i]->[2] = 0;
						}
						if($mapped_anchors->{$anchor}{$targ_info}->[$i]->[1] < $mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[1]) {
							$mapped_anchors->{$anchor}{$targ_info}->[$i]->[1] = $mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[1];
						}
						$mapped_anchors->{$anchor}{$targ_info}->[$i]->[3] += $mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[3];
						$mapped_anchors->{$anchor}{$targ_info}->[$i]->[3] /= 2; # simplistic scoring
						#count the organisms from which the anchor seqs were derived
						$HIT_NUMS->{$anc_look_up_name}{anc_orgs}{$mapped_anchors->{$anchor}{$targ_info}->[$i+1]->[4]}++;
						#count number of anchor seqs that map
						$HIT_NUMS->{$anc_look_up_name}{seq_nums}++;
						splice(@{$mapped_anchors->{$anchor}{$targ_info}}, $i+1, 1);
						$i--; # re-examine the merged hit against its new neighbour
						next;
					}
				}
				$HIT_NUMS->{$anc_look_up_name}{anc_orgs}{$mapped_anchors->{$anchor}{$targ_info}->[$i]->[4]}++;
				$HIT_NUMS->{$anc_look_up_name}{seq_nums}++;
			}
		}
	}
	return $HIT_NUMS;
}

1;
danstaines/ensembl-compara
modules/Bio/EnsEMBL/Compara/Production/EPOanchors/ExonerateAnchors.pm
Perl
apache-2.0
8,548
#!/usr/bin/perl
#
# Synchronise card-chip records generated by a perun service into an Oracle
# table: insert new cards, update changed ones, soft-remove cards no longer
# present and hard-delete already-deactivated leftovers.
use strict;
use warnings;
use DBI;
use Getopt::Long qw(:config no_ignore_case);
use Data::Dumper;

my $username;
my $password;
my $dbname;
my $pathToServiceFile;
my $serviceName;
my $tableName;

# Field keys of the per-chip hash built from the service file.
our $FIRSTNAME = 'FIRSTNAME';
our $LASTNAME = 'LASTNAME';
our $GOLD = 'GOLD';
our $ACC_GROUPS = 'ACC_GROUPS';
our $LOGIN = 'LOGIN';
our $TYPE = 'TYPE';

GetOptions ("dbname|d=s" => \$dbname,
	"pathToServiceFile|p=s" => \$pathToServiceFile,
	"serviceName|s=s" => \$serviceName);

if(!defined $dbname) {
	print "Missing DBNAME to process service.\n";
	exit 10;
}

if(!defined $pathToServiceFile) {
	print "Missing path to file with generated data to process service.\n";
	exit 11;
}

if(!defined $serviceName) {
	print "Missing info about service name to process service.\n";
	exit 12;
}

my $filename = "$pathToServiceFile/$serviceName";
if(! -f $filename) {
	print "Missing service file with data.\n";
	exit 13;
}

# Read DB credentials and target table name from the per-instance config file.
my $configPath = "/etc/perun/services/$serviceName/$dbname";
open my $config_fh, '<', $configPath or die "Could not open config file $configPath: $!";
while(my $line = <$config_fh>) {
	if($line =~ /^username: .*/) {
		$username = ($line =~ m/^username: (.*)$/)[0];
	} elsif($line =~ /^password: .*/) {
		$password = ($line =~ m/^password: (.*)$/)[0];
	} elsif($line =~ /^tablename: .*/) {
		$tableName = ($line =~ m/^tablename: (.*)$/)[0];
	}
}
close $config_fh;

if(!defined($password) || !defined($username) || !defined($tableName)) {
	print "Can't get config data from config file.\n";
	exit 14;
}

#Main Structure: chip number -> per-card attributes
my $dataByChip = {};

# Parse the tab-separated service file.  Columns:
# 0 chip, 1 first name, 2 last name, 3 login, 4 identity type,
# 5/6 flags combined into the GOLD card-type code, 7 access groups.
open my $data_fh, '<', $filename or die "Could not open $filename: $!";
while(my $line = <$data_fh>) {
	chomp( $line );
	my @parts = split /\t/, $line;
	$dataByChip->{$parts[0]}->{$FIRSTNAME} = $parts[1];
	$dataByChip->{$parts[0]}->{$LASTNAME} = $parts[2];
	$dataByChip->{$parts[0]}->{$LOGIN} = $parts[3];
	$dataByChip->{$parts[0]}->{$TYPE} = $parts[4];
	if($parts[5] && $parts[6]) {
		$dataByChip->{$parts[0]}->{$GOLD} = 3;
	} elsif ($parts[6]) {
		$dataByChip->{$parts[0]}->{$GOLD} = 4;
	} elsif ($parts[5]) {
		$dataByChip->{$parts[0]}->{$GOLD} = 2;
	} else {
		$dataByChip->{$parts[0]}->{$GOLD} = 1;
	}
	$dataByChip->{$parts[0]}->{$ACC_GROUPS} = $parts[7];
}
close $data_fh;

my $dbh = DBI->connect("dbi:Oracle:$dbname",$username, $password,
	{RaiseError=>1,AutoCommit=>0,LongReadLen=>65536, ora_charset => 'AL32UTF8'})
	or die "Connect to database $dbname Error!\n";

###TEMP DELETE ALL (clear DB)
#my $deleteAllChips = $dbh->prepare(qq{DELETE from $tableName});
#$deleteAllChips->execute();
#commit $dbh;
#$dbh->disconnect();
#exit 0;
### TEMP

my $DEBUG=0;

#statistic and information variables
my $foundAndSkipped = 0;
my $foundAndUpdated = 0;
my $inserted = 0;
my $removed = 0;
my $deleted = 0;

# Statement handles prepared once and re-executed inside the loop
# (the original re-prepared them on every iteration).
my $chipExists = $dbh->prepare(qq{select 1 from $tableName where CardNumber=? and UIN=?});
my $recordAreEquals = $dbh->prepare(qq{SELECT 1 from $tableName where CardNumber=? and UIN=? and CardType=? and AccessGroups=? and FirstName=? and SecondName=? and IdentityType=?});
my $updateChip = $dbh->prepare(qq{UPDATE $tableName SET PROCESSED=1, CardType=?, AccessGroups=?, FirstName=?, SecondName=?, IdentityType=? WHERE CardNumber=? AND UIN=?});
my $insertChip = $dbh->prepare(qq{INSERT INTO $tableName (PROCESSED, UIN, CardNumber, IdentityType, CardType, AccessGroups, FirstName, SecondName) VALUES (1,?,?,?,?,?,?,?)});

#update and insert new or updated chips
my @allChipsArray = ();
# BUGFIX: was «sort keys $dataByChip» — "keys" on a hash reference was an
# experimental feature removed in Perl 5.24; dereference explicitly.
foreach my $chipNumber (sort keys %{$dataByChip}) {
	my $firstName = $dataByChip->{$chipNumber}->{$FIRSTNAME};
	my $lastName = $dataByChip->{$chipNumber}->{$LASTNAME};
	my $UIN = $dataByChip->{$chipNumber}->{$LOGIN};
	my $cardType = $dataByChip->{$chipNumber}->{$GOLD};
	my $accGroups = $dataByChip->{$chipNumber}->{$ACC_GROUPS};
	my $identityType = $dataByChip->{$chipNumber}->{$TYPE};
	push @allChipsArray, $chipNumber . ":" . $UIN;

	#Card Number with UIN is primary key there
	$chipExists->execute($chipNumber, $UIN);
	if($chipExists->fetch) {
		if($DEBUG == 1) { print "FIND: $chipNumber\n"; }
		#we need to know if these two records are without changes, if yes, skip them
		$recordAreEquals->execute($chipNumber, $UIN, $cardType, $accGroups, $firstName, $lastName, $identityType);
		if(!$recordAreEquals->fetch) {
			$updateChip->execute($cardType, $accGroups, $firstName, $lastName, $identityType, $chipNumber, $UIN);
			if($DEBUG == 1) { print "UPDATING EXISTING RECORD: $chipNumber\n"; }
			$foundAndUpdated++;
		} else {
			if($DEBUG == 1) { print "SKIP RECORD: $chipNumber\n"; }
			$foundAndSkipped++;
		}
	} else {
		if($DEBUG == 1) { print "INSERT NEW RECORD: $chipNumber\n"; }
		$inserted++;
		#we will do insert
		$insertChip->execute($UIN, $chipNumber, $identityType, $cardType, $accGroups, $firstName, $lastName);
	}
}

# Hard-delete rows that were already soft-removed (no groups, base card type,
# not touched by a previous run) and are absent from the current data set.
my @chipsToDeleteFromDB = ();
my $chipsToDeleteFromDBQuerry = $dbh->prepare(qq{SELECT CardNumber, UIN from $tableName where AccessGroups IS NULL and CardType=1 and PROCESSED=0});
$chipsToDeleteFromDBQuerry->execute();
while(my $chtd = $chipsToDeleteFromDBQuerry->fetch) {
	push @chipsToDeleteFromDB, $$chtd[0] . ":" . $$chtd[1];
}

my %tmpDelete = map {$_ => 1} @allChipsArray;
my @chipsToDelete = grep {not $tmpDelete{$_}} @chipsToDeleteFromDB;

my $deleteChip = $dbh->prepare(qq{DELETE from $tableName where CardNumber=? and UIN=?});
foreach my $chipToDelete (@chipsToDelete) {
	if($DEBUG == 1) { print "DELETE RECORD: $chipToDelete\n"; }
	$deleted++;
	my $deletedChipNumber = (split(/:/, $chipToDelete))[0];
	my $deletedChipUID = (split(/:/, $chipToDelete))[1];
	$deleteChip->execute($deletedChipNumber, $deletedChipUID);
}

# Soft-remove every remaining row that is not in the current data set.
my @chipsToRemoveFromDB = ();
my $chipsToRemoveFromDBQuerry = $dbh->prepare(qq{SELECT CardNumber, UIN from $tableName});
$chipsToRemoveFromDBQuerry->execute();
while(my $chtr = $chipsToRemoveFromDBQuerry->fetch) {
	push @chipsToRemoveFromDB, $$chtr[0] . ":" . $$chtr[1];
}

my %tmpRemove = map {$_ => 1} @allChipsArray;
my @chipsToRemove = grep {not $tmpRemove{$_}} @chipsToRemoveFromDB;

my $removedChip = $dbh->prepare(qq{UPDATE $tableName SET PROCESSED=1, CardType=1, AccessGroups=NULL where CardNumber=? and UIN=?});
foreach my $chipToRemove (@chipsToRemove) {
	if($DEBUG == 1) { print "REMOVE RECORD: $chipToRemove\n"; }
	$removed++;
	my $removedChipNumber = (split(/:/, $chipToRemove))[0];
	my $removedChipUID = (split(/:/, $chipToRemove))[1];
	$removedChip->execute($removedChipNumber, $removedChipUID);
}

# FIX: «commit $dbh» was indirect-object syntax; call the method explicitly.
$dbh->commit();
$dbh->disconnect();

#Info about operations
print "=======================================\n";
print "Newly inserted: \t$inserted\n";
print "Found and skiped: \t$foundAndSkipped\n";
print "Found and updated:\t$foundAndUpdated\n";
print "Set to remove:  \t$removed\n";
print "Deleted:        \t$deleted\n";
print "=======================================\n";

exit 0;
frhrdina/perun-services
send/tinia_winpak_process.pl
Perl
bsd-2-clause
6,807
#!/usr/bin/perl
#
# $Id$
#
# Copyright (c) 2007 .SE (The Internet Infrastructure Foundation).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
######################################################################

package DNSCheck::Test::Nameserver;

require 5.010001;

use warnings;
use strict;
use utf8;

use base 'DNSCheck::Test::Common';

use Net::IP 1.25 qw[ip_get_version];

######################################################################

# Run the full default test suite for one nameserver of a zone.
# Resolves the nameserver name to addresses and tests each address.
# Returns the number of ERROR/CRITICAL problems found (0 on skip).
sub test {
    my $self       = shift;
    my $parent     = $self->parent;
    my $zone       = shift // $self->zone;
    my $nameserver = shift // $self->ns;

    return 0 unless $parent->config->should_run;

    my $logger = $parent->logger;
    my $errors = 0;

    $self->zone( $zone );
    $self->ns( $nameserver );

    my $packet;

    $logger->module_stack_push();
    $logger->auto( "NAMESERVER:BEGIN", $nameserver );

    # REQUIRE: Nameserver must be a valid hostname
    if ( $parent->host->test( $nameserver ) ) {
        $errors += $logger->auto( "NAMESERVER:HOST_ERROR", $nameserver );
        goto DONE;
    }

    # Does this name encode a DNSCurve public key?
    if ($nameserver =~ m|^uz5[0123456789bcdfghjklmnpqrstuvwxyz]{51}\.|i) {
        $logger->auto( "NAMESERVER:DNSCURVE", $nameserver);
    }

    my @addresses = $parent->dns->find_addresses( $nameserver, $self->qclass );

    $errors += $self->_test_ip( @addresses );

  DONE:
    $logger->auto( "NAMESERVER:END", $nameserver );
    $logger->module_stack_pop();

    return $errors;
}

# Like test(), but for a literal IP address instead of a nameserver name.
# Validates the address first, then runs the per-address test battery.
sub test_by_ip {
    my $self       = shift;
    my $parent     = $self->parent;
    my $zone       = shift;
    my $nameserver = shift;
    my $qclass     = $self->qclass;

    my $logger = $parent->logger;
    my $errors = 0;

    $self->zone( $zone );
    $self->ns( $nameserver );

    my $packet;

    $logger->module_stack_push();
    $logger->auto( "NAMESERVER:BEGIN", $nameserver );

    # This only works because we know $errors is zero here.
    if ( $errors += $parent->address->test( $nameserver ) ) {
        goto DONE;
    }

    $errors += $self->_test_ip( $nameserver );

  DONE:
    $logger->auto( "NAMESERVER:END", $nameserver );
    $logger->module_stack_pop();

    return $errors;
}

# Run the per-address test battery (UDP/TCP reachability, recursion,
# source-address consistency, authority, AXFR, server identification, NSID)
# for each address, honouring the global IPv4/IPv6 switches.
# Returns the accumulated error count.
sub _test_ip {
    my ( $self, @addresses ) = @_;
    my $parent     = $self->parent;
    my $zone       = $self->zone;
    my $nameserver = $self->ns;
    my $qclass     = $parent->config->get( "dns" )->{class};
    my $logger     = $parent->logger;
    my $errors     = 0;
    my $packet;

  ADDRESS: foreach my $address ( @addresses ) {
        my $skip_udp = 0;
        my $skip_tcp = 0;

        if ( ip_get_version( $address ) == 4 && !$parent->config->get( "net" )->{ipv4} ) {
            $logger->auto( "NAMESERVER:SKIPPED_IPV4", $address );
            next ADDRESS;
        }

        if ( ip_get_version( $address ) == 6 && !$parent->config->get( "net" )->{ipv6} ) {
            $logger->auto( "NAMESERVER:SKIPPED_IPV6", $address );
            next ADDRESS;
        }

        my $tmp_udp = $self->ns_udp( $address );
        $errors += $tmp_udp;
        $skip_udp = 1 if $tmp_udp > 0;

        my $tmp_tcp = $self->ns_tcp( $address );
        $errors += $tmp_tcp;
        $skip_tcp = 1 if $tmp_tcp > 0;

        # No point in trying to test the server if it's not responding at all
        if ( $tmp_udp and $tmp_tcp ) {
            next ADDRESS;
        }

        $errors += $self->ns_recursive( $address );
        $errors += $self->same_source( $address );

        my $tmp = $self->ns_authoritative( $address );
        $errors += $tmp;
        next ADDRESS if $tmp > 0;

        # REQUIRE: Nameserver may provide AXFR
        if ( $skip_tcp ) {
            $logger->auto( "NAMESERVER:AXFR_SKIP", $nameserver, $address, $zone );
        }
        else {
            $errors += $self->ns_axfr( $address );
        }

        # Check for possible identification
        unless ( $skip_tcp || $skip_udp ) {
            $logger->auto( "NAMESERVER:CHECKING_LEGACY_ID", $nameserver, $address );
            $self->ns_check_id( $address );
        }
        else {
            $logger->auto( "NAMESERVER:LEGACY_ID_SKIP", $nameserver, $address );
        }

        $logger->auto("NAMESERVER:CHECKING_NSID", $nameserver, $address);
        my $nsid = $parent->dns->query_nsid($address, $zone, $qclass, "SOA");
        if (defined($nsid)) {
            # $nsid is a array ref, with two values
            $logger->auto("NAMESERVER:NSID", $nameserver, $address, $nsid);
        }
    }

    return $errors;
}

# Accessor: get/set the default zone for this test object.
sub zone {
    my $self = shift;
    my $zone = shift;

    if ( defined( $zone ) ) {
        $self->{zone} = $zone;
    }

    return $self->{zone};
}

# Accessor: get/set the default nameserver name for this test object.
sub ns {
    my $self = shift;
    my $ns   = shift;

    if ( defined( $ns ) ) {
        $self->{nameserver} = $ns;
    }

    return $self->{nameserver};
}

# Query the well-known CH TXT identification names (hostname.bind etc.)
# and log any TXT answers.  Logging only; returns nothing meaningful.
sub ns_check_id {
    my $self       = shift;
    my $address    = shift;
    my $nameserver = shift // $self->ns;

    return 0 unless $self->parent->config->should_run;

    my $logger = $self->logger;

    my @domains = ( "hostname.bind", "version.bind", "id.server", "version.server" );
    my $packet;

    for my $domain ( @domains ) {
        $packet = $self->parent->dns->query_explicit( $domain, "CH", "TXT", $address );
        if ( $packet ) {
            foreach my $rr ( $packet->answer ) {
                next unless ( ( $rr->type eq "TXT" ) && $rr->txtdata );
                $logger->auto( "NAMESERVER:LEGACY_ID", $nameserver, $address, $domain, $rr->txtdata );
            }
        }
    }

    return;
}

################################################################
# Individual tests
################################################################

# Log whether the server at $address answers recursively; returns the
# logger's error count for the resulting message.
sub ns_recursive {
    my $self       = shift;
    my $address    = shift;
    my $nameserver = shift // $self->ns;

    return 0 unless $self->parent->config->should_run;

    # REQUIRE: Nameserver should not be recursive
    $self->logger->auto( "NAMESERVER:CHECKING_RECURSION", $nameserver, $address );

    if ( $self->parent->dns->address_is_recursive( $address, $self->qclass ) ) {
        return $self->logger->auto( "NAMESERVER:RECURSIVE", $nameserver, $address );
    }
    else {
        return $self->logger->auto( "NAMESERVER:NOT_RECURSIVE", $nameserver, $address );
    }
}

# Log whether the server at $address is authoritative for $zone.
sub ns_authoritative {
    my $self       = shift;
    my $address    = shift;
    my $nameserver = shift // $self->ns;
    my $zone       = shift // $self->zone;

    return 0 unless $self->parent->config->should_run;

    # REQUIRE: Nameserver must be authoritative for the zone
    # [IIS.KVSE.001.01/r3,IIS.KVSE.001.01/r6]
    $self->logger->auto( "NAMESERVER:CHECKING_AUTH", $nameserver, $address );

    # NOTE(review): the branch looks inverted, which suggests
    # address_is_authoritative()/..._tcp() return an *error count*
    # (0 == authoritative) rather than a boolean — confirm against
    # DNSCheck's DNS lookup module before changing anything here.
    if ( $self->parent->dns->address_is_authoritative( $address, $zone, $self->qclass ) or
        $self->parent->dns->address_is_authoritative_tcp( $address, $zone, $self->qclass ) ) {
        return $self->logger->auto( "NAMESERVER:NOT_AUTH", $nameserver, $address, $zone );
    }
    else {
        return $self->logger->auto( "NAMESERVER:AUTH", $nameserver, $address, $zone );
    }
}

# Check that the zone SOA can be fetched from $address over UDP; logs and
# returns the resulting error count.
sub ns_udp {
    my $self       = shift;
    my $address    = shift;
    my $nameserver = shift // $self->ns;
    my $zone       = shift // $self->zone;

    return 0 unless $self->parent->config->should_run;

    $self->logger->auto( "NAMESERVER:TESTING_UDP", $nameserver, $address );

    my $packet = $self->parent->dns->query_explicit( $zone, $self->qclass, "SOA", $address,
        { transport => "udp", aaonly => 0 } );

    if ( $packet ) {
        return $self->logger->auto( "NAMESERVER:UDP_OK", $nameserver, $address, $zone );
    }
    else {
        return $self->logger->auto( "NAMESERVER:NO_UDP", $nameserver, $address, $zone );
    }
}

# Check that the zone SOA can be fetched from $address over TCP; logs and
# returns the resulting error count.
sub ns_tcp {
    my $self       = shift;
    my $address    = shift;
    my $nameserver = shift // $self->ns;
    my $zone       = shift // $self->zone;

    return 0 unless $self->parent->config->should_run;

    $self->logger->auto( "NAMESERVER:TESTING_TCP", $nameserver, $address );

    my $packet = $self->parent->dns->query_explicit( $zone, $self->qclass, "SOA", $address,
        { transport => "tcp", aaonly => 0 } );

    if ( $packet ) {
        return $self->logger->auto( "NAMESERVER:TCP_OK", $nameserver, $address, $zone );
    }
    else {
        return $self->logger->auto( "NAMESERVER:NO_TCP", $nameserver, $address, $zone );
    }
}

# Check whether $address allows zone transfer (AXFR) of $zone; logs and
# returns the resulting error count.
sub ns_axfr {
    my $self       = shift;
    my $address    = shift;
    my $nameserver = shift // $self->ns;
    my $zone       = shift // $self->zone;

    return 0 unless $self->parent->config->should_run;

    $self->logger->auto( "NAMESERVER:TESTING_AXFR", $nameserver, $address );

    if ( $self->parent->dns->check_axfr( $address, $zone, $self->qclass ) ) {
        return $self->logger->auto( "NAMESERVER:AXFR_OPEN", $nameserver, $address, $zone );
    }
    else {
        return $self->logger->auto( "NAMESERVER:AXFR_CLOSED", $nameserver, $address, $zone );
    }
}

# Verify that the answer to a query sent to $address comes back from the
# same address.  Returns 0 when any lookup step fails (best-effort check).
sub same_source {
    my $self    = shift;
    my $address = shift;
    my $zone    = shift // $self->zone;

    return 0 unless $self->parent->config->should_run;

    # NOTE(review): argument order here is ($zone, 'SOA', 'IN', $address),
    # while the other call sites pass class before type — verify against
    # the query_explicit() signature.
    my $p = $self->parent->dns->query_explicit( $zone, 'SOA', 'IN', $address );
    return 0 unless $p;

    my $to = Net::IP->new( $address );
    return 0 unless $to;
    $to = $to->ip;

    my $from = Net::IP->new( $p->answerfrom );
    return 0 unless $from;
    $from = $from->ip;

    if ( $to eq $from ) {
        return $self->parent->logger->auto( 'NAMESERVER:SAME_SOURCE', $address );
    }
    else {
        return $self->parent->logger->auto( 'NAMESERVER:NOT_SAME_SOURCE', $address );
    }
}

1;

__END__

=head1 NAME

DNSCheck::Test::Nameserver - Test a nameserver

=head1 DESCRIPTION

Test a single name server for a specific zone. The following tests are done:

=over 4

=item *
The nameserver must be a valid hostname (according to L<DNSCheck::Test::Host>).

=item *
The nameserver should not be recursive.

=item *
The nameserver must be authoritative for the zone.

=item *
The SOA record for the zone must be fetchable over both UDP and TCP.

=item *
The nameserver may provide AXFR for the zone.

=back

=head1 METHODS

=over

=item ->new($parent, $zone, $nameserver)

Create a new test object, and optionally set the zone and nameserver name
that will be tested. If those two are set, the values will be used as
defaults for many other methods.

=item ->test($zone, $nameserver);

Perform the default set of tests for a nameserver in a delegated domain. Uses
the defaults set in L<new()>, if any.

Returns the number of problems found at level ERROR and CRITICAL (as well as
detiled log info in the parent's logger object as usual).

For this test, a lookup on the nameserver name will be made and all tests run
on all addresses found (obeying global settings for use of IPv4 and IPv6, of
course).

=item ->test_by_ip($zone, $ip)

Run as many tests as make sense on the nameserver at the given IP address.

=item ->zone($zone)

Get or set the default zone for this object.

=item ->ns($name)

Get or set the default nameserver name for this object.

=item ->ns_check_id($ip, [$name])

Run the version-fetching test for the specified server.

=item ->ns_recursive($ip, [$name])

Test if the specified server is recursive. As all tests here, returns the
number of problems on levels ERROR and CRITICAL found, and populates the
logger object.

=item ->ns_authoritative($ip, [$name, $zone])

Test if the specified server is authoritative for the given zone (nameserver
name is only used for logging).

=item ->ns_udp($ip, [$name, $zone])

Test if the given server can be queried via UDP.

=item ->ns_tcp($ip, [$name, $zone])

Test if the given server can be queried via TCP.

=item ->ns_axfr($ip, [$name, $zone])

Test if the given server allows transfer of the given zone.

=item ->same_source($address)

Checks that queries sent to a given address gets responses coming from the
same address.

=back

=head1 SEE ALSO

L<DNSCheck>, L<DNSCheck::Logger>, L<DNSCheck::Test::Host>

=cut
NZRS/dnscheck
engine/lib/DNSCheck/Test/Nameserver.pm
Perl
bsd-2-clause
13,338
#!/usr/bin/perl
# Turn off one DALI lamp via a daliserver instance running on localhost.
# Usage: lampoff.pl [lamp-address]   (defaults to lamp 0)
use strict;
use warnings;
use usbdali;
use Data::Dumper;

# Lamp short address from the command line; fall back to 0.
my $lamp_addr = $ARGV[0] || 0;

my $controller = usbdali->new('localhost');
if (!$controller->connect()) {
    print("Can't connect\n");
}
else {
    # Send the "off" command to the addressed lamp and report the reply.
    $controller->send($controller->make_cmd('lamp', $lamp_addr, 'off'));
    my $reply = $controller->receive();
    if (!$reply) {
        print("Receive error\n");
    }
    elsif ($reply->{status} eq 'response') {
        print("Received status:$reply->{status} response:$reply->{response}\n");
    }
    else {
        print("Received status:$reply->{status}\n");
    }
    $controller->disconnect();
}
onitake/daliserver
perl/lampoff.pl
Perl
bsd-2-clause
536
# Copyright (c) 2014  Timm Murray
# All rights reserved.
# 
# Redistribution and use in source and binary forms, with or without 
# modification, are permitted provided that the following conditions are met:
# 
#     * Redistributions of source code must retain the above copyright notice, 
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright 
#       notice, this list of conditions and the following disclaimer in the 
#       documentation and/or other materials provided with the distribution.
# 
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
# POSSIBILITY OF SUCH DAMAGE.

# Moose role for high-level drone control interfaces; consumers wrap a
# low-level UAV::Pilot::Driver that performs the actual communication.
package UAV::Pilot::Control;
use v5.14;
use Moose::Role;

# Read-only handle to the instantiated driver this controller talks through.
has 'driver' => (
    is  => 'ro',
    does => 'UAV::Pilot::Driver',
);


1;
__END__

=head1 NAME

UAV::Pilot::Control

=head1 DESCRIPTION

Role for high-level interfaces to drones.  External programs should usually 
write against a module that does this role.

=head1 ATTRIBUTES

=head2 driver

Instantiated C<UAV::Pilot::Driver> object.

=cut
gitpan/UAV-Pilot
lib/UAV/Pilot/Control.pm
Perl
bsd-2-clause
1,780
#!/usr/bin/perl

#Splits the arin bulkwhois xml file into smaller xml files that
#only contain the elements asn, poc, net, and org.

use warnings;
use strict;
use Getopt::Long;
use Data::Dumper;
use Pod::Usage;
use Cwd;

# Parsed command-line options with defaults.
my $args = {
	'verbose' => 0,
	'help' => '',
	'infile' => '',
	'outfiles' => ''
};

GetOptions ('verbose+' => \$args->{'verbose'},
	'help' => \$args->{'help'},
	'infile=s' => \$args->{'infile'},
	'outfiles=s' => \$args->{'outfiles'}
);

if($args->{'help'}) {
	print "---------------------------------------------------\n";
	print "Splitter takes in a arin bulkwhois xml file and creates four smaller files that only contain asn, org, poc, or net elements.\n";
	print "--verbose = tell the script to display parsing information. Repeat this command to increase the verbosity of the script\n";
	print "--help = print usage informaiton\n";
	print "--infile = the location of the bulk whois file to parse\n";
	print "--outfiles = the name of the splitted file. The format is outfilename_[element name].\n";
	print "\t For example running ".'"./Splitter --infile bulkwhois.xml --outfiles outs"'." will produce the following files: outs_asn.xml, outs_net.xml, outs_poc.xml, outs_org.xml\n";
	print "---------------------------------------------------\n";
	exit;
}

use constant {
	# Wrapper written at the start/end of each split file.
	FL_START => '<?xml version="1.0"?><bulkwhois xmlns="http://www.arin.net/bulkwhois/core/v1">',
	FL_END => '</bulkwhois>',
	# Progress report interval (lines) at verbosity >= 2.
	REFRESH_RATE => 50000,
	STATES => {
		'INIT' => 'INIT',
		'ASN' => 'ASN',
		'POC' => 'POC',
		'NET' => 'NET',
		'ORG' => 'ORG'
	}
};

#BEGIN splitting
# Robustness fix: three-arg lexical open with an error check (the original
# used an unchecked two-arg bareword open and silently wrote nothing on failure).
open my $in_fh, '<', $args->{'infile'}
	or die "Could not open infile $args->{'infile'}: $!";

my $fileToWriteTo = '';
my $lineNumber = 0;
my $currentState = STATES->{'INIT'};
my $currOUT = undef;    # lexical handle of the split file currently being written

# Simple state machine: a <asn>/<net>/<poc>/<org> start tag switches the
# output file; all other lines are copied into the current output file.
while (my $line = <$in_fh>) {
	my $newState = changeState($line);

	#The state has changed
	if($newState && ($newState ne $currentState)) {
		#Write the end of the previous file if nessessary.
		if($currentState ne STATES->{'INIT'}) {
			if($args->{'verbose'} >= 1) {
				print "Ending the $fileToWriteTo\n";
				print "Last line: $line\n\n";
			}
			print {$currOUT} "\n\n".FL_END."\n\n";
			close $currOUT;
			$currOUT = undef;
		}
		if($newState ne STATES->{INIT}) {
			$fileToWriteTo = $args->{'infile'}.$newState.".xml";
			if($args->{'verbose'} >= 1) {
				print "Starting the $newState file\n";
				print "File to write: $fileToWriteTo\n";
				print "First line: $line\n\n";
			}
			# Append mode as in the original; rerunning the script therefore
			# appends to existing split files rather than replacing them.
			open my $out_fh, '>>', $fileToWriteTo
				or die "Could not open outfile $fileToWriteTo: $!";
			print {$out_fh} FL_START."\n\n";
			print {$out_fh} $line;
			$currOUT = $out_fh;
			$currentState = $newState;
		}
	}
	#The state is the same print the line to the file.
	elsif($currentState ne STATES->{'INIT'}) {
		print {$currOUT} $line;
	}
	elsif($currentState eq STATES->{'INIT'}) {}
	# NOTE(review): 'debug' is never set by GetOptions above (only verbose/
	# help/infile/outfiles), so this branch is dead; it was probably meant to
	# read $args->{verbose}.  Preserved as-is.
	elsif($args->{debug}) {
		print "----------------Uncaught State------------------\n";
		print "Current State: $currentState\n";
		print "New State: $newState\n";
		print "Current Line: $line\n";
		print "Line Number: $lineNumber\n";
		print "------------------------------------------------\n";
		die;
	}

	if((($lineNumber % REFRESH_RATE) == 0) && ($args->{'verbose'} >= 2)) {
		print "Line number: $lineNumber\n";
	}
	$lineNumber++;
}
print "Total Lines: ". ($lineNumber + 1)."\n" if($args->{'verbose'} >= 1);
close($in_fh);
# NOTE(review): no FL_END is written for the last open file at EOF; the
# input's own trailing </bulkwhois> line ends up in that file instead.
# Preserved as-is — confirm intended before changing.
#END splitting

# Map an input line to the state of the element it opens, or 0 (false)
# when the line does not start a new top-level element.
sub changeState {
	my $line = shift;
	my $state = 0;
	if($line =~ m/<asn>/) {
		$state = STATES->{'ASN'};
	}
	elsif($line =~ m/<net>/) {
		$state = STATES->{'NET'};
	}
	elsif($line =~ m/<poc>/) {
		$state = STATES->{'POC'};
	}
	elsif($line =~ m/<org>/) {
		$state = STATES->{'ORG'};
	}
	return $state;
}
giovino/Arin-XML-Data-to-MySQL
tools/Splitter.pl
Perl
bsd-2-clause
4,695
#!/usr/bin/perl -w # # # Count and print info about duplicate extra data records found in # file produced by extra data validity check. # use strict; use Getopt::Std; use vars qw( $opt_h ); getopts('h'); if (defined $opt_h) { print <<EOM; Count and print info about duplicate extra data records found in file produced by extra data validity check. Usage: $0 [ validity-check file ] Options: -h Display this help. EOM exit 1; } my $edRec = 0; my %nbrEdRecsInMap; my %correction = (); my %itemType = (); my %EDRecsMaps = (); my $nbrOfMaps; my $storeRecordInfo = 0; my $EDRecID; # Global variabel, as we use it both when processing comment and # record. print "Processing ed file...\n"; while (<>) { chomp; if ( length($_) > 2 ) { my @record = split("<¡>", $_, -1); #split each record (row) at <¡> (edSep) foreach my $tmp (@record) { $tmp =~ s/\s+$//g; #remove any space-char at the end of rec } # if comment, # - increase edRec count # - extract map id and store in hash if (m/^\s*#/) { $edRec += 1; my $mapID = $record [ 11 ]; $EDRecID = $record [ 6 ]; if ( ! exists $EDRecsMaps { $EDRecID } ) { # First time EDrecId $nbrOfMaps = 0; } else { $nbrOfMaps = @ { $EDRecsMaps { $EDRecID } }; } if ( length $mapID < 1 ) { print "WARNING: No map defined for edRec $edRec ", $record[6], "\n"; $EDRecsMaps { $EDRecID } [ $nbrOfMaps ] = "-"; } else { $EDRecsMaps { $EDRecID } [ $nbrOfMaps ] = $mapID; } $storeRecordInfo = 1; # 1 = true } else { # record if ( $storeRecordInfo == 1) { $itemType { $EDRecID } = $record[1]; $correction { $EDRecID } = $record[0]; $storeRecordInfo = 0; # 0 = false } } } } print "\nRead $edRec records from ed file\n"; my $duplicateFound = 0; foreach my $edRecId ( sort ( keys ( %itemType ) ) ) { my @maps = @ { $EDRecsMaps { $edRecId } }; if ( (scalar @maps ) > 1 ) { if ( ! 
$duplicateFound ) { print "\nThe following ed records existed in multiples (undefined map id's\ "; print "are written with a '-'):\n"; print "<ED record id>, <operation>, <item info>, <map id 1>, <map id 2>, ...\n\n"; $duplicateFound = 1; } print $edRecId." "; print " "; print $correction { $edRecId }." "; print " "; print $itemType { $edRecId }." "; print " "; foreach my $mapId ( sort ( @maps ) ) { print $mapId . " "; } print "\n"; } } if ( ! $duplicateFound ) { print "No multiple ed records were found.\n"; } print "\n";
wayfinder/Wayfinder-Server
Server/bin/Scripts/MapGen/ed_countEdRecsDuplicates.pl
Perl
bsd-3-clause
2,844
#! /usr/bin/perl # $Id: pbib-export.pl 24 2005-07-19 11:56:01Z tandler $ =head1 NAME pbib-export.pl - export references from the PBib database =head1 SYNOPSIS perl pbib-export.pl -to I<outfile.bib> perl pbib-export.pl -to I<outfile.bib> I<filename1> ... =head1 DESCRIPTION Export all references in the Biblio DB to a format supported by bp (e.g. bibtex). If input files are given, these are scanned for references and only references found are exported. You can use this, e.g., if you want to distribute the references used in a paper together with the paper in a machine-readable format. Please check the bp documentation if you want to export the references in a format other than BibTeX ... =cut use strict; use warnings; use FindBin; use lib "$FindBin::Bin/../lib", '$FindBin::Bin/../lib/Biblio/bp/lib'; # for debug use Data::Dumper; # used modules use Getopt::Long; # used own modules use Biblio::Biblio; use Biblio::BP; use PBib::Config; use PBib::PBib; # read config my $config = new PBib::Config(); # get all known references print STDERR "query biblio for known references\n"; my $bib = new Biblio::Biblio(%{$config->option('biblio')}) or die "can't open biblio database!\n"; my $refs = $bib->queryPapers(); print "\n"; # select destination format etc. Biblio::BP::format('auto:auto', 'bibtex:tex'); @ARGV = Biblio::BP::stdargs(@ARGV); # # # the following has been taken from bp's conv.pl # and adapted to Biblio package (ptandler, 02-03-28) # # my (@files, $outfile); while (@ARGV) { $_ = shift @ARGV; /^--$/ && do { push(@files, @ARGV); last; }; /^--?help$/ && do { &dieusage; }; /^--?to/ && do { $outfile = shift @ARGV; next; }; /^-/ && do { print STDERR "Unrecognized option: $_\n"; next; }; push(@files, $_); } # Note that unlike some programs like rdup, we can be used as a pipe, so # we can't die with a usage output if we have no arguments. # output to STDOUT if nothing was specified. $outfile = '-' unless defined $outfile; # check that if the file exists, we can write to it. 
if (-e $outfile && !-w $outfile) { die "Cannot write to $outfile\n"; } # check that we won't be overwriting any files. if ($outfile ne '-') { foreach my $file (@files) { next if $file eq '-'; die "Will not overwrite input file $file\n" if $file eq $outfile; } } # # filter if input files are given # if( @files ) { # scan files for references my $pbib = new PBib::PBib('refs' => $refs); $refs = $pbib->filterReferencesForFiles(@files); } # print out a little message on the screen my ($informat, $outformat) = Biblio::BP::format(); print STDERR "Using bp, version ", Biblio::BP::doc('version'), ".\n"; print STDERR "Writing: $outformat\n"; print STDERR "\n"; # clear errors. Not really necessary. # Biblio::BP::errors('clear'); ### CAUTION: This currently works only if the file is not yet open (I guess ...) Biblio::BP::export($outfile, $refs); sub dieusage { my($prog) = substr($0,rindex($0,'/')+1); my $str =<<"EOU"; Usage: perl pbib-export.pl -to <outfile.bib> perl pbib-export.pl -to <outfile.bib> <filename1> ... If filenames are given, the export will be filtered to the references used in these files only. 
Arguments: -to Write the output to <outfile> instead of the standard out -bibhelp general help with the bp package -supported display all supported formats and character sets -hush no warnings or error messages -debugging=# set debugging on or off, or to a severity number -error_savelines warning/error messages also include the line number -informat=IF set the input format to IF -outformat=OF set the output format to OF -format=IF,OF set the both the input and output formats -noconverter always use the long conversion, never a special converter -csconv=BOOL turn on or off character set conversion -csprot=BOOL turn on or off character protection -inopts=ARG pass ARG as an option to the input format -outopts=ARG pass ARG as an option to the output format Convert a Refer file to BibTeX: $prog -format=refer,bibtex in.refer -to out.bibtex Convert an Endnote file to an HTML document using the CACM style $prog -format=endnote,output/cacm:html in.endnote -to out.html EOU die $str; }
gitpan/Bundle-PBib
bin/pbib-export.pl
Perl
bsd-3-clause
4,468
#!/usr/bin/perl # # Main authors: # Guido Tack <tack@gecode.org> # # Copyright: # Guido Tack, 2008 # # Last modified: # $Date: 2008-02-26 00:01:43 +1100 (Tue, 26 Feb 2008) $ by $Author: tack $ # $Revision: 6301 $ # # This file is part of Gecode, the generic constraint # development environment: # http://www.gecode.org # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # use File::Basename; open (TESTS, "./test/test -list |"); my %tests; my $blockSize = 40; while ($l = <TESTS>) { chomp($l); $tests{$l} = ""; } close (TESTS); $size = 0; foreach my $k (keys %tests) { $size++; } my $targets = $size / $blockSize; open (EXAMPLES, "find ./examples -maxdepth 1 -type f ! 
-name '*.*' |"); my @examples; while (my $x = <EXAMPLES>) { chomp($x); my ($filename, $dummydir, $suffix) = fileparse($x); open (HELP, "$x -help 2>&1 |"); my @prop; my @model; while (my $l = <HELP>) { if ($l =~ /-propagation \((.*)\)/) { $l1 = $1; $l1 =~ s/ //g; @prop = split(/,/, $l1); } elsif ($l =~ /-model \((.*)\)/) { $l1 = $1; $l1 =~ s/ //g; @model = split(/,/, $l1); } } if (scalar(@prop) == 0) { if (scalar(@model) == 0) { push(@examples, $filename); } else { foreach $m (@model) { push(@examples, "$filename -model $m"); } } } else { foreach $p (@prop) { if (scalar(@model) == 0) { push(@examples, "$filename -propagation $p"); } else { foreach $m (@model) { push(@examples, "$filename -propagation $p -model $m"); } } } } close (HELP); } close (EXAMPLES); print "LCOVOUTDIR = /srv/gecode/httpd/html/Internal/gcov-trunk\n\n"; print "all: tests examples\n"; print "\tlcov --directory \$(PWD) --base-directory \$(PWD) --capture \\\n"; print "\t --output-file testsandexamples.info\n"; print "\tgenhtml -t \"Gecode tests and examples\" testsandexamples.info -o \$(LCOVOUTDIR) -p \$(PWD)\n\n"; print "tests: \\\n"; for (my $tcount = 0; $tcount <= $targets; $tcount++) { print "\ttest$tcount \\\n"; } print "\tdone\n\n"; print "examples: \\\n"; foreach $e (@examples) { $etarget = $e; $etarget =~ s/[ -]/_/g; print "\t$etarget \\\n"; } print "\tdone\n\n"; $tcount = 1; $count = 0; print "test0:\n"; print "\t./test/test \\\n"; foreach my $k (keys %tests) { if ($count == $blockSize) { print "\t\t\$(TESTOPTIONS)\n\n"; print "test$tcount:\n"; print "\t./test/test \\\n"; $count = 0; $tcount++; } print "\t\t-test ".$k." \\\n"; $count++; } if ($count < $blockSize) { print "\t\t\$(TESTOPTIONS)\n\n"; } foreach $e (@examples) { $etarget = $e; $etarget =~ s/[ -]/_/g; print "$etarget:\n"; print "\t./examples/$e -time 240000\n\n"; } print "done:\n";
h4ck3rm1k3/dep-selector-libgecode
ext/libgecode3/vendor/gecode-3.7.3/misc/genlcovmakefile.perl
Perl
apache-2.0
3,811
#!/usr/bin/perl ############################################################################### # Tool for parse AuthzResolver authorization from java source code # # and print it to STDOUT as html table. # # # # This script parse FILE(s) from comand line agruments, or standart input. # # # # Usage: authz-parser VosManagerEntry.java # # authz-parser VosManagerEntry.java | w3m -T text/html # # find . -name *.java | authz-parser # # # # Known bugs: Can't recognize (and ignore) comments. # # # ############################################################################### use Data::Dumper; my $methodName; my $authorizations; #data structure #my @roles = ("PERUNADMIN", "VOADMIN", "GROUPADMIN", "SELF", "AUTHZRESOLVER", "FACILITYADMIN", "SERVICE"); my @roles = ("VOADMIN", "GROUPADMIN", "SELF", "AUTHZRESOLVER", "FACILITYADMIN", "SERVICE"); print htmlHeader(); print tableHeader(@roles); while(<>) { #line with method definition if(/^\s*public\s+[^\s]+\s+(\w+\s*\(.*\)).*\{\s*$/) { #this regex accept line with method $methodName = $1; next; } if(/AuthzResolver\.isAuthorized/) { #get roles and complementary objects and store them into $authorizations data structure my @rolesAndObjects = ($_ =~ /AuthzResolver\.isAuthorized\(\w+,\s*Role\.([^\)]*)\)/g) ; foreach $roleAndObject (@rolesAndObjects) { $roleAndObject =~ /^(\w+)(,\s*(\w+))?/; my $role = $1; my $complementaryObject = $3; #store $authorizations->{$methodName}->{$role} = $complementaryObject; } } } #output foreach $methodName (keys %$authorizations) { print "<tr><td>$methodName</td>"; for my $role (@roles) { if(defined $authorizations->{$methodName}->{$role}) { print "<td class='ok'>OK"; print ": ", $authorizations->{$methodName}->{$role}; print "</td>"; } else { print "<td class='nook'>--</td>"; } } print "</tr>\n"; } print tableFooter(); print htmlFooter(); print "\n\n"; ##################################################################################################### # Methods for print to HTML sub htmlHeader { 
return qq{ <html> <head> <title>AuthzResolver</title> <style> .ok \{ background-color: #66FF66 ; \} .nook \{ background-color: #FF3333 ; \} </style> </head> <body> }; } sub htmlFooter { return qq{ </body> </html> }; } sub tableHeader { $out = "<table border=1><tr>"; $out .= "<td></td>"; #first column is used for labels, not for data for $role (@_) { $out .= "<td>$role</td>"; } $out .= "</tr>\n"; return $out; } sub tableFooter { return "</table>\n"; }
Holdo/perun
perun-utils/authz-parser/authz-parser.pl
Perl
bsd-2-clause
3,032
package AsposeSlidesCloud::Object::DocumentProperties; require 5.6.0; use strict; use warnings; use utf8; use JSON qw(decode_json); use Data::Dumper; use Module::Runtime qw(use_module); use Log::Any qw($log); use Date::Parse; use DateTime; use base "AsposeSlidesCloud::Object::BaseObject"; # # # #NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. # my $swagger_types = { 'List' => 'ARRAY[DocumentProperty]', 'SelfUri' => 'ResourceUri', 'AlternateLinks' => 'ARRAY[ResourceUri]', 'Links' => 'ARRAY[ResourceUri]' }; my $attribute_map = { 'List' => 'List', 'SelfUri' => 'SelfUri', 'AlternateLinks' => 'AlternateLinks', 'Links' => 'Links' }; # new object sub new { my ($class, %args) = @_; my $self = { # 'List' => $args{'List'}, # 'SelfUri' => $args{'SelfUri'}, # 'AlternateLinks' => $args{'AlternateLinks'}, # 'Links' => $args{'Links'} }; return bless $self, $class; } # get swagger type of the attribute sub get_swagger_types { return $swagger_types; } # get attribute mappping sub get_attribute_map { return $attribute_map; } 1;
aspose-slides/Aspose.Slides-for-Cloud
SDKs/Aspose.Slides-Cloud-SDK-for-Perl/lib/AsposeSlidesCloud/Object/DocumentProperties.pm
Perl
mit
1,224
#!/usr/bin/perl # # script which can be used in post-commit hook to check if a commit contains private directories # use strict; use warnings; my $file = '/x1/svn/asf-authorization'; my $svn_look = '/usr/bin/svnlook'; my $dir = undef; sub parse_auth_file { my $auth_hash; open (AUTH_FILE, $file); # parse the auth file and store the perms in a hash while (my $line = <AUTH_FILE>) { if ($line =~ /^\[(\/.*)\]$/) { $dir = $1; } elsif (defined $dir && $line =~ /(.+)=(.*)/) { #$auth_hash->{$dir}->{$1} = $2; my $var_hash; my $key = $1; my $value = $2; #strip of blanks $key =~ s/ //g; $value =~ s/ //g; if (exists $auth_hash->{$dir}) { $var_hash = $auth_hash->{$dir}; } $var_hash->{$key} = $value; $auth_hash->{$dir} = $var_hash; } } close AUTH_FILE; return $auth_hash; } sub changed_dirs($$) { my $repos = shift; my $rev = shift; my @dirs = `$svn_look dirs-changed --revision $rev $repos`; return @dirs; } sub check_private { my $num_args = $#ARGV + 1; die "Usage: $0 repos_path revision\n" if ($num_args != 2); my @dirs = &changed_dirs($ARGV[0],$ARGV[1]); my $auth_hash = &parse_auth_file; my $private = 0; # loop over the changed directories foreach my $dir (@dirs) { my $n_dir = '/' . $dir; while (!exists $auth_hash->{$n_dir}) { $n_dir = $1 if $n_dir =~ /(.+)\/.*$/; } my $perm = $auth_hash->{$n_dir}->{'*'}; if (defined $perm && $perm eq '') { $private = 1; } } if ($private == 1) { print "Commit contains private directories!\n"; } else { print "Commit contains no private directories\n"; } exit $private; } &check_private;
stumped2/infrastructure-puppet
modules/subversion_server/files/hooks/check_private_dir.pl
Perl
apache-2.0
2,268
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # =pod =head1 NAME qpid::messaging::Sender =head1 DESCRIPTION A B<qpid::messaging::Sender> is the entity through which messages are sent. An instance can only be created using an active (i.e., not previously closed) B<qpid::messaging::Session>. =head1 EXAMPLE # create a connection and a session my $conn = new qpid::messaging::Connection("mybroker:5672"); conn->open; my $session = $conn->create_session; # create a sender that posts messages to the "updates" queue my $sender = $session->create_sender "updates;{create:always}" # begin sending updates while( 1 ) { my $content = wait_for_event; $sender->send(new qpid::messaging::Message($content)); } =cut package qpid::messaging::Sender; sub new { my ($class) = @_; my ($self) = { _impl => $_[1], _session => $_[2], }; die "Must provide an implementation." unless defined($self->{_impl}); die "Must provide a Session." unless defined($self->{_session}); bless $self, $class; return $self; } =pod =head1 ACTIONS =cut =pod =head2 SENDING MESSAGES =over =item $sender->send( message ) =item $sender->send( message, block) Sends a message, optionally blocking until the message is received by the broker. 
=back =head3 ARGUMENTS =over =item * message The message to be sent. =item * block If true then blocks until the message is received. =back =cut sub send { my ($self) = @_; my $message = $_[1]; my $sync = $_[2] || 0; die "No message to send." unless defined($message); my $impl = $self->{_impl}; $impl->send($message->get_implementation, $sync); } =pod =head2 CLOSING THE SENDER =item sender->close Closes the sender. This does not affect the ownering B<Session> or B<Connection> =back =cut sub close { my ($self) = @_; my $impl = $self->{_impl}; $impl->close; } =pod =head1 ATTRIBUTES =cut =pod =head2 CAPACITY The capacity is the number of outoing messages that can be held pending confirmation of receipt by the broker. =over =item sender->set_capacity( size ) =item $size = sender->get_capacity =back =back =cut sub set_capacity { my ($self) = @_; my $impl = $self->{_impl}; $impl->setCapacity($_[1]); } sub get_capacity { my ($self) = @_; my $impl = $self->{_impl}; return $impl->getCapacity; } =pod =head2 UNSETTLED The number of messages sent that are pending receipt confirmation by the broker. =over =item $count = sender->get_unsettled =back =cut sub get_unsettled { my ($self) = @_; my $impl = $self->{_impl}; return $impl->getUnsettled; } =pod =head2 AVAILABLE The available slots for sending messages. This differences form B<capacity> in that it is the available slots in the senders capacity for holding outgoing messages. The difference between capacity and available is the number of messages that have no been delivered yet. =over =item $slots = sender->get_available =back =cut sub get_available { my ($self) = @_; my $impl = $self->{_impl}; return $impl->getAvailable(); } =pod =head2 NAME The human-readable name for this sender. =over =item $name = sender-get_name =back =cut sub get_name { my ($self) = @_; my $impl = $self->{_impl}; return $impl->getName; } =pod =head2 SESSION The owning session from which the sender was created. 
=over =item $session = $sender->get_session =back =cut sub get_session { my ($self) = @_; return $self->{_session}; } =pod =head2 ADDRESS Returns the address for this sender. =over =item $address = $sender->get_address =back =cut sub get_address { my ($self) = @_; my $impl = $self->{_impl}; my $address = $impl->getAddress; return new qpid::messaging::Address($address); } 1;
irinabov/debian-qpid-cpp-1.35.0
bindings/qpid/perl/lib/qpid/messaging/Sender.pm
Perl
apache-2.0
4,585
=pod =head1 NAME RAND_egd, RAND_egd_bytes, RAND_query_egd_bytes - query entropy gathering daemon =head1 SYNOPSIS #include <openssl/rand.h> int RAND_egd_bytes(const char *path, int num); int RAND_egd(const char *path); int RAND_query_egd_bytes(const char *path, unsigned char *buf, int num); =head1 DESCRIPTION On older platforms without a good source of randomness such as C</dev/urandom>, it is possible to query an Entropy Gathering Daemon (EGD) over a local socket to obtain randomness and seed the OpenSSL RNG. The protocol used is defined by the EGDs available at L<http://egd.sourceforge.net/> or L<http://prngd.sourceforge.net>. RAND_egd_bytes() requests B<num> bytes of randomness from an EGD at the specified socket B<path>, and passes the data it receives into RAND_add(). RAND_egd() is equivalent to RAND_egd_bytes() with B<num> set to 255. RAND_query_egd_bytes() requests B<num> bytes of randomness from an EGD at the specified socket B<path>, where B<num> must be less than 256. If B<buf> is B<NULL>, it is equivalent to RAND_egd_bytes(). If B<buf> is not B<NULL>, then the data is copied to the buffer and RAND_add() is not called. OpenSSL can be configured at build time to try to use the EGD for seeding automatically. =head1 RETURN VALUES RAND_egd() and RAND_egd_bytes() return the number of bytes read from the daemon on success, or -1 if the connection failed or the daemon did not return enough data to fully seed the PRNG. RAND_query_egd_bytes() returns the number of bytes read from the daemon on success, or -1 if the connection failed. =head1 SEE ALSO L<RAND_add(3)>, L<RAND_bytes(3)>, L<RAND(7)> =head1 COPYRIGHT Copyright 2000-2018 The OpenSSL Project Authors. All Rights Reserved. Licensed under the Apache License 2.0 (the "License"). You may not use this file except in compliance with the License. You can obtain a copy in the file LICENSE in the source distribution or at L<https://www.openssl.org/source/license.html>. =cut
jens-maus/amissl
openssl/doc/man3/RAND_egd.pod
Perl
bsd-3-clause
1,982
# This file is auto-generated by the Perl DateTime Suite time zone # code generator (0.07) This code generator comes with the # DateTime::TimeZone module distribution in the tools/ directory # # Generated from /tmp/rnClxBLdxJ/northamerica. Olson data version 2013a # # Do not edit this file directly. # package DateTime::TimeZone::America::Atikokan; { $DateTime::TimeZone::America::Atikokan::VERSION = '1.57'; } use strict; use Class::Singleton 1.03; use DateTime::TimeZone; use DateTime::TimeZone::OlsonDB; @DateTime::TimeZone::America::Atikokan::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' ); my $spans = [ [ DateTime::TimeZone::NEG_INFINITY, # utc_start 59768949988, # utc_end 1895-01-01 06:06:28 (Tue) DateTime::TimeZone::NEG_INFINITY, # local_start 59768928000, # local_end 1895-01-01 00:00:00 (Tue) -21988, 0, 'LMT', ], [ 59768949988, # utc_start 1895-01-01 06:06:28 (Tue) 60503616000, # utc_end 1918-04-14 08:00:00 (Sun) 59768928388, # local_start 1895-01-01 00:06:28 (Tue) 60503594400, # local_end 1918-04-14 02:00:00 (Sun) -21600, 0, 'CST', ], [ 60503616000, # utc_start 1918-04-14 08:00:00 (Sun) 60520546800, # utc_end 1918-10-27 07:00:00 (Sun) 60503598000, # local_start 1918-04-14 03:00:00 (Sun) 60520528800, # local_end 1918-10-27 02:00:00 (Sun) -18000, 1, 'CDT', ], [ 60520546800, # utc_start 1918-10-27 07:00:00 (Sun) 61212434400, # utc_end 1940-09-29 06:00:00 (Sun) 60520525200, # local_start 1918-10-27 01:00:00 (Sun) 61212412800, # local_end 1940-09-29 00:00:00 (Sun) -21600, 0, 'CST', ], [ 61212434400, # utc_start 1940-09-29 06:00:00 (Sun) 61255468800, # utc_end 1942-02-09 08:00:00 (Mon) 61212416400, # local_start 1940-09-29 01:00:00 (Sun) 61255450800, # local_end 1942-02-09 03:00:00 (Mon) -18000, 1, 'CDT', ], [ 61255468800, # utc_start 1942-02-09 08:00:00 (Mon) 61366287600, # utc_end 1945-08-14 23:00:00 (Tue) 61255450800, # local_start 1942-02-09 03:00:00 (Mon) 61366269600, # local_end 1945-08-14 18:00:00 (Tue) -18000, 1, 'CWT', ], [ 61366287600, # utc_start 
1945-08-14 23:00:00 (Tue) 61370290800, # utc_end 1945-09-30 07:00:00 (Sun) 61366269600, # local_start 1945-08-14 18:00:00 (Tue) 61370272800, # local_end 1945-09-30 02:00:00 (Sun) -18000, 1, 'CPT', ], [ 61370290800, # utc_start 1945-09-30 07:00:00 (Sun) DateTime::TimeZone::INFINITY, # utc_end 61370272800, # local_start 1945-09-30 02:00:00 (Sun) DateTime::TimeZone::INFINITY, # local_end -18000, 0, 'EST', ], ]; sub olson_version { '2013a' } sub has_dst_changes { 4 } sub _max_year { 2023 } sub _new_instance { return shift->_init( @_, spans => $spans ); } 1;
Dokaponteam/ITF_Project
xampp/perl/vendor/lib/DateTime/TimeZone/America/Atikokan.pm
Perl
mit
2,733
# # $Id: NS.pm 932 2011-10-26 12:40:48Z willem $ # package Net::DNS::RR::NS; use strict; BEGIN { eval { require bytes; } } use vars qw(@ISA $VERSION); @ISA = qw(Net::DNS::RR); $VERSION = (qw$LastChangedRevision: 932 $)[1]; sub new { my ($class, $self, $data, $offset) = @_; if ($self->{"rdlength"} > 0) { ($self->{"nsdname"}) = Net::DNS::Packet::dn_expand($data, $offset); } return bless $self, $class; } sub new_from_string { my ($class, $self, $string) = @_; if ($string) { $self->{"nsdname"} = Net::DNS::stripdot($string); } return bless $self, $class; } sub rdatastr { my $self = shift; return "" unless defined $self->{nsdname}; return "$self->{nsdname}."; } sub rr_rdata { my ($self, $packet, $offset) = @_; my $rdata = ""; if (exists $self->{"nsdname"}) { $rdata .= $packet->dn_comp($self->{"nsdname"}, $offset); } return $rdata; } sub _normalize_dnames { my $self=shift; $self->_normalize_ownername(); $self->{'nsdname'}=Net::DNS::stripdot($self->{'nsdname'}) if defined $self->{'nsdname'}; } sub _canonicalRdata { # rdata contains a compressed domainname... we should not have that. my ($self) = @_; my $rdata; $rdata= $self->_name2wire(lc($self->{"nsdname"})); return $rdata; } 1; __END__ =head1 NAME Net::DNS::RR::NS - DNS NS resource record =head1 SYNOPSIS C<use Net::DNS::RR>; =head1 DESCRIPTION Class for DNS Name Server (NS) resource records. =head1 METHODS =head2 nsdname print "nsdname = ", $rr->nsdname, "\n"; Returns the name of the nameserver. =head1 COPYRIGHT Copyright (c) 1997-2002 Michael Fuhr. Portions Copyright (c) 2002-2004 Chris Reinhardt. Portions Copyright (c) 2005 O.M, Kolkman, RIPE NCC. Portions Copyright (c) 2005-2006 O.M, Kolkman, NLnet Labs. All rights reserved. This program is free software; you may redistribute it and/or modify it under the same terms as Perl itself. =head1 SEE ALSO L<perl(1)>, L<Net::DNS>, L<Net::DNS::Resolver>, L<Net::DNS::Packet>, L<Net::DNS::Header>, L<Net::DNS::Question>, L<Net::DNS::RR>, RFC 1035 Section 3.3.11 =cut
btovar/cvmfs
test/mock_services/Net/DNS/RR/NS.pm
Perl
bsd-3-clause
2,069
# file-lib.pl # Common functions for file manager CGIs BEGIN { push(@INC, ".."); }; use WebminCore; &ReadParse(\%prein, 'GET'); if ($prein{'trust'}) { &open_trust_db(); if ($trustdb{$prein{'trust'}}) { $trust_unknown_referers = 1; $trustdb{$prein{'trust'}} = time(); } dbmclose(%trustdb); } &init_config(); @file_buttons = ( "save", "preview", "edit", "info", "acl", "attr", "ext", "search", "delete", "new", "upload", "mkdir", "makelink", "rename", "sharing", "mount", "copy" ); if ($module_info{'usermin'}) { # Usermin gets the allowed list from the module config &switch_to_remote_user(); &create_user_config_dirs(); $hide_dot_files = $userconfig{'hide_dot_files'}; $follow = int($config{'follow'}); $real_home_dir = &simplify_path(&resolve_links($remote_user_info[7])); $upload_max = $config{'max'}; if ($config{'home_only'} == 1) { @allowed_roots = ( $real_home_dir, split(/\s+/, $config{'root'}) ); } elsif ($config{'home_only'} == 2) { @allowed_roots = split(/\s+/, $config{'root'}); } else { @allowed_roots = ( "/" ); } @denied_roots = split(/\s+/, $config{'noroot'}); @allowed_roots = &expand_root_variables(@allowed_roots); @denied_roots = &expand_root_variables(@denied_roots); if ($config{'archive'} eq 'y') { $archive = 1; } elsif ($config{'archive'} eq 'n') { $archive = 0; } else { $archive = 2; $archmax = $config{'archive'}; } $unarchive = 1; $dostounix = 1; $chroot = "/"; @disallowed_buttons = ( ); foreach $k (keys %config) { if ($k =~ /^button_(.*)/ && $config{$k} == 0) { push(@disallowed_buttons, $1); } } $canperms = 1; $canusers = 1; $contents = 1; $running_as_root = 0; } else { # Webmin gets the list of allowed directories from the ACL %access = &get_module_acl(); $hide_dot_files = $config{'hide_dot_files'}; $follow = int($access{'follow'}); $upload_max = $access{'max'}; @allowed_roots = split(/\s+/, $access{'root'}); if ($access{'home'}) { local @u = getpwnam($remote_user); if (@u) { push(@allowed_roots, &simplify_path(&resolve_links($u[7]))); } } @denied_roots = 
split(/\s+/, $access{'noroot'}); $archive = $access{'archive'}; $archmax = $access{'archmax'}; $unarchive = $access{'unarchive'}; $dostounix = $access{'dostounix'}; $chroot = $access{'chroot'}; $access{'button_search'} = 0 if (!&has_command("find")); $access{'button_makelink'} = 0 if (!&supports_symlinks()); $access{'button_info'} = 0 if (!&supports_users()); @disallowed_buttons = grep { !$access{'button_'.$_} } @file_buttons; if (&is_readonly_mode()) { # Force read-only mode for file manager if global readonly # is in effect. $access{'ro'} = 1; } $canperms = $access{'noperms'} ? 0 : 1; $canusers = $access{'nousers'} ? 0 : 1; $contents = $access{'contents'}; $running_as_root = !$access{'uid'}; } %disallowed_buttons = map { $_, 1 } @disallowed_buttons; $icon_map = ( "c", 1, "txt", 1, "pl", 1, "cgi", 1, "html", 1, "htm", 1, "gif", 2, "jpg", 2, "tar", 3, "png", 2, ); # file_info_line(path, [displaypath]) # Returns a line of text containing encoded details of some file sub file_info_line { local @st; local $islink = (-l $_[0]); local $f = $islink && &must_follow($_[0]); local @st = $f ? stat($_[0]) : lstat($_[0]); local $ext = $_[0] =~ /\S+\.([^\.\/]+)$/ ? $1 : undef; local $dp = $_[1] || $_[0]; $dp =~ s/\\/\\\\/g; $dp =~ s/\t/\\t/g; return undef if ($dp =~ /\r|\n/); return undef if (!@st); local $type = $islink && !$f ? 5 : -d _ ? 0 : -b _ ? 6 : -c _ ? 6 : -p _ ? 7 : -S _ ? 7 : defined($icon_map{$ext}) ? $icon_map{$ext} : 4; local $user = !&supports_users() ? "root" : %uid_to_user ? $uid_to_user{$st[4]} : getpwuid($st[4]); $user = $st[4] if (!$user); local $group = !&supports_users() ? "root" : %gid_to_group ? $gid_to_group{$st[5]} :getgrgid($st[5]); $group = $st[5] if (!$group); local $rl = readlink($_[0]); return join("\t", $dp, $type, $user, $group, $st[7] < 0 ? 2**32+$st[7] : $st[7], $st[2], $st[9], $f ? "" : $islink && !$rl ? "???" 
: $rl); } # switch_acl_uid([user]) sub switch_acl_uid { local ($user) = @_; return if ($module_info{'usermin'}); # Always already switched local @u = $user ? getpwnam($user) : $access{'uid'} < 0 ? getpwnam($remote_user) : getpwuid($access{'uid'}); if ($u[2]) { @u || &error($text{'switch_euser'}); &switch_to_unix_user(\@u); umask(oct($access{'umask'})); } } # switch_acl_uid_and_chroot() # Combines the switch_acl_uid and go_chroot functions sub switch_acl_uid_and_chroot { if (!$module_info{'usermin'} && $access{'uid'}) { local @u = $access{'uid'} < 0 ? getpwnam($remote_user) : getpwuid($access{'uid'}); @u || &error($text{'switch_euser'}); local @other = &other_groups($u[0]); &go_chroot(); &switch_to_unix_user(\@u); umask(oct($access{'umask'})); } else { &go_chroot(); } } # can_access(file) # Returns 1 if some file can be edited/deleted sub can_access { local ($file) = @_; $file =~ /^\// || return 0; local $path = &simplify_path($file); return &under_root_dir($path, \@allowed_roots) && ($path eq "/" || !&under_root_dir($path, \@denied_roots)); } # under_root_dir(file, &roots) # Returns 1 if some file is under one of the given roots sub under_root_dir { local $path = &simplify_path($_[0]); local $roots = $_[1]; local @f = grep { $_ ne '' } split(/\//, $path); local $r; DIR: foreach $r (@$roots) { return 1 if ($r eq '/' || $path eq '/' || $path eq $r); local @a = grep { $_ ne '' } split(/\//, $r); local $i; for($i=0; $i<@a; $i++) { next DIR if ($a[$i] ne $f[$i]); } return 1; } return 0; } # can_list(dir) # Returns 1 if some directory can be listed. Parent directories of allowed # directories are included as well. 
sub can_list { local $path = &simplify_path($_[0]); return &under_root_dir_or_parent($path, \@allowed_roots) && ($path eq "/" || !&under_root_dir($path, \@denied_roots)); } # under_root_dir_or_parent(file, &roots) # Returns 1 if some file is under one of the given roots, or their parents sub under_root_dir_or_parent { local @f = grep { $_ ne '' } split(/\//, $_[0]); DIR: foreach $r (@allowed_roots) { return 1 if ($r eq '/' || $_[0] eq '/' || $_[0] eq $r); local @a = grep { $_ ne '' } split(/\//, $r); local $i; for($i=0; $i<@a && $i<@f; $i++) { next DIR if ($a[$i] ne $f[$i]); } return 1; } return 0; } # accessible_subdir(dir) # Returns the path to a dir under the given one that we can access sub accessible_subdir { local ($r, @rv); foreach $r (@allowed_roots) { if ($r =~ /^(\Q$_[0]\E\/[^\/]+)/) { push(@rv, $1); } } return @rv; } sub open_trust_db { local $trust = $ENV{'WEBMIN_CONFIG'} =~ /\/usermin/ ? "/tmp/trust.$ENV{'REMOTE_USER'}" : "$ENV{'WEBMIN_CONFIG'}/file/trust"; eval "use SDBM_File"; dbmopen(%trustdb, $trust, 0700); eval { $trustdb{'1111111111'} = 'foo bar' }; if ($@) { dbmclose(%trustdb); eval "use NDBM_File"; dbmopen(%trustdb, $trust, 0700); } } # must_follow(path) # For symlinks, returns 1 if a link should be follow, 0 if not sub must_follow { if ($follow == 1) { return 1; } elsif ($follow == 0) { return 0; } else { local @s = stat($_[0]); local @l = lstat($_[0]); @st = ($s[4] == $l[4] ? @s : @l); return $s[4] == $l[4]; } } # extract_archive(path, delete-after, get-contents) # Called by upload to extract some zip or tar.gz file. Returns undef if # something was actually done, an error message otherwise. 
sub extract_archive
{
local ($path, $delete, $contents) = @_;
local $out;
$path =~ /^(\S*\/)/ || return 0;
local $dir = $1;
# Shell-quote the directory and path before interpolating into commands
local $qdir = quotemeta($dir);
local $qpath = quotemeta($path);
if ($path =~ /\.zip$/i) {
	# Extract zip file
	return &text('zip_ecmd', "unzip") if (!&has_command("unzip"));
	if ($contents) {
		$out = `(cd $qdir; unzip -l $qpath) 2>&1 </dev/null`;
		}
	else {
		$out = `(cd $qdir; unzip -o $qpath) 2>&1 </dev/null`;
		}
	if ($?) {
		return &text('zip_eunzip', $out);
		}
	}
elsif ($path =~ /\.tar$/i) {
	# Extract un-compressed tar file
	return &text('zip_ecmd', "tar") if (!&has_command("tar"));
	if ($contents) {
		$out = `(cd $qdir; tar tf $qpath) 2>&1 </dev/null`;
		}
	else {
		$out = `(cd $qdir; tar xf $qpath) 2>&1 </dev/null`;
		}
	if ($?) {
		return &text('zip_euntar', $out);
		}
	}
elsif ($path =~ /\.(tar\.gz|tgz|tar\.bz|tbz|tar\.bz2|tbz2)$/i) {
	# Extract gzip or bzip2-compressed tar file
	local $zipper = $_[0] =~ /bz(2?)$/i ? "bunzip2" : "gunzip";
	return &text('zip_ecmd', "tar") if (!&has_command("tar"));
	return &text('zip_ecmd', $zipper) if (!&has_command($zipper));
	if ($contents) {
		$out = `(cd $qdir; $zipper -c $qpath | tar tf -) 2>&1`;
		}
	else {
		$out = `(cd $qdir; $zipper -c $qpath | tar xf -) 2>&1`;
		}
	if ($?) {
		return &text('zip_euntar2', $out);
		}
	}
elsif ($path =~ /\.gz$/i) {
	# Uncompress gzipped file
	return &text('zip_ecmd', "gunzip") if (!&has_command("gunzip"));
	local $final = $_[0];
	$final =~ s/\.gz$//;
	local $qfinal = quotemeta($final);
	if ($contents) {
		# A plain .gz contains exactly one file - its base name
		$out = $final;
		$out =~ s/^.*\///;
		}
	else {
		$out = `(cd $qdir; gunzip -c $qpath >$qfinal) 2>&1`;
		}
	if ($?) {
		return &text('zip_euntar2', $out);
		}
	}
else {
	return $text{'zip_ename'};
	}
if ($contents) {
	return (undef, split(/\r?\n/, $out));
	}
elsif ($delete) {
	unlink($path);
	}
return undef;
}

# post_upload(path, dir, unzip)
# Called after an upload completes - possibly extracts the uploaded archive,
# then emits JavaScript notifying the opener FileManager window.
sub post_upload
{
local ($path, $dir, $zip) = @_;
if ($unarchive == 2) {
	# ACL says to always extract recognized archive types
	$zip = $path =~ /\.(zip|tgz|tar|tar\.gz)$/i ? 1 : 0;
	}
elsif ($unarchive == 0) {
	$zip = 0;
	}
local $refresh = $path;
local $err;
if ($zip) {
	$err = &extract_archive(&unmake_chroot($path), $zip-1);
	if (!$err) {
		# Refresh whole dir
		$refresh = $dir;
		}
	}
$info = &file_info_line(&unmake_chroot($refresh), $refresh);
print "<script>\n";
print "try {\n";
print "opener.document.FileManager.",
      "upload_notify(\"".&quote_escape($refresh)."\", ",
      "\"".&quote_escape($info)."\");\n";
print "} catch(err) { }\n";
if ($err) {
	$err =~ s/\r//g;
	$err =~ s/\n/\\n/g;
	print "opener.document.FileManager.",
	      "upload_error(\"",&quote_escape(&text('zip_err', $err)),"\");\n";
	}
print "close();\n";
print "</script>\n";
}

# go_chroot()
# Chroots to the configured directory, first caching all user and group
# names, since the passwd/group files will be inaccessible afterwards.
sub go_chroot
{
if ($chroot ne "/" && $chroot ne "") {
	# First build hash of users and groups, which will not be accessible
	# after a chroot
	local (@u, @g);
	setpwent();
	while(@u = getpwent()) {
		$uid_to_user{$u[2]} = $u[0] if (!defined($uid_to_user{$u[2]}));
		$user_to_uid{$u[0]} = $u[2] if (!defined($user_to_uid{$u[0]}));
		}
	endpwent();
	setgrent();
	while(@g = getgrent()) {
		$gid_to_group{$g[2]} = $g[0] if (!defined($gid_to_group{$g[2]}));
		$group_to_gid{$g[0]} = $g[2] if (!defined($group_to_gid{$g[0]}));
		}
	endgrent();
	chroot($chroot) || die("chroot to $chroot failed");
	}
}

# make_chroot(dir)
# Converts some real directory to the chroot form, or returns undef if it is
# outside of the chroot.
sub make_chroot
{
if ($chroot eq "/") {
	return $_[0];
	}
elsif ($_[0] eq $chroot) {
	return "/";
	}
else {
	local $rv = $_[0];
	# FIX: $chroot must be regex-escaped, otherwise chroot directories
	# containing regex metacharacters (such as .) match incorrectly
	if ($rv =~ /^\Q$chroot\E\//) {
		$rv =~ s/^\Q$chroot\E//;
		return $rv;
		}
	else {
		return undef;
		}
	}
}

# unmake_chroot(dir)
# Converts some chroot'd directory to the real form
sub unmake_chroot
{
if ($chroot eq "/") {
	return $_[0];
	}
elsif ($_[0] eq "/") {
	return $chroot;
	}
else {
	return $chroot.$_[0];
	}
}

# print_content_type([type])
# Prints the content-type header, with a charset unless disabled in the
# module or user config.
sub print_content_type
{
local $type = $_[0] || "text/plain";
if ($userconfig{'nocharset'} || $config{'nocharset'}) {
	# Never try to use charset
	print "Content-type: $type\n\n";
	}
else {
	my $charset = &get_charset();
	print "Content-type: $type; charset=$charset\n\n";
	}
}

# html_extract_head_body(html)
# Given some HTML, extracts the header, body and stuff after the body.
# Returns (undef, html, undef) if no <body> tag is found.
sub html_extract_head_body
{
local ($html) = @_;
if ($html =~ /^([\000-\377]*<body[^>]*>)([\000-\377]*)(<\/body[^>]*>[\000-\377]*)/i) {
	return ($1, $2, $3);
	}
else {
	return (undef, $html, undef);
	}
}

# expand_root_variables(dir, ...)
# Replaces $USER and $HOME in a list of dirs with details of the current
# remote user, returning the expanded list.
sub expand_root_variables
{
local @rv;
local %hash = ( 'user' => $remote_user_info[0],
		'home' => $remote_user_info[7],
		'uid' => $remote_user_info[2],
		'gid' => $remote_user_info[3] );
my @ginfo = getgrgid($remote_user_info[3]);
$hash{'group'} = $ginfo[0];
foreach my $dir (@_) {
	push(@rv, &substitute_template($dir, \%hash));
	}
return @rv;
}

1;
HasClass0/webmin
file/file-lib.pl
Perl
bsd-3-clause
12,377
=pod

=head1 NAME

i2d_PKCS7_bio_stream - output PKCS7 structure in BER format

=head1 SYNOPSIS

 #include <openssl/pkcs7.h>

 int i2d_PKCS7_bio_stream(BIO *out, PKCS7 *p7, BIO *data, int flags);

=head1 DESCRIPTION

i2d_PKCS7_bio_stream() outputs a PKCS7 structure in BER format.

It is otherwise identical to the function SMIME_write_PKCS7().

=head1 NOTES

This function is effectively a version of i2d_PKCS7_bio() supporting
streaming.

=head1 BUGS

The prefix "i2d" is arguably wrong because the function outputs BER format.

=head1 RETURN VALUES

i2d_PKCS7_bio_stream() returns 1 for success or 0 for failure.

=head1 SEE ALSO

L<ERR_get_error(3)>, L<PKCS7_sign(3)>,
L<PKCS7_verify(3)>, L<PKCS7_encrypt(3)>
L<PKCS7_decrypt(3)>, L<SMIME_write_PKCS7(3)>,
L<PEM_write_bio_PKCS7_stream(3)>

=head1 HISTORY

i2d_PKCS7_bio_stream() was added to OpenSSL 1.0.0.

=head1 COPYRIGHT

Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.

Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.

=cut
openweave/openweave-core
third_party/openssl/openssl/doc/crypto/i2d_PKCS7_bio_stream.pod
Perl
apache-2.0
1,201
=encoding utf8 =head1 NAME perl5121delta - what is new for perl v5.12.1 =head1 DESCRIPTION This document describes differences between the 5.12.0 release and the 5.12.1 release. If you are upgrading from an earlier release such as 5.10.1, first read L<perl5120delta>, which describes differences between 5.10.1 and 5.12.0. =head1 Incompatible Changes There are no changes intentionally incompatible with 5.12.0. If any incompatibilities with 5.12.0 exist, they are bugs. Please report them. =head1 Core Enhancements Other than the bug fixes listed below, there should be no user-visible changes to the core language in this release. =head1 Modules and Pragmata =head2 Pragmata Changes =over =item * We fixed exporting of C<is_strict> and C<is_lax> from L<version>. These were being exported with a wrapper that treated them as method calls, which caused them to fail. They are just functions, are documented as such, and should never be subclassed, so this patch just exports them directly as functions without the wrapper. =back =head2 Updated Modules =over =item * We upgraded L<CGI.pm> to version 3.49 to incorporate fixes for regressions introduced in the release we shipped with Perl 5.12.0. =item * We upgraded L<Pod::Simple> to version 3.14 to get an improvement to \C\<\< \>\> parsing. =item * We made a small fix to the L<CPANPLUS> test suite to fix an occasional spurious test failure. =item * We upgraded L<Safe> to version 2.27 to wrap coderefs returned by C<reval()> and C<rdo()>. =back =head1 Changes to Existing Documentation =over =item * We added the new maintenance release policy to L<perlpolicy.pod> =item * We've clarified the multiple-angle-bracket construct in the spec for POD in L<perlpodspec> =item * We added a missing explanation for a warning about C<:=> to L<perldiag.pod> =item * We removed a false claim in L<perlunitut> that all text strings are Unicode strings in Perl. 
=item *

We updated the Github mirror link in L<perlrepository> to mirrors/perl, not
github/perl

=item *

We fixed a minor error in L<perl5114delta.pod>.

=item *

We replaced a mention of the now-obsolete L<Switch.pm> with F<given>/F<when>.

=item *

We improved documentation about F<$sitelibexp/sitecustomize.pl> in L<perlrun>.

=item *

We corrected L<perlmodlib.pod> which had unintentionally omitted a number of
modules.

=item *

We updated the documentation for 'require' in L<perlfunc.pod> relating to
putting Perl code in @INC.

=item *

We reinstated some erroneously-removed documentation about quotemeta in
L<perlfunc>.

=item *

We fixed an F<a2p> example in L<perlutil.pod>.

=item *

We filled in a blank in L<perlport.pod> with the release date of Perl 5.12.

=item *

We fixed broken links in a number of perldelta files.

=item *

The documentation for L<Carp.pm> incorrectly stated that the $Carp::Verbose
variable makes cluck generate stack backtraces.

=item *

We fixed a number of typos in L<Pod::Functions>

=item *

We improved documentation of case-changing functions in L<perlfunc.pod>

=item *

We corrected L<perlgpl.pod> to contain the correct version of the GNU
General Public License.

=back

=head1 Testing

=head2 Testing Improvements

=over

=item *

F<t/op/sselect.t> is now less prone to clock jitter during timing checks
on Windows.

sleep() time on Win32 may be rounded down to multiple of the clock tick
interval.

=item *

F<lib/blib.t> and F<lib/locale.t>: Fixes for test failures on Darwin/PPC

=item *

F<perl5db.t>: Fix for test failures when C<Term::ReadLine::Gnu> is installed.

=back

=head1 Installation and Configuration Improvements

=head2 Configuration improvements

=over

=item *

We updated F<INSTALL> with notes about how to deal with broken F<dbm.h>
on OpenSUSE (and possibly other platforms)

=back

=head1 Bug Fixes

=over 4

=item *

A bug in how we process filetest operations could cause a segfault.
Filetests don't always expect an op on the stack, so we now use TOPs only if we're sure that we're not stat'ing the _ filehandle. This is indicated by OPf_KIDS (as checked in ck_ftst). See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=74542> =item * When deparsing a nextstate op that has both a change of package (relative to the previous nextstate) and a label, the package declaration is now emitted first, because it is syntactically impermissible for a label to prefix a package declaration. =item * XSUB.h now correctly redefines fgets under PERL_IMPLICIT_SYS See also: L<http://rt.cpan.org/Public/Bug/Display.html?id=55049> =item * utf8::is_utf8 now respects GMAGIC (e.g. $1) =item * XS code using C<fputc()> or C<fputs()>: on Windows could cause an error due to their arguments being swapped. See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=72704> =item * We fixed a small bug in lex_stuff_pvn() that caused spurious syntax errors in an obscure situation. It happened when stuffing was performed on the last line of a file and the line ended with a statement that lacked a terminating semicolon. See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=74006> =item * We fixed a bug that could cause \N{} constructs followed by a single . to be parsed incorrectly. See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=74978> =item * We fixed a bug that caused when(scalar) without an argument not to be treated as a syntax error. See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=74114> =item * We fixed a regression in the handling of labels immediately before string evals that was introduced in Perl 5.12.0. See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=74290> =item * We fixed a regression in case-insensitive matching of folded characters in regular expressions introduced in Perl 5.10.1. 
See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=72998> =back =head1 Platform Specific Notes =head2 HP-UX =over =item * Perl now allows -Duse64bitint without promoting to use64bitall on HP-UX =back =head2 AIX =over =item * Perl now builds on AIX 4.2 The changes required work around AIX 4.2s' lack of support for IPv6, and limited support for POSIX C<sigaction()>. =back =head2 FreeBSD 7 =over =item * FreeBSD 7 no longer contains F</usr/bin/objformat>. At build time, Perl now skips the F<objformat> check for versions 7 and higher and assumes ELF. =back =head2 VMS =over =item * It's now possible to build extensions on older (pre 7.3-2) VMS systems. DCL symbol length was limited to 1K up until about seven years or so ago, but there was no particularly deep reason to prevent those older systems from configuring and building Perl. =item * We fixed the previously-broken C<-Uuseperlio> build on VMS. We were checking a variable that doesn't exist in the non-default case of disabling perlio. Now we only look at it when it exists. =item * We fixed the -Uuseperlio command-line option in configure.com. Formerly it only worked if you went through all the questions interactively and explicitly answered no. =back =head1 Known Problems =over =item * C<List::Util::first> misbehaves in the presence of a lexical C<$_> (typically introduced by C<my $_> or implicitly by C<given>). The variable which gets set for each iteration is the package variable C<$_>, not the lexical C<$_>. A similar issue may occur in other modules that provide functions which take a block as their first argument, like foo { ... $_ ...} list See also: L<http://rt.perl.org/rt3/Public/Bug/Display.html?id=67694> =item * C<Module::Load::Conditional> and C<version> have an unfortunate interaction which can cause C<CPANPLUS> to crash when it encounters an unparseable version string. Upgrading to C<CPANPLUS> 0.9004 or C<Module::Load::Conditional> 0.38 from CPAN will resolve this issue. 
=back =head1 Acknowledgements Perl 5.12.1 represents approximately four weeks of development since Perl 5.12.0 and contains approximately 4,000 lines of changes across 142 files from 28 authors. Perl continues to flourish into its third decade thanks to a vibrant community of users and developers. The following people are known to have contributed the improvements that became Perl 5.12.1: Ævar Arnfjörð Bjarmason, Chris Williams, chromatic, Craig A. Berry, David Golden, Father Chrysostomos, Florian Ragwitz, Frank Wiegand, Gene Sullivan, Goro Fuji, H.Merijn Brand, James E Keenan, Jan Dubois, Jesse Vincent, Josh ben Jore, Karl Williamson, Leon Brocard, Michael Schwern, Nga Tang Chan, Nicholas Clark, Niko Tyni, Philippe Bruhat, Rafael Garcia-Suarez, Ricardo Signes, Steffen Mueller, Todd Rinaldo, Vincent Pit and Zefram. =head1 Reporting Bugs If you find what you think is a bug, you might check the articles recently posted to the comp.lang.perl.misc newsgroup and the perl bug database at http://rt.perl.org/perlbug/ . There may also be information at http://www.perl.org/ , the Perl Home Page. If you believe you have an unreported bug, please run the B<perlbug> program included with your release. Be sure to trim your bug down to a tiny but sufficient test case. Your bug report, along with the output of C<perl -V>, will be sent off to perlbug@perl.org to be analysed by the Perl porting team. If the bug you are reporting has security implications, which make it inappropriate to send to a publicly archived mailing list, then please send it to perl5-security-report@perl.org. This points to a closed subscription unarchived mailing list, which includes all the core committers, who will be able to help assess the impact of issues, figure out a resolution, and help co-ordinate the release of patches to mitigate or fix the problem across all platforms on which Perl is supported. 
Please only use this address for security issues in the Perl core, not for modules independently distributed on CPAN. =head1 SEE ALSO The F<Changes> file for an explanation of how to view exhaustive details on what changed. The F<INSTALL> file for how to build Perl. The F<README> file for general stuff. The F<Artistic> and F<Copying> files for copyright information. =cut
Dokaponteam/ITF_Project
xampp/perl/lib/pods/perl5121delta.pod
Perl
mit
10,143
/*  Part of CHR (Constraint Handling Rules)

    Author:        Bart Demoen, Tom Schrijvers
    E-mail:        Tom.Schrijvers@cs.kuleuven.be
    WWW:           http://www.swi-prolog.org
    Copyright (c)  2004-2011, K.U. Leuven
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

    1. Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in
       the documentation and/or other materials provided with the
       distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
    FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
    COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
    BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
    ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
*/

% List-based helper predicates used by the CHR compiler.
:- module(chr_find,
	  [ find_with_var_identity/4,
	    forall/3,
	    forsome/3
	  ]).
:- use_module(library(lists)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
:- meta_predicate
	find_with_var_identity(?, +, :, -),
	forall(-, +, :),
	forsome(-, +, :).

% find_with_var_identity(+Template, +IdVars, :Goal, -Answers)
%
% Like findall/4, but the variables in IdVars keep their identity across
% solutions: each answer is collected paired with a copy of foo(IdVars),
% and smash/3 unifies every copied key back against the original key so
% the shared variables in the collected Templates are restored.
% copy_term_nat/2 is used so attributed variables are copied without
% their attributes.
find_with_var_identity(Template, IdVars, Goal, Answers) :-
	Key = foo(IdVars),
	copy_term_nat(Template-Key-Goal,TemplateC-KeyC-GoalC),
	findall(KeyC - TemplateC, GoalC, As),
	smash(As,Key,Answers).

% smash(+KeyTemplatePairs, +Key, -Templates)
% Unifies each pair's copied key with the original Key (re-linking the
% identity variables) and strips the keys off, leaving just the templates.
smash([],_,[]).
smash([Key-T|R],Key,[T|NR]) :- smash(R,Key,NR).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% forall(?X, +List, :Goal)
% Succeeds if Goal holds for every binding of X to a member of List
% (i.e. there is no member for which Goal fails).
forall(X,L,G) :-
	\+ (member(X,L), \+ call(G)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% forsome(?X, +List, :Goal)
% Succeeds if Goal holds for at least one member of List; commits to the
% first such member via the cut.
forsome(X,L,G) :-
	member(X,L),
	call(G), !.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

:- dynamic user:goal_expansion/2.
:- multifile user:goal_expansion/2.

% Compile-time optimization: rewrite forall(X, List, p(X)) - where the test
% is a single-argument goal applied directly to the element - into
% once(maplist(p, List)), which avoids the double negation at runtime.
user:goal_expansion(forall(Element,List,Test), GoalOut) :-
	nonvar(Test),
	Test =.. [Functor,Arg],
	Arg == Element,
	GoalOut = once(maplist(Functor,List)).
TeamSPoon/logicmoo_workspace
docker/rootfs/usr/local/lib/swipl/library/chr/find.pl
Perl
mit
2,728
% LPS (Logic Production System) encoding of a goods-insurance contract,
% based on Nick Szabo's contract-language example reproduced below.
:- expects_dialect(lps).
/*
Original: (The insured is the Holder, the insurer is the Counterparty)

insureGoods(goodsPremium, principal, penalty, t1, t2, goodsInsured) =
   counterpartySecurity = pledge(allGoods(Counterparty))
   with
       to Counterparty getTitle(goodsPremium)
       insurancePayment(goodsInsured, principal, t1, t2)
       with
           when breachedPerformance(insurancePayment)
               to Holder foreclose(counterpartySecurity, penalty)

insurancePayment(goodsInsured, principal, t1, t2) =
   when safeArrival(goodsInsured) terminate  % buggy, IMHO: should check dates
   when withinPeriod(t1,t2)
       when choiceOf(Holder)
           to Holder principal
*/
:- include(system('date_utils.pl')).

% Rather than run live, we'll simulate real time by mapping its time points
% to simulation cycles:
simulatedRealTimeBeginning('2018-05-01').
simulatedRealTimePerCycle(RTPC) :- RTPC is 3600*12. % just 2 LPS cycles per calendar day
maxRealTime(M) :- M is 24*3600*120. % 120 days max lifetime of the contract

% External events the contract reacts to (all observed, not derived).
events to(_Agent,_Right), pledge(_Goods,_Security), safeArrival(_GoodsInsured),
	choiceOf(_Party), foreclose(_Security,_Amount).

% The insurance contract holds from the signing time if the counterparty
% pledged security and the premium title was transferred at signing; the
% insured period (T1-T2) must begin after signing. If no insurance payment
% falls due within the period, nothing happens; otherwise the holder may
% foreclose on the pledged security for the penalty amount.
insureGoods(GoodsPremium, Principal, Penalty, T1-T2, GoodsInsured) from ContractSigned if
	pledge(allGoods(counterparty),CounterpartySecurity) from ContractSigned,
	to(counterParty,getTitle(GoodsPremium)) from ContractSigned,
	T1 @> ContractSigned,
	( if insurancePayment(GoodsInsured,Principal) from T, T@>=T1, T@<T2 then true
	  else to(holder,foreclose(CounterpartySecurity, Penalty)) ).

% A payment falls due either on safe arrival of the insured goods (nothing
% is owed) or when the holder elects to claim the principal.
insurancePayment(GoodsInsured, _Principal) from T if safeArrival(GoodsInsured) from T.
insurancePayment(_GoodsInsured, Principal) from T if choiceOf(holder) from T,
	to(holder,Principal) from T.

false safeArrival(_), choiceOf(_). % can't have both at the same time

% Top-level reactive rule instantiating one concrete contract.
if true then insureGoods(usd(600), usd(120000), usd(5000), 2018/5/27 - 2018/7/31,
	"10 John D. tractors") to T,
	T @=< 2018/8/10, % establish a future limit for the whole contract, namely foreclosing
	writeln('Compliant!') from T.
% simulate shipment from US to Europe
% these must precede the insurance period per se:
observe pledge(allGoods(counterparty),"Some key to all Counterparty goods") at 2018/5/20.
observe to(counterParty,getTitle(usd(600))) at "2018-05-20".
% Now events pertaining to the contracted period:
% Instead of this file use the ones that include it and add events.
TeamSPoon/logicmoo_workspace
packs_sys/logicmoo_lps/examples/CLOUT_workshop/SzaboLanguage_insurance_base.pl
Perl
mit
2,404
/*
% NomicMUD: A MUD server written in Prolog
% Maintainer: Douglas Miles
% Dec 13, 2035
%
% Bits and pieces:
%
% LogicMOO, Inform7, FROLOG, Guncho, PrologMUD and Marty's Prolog Adventure Prototype
%
% Copyright (C) 2004 Marty White under the GNU GPL
% Sept 20, 1999 - Douglas Miles
% July 10, 1996 - John Eikenberry
%
% Logicmoo Project changes:
%
% Main file.
%
*/
% Out-of-game "meta" commands (debugging, state inspection, wizard tools)
% for the MUD. Each invoke_metacmd(Doer, Command, S0, S1) clause threads
% the world state from S0 to S1.
:- use_module(library(logicmoo_common)).
:- '$set_source_module'(mu).
:- ensure_loaded(adv_loader).
%:- nop(ensure_loaded('adv_chat80')).
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% CODE FILE SECTION
:- nop(ensure_loaded('adv_main_commands')).
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% printable_state(+State, -Printable): currently the identity; the sorted
% variant is kept commented out.
%printable_state(L, S):- sort(L, S).
printable_state(S, S).

% include_functor(+Functors, +Term): Term is compound with a functor in Functors.
include_functor(List, P):- compound(P), safe_functor(P, F, _), member(F, List), !.
% include_string(+Search, +Term): Search occurs in Term's printed form.
include_string(Search, P):- compound(P), sformat(S,"~p",[P]), sub_string(S,_,_,_,Search).

% invoke_metacmd(Doer, Action, S0, S1)
:- discontiguous invoke_metacmd/4.
:- add_help(quit, "Quits the game.").
:- defn_state_setter( invoke_metacmd( agent, action)).

% Stubs for named sub-worlds: currently just warn and pass the state through.
find_world(World, W0, W1, S0):- wdmsg(warn(find_world(World, W0, W1, S0))), W0=S0.
save_world(World, W1, S0, S1):- wdmsg(warn(save_world(World, W1, S0, S1))), S1=W1.

% Run a command on behalf of an agent inside a named world, then save it back.
invoke_metacmd(_Doer, in_world(Agent, World, Cmd), S0, S1):- !,
	find_world(World, W0, W1, S0),
	invoke_metacmd(Agent, Cmd, W0, W1), !,
	save_world(World, W1, S0, S1).
% NOTE: DCG notation here expands to invoke_metacmd/4, threading S0/S1 as
% the hidden difference-list arguments.
invoke_metacmd(Doer, attempts(Agent, Command)) --> !, {Doer\==Agent}, invoke_metacmd(Agent, Command).
invoke_metacmd(Doer, quit(Agent)) --> redeclare(wishes(Agent, quit)),
	{player_format(Doer, 'logging off~w ~n', [Agent]),
	 player_format(Agent, 'Bye! (from ~w)~n', [Doer])}.
invoke_metacmd(Doer, help, S0, S0) :- !, with_agent_console(Doer, listing(mu_global:cmd_help)).

:- add_help(english, "english <level>: turn on paraphrase generation.").
% "english" with no argument reports the current paraphrase level...
invoke_metacmd(Doer, [english], S0, S0) :- must_security_of(Doer, admin),
	flag(english, Was, Was), player_format(Doer, '~w=~q~n', [english, Was]).
% ...and with an argument sets it (admin only).
invoke_metacmd(Doer, english(N0), S0, S0) :- must_security_of(Doer, admin),
	any_to_number(N0, N), flag(english, _Was, N), flag(english, New, New),
	player_format(Doer, '~w=~q~n', [english, N]).

% Debugger toggles, all admin-only.
:- add_help(rtrace, "Debbuging: Start the non-interactive tracer.").
invoke_metacmd(Doer, rtrace, S0, S0) :- must_security_of(Doer, admin), rtrace.
:- add_help(nortrace, "Debbuging: Stop the non-interactive tracer.").
invoke_metacmd(Doer, nortrace, S0, S0) :- must_security_of(Doer, admin), nortrace.
:- add_help(trace, "Debbuging: Start the interactive tracer.").
invoke_metacmd(Doer, trace, S0, S0) :- must_security_of(Doer, admin), trace.
:- add_help(notrace, "Debbuging: Stop the interactive tracer.").
invoke_metacmd(Doer, notrace, S0, S0) :- must_security_of(Doer, admin), notrace.
:- add_help(spy(+pred), "Put a spy point on all predicates meeting the predicate specs").
invoke_metacmd(Doer, spy(Pred), S0, S0) :- must_security_of(Doer, admin), spy(Pred).
:- add_help(nospy(+pred), "Remove spy point on all predicates meeting the predicate specs").
invoke_metacmd(Doer, nospy(Pred), S0, S0) :- must_security_of(Doer, admin), nospy(Pred).

:- add_help(possess(agent), "Take possession of a character").
% Rebind this console from its current agent to NewAgent (wizard only).
invoke_metacmd(Doer, possess(NewAgent), S0, S0) :- must_security_of(Doer, wizard),
	mu_current_agent(OldAgent), current_input(InputConsole),
	retractall(console_controls_agent(_, OldAgent)),
	retractall(console_controls_agent(_, NewAgent)),
	asserta(console_controls_agent(InputConsole, NewAgent)).

% Fallback: normalize the command into a token list and dispatch.
invoke_metacmd(Doer, Cmd, S0, S9) :- command_to_list(Cmd,List), invoke_metacmd_l(Doer, List, S0, S9).

% command_to_list(+Cmd, -Tokens): atom -> singleton, list -> itself,
% compound -> functor followed by flattened arguments.
command_to_list(Atom,[Atom]):- atom(Atom),!.
command_to_list(List,List):- is_list(List),!.
command_to_list(Cmpd,[Cmd|List]):- compound(Cmpd), compound_name_arguments(Cmpd,Cmd,Args),flatten(Args,List),!.

% show_state_of(+Doer, +Filters, +State): pretty-print State, narrowed by a
% list of filter tokens (strings match printed form; known atoms select
% functor families; other atoms match their own functor).
show_state_of(Doer, [], S0):- !, printable_state(S0, S), reverse(S, SR), player_pprint(Doer, SR, always), maybe_pause(Doer).
show_state_of(Doer, ['.'|List], S0):- !, show_state_of(Doer, List, S0).
show_state_of(Doer, ['"',Atom,'"'|Rest], S0):- atom_string(Atom,Str), show_state_of(Doer, [Str|Rest], S0).
show_state_of(Doer, [Atom|Rest], S0):- string(Atom),!, printable_state(S0, S), include(include_string(Atom), S, SP), show_state_of(Doer, Rest, SP).
show_state_of(Doer, ['props'|Rest], S0):- printable_state(S0, S),!, include(include_functor([props,h]), S, SP), show_state_of(Doer, Rest, SP).
show_state_of(Doer, ['types'|Rest], S0):- printable_state(S0, S),!, include(include_functor([type_props]), S, SP), show_state_of(Doer, Rest, SP).
% NOTE(review): this keeps terms T with props(_,_) @>= T in the standard
% order of terms - presumably memory/props-like facts; confirm intent.
show_state_of(Doer, ['mems'|Rest], S0):- printable_state(S0, S),!, include(@>=(props(_, _)), S, SP), show_state_of(Doer, Rest, SP).
show_state_of(Doer, [Atom|Rest], S0):- atom(Atom),!, printable_state(S0, S), include(include_functor(Atom), S, SP), show_state_of(Doer, Rest, SP).

% Token-list command dispatch.
invoke_metacmd_l(Doer, [echo|List], S0, S0):- !, player_format(Doer, '~w~n', [List]).
invoke_metacmd_l(Doer, [state|List], S, S) :- !, show_state_of(Doer, List, S).
invoke_metacmd_l(Doer, [PPT|List], S, S) :- member(PPT,[props,perceptq,types,mems]),!, show_state_of(Doer,[PPT|List], S),!.
invoke_metacmd_l(Doer, [model|List], S0, S0) :- agent_thought_model(Doer, Model, S0), show_state_of(Doer, List, Model).
invoke_metacmd_l(Doer, [mem|List], S0, S0) :- declared(memories(Doer, Memory), S0), show_state_of(Doer, List, Memory).

% Wizard tools: rebuild sources, drop into a Prolog top level.
invoke_metacmd(Doer, make, S0, S0) :- must_security_of(Doer, wizard), thread_signal(main, make), ensure_has_prompt(Doer).
invoke_metacmd(Doer, prolog, S0, S0) :- must_security_of(Doer, wizard),
	'$current_typein_module'(Was),
	setup_call_cleanup('$set_typein_module'(mu), prolog, '$set_typein_module'(Was)),
	ensure_has_prompt(Doer).

% Wizard escape hatch: run any existing predicate as a goal against the
% advstate, capturing errors (and, off the main console, output) as text.
% NOTE(review): "dbug1(CLS:- throw(E))" as a bare 1200-priority argument may
% require extra parentheses in strict ISO readers - confirm against upstream.
invoke_metacmd(Doer, CLS, S0, S9) :- must_security_of(Doer, wizard), current_predicate(_, CLS), set_advstate(S0),
	(is_main_console -> catch(CLS, E, (dbug1(CLS:- throw(E)), fail))
	; (redirect_error_to_string(catch(CLS, E, (dbug1(CLS:- throw(E)), fail)), Str), !, write(Str))), !,
	ensure_has_prompt(Doer), get_advstate(S9).

% Inspect a named property of an object (3-arg form normalizes to getprop).
invoke_metacmd(Doer, inspect(Self, NamedProperty, Target), S0, S1) :-
	invoke_metacmd(Doer, inspect(Self, getprop(Target, NamedProperty)), S0, S1).
invoke_metacmd(Doer, inspect(Self, getprop(Target, NamedProperty)), S0, S0) :- must_security_of(Doer, wizard),
	pred_holder(NamedProperty, PropertyPred), PropAndData=..[PropertyPred, Data],
	findall(Data, getprop(Target, PropAndData, S0), DataList),
	player_pprint(Self, DataList, always), maybe_pause(Doer).

% rez/derez: create or destroy an object at the invoking agent's location.
invoke_metacmd(Doer, rez(Type), S0, S9) :- must_security_of(Doer, wizard),
	must_mw1((mu_current_agent(Agent), g_h(spatial, Prep, Agent, Here, S0),
	create_new_unlocated(Type, Object, S0, S1),
	declare(h(spatial, Prep, Object, Here), S1, S9),
	player_format(Doer, 'You now see a ~w.~n', [Object]))).
invoke_metacmd(Doer, derez(Object), S0, S1) :- must_security_of(Doer, wizard),
	undeclare(h(spatial, _, Object, _), S0, S1),
	player_format(Doer, 'It vanishes instantly.~n', []).

% setprop/updateprop/delprop wizard commands, each echoing the resulting
% property list of the object afterwards.
invoke_metacmd(Doer, PropCmd, S0, S1) :- action_verb_agent_args(PropCmd, setprop, _, [Object | Args]), Prop =.. Args,
	must_security_of(Doer, wizard), setprop(Object, Prop, S0, S1),
	player_format(Doer, 'Set ~p ~p.~n', [Object, Prop]),
	invoke_metacmd(Doer, properties(Object), S1, S1).
invoke_metacmd(Doer, PropCmd, S0, S1) :- action_verb_agent_args(PropCmd, updateprop, _, [Object | Args]), Prop =.. Args,
	must_security_of(Doer, wizard), updateprop(Object, Prop, S0, S1),
	player_format(Doer, 'Update ~p ~p.~n', [Object, Prop]),
	invoke_metacmd(Doer, properties(Object), S1, S1).
invoke_metacmd(Doer, DelProp, S0, S1) :- action_verb_agent_args(DelProp, delprop, _, [Object | Args]), Prop =.. Args,
	must_security_of(Doer, wizard), delprop(Object, Prop, S0, S1),
	player_format(Doer, 'Deleted ~p ~p.~n', [Object, Prop]),
	invoke_metacmd(Doer, properties(Object), S1, S1).

% Show an object's declared properties (instance props or type props).
invoke_metacmd(Doer, Properties, S0, S0) :- action_verb_agent_thing(Properties, properties, _, Object),
	% Prop =.. Args,
	must_security_of(Doer, wizard),
	(declared(props(Object, PropList), S0);declared(type_props(Object, PropList), S0)), !,
	player_format(Doer, 'Properties now: ~@~n', [pprint(props(Object, PropList))]).

% Record an undo request in the state.
invoke_metacmd(Doer, undo, S0, S1) :- declare(wants(Doer, undo), S0, S1), player_format(Doer, 'undo...OK~nKO...odnu~n', []).

% Save the current state to <Basename>.adv.
invoke_metacmd(_Doer, save(Basename), S0, S0) :- atom_concat(Basename, '.adv', Filename), save_term_exists(Filename, S0).

% Last resort: apply a command-spelling workaround and retry.
invoke_metacmd(Doer, WA, S0, S1) :- (( cmd_workarround(WA, WB) -> WB\==WA)), !, invoke_metacmd(Doer, WB, S0, S1).

% pred_holder(+PropertyName, -StorageFunctor): maps user-facing property
% names onto the functor the data is stored under (identity by default).
pred_holder(memory, memories).
pred_holder(percepts, perceptq).
pred_holder(X, X).
TeamSPoon/logicmoo_workspace
packs_sys/logicmoo_agi/prolog/episodic_memory/adv_edit.pl
Perl
mit
8,762
#!/usr/bin/perl # SPDX-FileCopyrightText: 2021 Pragmatic Software <pragma78@gmail.com> # SPDX-License-Identifier: MIT use warnings; use strict; package Languages::perl; use parent 'Languages::_default'; use Text::ParseWords qw(shellwords); sub initialize { my ($self, %conf) = @_; $self->{sourcefile} = 'prog.pl'; $self->{execfile} = 'prog.pl'; $self->{default_options} = '-w'; $self->{cmdline} = 'perl $options $sourcefile'; } sub postprocess_output { my $self = shift; $self->SUPER::postprocess_output; $self->{output} =~ s/\s+at $self->{sourcefile} line \d+, near ".*?"//; $self->{output} =~ s/\s*Execution of $self->{sourcefile} aborted due to compilation errors.//; $self->{cmdline_opening_comment} = "=cut =============== CMDLINE ===============\n"; $self->{cmdline_closing_comment} = "=cut\n"; $self->{output_opening_comment} = "=cut =============== OUTPUT ===============\n"; $self->{output_closing_comment} = "=cut\n"; } 1;
pragma-/pbot
applets/pbot-vm/host/lib/Languages/perl.pm
Perl
mit
990
/* % =================================================================== % File 'mpred_type_constraints.pl' % Purpose: For Emulation of OpenCyc for SWI-Prolog % Maintainer: Douglas Miles % Contact: $Author: dmiles $@users.sourceforge.net ; % Version: 'interface' 1.0.0 % Revision: $Revision: 1.9 $ % Revised At: $Date: 2002/06/27 14:13:20 $ % =================================================================== % File used as storage place for all predicates which change as % the world is run. % % % Dec 13, 2035 % Douglas Miles */ % File: /opt/PrologMUD/pack/logicmmtc_base/prolog/logicmoo/mpred/mpred_type_constraints.pl %:- if(( ( \+ ((current_prolog_flag(logicmmtc_include,Call),Call))) )). :- module(mpred_type_constraints, [ add_cond/2, arg_to_var/3, attempt_attribute_args/3, attempt_attribute_args/5, attempt_attribute_one_arg/4, attribs_to_atoms/2, attribs_to_atoms0/2, cmp_memberchk_0/2, cmp_memberchk_00/2, comp_type/3, iz/2, extend_iz/2, extend_iz_member/2, init_iz/2, inst_cond/2, isa_pred_l/3, isa_pred_l/4, chk_cond/2, call_cond/2, condz_to_isa/2, map_subterms/3, max_cond/3, max_cond_l/2, dif_objs/2, min_cond/3, min_cond_l/2, promp_yn/2, same/2, same_arg/3, samef/2, to_functor/2, type_size/2, extract_conditions/2, unrelax/1, iz_member/1, lazy/1,lazy/2, constrain/1,enforce/1, relax/1,relax_goal/2,thaw/1, mpred_type_constraints_file/0 ]). %:- set_prolog_flag(generate_debug_info, true). :- user:use_module(library(logicmoo_common)). :- user:use_module(library(gvar_globals_api)). % :- use_module(library(logicmoo/common_logic/common_logic_snark)). :- meta_predicate my_when(+,0). :- meta_predicate nrlc(0). :- meta_predicate prolog_current_choice(1,*). :- meta_predicate prolog_current_choice(1,*,*). :- meta_predicate xnr(0). :- meta_predicate xnr(*,0). %:- include('mpred_header.pi'). %:- rtrace. :- kb_shared(baseKB:admittedArgument/3). :- kb_shared(baseKB:argIsa/3). :- kb_shared(baseKB:genls/2). :- kb_global(baseKB:nesc/1). :- kb_global(baseKB:proven_tru/1). 
% --- Module preamble: predicate visibility declarations, operator setup,
% --- and the literal/argument-position mapping machinery.

:- export_everywhere(mpred_hooks,holds_t,2).
:- kb_global(mpred_hooks:holds_t/3).
:- kb_global(mpred_storage:equals_call/2).
:- kb_global(baseKB:call_e_tru/2).
:- kb_global(baseKB:is_fort/1).
:- kb_global(common_logic_snark:kif_option_value/2).
:- kb_global(baseKB:member_eqz/2).
:- op(300,fx,('~')).
% :- endif.

:- module_transparent(( add_cond/2, arg_to_var/3, attempt_attribute_args/3, attempt_attribute_args/5, attempt_attribute_one_arg/4, attribs_to_atoms/2, attribs_to_atoms0/2, cmp_memberchk_0/2, cmp_memberchk_00/2, comp_type/3, iz/2, extend_iz/2, extend_iz_member/2, init_iz/2, inst_cond/2, isa_pred_l/3, isa_pred_l/4, chk_cond/2, call_cond/2, condz_to_isa/2, map_subterms/3, max_cond/3, max_cond_l/2, dif_objs/2, min_cond/3, min_cond_l/2, promp_yn/2, same/2, same_arg/3, samef/2, to_functor/2, type_size/2, extract_conditions/2, unrelax/1, iz_member/1, lazy/1,lazy/2, constrain/1,enforce/1, relax/1,relax_goal/2,thaw/1, mpred_type_constraints_file/0)).

:- if(exists_source(library(multivar))).
%:- use_module(library(multivar)).
:- endif.
%:- rtrace.
:- if(exists_source(library(vhar))).
%:- use_module(library(vhar)).
:- endif.
:- if(exists_source(library(vprox))).
%:- use_module(library(vprox)).
:- endif.

:- meta_predicate isa_pred_l(+,*,*), isa_pred_l(+,*,*,*), map_subterms(+,?,?), iz_member(*), constrain(*), map_lits(1,+), boxlog_goal_expansion(*,*), map_each_argnum(?,4,?,?,*), map_argnums(?,4,*), thaw(?), lazy(*), unrelax(*), relax_goal(*,+), lazy(+,*).
:- meta_predicate relax(*),relaxing(*).
:- kb_local(baseKB:admittedArgument/3).
:- thread_local(t_l:no_kif_var_coroutines/1).
:- meta_predicate relaxed_call(*).

% Example: ?- G=(loves(X,Y),~knows(Y,tHuman(X))),relax_goal(G,Out),writeq(Out).
:- meta_predicate map_plits(1,*).

%% map_lits(:P1, +Lit) is det.
%
% Apply P1 to every literal of Lit, tracking position in '$outer_stack'.
map_lits(P1,Lit):- locally($('$outer_stack')=[],once(map_plits(P1,Lit))),!.

% Worker: descend through lists, conjunctions, disjunctions, clauses,
% modal wrappers and sentence functors; apply P1 at the leaves.
map_plits(P1,Lit):- \+ compound(Lit),!,call(P1,Lit).
map_plits(P1,[Lit1 | Lit2]):- !,map_plits(P1,Lit1),map_plits(P1,Lit2).
map_plits(P1,(Lit1 , Lit2)):- !,map_plits(P1,Lit1),map_plits(P1,Lit2).
map_plits(P1,(Lit1 ; Lit2)):- !,map_plits(P1,Lit1),map_plits(P1,Lit2).
map_plits(P1,(Lit1 :- Lit2)):- !,map_lits(P1,Lit1),with_outer(Lit1,0,map_plits(P1,Lit2)).
map_plits(P1, Expr) :- demodalfy_outermost(+,Expr,MExpr,_Outer),!, with_outer(Expr,1,map_plits(P1, MExpr)).
map_plits(P1, Expr) :- Expr=..[C,I], tCol(C),!,map_plits(P1, isa(I,C)).
map_plits(P1, Expr) :- functor(Expr,F,A),mappable_sentence_functor(F,A), !, Expr =.. [F|Args], map_meta_lit(F,1,P1,Args).
map_plits(P1,Lit):- call(P1,Lit).

% Walk the arguments of a sentence functor, numbering them from 1.
map_meta_lit(F,N,P1,[Arg|Args]):- !, with_outer(F,N,map_plits(P1, Arg)), N2 is N + 1, map_meta_lit(F,N2,P1,Args).
map_meta_lit(_,_,_,[]):-!.

:- nb_setval('$outer_stack',[]).
% Run Goal with ExprF-N pushed onto the (backtrackable-local) outer stack.
with_outer(ExprF,N,Goal):- nb_current('$outer_stack',Was), locally($('$outer_stack')=[ExprF-N|Was],Goal).

% Destructively push Data into a closure list: fill the 'true'
% placeholder first, then cons onto the tail.
closure_push(Closure,Data):- var(Closure),!,add_cond(Closure,Data).
closure_push(Closure,Data):- Closure=[true|_Rest],!,setarg(1,Closure,Data).
closure_push(Closure,Data):- Closure=[_First|Rest],!,setarg(2,Closure,[Data|Rest]).

% Defer add_cond(Arg,FA) into the closure rather than running it now.
constrain_arg_var(Closure,Arg,FA):- closure_push(Closure,add_cond(Arg,FA)).

%push_modal(neg(_)):- nb_current('$modal_stack',[neg(_)|Was]),!, b_setval('$modal_stack',Was).
%push_modal(Modal):- nb_current('$modal_stack',Was)->b_setval('$modal_stack',[Modal|Was]);b_setval('$modal_stack',[Modal,call]).
%last_modal(Modal):- nb_current('$modal_stack',[Modal|_])-> true; Modal=call.

%% map_argnums(+Modal, :P4, +Lit) is det.
%
% Apply P4(Modal,Functor,ArgNum,Arg) to every argument position of Lit,
% flipping Modal through negation wrappers; isa/2 uses arg number 0.
map_argnums(_,_,Lit):- \+ compound(Lit), !.
map_argnums(Modal,P4,[Lit1 | Lit2]):- !,map_argnums(Modal,P4,Lit1),map_argnums(Modal,P4,Lit2).
map_argnums(Modal,P4,isa(I,C)):- !,call(P4,Modal,C,0,I).
map_argnums(Modal,P4,Expr) :- demodalfy_outermost(Modal,Expr,MExpr,ModalValue),!,map_argnums(ModalValue,P4, MExpr).
map_argnums(Modal,P4,Expr) :- Expr=..[C,I], \+ (clause_b(argIsa(C,1,CC)),CC==C), clause_b(tCol(C)), !,map_argnums(Modal,P4,isa(I,C)).
map_argnums(Modal,P4,Expr) :- compound_name_arguments(Expr,F,Args),functor(Expr,F,A), (mappable_sentence_functor(F,A) -> map_argnums(Modal,P4,Args); map_each_argnum(Modal,P4,F,1,Args)).

map_each_argnum(Modal,P4,F,N,[Arg|Args]):- !, call(P4,Modal,F,N,Arg), N2 is N + 1, map_each_argnum(Modal,P4,F,N2,Args).
map_each_argnum(_Modal,_,_,_,_).

% non-backtracking attribute updates

% Strip one outermost modality wrapper, reporting the resulting modality.
demodalfy_outermost(ModalIn,MExpr, Expr, ModalValue):- MExpr=..[Modal,Expr], modal_value(ModalIn,Modal,ModalValue).

% Double negation cancels to 'true'; otherwise wrap in neg/1.
modal_value(neg(_), Neg , true):- arg(_,v( ( \+ ),'~','-','not'),Neg).
modal_value(_, Neg , neg(Neg)):- arg(_,v( ( \+ ),'~','-','not'),Neg).

% Functors whose arguments are themselves sentences (so recursion
% continues through them); all-uppercase-insensitive names qualify.
mappable_sentence_functor(call,1).
mappable_sentence_functor(=,2):-!,fail.
mappable_sentence_functor(call_u,1).
mappable_sentence_functor(F,_):- downcase_atom(F,DC),upcase_atom(F,DC).
%mappable_sentence_functor(F,1):- \+ tCol(F).
%mappable_sentence_functor(F,A):- \+ argIsa(F,A,_).

%mtc_put_iza(X,Z):- Z=[id(ID)|_],nonvar(ID),!,put_attr(X,iza,Z).
%mtc_put_iza(X,Z):- get_attr(X,iza,[id(ID)|_]),put_attr(X,iza,[id(ID)|Z]).
%mtc_put_iza(X,Z):- gensym(id_,ID),!,put_attr(X,iza,[id(ID)|Z]).
mtc_put_iza(X,Z):- put_attr(X,iza,Z).

% iza attribute writes: seed a fresh domain with an iza_id/1 marker so
% the variable can later be recovered from its own condition list.
mtc_put_attr(X,iza,Z):- mtc_get_attr(X,iza,_Prev),!, mtc_put_iza(X,Z).
mtc_put_attr(X,iza,Z):- !, mtc_put_iza(X,[iza_id(X)|Z]).
mtc_put_attr(X,Y,Z):- var(X),!,oo_put_attr(X,Y,Z).
mtc_put_attr(X,Y,Z):- oo_put_attr(X,Y,Z),nop(dmsg(warn(need_to_error(oo_put_attr(X,Y,Z))))).

mtc_get_attr(X,Y,Z):- var(X),!,oo_get_attr(X,Y,Z).
mtc_get_attr(X,Y,Z):- oo_get_attr(X,Y,Z),nop(dmsg(warn(need_to_fail(oo_get_attr(X,Y,Z))))),!,fail.

% Recover the attvar itself from a stored iza domain via iza_id/1.
mtc_get_attvar(Dom1,X):-memberchk(iza_id(X),Dom1).

compound_lit(Arg):- compound(Arg).

% ========================================================================
% enforce_bound(G) = check constraints
% ========================================================================
:- export(enforce_bound/1).
% Build the bound-argument checks for G and run them all.
enforce_bound(G):-args_enforce_bound(G,Closure),maplist(call,Closure).
% --- Constraint-closure builders (enforce_bound / constrain / enforce /
% --- remove_constraints), goal relaxation, clp(r) laziness, labeling,
% --- and the iza attributed-variable unification machinery.

:- during_boot(add_history(( G=(loves(X,Y),~(knows(Y,tHuman(X)))),must(args_enforce_bound(G,Out)),writeq(Out) ))).

:- export(args_enforce_bound/2).
% Collect checks for already-bound arguments of G into Closure.
args_enforce_bound(G,Closure):- ignore(Closure=[true]),map_argnums(pos(_),args_enforce_bound3(Closure),G).

% Arg-position callback: isa checks for bound class args, type/argument
% admissibility checks for other bound args; variables are skipped.
args_enforce_bound3(Closure,Modal,C,0,I):- !, ignore(( nonvar(I), (Modal\=pos(_) -> closure_push(Closure,modal_isa(I,C)) ; closure_push(Closure,isa(I,C))))).
args_enforce_bound3(Closure,Modal,_F,_A,Arg):- compound_lit(Arg),!,map_argnums(Modal,args_enforce_bound3(Closure),Arg).
args_enforce_bound3(_Closure,_Modal,_F,_A,Arg):- var(Arg),!.
args_enforce_bound3(Closure,Modal,F,A,Arg):-args_enforce_nonvar(Closure,Modal,F,A,Arg).

% ========================================================================
% constrain(G) = add constraints to free args
% ========================================================================
:- export(constrain/1).
constrain(G):-ground(G),!.
constrain(G):-args_constrain(G,Closure),maplist(call,Closure).

:- export(args_constrain/2).
:- during_boot(add_history(( G=(loves(X,Y),~knows(Y,tHuman(X))),must(args_constrain(G,Out)),writeq(Out) ))).
% Collect constraint-attaching goals for the free arguments of G.
args_constrain(G,Closure):- ignore(Closure=[true]),map_argnums(pos(_),args_constrains3(Closure),G).

args_constrains3(Closure,Modal,C,0,I):- !, (Modal\=pos(_) -> constrain_arg_var(Closure,I,does_exist(I)) ; constrain_arg_var(Closure,I,isa(I,C))).
args_constrains3(Closure,Modal,_F,_A,Arg):- compound_lit(Arg),!,map_argnums(Modal,args_constrains3(Closure),Arg).
args_constrains3(_Closure,_Modal,_F,_A,Arg):- nonvar(Arg),!.
args_constrains3(Closure,Modal,F,A,Arg):-args_constrain_var(Closure,Modal,F,A,Arg).

:- export(does_exist/1).
% Placeholder existence condition — always true.
does_exist(_).

% Under a negative modality only the first argument is checked.
modal_admittedArgument(F,1,V):-!,admittedArgument(F,1,V).
modal_admittedArgument(_,_,_).

% ========================================================================
% enforce(G) = enforce_bound/1 + constrain/1
% ========================================================================
:- export(enforce/1).
enforce(G):-args_enforce(G,Closure),maplist(call,Closure).

:- during_boot(add_history(( G=(loves(X,Y),~knows(Y,tHuman(X))),must(args_enforce(G,Out)),writeq(Out) ))).
:- export(args_enforce/2).
% Combined builder: constrain free args, check bound ones.
args_enforce(G,Closure):- ignore(Closure=[true]),map_argnums(pos(_),args_enforces3(Closure),G).

args_enforces3(Closure,Modal,C,0,I):- !, (Modal\=pos(_) -> constrain_arg_var(Closure,I,does_exist(I)) ; constrain_arg_var(Closure,I,isa(I,C))).
args_enforces3(Closure,Modal,_F,_A,Arg):- compound_lit(Arg),!,map_argnums(Modal,args_enforces3(Closure),Arg).
args_enforces3(Closure,Modal,F,A,Arg):- var(Arg),!, args_constrain_var(Closure,Modal,F,A,Arg).
args_enforces3(Closure,Modal,F,A,Arg):- args_enforce_nonvar(Closure,Modal,F,A,Arg).

% ========================================================================
% remove_constraints(G) = remove constraints
% ========================================================================
remove_constraints(G):-args_remove_constraints(G,Closures),maplist(ignore,Closures).

:- export(args_remove_constraints/2).
:- during_boot(add_history(( G=(loves(X,Y),~knows(Y,tHuman(X))),args_enforce(G,Out),writeq(Out), args_remove_constraints(G,Out2),writeq(Out2) ))).
args_remove_constraints(G,Closure):- ignore(Closure=[true]),map_argnums(pos(_),args_remove_constraints3(Closure),G).

args_remove_constraints3(Closure,_Modal,C,0,I):- !, transfer_constraints(Closure,I),transfer_constraints(Closure,C).
args_remove_constraints3(Closure,Modal,_F,_A,Arg):- compound_lit(Arg),!,map_argnums(Modal,args_remove_constraints3(Closure),Arg).
args_remove_constraints3(Closure,_Modal,_F,_A,Arg):- transfer_constraints(Arg,Closure).

% Move an attvar's iza conditions into the closure and clear them.
transfer_constraints(Arg,Closure):- ignore((var(Arg),mtc_get_attr(Arg,iza,ToDo),del_attr(Arg,iza), maplist(constrain_arg_var(Closure,Arg),ToDo))).

%:- module_transparent(apply:maplist/2).
%:- module_transparent(apply:maplist/3).

%% args_constrain_var(?Closure, +Modal, +F, +A, +Arg) is det.
%
% Attach a type (argIsa) or admissibility condition to a free Arg.
args_constrain_var(Closure,Modal,F,A,Arg):- (A==1 ; Modal=pos(_)), argIsa(F,A,Type),!,constrain_arg_var(Closure,Arg,isa(Arg,Type)).
args_constrain_var(Closure,Modal,F,A,Arg):- (Modal\=pos(_) -> constrain_arg_var(Closure,Arg,modal_admittedArgument(F,A,Arg)) ; constrain_arg_var(Closure,Arg, admittedArgument(F,A,Arg))).

%% args_enforce_nonvar(?Closure, +Modal, +F, +A, +Arg) is det.
%
% Queue an admissibility check for an already-bound Arg.
args_enforce_nonvar(Closure,Modal,F,A,Arg):- (Modal\=pos(_) -> closure_push(Closure,modal_admittedArgument(F,A,Arg)) ; closure_push(Closure, admittedArgument(F,A,Arg))).

%% extract_conditions( +PFCSentence, -Conds) is semidet.
%
% Gather the attribute residuals of a sentence into one conjunction.
extract_conditions(Sentence,Conds):- copy_term(Sentence,Sentence,Goals), list_to_set(Goals,GoalSet), (Goals\==GoalSet-> dmsg(cons_odd) ; true), list_to_conjuncts(GoalSet,Conds),!.

%% boxlog_goal_expansion( ?G, ?GG) is semidet.
%
% Goal expansion: rewrite relax/1 wrappers into relaxed copies.
boxlog_goal_expansion(relax(G),GG):-!,relax_goal(G,GG).
%boxlog_goal_expansion(G,GG):-!,relax_goal(G,GG).
/* boxlog_goal_expansion(G,_):- % \+ source_location(_,_), wdmsg(g_s(G)),fail. */

% True if Var carries an iz or iza attribute.
is_iz_or_iza(Var):- zotrace((mtc_get_attr(Var,iz,_);mtc_get_attr(Var,iza,_))).

%% relax( :GoalG) is det.
%
% Replace concrete arguments of every literal with attvars that
% remember the original value, so the goal can unify more loosely.
relax(G):- map_lits(relax_lit,G).

% Succeeds when relaxing G actually introduced new attvars.
relaxing(G):- term_attvars(G,Gs),quietly(relax(G)),term_attvars(G,Gs0),!,Gs0\==Gs.

relax_lit(G):- var(G),!.
relax_lit(_:G):-!,relax_lit(G).
relax_lit(G):- G=..[_|ARGS],relax_args(G,1,ARGS).

%% relaxed_call( :GoalG) is nondet.
%
% relaxed_call(G):- relax(G), (G *-> unrelax(G) ; (unrelax(G),!,fail)).

%% relax_goal( :GoalG ) is det.
%
% Produce a relaxed copy GG of G (original left untouched).
relax_goal(G,GG):- copy_term(G,GG),relax(GG).

relax_goal_alt_old(G,GGG):- copy_term(G,GG,Gs),G=GG,G=..[_|ARGS],relax_args(GG,1,ARGS), GGG=(GG,maplist(iz_member,Gs)).

% ?- G=loves(a,b),relax_lit(G).

%% relax_N( ?G, ?N, ?A) is semidet.
%
% Relax argument N of G in place.
% relax_N(G,N,Val):- var(Val),!,setarg(N,G,Val).
% relax_N(G,N,Val):- iz(AA,[Val]),!,nb_setarg(N,G,AA).
relax_N(_,_,Val):- var(Val),!, ((mtc_get_attr(Val,iz,_);mtc_get_attr(Val,iza,_))->true;mtc_put_attr(Val,iz,[_])).
relax_N(G,N,Val):- dont_relax(Val)->true;(nb_setarg(N,G,NewVar),put_value(NewVar,Val)).

:- if(exists_source(library(multivar))).
% put_value(Var,Value):- multivar(Var),iz(Var,[Value]),mv_set1(Var,Value).
% put_value(Var,Value):- Var==Value,!.
% Record Value as the iz-domain of Var; dicts unpack into attributes.
put_value(Var,Value):- is_dict(Value,Tag),!, (Tag==Var->true;put_value(Var,Tag)), dict_pairs(Value,_Tag2,Pairs), maplist(put_value_attr(Var),Pairs).
put_value(Var,Value):- iz(Var,[Value]).
put_value_attr(Var,N-V):- put_attr_value(Var,N,V).
put_attr_value(Var,iza,V):- !, add_cond(Var,V).
put_attr_value(Var,iz,V):- !, iz(Var,V).
put_attr_value(Arg,Name,FA):- as_constraint_for(Arg,FA,Constraint),!,put_attr_value0(Arg,Name,Constraint).
put_attr_value0(Var,Name,HintE):- (mtc_get_attr(Var,Name,HintL) -> min_cond(HintE,HintL,Hint); Hint=[HintE]), !, mtc_put_attr(Var,Name,Hint).
:- else.
put_value(Var,Value):- iz(Var,[Value]).
:- endif.

% Leave already-relaxed vars and non-symbolic terms alone.
dont_relax(A):- var(A),!,is_iz_or_iza(A).
dont_relax(A):- \+ compound(A), \+ atom(A), \+ string(A).

%% relax_args( ?G, ?N, :TermA) is semidet.
%
% Relax each argument of G, counting positions from N.
relax_args(G,N,[A|RGS]):-relax_N(G,N,A),!,N2 is N + 1,relax_args(G,N2,RGS).
relax_args(_,_,[]).

%:- set_prolog_flag(verbose_file_search,true).
:- use_module(library(clpfd),except([ins/2,sum/3,op(_,_,_)])). % Make predicates defined
%:- absolute_file_name(library('clp/clpr.pl'),File),writeln(File).
%:- use_module(user:library(clpr)). % Make predicates defined
:- use_module(library(clpr),except([{}/1])). % Make predicates defined
:- use_module(user:library(simplex)). % Make predicates defined
%:- set_prolog_flag(verbose_file_search,false).

:- meta_predicate lazy_pfa(*,+,*). % arg1 was 0
:- meta_predicate #(*). % was 0
% #/1: apply lazy/1 to every literal of G.
'#'(G):- map_lits(lazy,G).

my_when(If,Goal):- when(If,Goal).

%% lazy( :GoalG) is semidet.
%
% Defer G until it is sufficiently instantiated; arithmetic relations
% are delegated to clp(r).
lazy(G):- var(G),!,freeze(G,lazy(G)).
lazy(G):- ground(G),!,call(G).
lazy((G1,G2)):- !, lazy(G1),lazy(G2).
lazy(is(X,G)):- !,clpr:{X =:= G}.
lazy(G):- functor(G,F,2),clp_r_arithmetic(F),!,clpr:{G}.
lazy(G):- term_variables(G,Vs),maplist(freeze_rev(lazy_1(G)),Vs).

lazy_1(G):- var(G),!,freeze(G,lazy_1(G)).
lazy_1(G):- ground(G),!,call(G).
lazy_1((G1,G2)):- !, lazy_1(G1),lazy_1(G2).
lazy_1(is(X,G)):- !,clpr:{X =:= G}.
lazy_1(G):- functor(G,F,2),clp_r_arithmetic(F),!,clpr:{G}.
lazy_1(G):- term_variables(G,[_]),!,call(G).
lazy_1(G):- term_variables(G,Vs),maplist(freeze_rev(lazy_1(G)),Vs).

freeze_rev(G,V):- freeze(V,G).

% lazy(is(X,G)):-!,term_variables(G,Vs),lazy(Vs,is(X,G)).

% Binary relations handled by clp(r).
clp_r_arithmetic(=<).
clp_r_arithmetic(=:=).
clp_r_arithmetic( := ).
clp_r_arithmetic(<).
clp_r_arithmetic(>=).
clp_r_arithmetic(>).

lazy_pfa(G,F,2):- clp_r_arithmetic(F),!,clpr:{G}.
/* lazy_pfa(G,_,1):- term_variables(G,[V1|Vs1]),!, (Vs1 = [V2|Vs0] -> lazy([V1,V2|Vs0],G) ; freeze(V1,G)). lazy_pfa(G,_,_):- term_variables(G,[V1|Vs1]), (Vs1 = [V2|Vs0] -> lazy([V1,V2|Vs0],G) ; freeze(V1,G)). */

%% lazy( ?Vs, :GoalG) is semidet.
%
% Freeze G on each variable of Vs in turn.
lazy([V],G):- !, freeze(V,G).
%lazy([V|Vs],G):- or_any_var([V|Vs],C)->when(C,lazy(G)).
lazy([V|Vs],G):- !, lazy(Vs,freeze(V,G)).
lazy(_,G):- call(G).

or_any_var([V],nonvar(V)).
or_any_var([V|Vs],(nonvar(V);C)):-or_any_var(Vs,C).

% test lazy(isa(X,Y)),!,X=tCol,melt(Y).

%% thaw( ?Var) is semidet.
%
% Force the delayed goals attached to Var (deeply, via attribute refs).
thaw(Var):- var(Var),!,thaw_var(Var).
thaw(G):- term_attvars(G,Vs),maplist(thaw,Vs).

thaw_var(Var):- term_attvars_deep(Var,Vs),Vs\==[Var],!,maplist(melt,Vs).
thaw_var(Var):- frozen(Var,G),call(G).

% Transitive closure of attvars reachable through attribute terms.
term_attvars_deep(Term,VsO):- term_attvars_deep([],Term,VsO).
term_attvars_deep(Sofar,Term,Vs):- notrace(ground(Term)),!,Vs=Sofar.
term_attvars_deep(Sofar,Term,Vs):- \+ var(Term),!, term_attvars(Term,AVs), maplist(term_attvars_deep(Sofar),AVs,VVs),ord_union([Sofar|VVs],Vs),!.
term_attvars_deep(Sofar,Var,VsO):- ord_memberchk(Var,Sofar),!,VsO=Sofar.
term_attvars_deep(Sofar,Var,VsO):- get_attrs(Var,Term),term_attvars(Term,AVs), ord_del_element(AVs,Var,Rest),ord_subtract(Rest,Sofar,NewVars),ord_add_element(Sofar,Var,WithNewVar),!, (NewVars==[] -> VsO=WithNewVar; maplist(term_attvars_deep(WithNewVar),NewVars,VVs),ord_union([WithNewVar|VVs],VsO)),!.
term_attvars_deep(Sofar,_,Sofar).

%% melt( ?G) is semidet.
%
% Run the goals frozen on V.
melt(V):-frozen(V,G),call(G).

/* call_grounded_constraints,disable_callable_constraints,call_universals,call_each_with_ignore, call newly grounded_constraints enable_callable_constraints call_unground_constraints */

nonground(G):- \+ ground(G).

enable_reactions(V):- put_attr(V,enable_reactions,true).
disable_reactions(V):- put_attr(V,enable_reactions,false).

:- meta_predicate(mpred_label(:)).
:- module_transparent(mpred_label/1).
:- meta_predicate(mpred_label(+,:)).
:- module_transparent(mpred_label/2).
%% mpred_label(:G) is det.
%
% Labeling pass: discharge the stored conditions on each attvar of G
% (pre then post phases; or a caller-supplied phase).
mpred_label(M:G):- term_attvars(G,Vars),maplist(mpred_label_var(M,pre),Vars),maplist(mpred_label_var(M,post),Vars).
mpred_label(How,M:G):- term_attvars(G,Vars),maplist(mpred_label_var(M,How),Vars).

:- module_transparent(mpred_label_var/3).
% Phase-specific condition processing; ground conditions first, then
% two nonground passes; results are written back into the iza attr.
mpred_label_var(M,pre,V):- obtain_conds(V,List),!, put_attr(V,iza,[]), maplist(call_when_and_save(M,V,ground),List,MidList), maplist(call_when_and_save(M,V,nonground),MidList,NewMidList), maplist(call_when_and_save(M,V,nonground),NewMidList,NewList), put_attr(V,iza,NewList).
mpred_label_var(M,while,V):- obtain_conds(V,List),!, maplist(call_when_and_save(M,V,ground),List,MidList), maplist(call_when_and_save(M,V,nonground),MidList,NewMidList), maplist(call_when_and_save(M,V,nonground),NewMidList,NewList), put_attr(V,iza,NewList).
mpred_label_var(M,post,V):- obtain_conds(V,List), put_attr(V,iza,[]),!, maplist(call_when_and_save(M,V,ground),List,MidList), maplist(call_when_and_save(M,V,nonground),MidList,NewMidList), maplist(call_when_and_save(M,V,nonground),NewMidList,NewList), put_attr(V,iza,NewList).
mpred_label_var(M,Stage,V):- obtain_conds(V,List), maplist(call_when_and_save(M,V,Stage),List,NewList), put_attr(V,iza,NewList).

% Run Cond if it matches the When filter; keep it in the list either way.
call_when_and_save(M,V,When,Cond,Cond):- M:call(When,Cond)-> call_and_save_as_proof(M,V,Cond,Cond) ; true.

% Dispatch on the condition shape; call(proved,_) marks already-proven
% entries, setarg/3 flips call/1 wrappers to proved after success.
call_and_save_as_proof(_,_,call(proved,_),_CCond):- !.
call_and_save_as_proof(M,_,call(call,_),CCond):- !, M:call(CCond),setarg(1,CCond,proved).
call_and_save_as_proof(M,_V,call(ignore,_),CCond):- (M:call(CCond)->setarg(1,CCond,proved);true).
call_and_save_as_proof(_,_V,aoc(_SK,_What),_CCond):-!.
call_and_save_as_proof(M,_V,dif_objs(X,Y),_CCond):- !, M:dif_objs(X,Y).
call_and_save_as_proof(M,_,CCond,CCond):- M:call(CCond),!.

%% inst_cond( ?X, ?List) is semidet.
%
% Sort the conditions by type priority and call them against X.
inst_cond(X, List):- predsort(comp_type,List,SList),call_cond(X,SList).

iza_id(_).

:- module_transparent unify_attr_iza/2.
:- module_transparent unify_attr_iza/3.
:- module_transparent unify_attr_iza_1/3.
:- module_transparent iza:attr_unify_hook/2.
% iza attribute unification: merge the two condition domains (or check
% the incoming value against the stored conditions).
iza:attr_unify_hook(DVar, Y):- unify_attr_iza(DVar, Y).

unify_attr_iza(Dom1, Y):- show_failure(mtc_get_attvar(Dom1,Self)),!,unify_attr_iza_self(Self,Dom1, Y).
unify_attr_iza(Dom1, Y):- dumpST, dmsg(lhs=(Dom1)), dmsg(rhs=(Y)), must(show_failure(attvar(Y))),!, mtc_put_attr(Y, iza, Dom1 ).

% Y names an existential: merge both domains symmetrically.
unify_attr_iza_self(Self,Dom1, Y):- atom(Y),as_existential(Y,YY),% isNamed(YY,What),!,
 mtc_get_attr(YY, iza, Dom2),!, unify_conds(Dom1,Dom2,Result1),!, unify_conds(Dom2,Dom1,Result2),!, mtc_put_attr(YY, iza, Result2), mtc_put_attr(Self, iza, Result1).
unify_attr_iza_self(Self,Dom1, Y):- is_existential(Y),=(Y,YY),!, mtc_get_attr(YY, iza, Dom2),!, unify_conds(Dom1,Dom2,Result1),!, unify_conds(Dom2,Dom1,Result2),!, mtc_put_attr(YY, iza, Result2), mtc_put_attr(Self, iza, Result1).
% Y is a named constant: check conditions in a copy, then record the name.
unify_attr_iza_self(Self,Dom1, Y):- nonvar(Y),isNamed(Y,What),!, (attvar(Self)-> \+ \+ (((attv_bind(Self,Y),chk_cond(Y,Dom1)))) ; chk_cond(Y,Dom1)),!, add_cond(Self,aoc(isName,What)).
unify_attr_iza_self(Self,Dom1, Y):- must(show_failure(var(Self))), (show_failure(attvar(Y))),!, mtc_put_attr(Y, iza, Dom1 ).
unify_attr_iza_self(_Self,Dom1, Y):- chk_cond(Y,Dom1).

local_memberchk_variant(H,Dom1):- memberchk_variant(H,Dom1).

:- module_transparent unify_conds/3.
%% unify_conds(+Dom1, +Dom2, -NewDomain) is semidet.
%
% Merge Dom2 into Dom1, failing if any entry of Dom2 is rejected by
% Dom1; private/local entries are never copied across.
unify_conds(Dom1,Dom2,Dom1):- Dom1=@=Dom2,!.
unify_conds(Dom1,[],Dom1):-!.
unify_conds(Dom1,[H|Dom2],NewDomain):- local_memberchk_variant(H,Dom1),!,unify_conds(Dom1,Dom2,NewDomain).
unify_conds(Dom1,[H|Dom2],NewDomain):- \+ rejects_cond(H,Dom1),!, unify_conds(Dom1,Dom2,NewDomain1), (private_cond(H) -> NewDomain1=NewDomain ; \+ local_cond(H) -> ord_union(NewDomain1,[H],NewDomain) ; \+ memberchk_variant(H,Dom1) -> ord_union(NewDomain1,[H],NewDomain) ; NewDomain1=NewDomain).

hide_unify_conds(Dom1,Dom2,NewDomain):- show_failure(( \+ disjoint_conds(Dom1,Dom2))),
 % sanity(must(\+ disjoint_conds(Dom2,Dom1))), % ensure the checks got both ways
 ord_union(Dom1, Dom2, NewDomain).

get_typeinfos(Var,List):- obtain_conds(Var,Pre),include(is_typeinfo,Pre,List).
get_post_labeling(Var,List):- obtain_conds(Var,Pre),exclude(is_typeinfo,Pre,List).

% Type-info conditions: unary compounds or bare atoms.
is_typeinfo(Pre):- compound(Pre),!,functor(Pre,_,1).
is_typeinfo(Pre):- atom(Pre),!.

% add_all_differnt(QuantsList):- bagof(differentFromAll(I,O),QuantsList,O),L),maplist(call,L).
% Pairwise difference between all quantified objects in the list.
add_all_differnt(QuantsList):- maplist(add_all_differnt2(QuantsList),QuantsList),!.
add_all_differnt2(QuantsList,Ex):- delete_eq(QuantsList,Ex,DisjExs), differentFromAll(Ex,DisjExs).

add_cond_differentFromAll(Ex,DisjExs):- add_cond(Ex,differentFromAll(Ex,DisjExs)).

differentFromAll(One,List):- maplist(dif_objs(One),List).

%% dif_objs( ?A, ?B) is semidet.
%
% Object-level disequality over condition domains.
% dif_objs(A,B):- tlbugger:attributedVars,!,dif(A,B).
dif_objs(A,B):- A==B,!,fail.
dif_objs(A,B):- obtain_object_conds(A,B,Dom1,Dom2),!, dif_objs_doms(Dom1,Dom2).
dif_objs(A,B):- dif(A,B),add_cond(A,dif_objs(A,B)),add_cond(B,dif_objs(B,A)).

% Fail if the two domains share a variant skolem witness; otherwise
% require the domains to be provably disjoint.
dif_objs_doms(Dom1,Dom2):- ((member(aoc(SK,N1),Dom1),memberchk(aoc(SK,N2),Dom2),N1=@=N2)),!,fail.
dif_objs_doms(Dom1,Dom2):- \+ non_disjoint_conds(Dom1,Dom2), disjoint_conds(Dom1,Dom2).

disjoint_object_conds(Var1,Var2):- obtain_object_conds(Var1,Var2,Dom1,Dom2), disjoint_conds(Dom1,Dom2).

obtain_object_conds(Var1,Var2,Dom1,Dom2):- obtain_conds(Var1,Dom1),obtain_conds(Var2,Dom2).

% Fetch a term's condition domain: from its iza attribute, from the
% last argument of a compound, or via its existential proxy.
obtain_conds(Var,Doms):- mtc_get_attr(Var,iza,Doms),!.
obtain_conds(Var,DomsO):- compound(Var),\+ is_fort(Var),functor(Var,_,A),arg(A,Var,Doms), (is_list(Doms)->DomsO=Doms; obtain_conds(Doms,DomsO)).
obtain_conds(Var,DomsO):- as_existential(Var,X),obtain_conds(X,DomsO).
% obtain_conds(_,[]).

% conds may not be merged
disjoint_conds(Dom1,Dom2):- member(Prop,Dom1), rejects_cond(Prop,Dom2).

% disjoint skolems
rejects_cond(aoc(SK,W1),Dom2):- !, memberchk(aoc(SK,W2),Dom2),'#\\='(W1,W2),!.
rejects_cond(male,Dom2):- !, memberchk(female,Dom2).
rejects_cond(_,_):- fail.

% conds may not be merged
non_disjoint_conds(Dom1,Dom2):- member(Prop,Dom1), not_rejected_cond(Prop,Dom2).

aoc(_,_).

% already same skolems
not_rejected_cond(aoc(SK,W1),Dom2):- !, memberchk(aoc(SK,W2),Dom2),'#='(W1 , W2),!.
not_rejected_cond(male,Dom2):- memberchk(female,Dom2).

%% as_existential(+In, -Out) is det.
%
% Map a name/fort to its (cached) existential attvar proxy, creating
% and registering a new proxy on first use.
as_existential(In,Out):- is_existential(In),!,must(In=Out).
as_existential(In,Out):- var(In),!,decl_existential(In),must(In=Out).
% as_existential(In,Out):- strip_module(In,M,X), oo_deref(M,X,Out)->(X\==Out,is_existential(Out)),!.
as_existential(In,Out):- \+ is_fort(In),!,trace_or_throw(as_existential(In,Out)).
as_existential(In,Out):- nb_current_value(?('$fort2exist$'),In,Out),!.
as_existential(In,Out):- decl_existential(Out0),!,add_cond(Out0,aoc(isNamed,In)),!, must(nb_set_value(?('$fort2exist$'),In,Out0)),!, must(nb_current_value(?('$fort2exist$'),In,Out)), must(add_var_to_env(In,Out)).
% --- Existential-variable declaration, the 'x' attribute hooks and
% --- portray support, and the non-repeating ('xnr') variable machinery.

% :- ensure_loaded(library(multivar)).
l_xvarx(Var):- xvarx(Var).

%% decl_existential(?Var) is det.
%
% Mark a fresh variable as existential: give it an 'x' self-attribute
% and an empty iza domain seeded with its own iza_id/1.
decl_existential(Var):- is_existential(Var),!.
decl_existential(Var):- var(Var),!,l_xvarx(Var),put_attr(Var,x,Var),mtc_put_iza(Var,[iza_id(Var)]).
decl_existential(Atomic):- trace_or_throw(\+ decl_existential(Atomic)).

% Existentials: attvars carrying an unbound 'x' attribute, or the(_).
is_existential(Var):- var(Var),!,get_attr(Var,x,V),var(V).
is_existential(the(_)):-!.

:- if(\+ current_predicate(attv_bind/2)).
% Bind an attvar to a value, falling back to put_value/2 on failure.
attv_bind(Var,Value):- Var=Value -> true; put_value(Var,Value).
:- endif.

% 'x' attribute unification: first clause accepts everything (the
% second, attvar-merging clause is currently shadowed by the cut).
x:attr_unify_hook(_Was,_Becoming):-!.
x:attr_unify_hook(Was,Becoming):- (attvar(Was),attvar(Becoming)) -> attv_bind(Was,Becoming) ; true.

% Residual goal so copied terms keep their existential status.
x:attribute_goals(Var) --> ({is_existential(Var)} -> [decl_existential(Var)] ; []).

x:attr_portray_hook(Attr,Var):- one_portray_hook(Var,x(Var,Attr)).

% Print an attvar via its attribute term with attribute printing
% temporarily disabled (avoids infinite recursion in portray).
one_portray_hook(Var,Attr):- locally(set_prolog_flag(write_attributes,ignore), ((setup_call_cleanup(set_prolog_flag(write_attributes,ignore), ((subst(Attr,Var,SName,Disp),!, get_var_name(Var,Name), (atomic(Name)->SName=Name;SName=self), format('~p',[Disp]))), set_prolog_flag(write_attributes,portray))))).

:- module_transparent(user:portray_var_hook/1).
:- multifile(user:portray_var_hook/1).
:- dynamic(user:portray_var_hook/1).
% Render existential attvars as {exists(Var,Val)} when portraying.
user:portray_var_hook(Var) :- current_prolog_flag(write_attributes,portray), attvar(Var), get_attr(Var,x,Val), current_prolog_flag(write_attributes,Was), setup_call_cleanup(set_prolog_flag(write_attributes,ignore), writeq({exists(Var,Val)}), set_prolog_flag(write_attributes,Was)),!.

% Debug helper: report a frame, its goal, parent goal and alternatives.
show_frame_and_goal(Prefix,Frame):- prolog_frame_attribute(Frame,has_alternatives,Alt), prolog_frame_attribute(Frame,goal,Goal), prolog_frame_attribute(Frame,parent,Parent), prolog_frame_attribute(Parent,goal,PGoal), dmsg(frame(Prefix,Frame,Alt,Goal,PGoal)),!.

% Choicepoint types accepted when locating the relevant frame.
clause_or_top(clause).
clause_or_top(top).

% non-repeating var
%% xnr_var(?Var) is det.
%
% Tag Var so successive unifications remember previously-seen values
% (stored in an old_vals/6 record along with the creating frame).
xnr_var(Var):- nonvar(Var) ->true; (get_attr(Var,xnr,_)->true; ((gensym(xnr_,Id), ((prolog_current_choice(clause_or_top,CP),prolog_choice_attribute(CP,frame,Frame))->true;prolog_current_frame(Frame)),
 % show_frame_and_goal(xnr_var,Frame),
 put_attr(Var,xnr,old_vals(Var,xnr_dif,Id,[],Frame,State)), l_xvarx(Var), nop(setup_call_cleanup(true,(true;(State=state(redoing))),setarg(1,State,exited)))))).

% Variant with an explicit comparison predicate Cmp (old_vals/4 record).
xnr_var(Cmp,Var):- nonvar(Var) ->true; (get_attr(Var,xnr,_)->true;(gensym(xnr_,Id),put_attr(Var,xnr,old_vals(Var,Cmp,Id,[])))).

% On unification: log the value, count redo frames since creation, and
% when no redos happened, enumerate compatible merged values.
xnr:attr_unify_hook(AttValue,VarValue):- ((prolog_current_choice(clause_or_top,CP),prolog_choice_attribute(CP,frame,Frame))->true;prolog_current_frame(Frame)), AttValue=old_vals(Var,_Cmp,_Id,WazU,OldFrame,State), nb_setarg(4,AttValue,[VarValue|WazU]), once(has_redos(Frame,OldFrame,N)->true;N=0), (var(State)->(nb_setarg(6,AttValue,N));true), ((N==0) -> ((arg(4,AttValue,List),show_frame_and_goal(has_redos(N),Frame),merge_compatibles(List,Set),!, (member(X,Set),attv_bind(Var,X))));(show_frame_and_goal(has_redos(N),Frame),fail)).

% :- ain(((((deduce_neg(P):- _), \+ (deduce_tru(P):-_))) ==> ((deduce_tru(P):- on_bind(P, \+ deduce_neg(P)))))).

%% xnr(:Goal) is nondet.
%
% Run Goal with its variables made non-repeating.
xnr(Goal):-term_variables(Goal,Vars),xnr(Vars,Goal).
xnr([A],Goal):- xnr_var(A),!,Goal.
xnr([A|AA],Goal):- xnr_var(xnr_dif_l,[A|AA]),!,Goal.
xnr(_,Goal):-Goal,!.

% Count frames with alternatives between CPFrame and OldCPFrame.
has_redos(CPFrame,OldCPFrame,0):- OldCPFrame==CPFrame,!.
has_redos(CPFrame,OldCPFrame,N):- (prolog_frame_attribute(CPFrame,parent,Parent),has_redos(Parent,OldCPFrame,Nm1)), (prolog_frame_attribute(CPFrame,has_alternatives,true)-> ( N is Nm1 + 1) ; N is Nm1).

% Walk the choicepoint chain to the first CP whose type satisfies Type.
prolog_current_choice(Type,CPO):-prolog_current_choice(CP),prolog_current_choice(Type,CP,CPO).
prolog_current_choice(Type,CP,CPO):-prolog_choice_attribute(CP,type,WasType),(call(Type,WasType) -> CP=CPO ; (prolog_choice_attribute(CP,parent,CPP)->prolog_current_choice(Type,CPP,CPO);CPO=null)).
% --- xnr unify-hook helpers, list-wise disequality, isNamed naming,
% --- and the add_cond/has_cond/call_cond condition-domain API.

/* xnr:attr_unify_hook(AttValue,VarValue):- AttValue=old_vals(_Var,_Cmp,_Id,WazU,_Frame,_CP), (WazU = [Old|Waz] -> xnr_attr_unify_hook(AttValue,Old,Waz,VarValue) ; nb_setarg(4,AttValue,[VarValue])). */

% Reject values already seen (member_eqz); otherwise record the new
% value destructively in the old_vals record.
xnr_attr_unify_hook(_,Old,Waz,VarValue):- member_eqz(VarValue,[Old|Waz]),!,fail.
xnr_attr_unify_hook(AttValue,Old,Waz,VarValue):- (is_existential(Old);is_existential(VarValue)),xnr_attr_unify_hook_ex(AttValue,Old,Waz,VarValue).
xnr_attr_unify_hook(AttValue,Old,Waz,VarValue):- (var(Old);var(VarValue)),!,nb_setarg(4,AttValue,[VarValue,Old|Waz]).
xnr_attr_unify_hook(AttValue,Old,Waz,VarValue):- Old\=@=VarValue,!,nb_setarg(4,AttValue,[VarValue,Old|Waz]).

xnr_attr_unify_hook_ex(AttValue,Old,Waz,VarValue):- ( \+ \+ (Old=VarValue) ),!, nb_setarg(4,AttValue,[VarValue,Old|Waz]),member(VarValue,[Old|Waz]).
xnr_attr_unify_hook_ex(AttValue,Old,Waz,VarValue):- nb_setarg(4,AttValue,[VarValue,Old|Waz]).

% No residual goals for xnr (second clause is dead via {fail}).
xnr:attribute_goals(_Var) --> !.
xnr:attribute_goals(Var) --> {fail}, ({is_existential(Var)} -> [] ; [xnr_var(Var)]).

%% xnr_dif(?Old, ?VarValue) is semidet.
%
% Value comparison used by non-repeating vars: fails (i.e. "same") on
% identity; existentials are merged; forts compare by \=.
xnr_dif(Old,VarValue):- Old\==VarValue,!,fail.
xnr_dif(Old,VarValue):- (is_existential(Old);is_existential(VarValue)),!,=(Old,VarValue),!,get_attrs(Old,Attrs),nb_put_attrs(Old,Attrs),!,fail.
xnr_dif(Old,VarValue):- (is_fort(Old);is_fort(VarValue)),!,\=(Old,VarValue).
xnr_dif(Old,VarValue):- (var(Old);var(VarValue)),!.
xnr_dif(Old,VarValue):- is_list(Old),!,xnr_dif_l(Old,VarValue).
xnr_dif(Old,VarValue):- nonvar(VarValue),Old\=@=VarValue.

% Lists differ when any corresponding pair differs.
xnr_dif_l([A|Old],[B|VarValue]):- !,(xnr_dif(A,B);xnr_dif_l(Old,VarValue)).
xnr_dif_l(_,_).

% Collapse values that unify with a later list member into one entry.
merge_compatibles([],[]):-!.
merge_compatibles([N],[N]):-!.
merge_compatibles([N|List],ListOut):- member(N,List) *-> merge_compatibles(List,ListOut); (merge_compatibles(List,ListMid),ListOut=[N|ListMid]).

existential_var(Var,_):- nonvar(Var),!.
existential_var(Var,_):- attvar(Var),!.
existential_var(Var,P):- put_attr(Var,x,P),!.

:- meta_predicate add_constraint_ex(*,*,*).
% add_constraint_ex(_Call,_P,_V):-!,fail.
% Attach P to V as a condition, or freeze Call(V,P) as a fallback.
add_constraint_ex(_,P,V):- \+ contains_var(V,P),!.
add_constraint_ex(_,P,V):- add_cond(V,P),!.
add_constraint_ex(Call,P,V):-freeze(V,call(Call,V,P)).

% Install AttrX under attribute AN on V; an existing attribute must be
% a variant, a bound V (other than '$VAR'/1) is an error.
unify_two(AN,AttrX,V):- nonvar(V),!, (V='$VAR'(_)->true;throw(unify_two(AN,AttrX,V))).
unify_two(AN,AttrX,V):- get_attr(V,AN,OAttr),!,OAttr=@=AttrX,!. % ,show_call(OAttr=@=AttrX).
unify_two(AN,AttrX,V):- put_attr(V,AN,AttrX).

% Add a Pred1/Pred condition between X and the free variables list.
add_cond_list_val(_,_,_,[]):- !.
add_cond_list_val(Pred1,_,X,[Y]):- atom(Pred1), X==Y -> true;P=..[Pred1,X,Y],add_cond(X,P).
add_cond_list_val(Pred1,Pred,X,FreeVars):- list_to_set(FreeVars,FreeVarSet),FreeVars\==FreeVarSet,!, add_cond_list_val(Pred1,Pred,X,FreeVarSet).
add_cond_list_val(_Pred,Pred,X,FreeVars):- P=..[Pred,X,FreeVars],add_cond(X,P).

:- meta_predicate never_cond(?,*).
%% never_cond(?Var, :Closure) is semidet.
%
% Record that Closure must never hold for Var; negated necessity forms
% reduce to a possibility condition.
never_cond(Var,nesc(b_d(_,nesc,poss), ~ P )):- !, ensure_cond(Var,poss(P)).
never_cond(Var,nesc(~ P )):- !, ensure_cond(Var,poss(P)).
never_cond(Var,(~ P )):- !, ensure_cond(Var,poss(P)).
never_cond(NonVar,Closure):- nonvar(NonVar),!, \+ call_e_tru(NonVar,Closure).
never_cond(_Var,Closure):- ground(Closure),!, call_u(~Closure).
never_cond(Var,Closure):- attvar(Var),!,add_cond(Var,~Closure).
%never_cond(Var,Closure):- add_cond(Var,Closure).

% iza_id/1 entries never migrate between domains.
private_cond(iza_id(_)).
local_cond(iza_id(_)).

not_nameOf(Ex,V):- \+ nesc(isNamed(Ex,V)).

% Plain (non-attributed) variable test.
var_plain(Var):-var(Var), \+ attvar(Var).

:- module_transparent(isNamed_impl/2).
:- module_transparent(isNamed_const_var/2).
:- module_transparent(isNamed_var/2).
%% isNamed_impl(?Var, ?Str) is semidet.
%
% Bidirectional naming between existential proxies and their names.
isNamed_impl(Var,Str):- Var=@=Str,!.
isNamed_impl(Var,Str):- atom(Str),!,as_existential(Str,SVar),!,SVar=Var.
isNamed_impl(Var,Str):- var(Var),!,isNamed_var(Var,Str).
isNamed_impl(Var,Str):- atom(Var),!,as_existential(Var,X),!,isNamed_var(X,Str).
isNamed_impl(Var,Str):- !, Var=Str.
isNamed_impl(Var,Str):- isNamed_const_var(Var,Str).

% Name lookup/coercion for constants.
isNamed_const_var(Var,Str):- compound(Str),!,proven_tru(isNamed(Var,Str)).
isNamed_const_var(Var,Str):- number(Var),!,number_string(Var,Str).
isNamed_const_var(Var,Str):- atomic(Var),!,text_to_string(Var,Str).
isNamed_const_var(Var,Str):- term_string(Var,Str).

% Naming for variables: delay when both sides are plain vars, read the
% stored conditions, consult the KB, or record a new name condition.
isNamed_var(Var,Str):- var_plain(Var),var_plain(Str),!,strip_module(_,M,_), my_when((nonvar(Str);nonvar(Var);?=(Var,Str)),M:isNamed(Var,Str)).
isNamed_var(Var,Str):- nonvar(Str),(has_cond(Var,isNamed(Var,V0));has_cond(Var,aoc(isNamed,V0))),!,text_to_string(V0,Str).
isNamed_var(Var,Str):- nrlc(proven_tru(isNamed(Var,Str))).
isNamed_var(Var,Str):- nonvar(Str),!,add_cond(Var,isNamed(Var,Str)),add_cond(Var,aoc(isNamed,Str)),!,add_var_to_env(Str,Var).
isNamed_var(Var,Str):- var(Str),(has_cond(Var,isNamed(Var,Str));has_cond(Var,aoc(isNamed,Str))),!, (nonvar(Str)->add_var_to_env(Str,Var);true).

% isNamed_impl(Var,Str):- proven_tru(isNamed(Var,Str)).
% isNamed_impl(Var,Str):- var(Str),!,add_cond(Var,isNamed(Var,Str)),!.

:- export(isNamed_impl/2).
:- baseKB:import(isNamed_impl/2).
:- module_transparent(baseKB:isNamed/2).
baseKB:isNamed(X,Y):- strip_module(_,M,_),M:isNamed_impl(X,Y).
%:- ain((mtHybrid(Mt)==> {kb_local(Mt:isNamed/2)})).

% No-repeats, loop-checked call (warns and fails on detected loops).
nrlc(G):- no_repeats(loop_check(G,(((dmsg(warn(looped(G)))),fail)))).

% Translate attributes from this module to residual goals
iza:attribute_goals(X) --> { mtc_get_attr(X, iza, List) },!, [add_cond(X, List)].

%% as_constraint_for(+Arg, +Prop, -Constraint) is det.
%
% Normalize a property about Arg into its stored constraint form:
% isa(Arg,C) stores as the bare atom C, unary-about-Arg compounds store
% as their functor, props-style terms store under props/1.
as_constraint_for(Arg,isa(AArg,FA),FA):- \+ kif_option_value(iza_atoms,false), atom(FA),AArg==Arg,!.
as_constraint_for(Arg,ISA,FA):- \+ kif_option_value(iza_atoms,false), compound(ISA), ISA=..[FA,AArg],AArg==Arg,!.
as_constraint_for(Arg,props(AArg,FA),props(FA)):- \+ kif_option_value(iza_atoms,false), atom(FA),AArg==Arg,!.
as_constraint_for(Arg,PROP,props(ASPROP)):- \+ kif_option_value(iza_atoms,false), compound(PROP), PROP=..[FA,AArg|Rest],AArg==Arg,ASPROP=..[FA|Rest].
as_constraint_for(_,FA,FA).

add_cond_rev(Prop,Var):- add_cond(Var,Prop).

:- meta_predicate ensure_cond(?,*).
:- module_transparent(ensure_cond/1).
%% ensure_cond(?Var, :Closure) is semidet.
%
% Attach Closure to Var's condition domain (first clause currently
% short-circuits the more specific ones below via the cut).
ensure_cond(Var,Closure):-!, add_cond(Var,Closure).
ensure_cond(NonVar,Closure):- nonvar(NonVar),!,call_e_tru(NonVar,Closure).
ensure_cond(Var,Closure):- is_existential(Var),!,show_failure(add_cond(Var,Closure)).
ensure_cond(Var,Closure):- attvar(Var),!,show_failure(add_cond(Var,Closure)).
ensure_cond(Var,Closure):- as_existential(Var,VarX),must(add_cond(VarX,Closure)),!.

%% add_cond(?Var, +Prop) is semidet.
%
% Add Prop (or each member of a list) to Var's iza domain, failing if
% the existing domain rejects it.
add_cond(Var,Prop):- is_list(Prop),!,as_existential(Var,VarX),obtain_conds(VarX,Dom1),!,maplist(add_cond3(VarX,Dom1),Prop).
add_cond(Var,Prop):- as_existential(Var,VarX),obtain_conds(VarX,Dom1),add_cond3(VarX,Dom1,Prop).

add_cond1(Var,Prop):- obtain_conds(Var,Dom1),add_cond3(Var,Dom1,Prop).

add_cond3(Var,Dom1,Prop):- as_constraint_for(Var,Prop,Constraint), show_failure(( \+ rejects_cond(Constraint,Dom1))), ord_union(Dom1, [Constraint], NewDomain), mtc_put_attr(Var,iza,NewDomain).

:- meta_predicate map_one_or_list(1,?).
% Apply Call2 to a single item or to each element of a list.
map_one_or_list(Call2,ArgOrL):- is_list(ArgOrL)->maplist(Call2,ArgOrL);call(Call2,ArgOrL).

% Membership / removal / absence of conditions on Var's domain.
has_cond(Var,Prop):- obtain_conds(Var,Doms),map_one_or_list(has_cond(Doms,Var),Prop).
has_cond(Doms,Var,Prop):- as_constraint_for(Var,Prop,C),member(C,Doms).

rem_cond(Var,Prop):- obtain_conds(Var,Doms),map_one_or_list(rem_cond(Doms,Var),Prop).
rem_cond(Doms,Var,Prop):- as_constraint_for(Var,Prop,C),select(C,Doms,NewDoms),mtc_put_attr(Var,iza,NewDoms).

not_has_cond(Var,Prop):- obtain_conds(Var,Doms),map_one_or_list(not_has_cond(Doms,Var),Prop).
not_has_cond(Doms,Var,Prop):- \+ has_cond(Doms,Var,Prop).

%% chk_cond( ?E, ?Cs) is semidet.
%
% Check E against conditions Cs once (no-op when coroutines disabled).
:- module_transparent(chk_cond/2).
chk_cond(_,_):- local_override(no_kif_var_coroutines,G),!,call(G).
chk_cond(E,Cs):- once(call_cond(E,Cs)).

:- module_transparent(call_cond/2).
:- module_transparent(call_cond_x/2).
%% call_cond( ?Var, +Conds) is semidet.
%
% Execute a condition list against Var via call_cond_x/2 (defined
% further down in this file).
call_cond(Var):- as_existential(Var,X),obtain_conds(X,Conds),call_cond_x(X,Conds).
call_cond(Var,Conds):- is_fort(Var),!,as_existential(Var,X),call_cond_x(X,Conds).
call_cond(Var,Conds):- call_cond_x(Var,Conds).
call_cond_x(Y, [H|List]):- ground(Y),!,cond_call0(Y,H),!,cond_call00(Y, List). call_cond_x(Y, [H|List]):- !,maplist(cond_call0(Y),[H|List]). call_cond_x(_, _). cond_call00(Y, [H|List]):-!,cond_call0(Y,H),!,cond_call00(Y, List). cond_call00(_, _). cond_call0(Y,H):- atom(H),!,nesc(isa(Y,H)). cond_call0(_,dif_objs(X,Y)):-!,X\==Y. cond_call0(Y,props(H)):- ereq(props(Y,H)). cond_call0(Y,H):- arg(_,H,E),Y==E,!,call_u(H). cond_call0(_,H):- call_u(H). /* enforce_fa_unify_hook([Goal|ArgIsas],Value):- !, enforce_fa_call(Goal,Value), enforce_fa_unify_hook(ArgIsas,Value). enforce_fa_unify_hook(_,_). enforce_fa_call(Goal,Value):- atom(Goal),!,call(Goal,Value). enforce_fa_call(Goal,Value):- arg(_,Goal,Var),Var==Value,!,call(Goal). enforce_fa_call(Goal,Value):- prepend_arg(Goal,Value,GVoal),!,call(GVoal). prepend_arg(M:Goal,Value,M:GVoal):- !, prepend_arg(Goal,Value,GVoal). prepend_arg(Goal,Value,GVoal):- Goal=..[F|ARGS],GVoal=..[F,Value|ARGS]. */ /* G=(loves(X,Y),~knows(Y,tHuman(X))),args_enforce(G,Out),maplist(call,Out). */ %% attribs_to_atoms( ?ListA, ?List) is semidet. % % Attribs Converted To Atoms. % attribs_to_atoms(ListA,List):-map_subterms(attribs_to_atoms0,ListA,List). %% map_subterms( :PRED2Pred, ?I, ?O) is semidet. % % Map Subterms. % map_subterms(Pred,I,O):-is_list(I),!,maplist(map_subterms(Pred),I,O). map_subterms(Pred,I,O):-call(Pred,I,O),!. map_subterms(Pred,I,O):-compound(I),!,I=..IL,maplist(map_subterms(Pred),IL,OL),O=..OL. map_subterms(_Pred,IO,IO). %% condz_to_isa( :TermAA, :TermAB) is semidet. % % iza Converted To (iprops/2). % condz_to_isa(Iza,ftTerm):-var(Iza),!. condz_to_isa((A,B),isAnd(ListO)):-!,conjuncts_to_list((A,B),List),list_to_set(List,Set),min_cond_l(Set,ListO). condz_to_isa((A;B),isOr(Set)):-!,conjuncts_to_list((A,B),List),list_to_set(List,Set). condz_to_isa(AA,AB):-must(AA=AB). %% attribs_to_atoms0( ?Var, ?Isa) is semidet. % % Attribs Converted To Atoms Primary Helper. 
% attribs_to_atoms0(Var,Isa):-mtc_get_attr(Var,iza,Iza),!,must(condz_to_isa(Iza,Isa)). attribs_to_atoms0(O,O):- \+ (compound(O)). %% min_cond_l( ?List, ?ListO) is semidet. % % min (sub_super/2) (List version). % min_cond_l(List,ListO):-isa_pred_l(lambda(Y,X,sub_super(X,Y)),List,ListO). %% max_cond_l( ?List, ?ListO) is semidet. % % max (sub_super/2) (List version). % max_cond_l(List,ListO):-isa_pred_l(sub_super,List,ListO). %% isa_pred_l( :PRED2Pred, ?List, ?ListO) is semidet. % % (iprops/2) Predicate (List version). % isa_pred_l(Pred,List,ListO):-isa_pred_l(Pred,List,List,ListO). %% isa_pred_l( :PRED2Pred, ?UPARAM2, ?List, ?UPARAM4) is semidet. % % (iprops/2) Predicate (List version). % isa_pred_l(_Pred,[],_List,[]). isa_pred_l(Pred,[X|L],List,O):-member(Y,List),X\=Y,call_u(call(Pred,X,Y)),!,isa_pred_l(Pred,L,List,O). isa_pred_l(Pred,[X|L],List,[X|O]):-isa_pred_l(Pred,L,List,O). %% min_cond( :TermHintA, ?HintE, ?HintE) is semidet. % % min (sub_super/2). % min_cond([H],In,Out):- !, min_cond0(H,In,Out). min_cond([H|T],In,Out):- !, min_cond0(H,In,Mid),min_cond(T,Mid,Out). min_cond(E,In,Out):- min_cond0(E,In,Out). min_cond0(HintA,[],[HintA]). min_cond0(HintA,[HintB|HintL],[HintB|HintL]):- HintA==HintB,!. min_cond0(HintA,[HintB|HintL],[HintA,HintB|HintL]):- functor(HintA,_,A),functor(HintB,_,B),B>A,!. min_cond0(HintA,[HintB|HintL],[HintA|HintL]):- sub_super(HintA,HintB),!. min_cond0(HintA,[HintB|HintL],[HintB|HintL]):- sub_super(HintB,HintA),!. min_cond0(HintA,[HintB|HintL],[HintB|HintS]):- !,min_cond0(HintA,HintL,HintS). sub_super(Col1,Col2):- tCol(Col1),!,genls(Col1,Col2). %% max_cond( :TermHintA, ?HintE, ?HintE) is semidet. % % max (sub_super/2). % max_cond([H],In,Out):- !, max_cond0(H,In,Out). max_cond([H|T],In,Out):- !, max_cond0(H,In,Mid),max_cond(T,Mid,Out). max_cond(E,In,Out):- max_cond0(E,In,Out). max_cond0(HintA,[],[HintA]). max_cond0(HintA,[HintB|HintL],[HintB|HintL]):- HintA==HintB,!. 
max_cond0(HintA,[HintB|HintL],[HintA,HintB|HintL]):- functor(HintA,_,A),functor(HintB,_,B),B>A,!. max_cond0(HintA,[HintB|HintL],[HintA|HintL]):- sub_super(HintB,HintA),!. max_cond0(HintA,[HintB|HintL],[HintB|HintL]):- sub_super(HintA,HintB),!. max_cond0(HintA,[HintB|HintL],[HintB|HintS]):- !,max_cond0(HintA,HintL,HintS). :- style_check(-singleton). %% unrelax( ?X) is semidet. % % Domain Labeling (residuals). % unrelax(X):-copy_term(X,X,Gs),maplist(iz_member,Gs). %% iz_member( :GoalG) is semidet. % % Domain Member. % iz_member(iz(X,List)):-!,member(X,List). iz_member(G):-G. :- style_check(-singleton). %% attempt_attribute_args( ?AndOr, ?Hint, :TermVar) is semidet. % % Attempt Attribute Arguments. % attempt_attribute_args(_AndOr,Hint,Var):- var(Var),add_cond(Var,Hint),!. attempt_attribute_args(_AndOr,_Hint,Grnd):-ground(Grnd),!. attempt_attribute_args(_AndOr,_Hint,Term):- \+ (compound(Term)),!. attempt_attribute_args(AndOr,Hint,+(A)):-!,attempt_attribute_args(AndOr,Hint,A). attempt_attribute_args(AndOr,Hint,-(A)):-!,attempt_attribute_args(AndOr,Hint,A). attempt_attribute_args(AndOr,Hint,?(A)):-!,attempt_attribute_args(AndOr,Hint,A). attempt_attribute_args(AndOr,Hint,(A,B)):-!,attempt_attribute_args(AndOr,Hint,A),attempt_attribute_args(AndOr,Hint,B). attempt_attribute_args(AndOr,Hint,[A|B]):-!,attempt_attribute_args(AndOr,Hint,A),attempt_attribute_args(AndOr,Hint,B). attempt_attribute_args(AndOr,Hint,(A;B)):-!,attempt_attribute_args(';'(AndOr),Hint,A),attempt_attribute_args(';'(AndOr),Hint,B). attempt_attribute_args(_AndOr,_Hint,Term):- use_was_isa(Term,I,C), add_cond(I,C). attempt_attribute_args(AndOr,_Hint,Term):- Term=..[F,A],tCol(F),!,attempt_attribute_args(AndOr,F,A). attempt_attribute_args(AndOr,Hint,Term):- Term=..[F|ARGS],!,attempt_attribute_args(AndOr,Hint,F,1,ARGS). %% attempt_attribute_args( ?AndOr, ?Hint, ?F, ?N, :TermARG5) is semidet. % % Attempt Attribute Arguments. % attempt_attribute_args(_AndOr,_Hint,_F,_N,[]):-!. 
attempt_attribute_args(AndOr,_Hint,t,1,[A]):-attempt_attribute_args(AndOr,callable,A). attempt_attribute_args(AndOr,Hint,t,N,[A|ARGS]):-atom(A),!,attempt_attribute_args(AndOr,Hint,A,N,ARGS). attempt_attribute_args(_AndOr,_Hint,t,_N,[A|_ARGS]):- \+ (atom(A)),!. attempt_attribute_args(AndOr,Hint,F,N,[A|ARGS]):-attempt_attribute_one_arg(Hint,F,N,A),N2 is N+1,attempt_attribute_args(AndOr,Hint,F,N2,ARGS). %% attempt_attribute_one_arg( ?Hint, ?F, ?N, ?A) is semidet. % % Attempt Attribute One Argument. % attempt_attribute_one_arg(_Hint,F,N,A):-call_u(argIsa(F,N,Type)),Type\=ftTerm, \+ (compound(Type)),!,attempt_attribute_args(and,Type,A). attempt_attribute_one_arg(_Hint,F,N,A):-call_u(argQuotedIsa(F,N,Type)),Type\=ftTerm, \+ (compound(Type)),!,attempt_attribute_args(and,Type,A). attempt_attribute_one_arg(_Hint,F,N,A):-call_u(argIsa(F,N,Type)),Type\=ftTerm,!,attempt_attribute_args(and,Type,A). attempt_attribute_one_arg(_Hint,F,N,A):-attempt_attribute_args(and,argi(F,N),A). :- was_export((samef/2,same/2)). %% same( ?X, ?Y) is semidet. % % Same. % same(X,Y):- samef(X,Y),!. same(X,Y):- compound(X),arg(1,X,XX)->same(XX,Y),!. same(Y,X):- compound(X),arg(1,X,XX),!,same(XX,Y). %% samef( ?X, ?Y) is semidet. % % Samef. % samef(X,Y):- quietly(((to_functor(X,XF),to_functor(Y,YF),(XF=YF->true;string_equal_ci(XF,YF))))). %% to_functor( ?A, ?O) is semidet. % % Converted To Functor. % to_functor(A,O):-is_ftVar(A),!,A=O. to_functor(A,O):-compound(A),get_functor(A,O),!. % ,to_functor(F,O). to_functor(A,A). :- was_export(arg_to_var/3). %% arg_to_var( ?Type, ?String, ?Var) is semidet. % % Argument Converted To Variable. % arg_to_var(_Type,_String,_Var). :- was_export(same_arg/3). %% same_arg( ?How, ?X, ?Y) is semidet. % % Same Argument. % same_arg(_How,X,Y):-var(X),var(Y),!,X=Y. same_arg(equals,X,Y):-!,equals_call(X,Y). same_arg(tCol(_Type),X,Y):-!, unify_with_occurs_check(X,Y). same_arg(ftText,X,Y):-(var(X);var(Y)),!,X=Y. same_arg(ftText,X,Y):-!, string_equal_ci(X,Y). 
same_arg(same_or(equals),X,Y):- same_arg(equals,X,Y). same_arg(same_or(sub_super),X,Y):- same_arg(equals,X,Y). same_arg(same_or(sub_super),Sub,Sup):- holds_t(sub_super,Sub,Sup),!. same_arg(same_or(isa),X,Y):- same_arg(equals,X,Y). same_arg(same_or(isa),I,Sup):- !, holds_t(Sup,I),!. same_arg(same_or(_Pred),X,Y):- same_arg(equals,X,Y). same_arg(same_or(Pred),I,Sup):- holds_t(Pred,I,Sup),!. % same_arg(I,X):- promp_yn('~nSame Objects: ~q== ~q ?',[I,X]). %% promp_yn( ?Fmt, ?A) is semidet. % % Promp Yn. % promp_yn(Fmt,A):- format(Fmt,A),get_single_char(C),C=121. % :-swi_module(iz, [ iz/2 ]). % Var, ?Domain :- use_module(library(ordsets)). %% iz( ?X, ?Dom) is semidet. % % Domain. % :- was_export(iz/2). iz(X, Dom) :- var(Dom), !, mtc_get_attr(X, iz, Dom). % iz(X, Dom) :- var(Dom), !, (mtc_get_attr(X, iz, Dom)->true;mtc_put_attr(X, iz, [iziz(Dom)])). iz(X, List) :- listify(List,List0), list_to_ord_set(List0, Domain), mtc_put_attr(Y, iz, Domain), X = Y. :- was_export(extend_iz_member/2). %% extend_iz_member( ?X, ?DomL) is semidet. % % Extend Domain. % extend_iz_member(X, DomL):- init_iz(X, Dom2), ord_union(Dom2, DomL, NewDomain),mtc_put_attr( X, iz, NewDomain ). :- was_export(extend_iz/2). %% extend_iz( ?X, ?DomE) is semidet. % % Extend Domain. % extend_iz(X, DomE):- init_iz(X, Dom2),ord_add_element(Dom2, DomE, NewDomain),mtc_put_attr( X, iz, NewDomain ). :- was_export(init_iz/2). %% init_iz( ?X, ?Dom) is semidet. % % Init Domain. % init_iz(X,Dom):-mtc_get_attr(X, iz, Dom),!. init_iz(X,Dom):-Dom =[_], mtc_put_attr(X, iz, Dom),!. % An attributed variable with attribute value Domain has been % assigned the value Y iz:attr_unify_hook([Y], Value) :- same(Y , Value),!. iz:attr_unify_hook(Domain, Y) :- ( mtc_get_attr(Y, iz, Dom2) -> ord_intersection(Domain, Dom2, NewDomain), ( NewDomain == [] -> fail ; NewDomain = [Value] -> same(Y , Value) ; mtc_put_attr(Y, iz, NewDomain) ) ; var(Y) -> mtc_put_attr( Y, iz, Domain ) ; (\+ \+ (cmp_memberchk_0(Y, Domain))) ). 
% Translate attributes from this module to residual goals iz:attribute_goals(X) --> { mtc_get_attr(X, iz, List) },!,[iz(X, List)]. %iz:attr_portray_hook(Val, _) :- write('iz:'), write(Val),!. %iza:attr_portray_hook(Val, _) :- write('iza:'), write(Val),!. %% cmp_memberchk_0( ?X, ?Y) is semidet. % % Cmp Memberchk. % cmp_memberchk_0(X,Y):-numbervars(X,0,_,[attvars(skip)]),member(X,Y),!. %% cmp_memberchk_00( ?Item, :TermX1) is semidet. % % Cmp Memberchk Primary Helper. % cmp_memberchk_00(Item, [X1,X2,X3,X4|Xs]) :- !, compare(R4, Item, X4), ( R4 = (>) -> cmp_memberchk_00(Item, Xs) ; R4 = (<) -> compare(R2, Item, X2), ( R2 = (>) -> Item = X3 ; R2 = (<) -> Item = X1 ;/* R2 = (=), Item = X2 */ true ) ;/* R4 = (=) */ true ). cmp_memberchk_00(Item, [X1,X2|Xs]) :- !, compare(R2, Item, X2), ( R2 = (>) -> cmp_memberchk_00(Item, Xs) ; R2 = (<) -> Item = X1 ;/* R2 = (=) */ true ). cmp_memberchk_00(Item, [X1]) :- Item = X1. :- meta_predicate(call_engine_m(?,0,-,-)). call_engine_m(Templ,Goal,Engine,Det):- call_engine_start_m(Templ,Goal,Engine), call_engine_next_m(Engine,Templ,Det). :- meta_predicate(call_engine_start_m(?,0,-)). call_engine_start_m(Templ,Goal,Engine):- engine_create(Templ-TF0,(Goal,deterministic(TF0)),Engine). call_engine_next_m(Engine,Templ,Det):- repeat, engine_next(Engine,Templ-Det), (Det==true->!;true). metapred_plus(_,_):-!. metapred_plus(Cmp,Plus):- (\+ compound(Cmp) -> S=0 ; compound_name_arity(Cmp,F,S)), A is S + Plus, current_predicate(F/A),!. metapred_plus(_,_). not_dif_objs(A,B):- \+ dif_objs(A,B). :- meta_predicate(pred1_to_unique_pairs(1,-,-)). pred1_to_unique_pairs(Pred1,Obj1,Obj2):- sanity(assertion(metapred_plus(Pred1,1))), lazy_findall(Elem,call(Pred1,Elem),List), list_to_unique_pairs(List,Obj1,Obj2). :- meta_predicate(pred1_to_unique_pairs_confirmed(1,-,-)). pred1_to_unique_pairs_confirmed(Pred1,Obj1,Obj2):- Tracker = '$t'([]), Same2 = not_dif_objs, pred1_to_unique_pairs(Pred1,ObjA,ObjB), different_pairs(Same2,Tracker,ObjA,ObjB,Obj1,Obj2). 
list_to_unique_pairs(List,Obj1,Obj2):- append(_Left,[Obj1|Rest],List),member(Obj2,Rest). :- meta_predicate different_pairs(2,+,?,?,?,?). different_pairs(Same2,Tracker,ObjA,ObjB,Obj1,Obj2):- Test = p(TObj1,TObj2), zotrace(sanity((must_be(compound,Tracker), assertion(metapred_plus(Pred2InstsDiff,2))))), zotrace((\+ call(Same2, ObjA, ObjA))), zotrace((( ObjA @> ObjB -> (ObjA = Obj1, ObjB = Obj2) ; (ObjA = Obj2, ObjB = Obj1)))), must(arg(1,Tracker,PrevPairs)), (((member(Test,PrevPairs),call(Same2,Obj1,TObj1),call(Same2,Obj2,TObj2)))-> fail ; true), must(nb_setarg(1,Tracker,[p(Obj1,Obj2)|PrevPairs])). /** <module> The difv/2 constraint */ %! difv(+Term1, +Term2) is semidet. % % Constraint that expresses that Term1 and Term2 never become % variant (=@@=/2). Fails if `Term1 =@@= Term2`. Succeeds if Term1 % can never become identical to Term2. In other cases the % predicate succeeds after attaching constraints to the relevant % parts of Term1 and Term2 that prevent the two terms to become % identical. =@@=(X,Y):-!, ==(X,Y). % =@@=(X,Y):- (attvar(X);attvar(Y))-> X==Y ;((var(X);var(Y))-> X==Y ; X=@=Y). :- op(700,xfx,user:('=@@=')). % difv(_X,_Y):-!. difv(X,Y) :- \+ (X =@@= Y), difv_c_c(X,Y,_). difv_unifiable(X, Y, Us) :- ( current_prolog_flag(occurs_check, error) -> catch(unifiable(X,Y,Us), error(occurs_check(_,_),_), false) ; unifiable(X, Y, Us) ). difv_c_c(X,Y,OrNode) :- ( difv_unifiable(X, Y, Unifier) -> ( Unifier == [] -> or_one_failv(OrNode) ; difv_c_c_l(Unifier,OrNode) ) ; or_succeedv(OrNode) ). difv_c_c_l(Unifier,OrNode) :- length(Unifier,N), extend_ornodevv(OrNode,N,List,Tail), difv_c_c_l_aux(Unifier,OrNode,List,Tail). extend_ornodevv(OrNode,N,List,Vars) :- ( get_attr(OrNode,difv,Attr) -> Attr = nodev(M,Vars), O is N + M - 1 ; O = N, Vars = [] ), put_attr(OrNode,difv,nodev(O,List)). difv_c_c_l_aux([],_,List,List). difv_c_c_l_aux([X=Y|Unifier],OrNode,List,Tail) :- List = [X=Y|Rest], add_ornodevv(X,Y,OrNode), difv_c_c_l_aux(Unifier,OrNode,Rest,Tail). 
add_ornodevv(X,Y,OrNode) :- add_ornodev_var1(X,Y,OrNode), ( var(Y) -> add_ornodev_var2(X,Y,OrNode) ; true ). add_ornodev_var1(X,Y,OrNode) :- ( get_attr(X,difv,Attr) -> Attr = vardifv(V1,V2), put_attr(X,difv,vardifv([OrNode-Y|V1],V2)) ; put_attr(X,difv,vardifv([OrNode-Y],[])) ). add_ornodev_var2(X,Y,OrNode) :- ( get_attr(Y,difv,Attr) -> Attr = vardifv(V1,V2), put_attr(Y,difv,vardifv(V1,[OrNode-X|V2])) ; put_attr(Y,difv,vardifv([],[OrNode-X])) ). difv:attr_unify_hook(vardifv(V1,V2),Other) :- ( var(Other) -> reverse_lookupsv(V1,Other,OrNodes1,NV1), or_one_failvsv(OrNodes1), get_attr(Other,difv,OAttr), OAttr = vardifv(OV1,OV2), reverse_lookupsv(OV1,Other,OrNodes2,NOV1), or_one_failvsv(OrNodes2), remove_obsoletev(V2,Other,NV2), remove_obsoletev(OV2,Other,NOV2), append(NV1,NOV1,CV1), append(NV2,NOV2,CV2), ( CV1 == [], CV2 == [] -> del_attr(Other,difv) ; put_attr(Other,difv,vardifv(CV1,CV2)) ) ; verify_compoundsv(V1,Other), verify_compoundsv(V2,Other) ). remove_obsoletev([], _, []). remove_obsoletev([N-Y|T], X, L) :- ( Y=@@=X -> remove_obsoletev(T, X, L) ; L=[N-Y|RT], remove_obsoletev(T, X, RT) ). reverse_lookupsv([],_,[],[]). reverse_lookupsv([N-X|NXs],Value,Nodes,Rest) :- ( X =@@= Value -> Nodes = [N|RNodes], Rest = RRest ; Nodes = RNodes, Rest = [N-X|RRest] ), reverse_lookupsv(NXs,Value,RNodes,RRest). verify_compoundsv([],_). verify_compoundsv([OrNode-Y|Rest],X) :- ( var(Y) -> true ; OrNode == (-) -> true ; difv_c_c(X,Y,OrNode) ), verify_compoundsv(Rest,X). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% or_succeedv(OrNode) :- ( attvar(OrNode) -> get_attr(OrNode,difv,Attr), Attr = nodev(_Counter,Pairs), del_attr(OrNode,difv), OrNode = (-), del_or_difv(Pairs) ; true ). or_one_failvsv([]). or_one_failvsv([N|Ns]) :- or_one_failv(N), or_one_failvsv(Ns). 
or_one_failv(OrNode) :- ( attvar(OrNode) -> get_attr(OrNode,difv,Attr), Attr = nodev(Counter,Pairs), NCounter is Counter - 1, ( NCounter == 0 -> fail ; put_attr(OrNode,difv,nodev(NCounter,Pairs)) ) ; fail ). del_or_difv([]). del_or_difv([X=Y|Xs]) :- cleanup_dead_nodesv(X), cleanup_dead_nodesv(Y), del_or_difv(Xs). cleanup_dead_nodesv(X) :- ( attvar(X) -> get_attr(X,difv,Attr), Attr = vardifv(V1,V2), filter_dead_orsv(V1,NV1), filter_dead_orsv(V2,NV2), ( NV1 == [], NV2 == [] -> del_attr(X,difv) ; put_attr(X,difv,vardifv(NV1,NV2)) ) ; true ). filter_dead_orsv([],[]). filter_dead_orsv([Or-Y|Rest],List) :- ( var(Or) -> List = [Or-Y|NRest] ; List = NRest ), filter_dead_orsv(Rest,NRest). /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - The attribute of a variable X is vardifv/2. The first argument is a list of pairs. The first component of each pair is an OrNode. The attribute of each OrNode is node/2. The second argument of node/2 is a list of equations A = B. If the LHS of the first equation is X, then return a goal, otherwise don''t because someone else will. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ difv:attribute_goals(Var) --> !. difv:attribute_goals(Var) --> ( { get_attr(Var, difv, vardifv(Ors,_)) } -> or_nodesv(Ors, Var) ; or_nodev(Var) ). or_nodev(O) --> ( { get_attr(O, difv, nodev(_, Pairs)) } -> { eqs_lefts_rightsv(Pairs, As, Bs) }, mydifv(As, Bs), { del_attr(O, difv) } ; [] ). or_nodesv([], _) --> []. or_nodesv([O-_|Os], X) --> ( { get_attr(O, difv, nodev(_, Eqs)) } -> ( { Eqs = [LHS=_|_], LHS =@@= X } -> { eqs_lefts_rightsv(Eqs, As, Bs) }, mydifv(As, Bs), { del_attr(O, difv) } ; [] ) ; [] % or-node already removed ), or_nodesv(Os, X). mydifv([X], [Y]) --> !, difv_if_necessary(X, Y). mydifv(Xs0, Ys0) --> { reverse(Xs0, Xs), reverse(Ys0, Ys), % follow original order X =.. [f|Xs], Y =.. [f|Ys] }, difv_if_necessary(X, Y). difv_if_necessary(X, Y) --> ( { difv_unifiable(X, Y, _) } -> [difv(X,Y)] ; [] ). 
eqs_lefts_rightsv([], [], []). eqs_lefts_rightsv([A=B|ABs], [A|As], [B|Bs]) :- eqs_lefts_rightsv(ABs, As, Bs). %% type_size( ?VALUE1, :PRED1000VALUE2) is semidet. % % Type Size. % type_size(C,S):-a(completeExtentEnumerable,C),!,setof(E,call_u(t(C,E)),L),length(L,S). type_size(C,1000000):-a(ttExpressionType,C),!. type_size(_,1000). /* ?- Z #=:= 2 + X, Z #< 2 . succ(succ(0)). S2I I2E 2 2 2 E2S S = succ/1. I = integer E = 2 a:p(1). a:p(X):-b:p(X). b:p(X):-c:p(X). b:p(2). */ %% comp_type( ?Comp, ?Col1, ?Col2) is semidet. % % Comp Type. % comp_type(Comp,Col1,Col2):-type_size(Col1,S1),type_size(Col2,S2),compare(Comp,S1,S2). :- fixup_exports. mpred_type_constraints_file. %% goal_expansion( ?LC, ?LCOO) is semidet. % % Hook To [system:goal_expansion/2] For Module Mpred_type_constraints. % Goal Expansion. % % system:goal_expansion(G,O):- \+ current_prolog_flag(xref,true),\+ pldoc_loading, nonvar(G),boxlog_goal_expansion(G,O).
TeamSPoon/logicmoo_base
prolog/logicmoo/typesystem/mpred_type_constraints.pl
Prolog
mit
59,221
package DT;

# DT - print a complete HTML page containing a sortable "dynamic table".
# Client-side sorting/mouseover behaviour is supplied by the external
# dt.js / ms.js widgets referenced from the generated header.
# All output is written to STDOUT.

require Exporter;   # NOTE(review): loaded but no @ISA/@EXPORT is set up, so
                    # nothing is exported; kept as-is for compatibility.
use strict;
use warnings;
use Carp;
use Data::Dumper;
use utf8;

# Module-load side effect: table output is UTF-8 encoded.
binmode(STDOUT, ":utf8");

# print_dynamic_table(\@head, \@rows, \%opts)
#
# Print an HTML table to STDOUT.
#   \@head - column header labels
#   \@rows - arrayref of row arrayrefs; cell values are printed verbatim,
#            so the caller is responsible for any HTML escaping
#   \%opts - title          : page <title> (defaults to "Dynamic Table")
#            extra_css      : raw CSS/HTML printed right after the header
#            no_html_header : suppress the <html>/<head>/<style> preamble
#            no_html_end    : suppress the closing </body></html> part
sub print_dynamic_table {
    my ($head, $rows, $opts) = @_;
    my $extra_css      = $opts->{extra_css};
    my $no_html_header = $opts->{no_html_header};
    my $no_html_end    = $opts->{no_html_end};
    my $title          = $opts->{title};

    print_html_header($title) unless $no_html_header;
    # Bug fix: printing an undefined value raised an uninitialized-value
    # warning whenever the caller did not pass extra_css.
    print $extra_css if defined $extra_css;
    print " <table class=dynamicTable>\n";
    print " <thead>\n";
    my $c  = "column_1_";
    my $ci = 1;
    # NOTE(review): ++$ci is evaluated before interpolation, so the first
    # header cell gets class "column_1_2" - presumably what dt.js expects;
    # left unchanged to avoid breaking the widget. TODO confirm.
    print " <tr>" . join('', map { "<th class=$c" . ++$ci . ">$_</th>" } @$head) . "</tr>\n";
    print " </thead>\n";
    for my $row (@$rows) {
        print " <tr>";
        print join('', map { "<td>$_</td>" } @$row);
        print " </tr>\n";
    }
    print " </table>\n";
    print_html_end() unless $no_html_end;
}

# print_html_header($title)
#
# Print the page preamble: <html>, <head> with <title>, the dt.js/ms.js
# script tags, and the stylesheet used by the dynamic table.
sub print_html_header {
    my ($title) = @_;
    $title ||= "Dynamic Table";
    print <<End_of_Header;
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8" />
End_of_Header
    print "<title>$title</title>\n";
    print <<End_of_Style;
<script type="text/javascript" src="dt.js"></script>
<script language="JavaScript" type="text/javascript" src="ms.js"></script>
</head>
<body>
<style type="text/css">
.dynamicTable {
    font-size: 13px;
    font-family: sans-serif;
    border-width: 1px;
    border-spacing: 0;
    border-style: outset;
    border-color: gray;
    border-collapse: separate;
    background-color: white;
}
.dynamicTable th {
    border-width: 1px;
    padding: 3px;
    border-style: inset;
    border-color: rgb(170, 170, 170);
    background-color: white;
    -moz-border-radius: ;
}
.dynamicTable td.header {
    font-size: 15px;
    border-width: 1px;
    padding: 3px;
    border-style: inset;
    border-color: rgb(170, 170, 170);
    background-color: white;
    -moz-border-radius: ;
}
.dynamicTable td {
    border-width: 1px;
    padding: 2px;
    white-space: nowrap;
    text-align: center;
    border-style: inset;
    border-color: rgb(170, 170, 170);
    -moz-border-radius: ;
}
.dynamicTable tr:nth-child(even) {
    background-color: rgba(0, 0, 255, 0.08); /* greenish blue, 8% alpha */
}
.centeredImage {
    text-align:center;
    vertical-align:middle;
    margin-top:0px;
    margin-bottom:0px;
    padding:0px;
}
.centerText {
    text-align:center;
    white-space:nowrap;
}
.wrap {
    white-space:normal;
}
.mouseover {
    font-size: 13px;
    font-family: sans-serif;
}
.mouseoverTable {
    font-size: 13px;
    font-family: sans-serif;
}
.mouseoverTable td {
    white-space: nowrap;
}
.transparent a:link, a:visited {
    font-size: 9px;
    color: rgba(0, 0, 255, 0.0);
}
</style>
<style type="text/css">
<!--
table {
}
-->
</style>
End_of_Style
}

# print_html_end()
#
# Print the page footer: a (visually transparent) attribution link for the
# Dynamic Table widget, then close <body> and <html>.
sub print_html_end {
    print <<End_of_Table;
<div class=transparent>
<a title="Dynamic Table - A javascript table sort widget." href="http://dynamictable.com">Quick and easy table sorting powered by Dynamic Table</a>
</div>
</body>
</html>
End_of_Table
}

# mouseover_javascript()
#
# Return the <script> tag that loads the mouseover helper (ms.js).
sub mouseover_javascript {
    '<script language="JavaScript" type="text/javascript" src="ms.js"></script>';
}

# span_mouseover($text, $title, $html, $menu, $parent, $titlecolor, $bodycolor)
#
# Wrap $text in a <span> carrying a Popup_Tooltip onMouseover handler.
# $title and $html default to placeholder strings when not supplied.
sub span_mouseover {
    my ($text, $title, $html, $menu, $parent, $titlecolor, $bodycolor) = @_;
    $title ||= "Title bar <i>text</i> goes here";
    $html  ||= "Body text.<br />This can have any <b>HTML</b> tags you like.";
    my $tip = mouseover($title, $html, $menu, $parent, $titlecolor, $bodycolor);
    # NOTE(review): $html is always true here because of the default above,
    # so the bare-$text branch is effectively dead; kept as written.
    return $html ? "<span $tip>$text</span>" : $text;
}

#-------------------------------------------------------------------------------
# Return a string for adding an onMouseover tooltip handler:
#
#    mouseover( $title, $text, $menu, $parent, $titlecolor, $bodycolor )
#
# The code here is virtually identical to that in FIGjs.pm, but makes this
# SEED independent.  Single quotes are backslash-escaped and double quotes
# HTML-entity-escaped so the arguments survive embedding in the inline
# JavaScript attribute.
#-------------------------------------------------------------------------------
sub mouseover {
    # if ( $have_FIGjs ) { return &FIGjs::mouseover( @_ ) }

    my ( $title, $text, $menu, $parent, $titlecolor, $bodycolor ) = @_;

    defined( $title ) or $title = '';
    $title =~ s/'/\\'/g;      # escape '
    $title =~ s/"/&quot;/g;   # escape "
    defined( $text ) or $text = '';
    $text  =~ s/'/\\'/g;      # escape '
    $text  =~ s/"/&quot;/g;   # escape "
    defined( $menu ) or $menu = '';
    $menu  =~ s/'/\\'/g;      # escape '
    $menu  =~ s/"/&quot;/g;   # escape "

    $parent     = '' if ! defined $parent;
    $titlecolor = '' if ! defined $titlecolor;
    $bodycolor  = '' if ! defined $bodycolor;

    qq( onMouseover="javascript:if(!this.tooltip) this.tooltip=new Popup_Tooltip(this,'$title','$text','$menu','$parent','$titlecolor','$bodycolor');this.tooltip.addHandler(); return false;" );
}

# span_css($text, $class)
#
# Wrap $text in a <span> with the given CSS class; return $text unchanged
# when no class is supplied.
sub span_css {
    my ($text, $class) = @_;
    return $class ? "<span class=\"$class\">$text</span>" : $text;
}

1;
levinas/kb_seq_comp
tools/DT.pm
Perl
mit
4,957
# Time-stamp: "Sat Jul 14 00:27:39 2001 by Automatic Bizooty (__blocks2pm.plx)"
#
# Text::Unidecode transliteration table for Unicode block 0xBE
# (Hangul syllables U+BE00 .. U+BEFF): 256 ASCII romanizations, one per
# code point, each a base syllable plus one of the 28 possible codas.
#
# Bug fix: the table variable had been corrupted to
# "$Text::\SEPA\Unicode\Unidecode::Char[0xbe]" - backslashes are not legal
# in a Perl package name, so the file did not even compile.  Restored to
# the canonical Text::Unidecode slot.
$Text::Unidecode::Char[0xbe] = [
# tail of the "byu" block (U+BE00..U+BE0B)
'byum', 'byub', 'byubs', 'byus',
'byuss', 'byung', 'byuj', 'byuc',
'byuk', 'byut', 'byup', 'byuh',
# "beu" block
'beu', 'beug', 'beugg', 'beugs',
'beun', 'beunj', 'beunh', 'beud',
'beul', 'beulg', 'beulm', 'beulb',
'beuls', 'beult', 'beulp', 'beulh',
'beum', 'beub', 'beubs', 'beus',
'beuss', 'beung', 'beuj', 'beuc',
'beuk', 'beut', 'beup', 'beuh',
# "byi" block
'byi', 'byig', 'byigg', 'byigs',
'byin', 'byinj', 'byinh', 'byid',
'byil', 'byilg', 'byilm', 'byilb',
'byils', 'byilt', 'byilp', 'byilh',
'byim', 'byib', 'byibs', 'byis',
'byiss', 'bying', 'byij', 'byic',
'byik', 'byit', 'byip', 'byih',
# "bi" block
'bi', 'big', 'bigg', 'bigs',
'bin', 'binj', 'binh', 'bid',
'bil', 'bilg', 'bilm', 'bilb',
'bils', 'bilt', 'bilp', 'bilh',
'bim', 'bib', 'bibs', 'bis',
'biss', 'bing', 'bij', 'bic',
'bik', 'bit', 'bip', 'bih',
# "bba" block
'bba', 'bbag', 'bbagg', 'bbags',
'bban', 'bbanj', 'bbanh', 'bbad',
'bbal', 'bbalg', 'bbalm', 'bbalb',
'bbals', 'bbalt', 'bbalp', 'bbalh',
'bbam', 'bbab', 'bbabs', 'bbas',
'bbass', 'bbang', 'bbaj', 'bbac',
'bbak', 'bbat', 'bbap', 'bbah',
# "bbae" block
'bbae', 'bbaeg', 'bbaegg', 'bbaegs',
'bbaen', 'bbaenj', 'bbaenh', 'bbaed',
'bbael', 'bbaelg', 'bbaelm', 'bbaelb',
'bbaels', 'bbaelt', 'bbaelp', 'bbaelh',
'bbaem', 'bbaeb', 'bbaebs', 'bbaes',
'bbaess', 'bbaeng', 'bbaej', 'bbaec',
'bbaek', 'bbaet', 'bbaep', 'bbaeh',
# "bbya" block
'bbya', 'bbyag', 'bbyagg', 'bbyags',
'bbyan', 'bbyanj', 'bbyanh', 'bbyad',
'bbyal', 'bbyalg', 'bbyalm', 'bbyalb',
'bbyals', 'bbyalt', 'bbyalp', 'bbyalh',
'bbyam', 'bbyab', 'bbyabs', 'bbyas',
'bbyass', 'bbyang', 'bbyaj', 'bbyac',
'bbyak', 'bbyat', 'bbyap', 'bbyah',
# "bbyae" block
'bbyae', 'bbyaeg', 'bbyaegg', 'bbyaegs',
'bbyaen', 'bbyaenj', 'bbyaenh', 'bbyaed',
'bbyael', 'bbyaelg', 'bbyaelm', 'bbyaelb',
'bbyaels', 'bbyaelt', 'bbyaelp', 'bbyaelh',
'bbyaem', 'bbyaeb', 'bbyaebs', 'bbyaes',
'bbyaess', 'bbyaeng', 'bbyaej', 'bbyaec',
'bbyaek', 'bbyaet', 'bbyaep', 'bbyaeh',
# "bbeo" block
'bbeo', 'bbeog', 'bbeogg', 'bbeogs',
'bbeon', 'bbeonj', 'bbeonh', 'bbeod',
'bbeol', 'bbeolg', 'bbeolm', 'bbeolb',
'bbeols', 'bbeolt', 'bbeolp', 'bbeolh',
'bbeom', 'bbeob', 'bbeobs', 'bbeos',
'bbeoss', 'bbeong', 'bbeoj', 'bbeoc',
'bbeok', 'bbeot', 'bbeop', 'bbeoh',
# head of the "bbe" block (continues in the next table)
'bbe', 'bbeg', 'bbegg', 'bbegs',
'bben', 'bbenj', 'bbenh', 'bbed',
'bbel', 'bbelg', 'bbelm', 'bbelb',
'bbels', 'bbelt', 'bbelp', 'bbelh',
'bbem', 'bbeb', 'bbebs', 'bbes',
];

1;
dmitrirussu/php-sepa-xml-generator
src/Unicode/data/perl_source/xbe.pm
Perl
mit
2,411
#!/usr/bin/perl
use strict;
use warnings;

package CSL::MinDicString;

use IPC::Open2;

# Thin object wrapper around the external 'lookupMDString' binary: new()
# spawns one child process per dictionary file, and lookup() talks to it
# line-by-line over its stdin/stdout.

# new( dicFile => $path [, encoding => 'utf8'|'iso'|'bytes' ] )
#
# Returns a blessed object, or undef (after a message on STDERR) when no
# readable dicFile was supplied.  'iso' is accepted as an alias for the
# 'bytes' PerlIO layer; the default encoding is utf8.
sub new {
    my ( $class, %options ) = @_;
    my $self = {%options};

    # A readable dictionary file is mandatory.
    if ( !defined $self->{dicFile} || !-r $self->{dicFile} ) {
        print STDERR "MinDicString: provide 'dicFile' as argument for the constructor.\n";
        return undef;
    }

    # NOTE(review): host-specific installation path, selected via $ENV{HOSTNAME};
    # presumably matches the site's shared filesystem layout - verify on deploy.
    my $lookupBinary = "/mounts/data/proj/impact/software/uli/$ENV{HOSTNAME}/bin/lookupMDString";
    my $binary       = "$lookupBinary $self->{dicFile}";

    # open2( CHILD_STDOUT, CHILD_STDIN, command ): we read the child's output
    # from BINARY_OUT and write queries to BINARY_IN.
    open2( $self->{BINARY_OUT}, $self->{BINARY_IN}, $binary )
        or die "Perl::MinDicString: $!";

    if ( defined $self->{encoding} ) {
        if ( $self->{encoding} eq 'iso' ) {
            $self->{encoding} = 'bytes';
        }
    }
    else {
        # if nothing is specified, use utf8
        $self->{encoding} = 'utf8';
    }

    binmode( $self->{BINARY_OUT}, ':' . $self->{encoding} );
    binmode( $self->{BINARY_IN},  ':' . $self->{encoding} );

    bless( $self, $class );
    return $self;
}

# close()
#
# Shut down the pipes to the child process.
# Bug fix: the original closed the bareword handles BINARY_IN/BINARY_OUT
# (package globals that were never opened) instead of the object's handles,
# so the child's pipes were leaked.  CORE::close is used explicitly to make
# it obvious we call the builtin, not this method.
sub close {
    my ($self) = @_;
    CORE::close( $self->{BINARY_IN} )  if $self->{BINARY_IN};
    CORE::close( $self->{BINARY_OUT} ) if $self->{BINARY_OUT};
    return;
}

# lookup($key)
#
# Send one key to the child and return its one-line answer with the trailing
# newline removed.  Returns undef if the child has exited / the pipe is at
# EOF (the original warned on chomp(undef) in that case).
sub lookup {
    my ( $self, $key ) = @_;
    print { $self->{BINARY_IN} } ("$key\n");
    my $output = readline( $self->{BINARY_OUT} );
    return undef unless defined $output;
    chomp $output;
    return $output;
}

1;
ulir/FSDict
PERLLIB/CSL/MinDicString.pm
Perl
apache-2.0
1,138
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package hardware::pdu::gude::epc::snmp::mode::resources;

use strict;
use warnings;

use Exporter;

our @ISA = qw(Exporter);
# Bug fix: @EXPORT_OK previously advertised find_gude_version, a sub that
# does not exist, so importing either name failed.  Export the real sub and
# keep the old name as a working alias (see below) for backward compatibility.
our @EXPORT_OK = qw(find_gude_branch find_gude_version);

# Map of Gude EPC model numbers (as matched in sysDescr) to the private
# enterprise OID branch used by that model's MIB.
my $regexp_gude_branch = {
    '1104' => '.1.3.6.1.4.1.28507.68',
    '1105' => '.1.3.6.1.4.1.28507.69',
    '1202' => '.1.3.6.1.4.1.28507.43',
    '8021' => '.1.3.6.1.4.1.28507.77',
    '8025' => '.1.3.6.1.4.1.28507.79',
    '8031' => '.1.3.6.1.4.1.28507.81',
    '8035' => '.1.3.6.1.4.1.28507.83',
    '8041' => '.1.3.6.1.4.1.28507.85',
    '8045' => '.1.3.6.1.4.1.28507.87',
    '8101' => '.1.3.6.1.4.1.28507.89',
    '8221' => '.1.3.6.1.4.1.28507.56',
    '8226' => '.1.3.6.1.4.1.28507.58',
    '8316' => '.1.3.6.1.4.1.28507.64'
};

# find_gude_branch($self, snmp => $snmp_session)
#
# Fetch sysDescr (.1.3.6.1.2.1.1.1.0) over SNMP and return the enterprise
# OID branch for the first model number found in it.  On an unrecognized
# device, reports via $self->{output} and exits the plugin.
sub find_gude_branch {
    my ($self, %options) = @_;

    my $oid_sysDescr = '.1.3.6.1.2.1.1.1.0';
    my $snmp_result = $options{snmp}->get_leef(
        oids => [ $oid_sysDescr ],
        nothing_quit => 1
    );

    my $branch;
    foreach my $re (keys %$regexp_gude_branch) {
        # NOTE(review): match is deliberately unanchored - sysDescr strings
        # may embed the model number (e.g. "EPC8226"), so \b would break them.
        if ($snmp_result->{$oid_sysDescr} =~ /$re/) {
            $branch = $regexp_gude_branch->{$re};
            last;
        }
    }

    if (!defined($branch)) {
        $self->{output}->add_option_msg(short_msg => 'unsupported device: ' . $snmp_result->{$oid_sysDescr});
        $self->{output}->option_exit();
    }

    return $branch;
}

# Backward-compatible alias for the name historically listed in @EXPORT_OK.
*find_gude_version = \&find_gude_branch;

1;
centreon/centreon-plugins
hardware/pdu/gude/epc/snmp/mode/resources.pm
Perl
apache-2.0
2,102
=head1 LICENSE

Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2018] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=cut

## DESCRIPTION: Adding new js file from external link for gene expression atlas

package EnsEMBL::Web::Document::Element::BodyJavascript;

use strict;
use warnings;

use previous qw(content);

# Extend the stock body-javascript element: append externally hosted script
# tags for specific views (Expression Atlas, Reactome pathways), but only
# when the corresponding external service is reported as available.
sub content {
  my $self = shift;

  # Whatever the core element would emit, plus our additions below.
  my $js     = $self->PREV::content(@_);
  my $hub    = $self->hub;
  my $action = $hub->action || '';

  # GXA view only, and only while the EBI site is up (gxa_status); their
  # bundles must not pull in another jQuery, which would conflict with
  # our (newer) copy.
  if ($action eq 'ExpressionAtlas' && $hub->gxa_status) {
    $js .= qq{
      <script language="JavaScript" type="text/javascript" src="$SiteDefs::GXA_EBI_URL/js-bundles/vendorCommons.bundle.js"></script>
      <script language="JavaScript" type="text/javascript" src="$SiteDefs::GXA_EBI_URL/js-bundles/expressionAtlasHeatmapHighcharts.bundle.js"></script>
    };
  }

  # Pathway view: add the Reactome widget library when configured and up.
  if ($action eq 'Pathway' && $hub->pathway_status) {
    my $reactome_js = $self->species_defs->REACTOME_JS_LIBRARY;
    $js .= qq{<script type="text/javascript" language="javascript" src="$reactome_js"></script>} if $reactome_js;
  }

  return $js;
}

1;
muffato/public-plugins
widgets/modules/EnsEMBL/Web/Document/Element/BodyJavascript.pm
Perl
apache-2.0
1,931
% CVS: $Id: rotate.pl,v 1.3 1998/10/21 04:26:12 pets Exp $

% Groundness-analysis benchmark: list rotation.

% goal/0: analysis entry point. ground/1 is not defined in this file --
% presumably supplied by the benchmark harness to bind Ground to a ground
% term; confirm before running standalone.
goal :- ground(Ground), rotate(Ground, _).

% rotate(Xs, Ys): Ys is a rotation of Xs, i.e. some prefix As of Xs moved
% to the back (Xs = As ++ Bs, Ys = Bs ++ As). Enumerates every rotation
% on backtracking.
rotate(Xs,Ys) :-
	append(As,Bs,Xs),
	append(Bs,As,Ys).

% append(Xs, Ys, Zs): standard list concatenation, written with explicit
% unifications in the clause bodies rather than head unification (a form
% groundness analysers commonly expect).
append(Xs,Ys,Zs) :-
	Xs = [],
	Ys = Zs.
append(Xs,Ys,Zs) :-
	Xs = [X|Xs1],
	Zs = [X|Zs1],
	append(Xs1,Ys,Zs1).
pschachte/groundness
benchmarks/rotate.pl
Perl
apache-2.0
435
# NOTE: auto-generated class (aws-sdk-perl "auto-lib"); local edits may be
# overwritten when the SDK is regenerated from the AWS service model.
package Paws::ServiceCatalog::UpdateConstraint;
  use Moose;
  # Request parameters for the ServiceCatalog UpdateConstraint API call.
  has AcceptLanguage => (is => 'ro', isa => 'Str');
  has Description => (is => 'ro', isa => 'Str');
  has Id => (is => 'ro', isa => 'Str', required => 1);

  use MooseX::ClassAttribute;

  # Wiring consumed by Paws' caller machinery: API operation name, the
  # class the response is inflated into, and the result key (none here).
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'UpdateConstraint');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::ServiceCatalog::UpdateConstraintOutput');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;

### main pod documentation begin ###

=head1 NAME

Paws::ServiceCatalog::UpdateConstraint - Arguments for method UpdateConstraint on Paws::ServiceCatalog

=head1 DESCRIPTION

This class represents the parameters used for calling the method UpdateConstraint on the
AWS Service Catalog service. Use the attributes of this class
as arguments to method UpdateConstraint.

You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to UpdateConstraint.

As an example:

  $service_obj->UpdateConstraint(Att1 => $value1, Att2 => $value2, ...);

Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.

=head1 ATTRIBUTES


=head2 AcceptLanguage => Str

The language code.

=over

=item *

C<en> - English (default)

=item *

C<jp> - Japanese

=item *

C<zh> - Chinese

=back



=head2 Description => Str

The updated text description of the constraint.



=head2 B<REQUIRED> Id => Str

The identifier of the constraint to update.




=head1 SEE ALSO

This class forms part of L<Paws>, documenting arguments for method UpdateConstraint in L<Paws::ServiceCatalog>

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
ioanrogers/aws-sdk-perl
auto-lib/Paws/ServiceCatalog/UpdateConstraint.pm
Perl
apache-2.0
1,979
# NOTE: auto-generated by SOAP::WSDL from the AdWords v201809 WSDL;
# regeneration will overwrite local edits.
package Google::Ads::AdWords::v201809::TimeUnit;
use strict;
use warnings;

# XML namespace this simpleType belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809'};

# derivation by restriction
use base qw(
    SOAP::WSDL::XSD::Typelib::Builtin::string);

1;

__END__

=pod

=head1 NAME

Google::Ads::AdWords::v201809::TimeUnit

=head1 DESCRIPTION

Perl data type class for the XML Schema defined simpleType
TimeUnit from the namespace https://adwords.google.com/api/adwords/cm/v201809.

Unit of time the cap is defined at.

This class is derived from
   SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like its base type.

# Description of restrictions not implemented yet.

=head1 METHODS

=head2 new

Constructor.

=head2 get_value / set_value

Getter and setter for the simpleType's value.

=head1 OVERLOADING

Depending on the simple type's base type, the following operations are overloaded

 Stringification
 Numerification
 Boolification

Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.

=head1 AUTHOR

Generated by SOAP::WSDL

=cut
googleads/googleads-perl-lib
lib/Google/Ads/AdWords/v201809/TimeUnit.pm
Perl
apache-2.0
1,090
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Hardware (environment) check for Juniper JunOS devices (JUNIPER-MIB).
# Built on the generic centreon hardware template: walks the FRU and
# Operating tables (and alarms), mapping entity states to severities.
# The two SNMP tables are cached on disk (statefile) and only re-walked
# when the cache is missing or older than --reload-cache-time minutes.
package network::juniper::common::junos::mode::hardware;

use base qw(centreon::plugins::templates::hardware);

use strict;
use warnings;
use centreon::plugins::statefile;

# Template configuration: sections, hooks, default thresholds, component
# module location. Called by the hardware template base class.
sub set_system {
    my ($self, %options) = @_;

    # Only these sections accept numeric --warning/--critical thresholds.
    $self->{regexp_threshold_numeric_check_section_option} = '^(operating-temperature|operating-cpu|operating-buffer|operating-heap)$';

    # Template hooks: hook1 runs before the SNMP fetch (decide what to
    # request vs. read from cache), hook2 performs the fetch, hook3 runs
    # after it (chassis description lookup).
    $self->{cb_hook1} = 'init_cache';
    $self->{cb_hook2} = 'snmp_execute';
    $self->{cb_hook3} = 'get_type';

    # Default state-to-severity mappings; overridable via --threshold-overload.
    $self->{thresholds} = {
        fru => [
            ['unknown', 'UNKNOWN'],
            ['present', 'OK'],
            ['ready', 'OK'],
            ['announce online', 'OK'],
            ['online', 'OK'],
            ['announce offline', 'WARNING'],
            ['offline', 'CRITICAL'],
            ['diagnostic', 'WARNING'],
            ['standby', 'WARNING'],
            ['empty', 'OK']
        ],
        operating => [
            ['runningAtFullSpeed', 'WARNING'],
            ['unknown', 'UNKNOWN'],
            ['running', 'OK'],
            ['ready', 'OK'],
            ['reset', 'WARNING'],
            ['down', 'CRITICAL'],
            ['standby', 'OK']
        ],
        alarm => [
            ['other', 'OK'],
            ['off', 'OK'],
            ['on', 'CRITICAL']
        ]
    };

    # Where the per-section check modules live ('fru', 'operating', 'alarm').
    $self->{components_path} = 'network::juniper::common::junos::mode::components';
    $self->{components_module} = ['fru', 'operating', 'alarm'];
}

# hook2: walk the tables queued by init_cache (empty list when the cache
# is fresh), then persist/restore results through write_cache().
sub snmp_execute {
    my ($self, %options) = @_;

    $self->{snmp} = $options{snmp};
    $self->{results} = $self->{snmp}->get_multiple_table(oids => $self->{request});
    $self->write_cache();
}

sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Cache lifetime in minutes; '-1' disables expiry-based reloads.
    $options{options}->add_options(arguments => {
        'reload-cache-time:s' => { name => 'reload_cache_time', default => 180 }
    });

    $self->{statefile_cache} = centreon::plugins::statefile->new(%options);
    return $self;
}

sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    $self->{statefile_cache}->check_options(%options);
}

# hook1: decide whether the FRU/Operating tables must be (re)walked.
# Queues the table requests in $self->{request} only when the statefile
# is missing, has no timestamp, or has expired.
sub init_cache {
    my ($self, %options) = @_;

    $self->{hostname} = $options{snmp}->get_hostname();
    $self->{snmp_port} = $options{snmp}->get_port();

    $self->{oids_fru} = {
        jnxFruEntry => '.1.3.6.1.4.1.2636.3.1.15.1',
        jnxFruName => '.1.3.6.1.4.1.2636.3.1.15.1.5',
        jnxFruType => '.1.3.6.1.4.1.2636.3.1.15.1.6',
    };
    $self->{oids_operating} = {
        jnxOperatingEntry => '.1.3.6.1.4.1.2636.3.1.13.1',
        jnxOperatingDescr => '.1.3.6.1.4.1.2636.3.1.13.1.5',
    };

    $self->{write_cache} = 0;
    # Cache file is keyed per host+port so multiple targets don't collide.
    my $has_cache_file = $self->{statefile_cache}->read(statefile => 'cache_juniper_mseries_' . $self->{hostname} . '_' . $self->{snmp_port});
    my $timestamp_cache = $self->{statefile_cache}->get(name => 'last_timestamp');
    # Precedence note: '&&' binds tighter than '||', so the '-1' disable
    # guard applies only to the expiry test -- a missing cache file or
    # missing timestamp always triggers a fresh walk.
    if ($has_cache_file == 0 || !defined($timestamp_cache) ||
        ((time() - $timestamp_cache) > (($self->{option_results}->{reload_cache_time}) * 60)) && $self->{option_results}->{reload_cache_time} != '-1') {
        push @{$self->{request}}, { oid => $self->{oids_fru}->{jnxFruEntry}, start => $self->{oids_fru}->{jnxFruName}, end => $self->{oids_fru}->{jnxFruType} };
        push @{$self->{request}}, { oid => $self->{oids_operating}->{jnxOperatingEntry}, start => $self->{oids_operating}->{jnxOperatingDescr}, end => $self->{oids_operating}->{jnxOperatingDescr} };
        $self->{write_cache} = 1;
    }
}

# Persist freshly walked tables to the statefile, or (when nothing was
# requested) populate $self->{results} from the cached copies instead.
sub write_cache {
    my ($self, %options) = @_;

    if ($self->{write_cache} == 1) {
        my $datas = {};
        $datas->{last_timestamp} = time();
        $datas->{$self->{oids_fru}->{jnxFruEntry}} = $self->{results}->{$self->{oids_fru}->{jnxFruEntry}};
        $datas->{$self->{oids_operating}->{jnxOperatingEntry}} = $self->{results}->{$self->{oids_operating}->{jnxOperatingEntry}};
        $self->{statefile_cache}->write(data => $datas);
    } else {
        $self->{results}->{$self->{oids_fru}->{jnxFruEntry}} = $self->{statefile_cache}->get(name => $self->{oids_fru}->{jnxFruEntry});
        $self->{results}->{$self->{oids_operating}->{jnxOperatingEntry}} = $self->{statefile_cache}->get(name => $self->{oids_operating}->{jnxOperatingEntry});
    }
}

# Accessor used by component modules: one value from a cached table.
sub get_cache {
    my ($self, %options) = @_;

    return $self->{results}->{$options{oid_entry}}->{$options{oid_name} . '.' . $options{instance}};
}

# Accessor used by component modules: all instance suffixes present for a
# given column OID in a cached table.
sub get_instances {
    my ($self, %options) = @_;

    my @instances = ();
    foreach (keys %{$self->{results}->{$options{oid_entry}}}) {
        if (/^$options{oid_name}\.(.*)/) {
            push @instances, $1;
        }
    }

    return @instances;
}

# hook3: read the chassis description (jnxBoxDescr) and log it; falls back
# to 'unknown' when the device doesn't answer.
sub get_type {
    my ($self, %options) = @_;

    my $oid_jnxBoxDescr = ".1.3.6.1.4.1.2636.3.1.2.0";
    my $result = $options{snmp}->get_leef(oids => [$oid_jnxBoxDescr]);
    $self->{env_type} = defined($result->{$oid_jnxBoxDescr}) ? $result->{$oid_jnxBoxDescr} : 'unknown';
    $self->{output}->output_add(long_msg => sprintf("environment type: %s", $self->{env_type}));
}

# Load the component modules matching --component (template override).
# NOTE(review): relies on centreon::plugins::misc being loaded elsewhere
# in the plugin framework -- it is not use'd here; confirm.
sub load_components {
    my ($self, %options) = @_;

    foreach (@{$self->{components_module}}) {
        if (/$self->{option_results}->{component}/) {
            my $mod_name = $self->{components_path} . "::$_";
            centreon::plugins::misc::mymodule_load(
                output => $self->{output}, module => $mod_name,
                error_msg => "Cannot load module '$mod_name'.") if ($self->{load_components} == 1);
            $self->{loaded} = 1;
        }
    }
}

# Run each loaded component module's check() (template override).
sub exec_components {
    my ($self, %options) = @_;

    foreach (@{$self->{components_module}}) {
        if (/$self->{option_results}->{component}/) {
            my $mod_name = $self->{components_path} . "::$_";
            my $func = $mod_name->can('check');
            $func->($self);
        }
    }
}

1;

__END__

=head1 MODE

Check Hardware (JUNIPER-MIB) (frus, operating).

=over 8

=item B<--component>

Which component to check (Default: '.*').
Can be: 'fru', 'operating', 'alarm'.

=item B<--add-name-instance>

Add literal description for instance value (used in filter, absent-problem and threshold options).

=item B<--filter>

Exclude some parts (comma separated list) (Example: --filter=fru)
Can also exclude specific instance: --filter=fru,7.3.0.0

=item B<--absent-problem>

Return an error if an entity is not 'present' (default is skipping) (comma separated list)
Can be specific or global: --absent-problem=fru,7.1.0.0

=item B<--no-component>

Return an error if no components are checked.
If total (with skipped) is 0. (Default: 'critical' returns).

=item B<--threshold-overload>

Set to overload default threshold values (syntax: section,[instance,]status,regexp)
It is used before default thresholds (order stays).
Example: --threshold-overload='operating,CRITICAL,^(?!(running)$)'

=item B<--warning>

Set warning threshold (syntax: type,regexp,threshold)
Example: --warning='operating-temperature,.*,30'

=item B<--critical>

Set critical threshold (syntax: type,regexp,threshold)
Example: --critical='operating-temperature,.*,40'

=item B<--reload-cache-time>

Time in minutes before reloading cache file (Default: 180).
Use '-1' to disable cache reload.

=back

=cut
centreon/centreon-plugins
network/juniper/common/junos/mode/hardware.pm
Perl
apache-2.0
8,097
=head1 The Perl 6 Summary for the week ending 20030420 You know how it is, you go away for a lovely weekend folk festival in Wales, you have a really good, relaxed time, singing yourself hoarse and generally forgetting all about technology before coming home to email from the perl.com editor asking if he could have the summary about half an hour ago, and then you skim through the lists and find nearly 300 messages unread? You do? I thought it was just me. So, having utterly failed (by virtue of being elsewhere) to get a summary written by Monday, I'm currently shooting for 'getting it written'. Welcome to this week's Perl 6 summary; all the fun of the Perl 6 lists with none of the tedious 'reading every message'. Let's see if I can't ease myself back into the Perl 6 vibe by summarizing the still rather quiet perl6-internals list first... =head2 Building Parrot on Win32 Steve Fink has been busy committing (in the CVS rather than the culpability sense) Mattia Barbon's patches to get Parrot building happily in a Win32 environment. If you have such an environment, now would probably be a good time to grab the latest Parrot from CVS and see if it builds for you. I'm sure the list would be grateful to hear of your experience, good or bad. =head2 PMC documentation After seemingly weeks in the wilderness with very little feedback, Alberto SimE<otilde>es finally got some comments on (and thanks for) his latest PMC doc patches from Steve Fink and Brent Dax. The docs haven't made it into the distribution yet though, but it can only be a matter of time. L<http://groups.google.com/groups?threadm=20030414083708.GA1510%40alfarrabio.di.uminho.pt> =head2 Is PMC size fixed? Mattia Barbon wanted to know if it would eventually become possible to create PMCs with additional data members. 
Dan says not; PMCs are allocated from arenas which apparently means they need to be the same size (variable sized PMCs would mean adding complexity to the garbage collector, which is already complicated enough thanks very much...) L<http://groups.google.com/groups?threadm=Mahogany-0.64.2-628-20030415-212901.00%40rbnet.it> =head2 Dan Does Design Decisions Dan announced a few design decisions: =over =item * It's time to start assigning permanent opcode numbers to some of the opcodes. =item * There's some new stack ops, C<halfpop[insp]>. =item * We now have C<can> and C<does> ops. =item * Dan explained that C<can> and C<does> were there to support fast interface polymorphism. =back L<http://groups.google.com/groups?threadm=a0521060abac206fe31be%40%5B63.120.19.221%5D> -- permanent opcode numbers L<http://groups.google.com/groups?threadm=a05210609bac203bc6e40%40%5B63.120.19.221%5D> -- halfpop L<http://groups.google.com/groups?threadm=a0521060bbac22af49f56%40%5B63.120.19.221%5D> -- can/does L<http://groups.google.com/groups?threadm=a05210609bac4968b906b%40%5B63.120.19.221%5D> -- Interfaces/Classes =head2 Short-lived memory allocation Luke Palmer wondered what the Right Way was to allocate dynamic memory that wouldn't be needed beyond a function invocation. The answer, of course, was 'use Parrot memory management and let Garbage Collection work its shiny magic'. Toward the end of the thread Dan let on that Parrots Garbage Collector is 'always going to be walking the system stack' so there was no need to worry about anchoring the newly allocated buffer to the root set for the duration of the function invocation, which seems to be a new commitment. Both Dan and Steve Fink observed that the memory documentation could use updating to clarify best practice for everyone. Volunteers? L<http://groups.google.com/groups?threadm=ygc65pffcoo.fsf%40babylonia.flatirons.org> =head2 How deep is C<clone>? Alberto SimE<otilde>es asked how deeply the C<clone> operator worked. 
According to Leopold TE<ouml>tsch it's a deep, recursive clone, which he noted makes for interesting times when dealing with self referencing structures (Dan reckoned that it shouldn't be too bad if you take advantage of the GC system's graph traversal smarts...). Luke Palmer wondered why the default was a deep copy as, he claimed, deep copies were seldom needed. He wondered how to make a shallow copy. Leo suggested extending clone with an extra parameter to specify deep or shallow copying. Dan said that it is the way it is because he said so, and that one would make a shallow copy with assign. L<http://groups.google.com/groups?threadm=20030416112524.GA9757%40alfarrabio.di.uminho.pt> =head2 Shared memory David Robins wondered whether Parrot's memory allocation system would cope with sharing memory between processes and found some messages in the archive that seemed to imply that 'it will cope eventually'. He wondered I<how> it would cope. Warnock's Dilemma applies... L<http://groups.google.com/groups?threadm=Pine.LNX.4.44.0304170912260.19245-100000%40davidrobins.net> =head2 A New GC approach? Kurt Stephens announced that he had a partially written 'conservative, non-copying "treadmill"' GC system that could work in real time without stopping the world. He wondered if it could be useful for Parrot. No comments so far... L<http://groups.google.com/groups?threadm=3E9F3F20.306%40kurtstephens.com> L<http://kurtstephens.com/research/index.html> =head2 IMC and variable number of arguments K Stol wondered how to handle a variable number of function arguments in IMC code. Dan remarked that it was covered by the Parrot calling conventions (presumably IMC code doesn't do the dfull Parrot calling conventions though). Leo TE<ouml>tsch suggested making sure that the last thing pushed onto the argument stack was the number of arguments, and Will Coleda suggested passing a single PMC like a PerlArray... 
L<http://groups.google.com/groups?threadm=BAY1-DAV58yhJBbdbuN0000d25d%40hotmail.com> =head1 Meanwhile, over in perl6-language If I were asked to summarize this week's traffic on perl6-language with one word, that word would be 'Types'. It turns out that thinking about types, and how they should behave in Perl 6, is hard. I don't envy Damian the writing of the next Exegesis, that's for sure. Instead of presenting the threads in roughly chronological order this week I'm going to deal with the none type related threads first and then attempt to sketch the current issues with types without quite so much reference to individual threads. Cover me, I'm going in... =head2 Currying questions Last week, Ralph Mellor had asked whether currying assumptions could be overridden when the curried function was called and Luke Palmer had said he didn't think so. This week Damian answered with a rather more authoritative "No, they can't be overridden, just make a call to the original function.". Ralph had also wondered if there would be a way to specify whether currying assumptions were made by binding or by copying a value (currently, they get bound, just like they do when you call a function normally (I wonder what happens when the function prototype specifies C<is copy>)). Damian said that, if you wanted to make an assumption based on a copy then you needed to explicitly make that copy. L<http://groups.google.com/groups?threadm=3E968850.2000604%40self-reference.com> -- Ralph's original questions =head2 Are all list constructors iterators? 'Marek Ph.' admired the shiny goodness that is lazy evaluation and wondered if I<all> list constructors were actually iterators. He wanted to know if that meant that @a = 1 .. Inf; splice @a, 5, 2; would yield @a == (1 .. 4, 7 .. Inf) He also asked if the C<x> operator would generate an iterator too. Luke Palmer thought the answer to both questions was "Yes". 
L<http://groups.google.com/groups?threadm=200304140800.04087.philipp.marek%40bmlv.gv.at> =head2 ... but foo('bar') StE<eacute>phane Payrard spotted a possible ambiguity in Perl 6's grammar. He wanted to know if ... but foo('bar') set the property 'foo' to the value 'bar', or did it create a property with the name being the value returned by a function call of C<foo('bar')>. He wondered what the syntax would be to get the 'other' meaning. Luke Palmer thought the first part was that the property 'foo' would get set to 'bar' (so do I, unless the thing implementing the property has some special semantics). He suggested that to force the call to the function to get a property name one would do one of: ... but $(foo('bar')) or ... but &foo.('bar') I prefer the second of those two. L<http://groups.google.com/groups?threadm=20030414194736.GF2610%40stefp.dyndns.org> =head2 Perl 6 parser questions Right at the end of last week, Austin Hastings asked a bunch of questions about the behaviour of the Perl 6 parser. He wondered, for instance, if, in the future, he'd be able to (usefully) say: #!/usr/bin/perl6.1 use Perl6::Grammar v6.0.0.2; Larry answered this question ("I don't see why not") and all of Austin's other questions on this topic. Apparently the Perl 6 Parser will be documented 'whenever Apocalypse 18 comes out'. L<http://groups.google.com/groups?threadm=20030413195322.44208.qmail%40web12306.mail.yahoo.com> =head2 Initializations outside of control flow Mark J. Reed asked about elegant ways of initializing shared variables. He wanted something a little neater than the blunderbuss of a C<BEGIN> block. Larry obliged with one of his 'thinking aloud' posts which, while not giving us a final answer does give us a few signposts. 
It's looking like we'll have traits along the lines of: state $where is begin($value); state $where is check($value); state $where is init($value); state $where is first($value); Where the traits work analogously to C<BEGIN>, C<CHECK>, C<INIT> and C<FIRST>. L<http://groups.google.com/groups?threadm=20030415143435.GD7176%40charm.turner.com> L<http://groups.google.com/groups?threadm=20030415181942.GA11245%40wall.org> =head2 The new C<returns> keyword David Storrs was a little worried about the possible clash between the new C<returns> keyword -- introduced in Apocalypse 6 -- and C<return>. Michael Lazzaro pointed out that the 'possible clash' was almost certainly deliberate, after all: sub foo returns Bar {...} reads rather well. David had used the example C<my $spot returns Dog>, which does look rather ugly, but Michael pointed out, in the case of a variable declaration, it made more sense to use C<my $spot of Dog> or even C<my Dog $spot>. Michael commented that this choice of syntax meant the programmer was able to pick the most readable phrase for a given situation. L<http://groups.google.com/groups?threadm=CCE85CBE-6F72-11D7-A307-00050245244A%40cognitivity.com> =head2 A17 early discussion: Perl 6 threading proposal Austin Hastings posted what would once have been called an RFC about Perl 6's threading model. No comments so far. L<http://groups.google.com/groups?threadm=20030415182938.6630.qmail%40web12306.mail.yahoo.com> =head2 C<wrap> from Synopsis 6 David Storrs wondered if the new C<.wrap> method, which returns a unique id identifying the particular 'wrapper' could have an associated warning if the resulting id wasn't stored somewhere. Adam D. Lopresto and Austin Hastings weren't keen... 
L<http://groups.google.com/groups?threadm=20030416074404.A72173%40megazone.bigpanda.com> =head2 The difference between C<-> $arg {...}> and C<sub ($arg) {...}> David Storrs asked for a 'micro-Exegesis' on the difference between C<-> $foo {...}> and C<sub ($foo) {...}> since they both seemed to generate anonymous subroutines. There were an awful lot of responses to this. Essentially the difference is that a 'pointy block' (my coinage I think) is just a block that has a signature. The main difference is what happens to a C<return>. In a block or a pointy block, a C<return> returns from the subroutine that lexically contains that block, not simply from the block itself. If you want to leave a block prematurely without returning from its enclosing subroutine, you would use the C<leave> keyword. This distinction between a Block and a Sub allows for some rather neat (Smalltalkish) idioms multi iterate_over_file( String $path: Block &block ) { my $fh = open File: '<', $path or fail "Couldn't open $path: $!"; while <$fh> { &block($_); } } sub find_user ($user_name) { iterate_over_file "/etc/passwd" -> $line { return $line but true if /^$user_name/; } return undef; } This is a somewhat contrived example, but I think it's useful as an illustration. If a Block were exactly the same as a sub, then the C<return> in C<find_user> would return to the middle of the C<while> loop in C<iterate_over_file> and C<iterate_over_file> would only return after it had gone through every line in the password file, which would mean that C<find_user> would always return undef. However, a return from inside a block returns from the subroutine I<containing> the block so C<find_user> behaves as expected and we get to write powerful control structures without having to resort to macros. I do wonder if the it would be possible for a function like C<iterate_over_file> to C<CATCH> the block's Return exception though... 
L<http://groups.google.com/groups?threadm=20030416074937.B72173%40megazone.bigpanda.com>

=head2 Compulsorily named parameters?

The debate over declaring non optional named parameters continued with
Damian joining in. The current consensus appears to be that the various
optional/named/slurpy shorthands introduced in Apocalypse 6 should stay
pretty much as they are, but that it should be possible to declare more
complex parameter requirements using sensibly named traits.

John Siracusa still wants a more 'powerful' shorthand, but there
doesn't seem to be anyone taking his side on that.

L<http://groups.google.com/groups?threadm=3E99F811.30603%40conway.org>

=head2 Multimethod invocants

Multimethods still appear to be causing some confusion, mostly to do
with how they are called and dispatched, and which method parameters
participate in the dispatch. There's a largish contingent (and I
probably may count myself a member of that contingent, spot the bias)
who would like to be able to write:

    multi infix:@ (Number $x, Number $y) { new Point: $x, $y }

    class point {
        ...
        multi make_rectangle ( Point $p ) { new Rectangle: $_, $p; }
        multi make_rectangle ( Number $x, Number $y ) {
            .make_rectangle( $x @ $y );
        }
    }

Which isn't allowed. Instead you would have to write:

    multi make_rectangle ( Point $p, Point $q ) { new Rectangle: $p, $q; }
    multi make_rectangle ( Point $p, Number $x, Number $y ) {
        make_rectangle ($p, $x @ $y);
    }

And you also have to be wary of

   my Point $p;
   ...
   $p.make_rectangle($x @ $y);

which would first try to dispatch to Point's C<make_rectangle>
'unimethod', only attempting to dispatch via a multimethod if there is
no such method.

Personally, I think there's room for a spoonful or two of syntactic
sugar to allow for the 'method variant' style of declaration as well as
the full on generic multimethod style (which would, of course, underpin
the more restricted method variant style). However, if it doesn't exist
out of the box I expect someone (me?)
will write a set of macros to make things work. In the message referenced, Damian explains the current state of the multimethod art... L<http://groups.google.com/groups?threadm=3E99FEA8.2090009%40conway.org> =head1 Types... Well, that's 100 or so messages accounted for. Which leaves another 173 messages remaining all of which concern types. The problem as I see it is that different people seem to understand different things from the word type, and there's a lot of people talking at cross purposes as well as a fair amount of axe grinding going on. Now, I could just punt and write something like "Everyone except Leon Brocard talked for ages about types. Here are the links to those threads" which would at least has the virtue of getting the god awful running joke out of the way, but that would smack of cheating. So, what I'm going to do is to only cheat slightly. At the bottom of this section you'll find links to all the threads that discussed types this week. However, before that I'll try give you a (biased) overview of the issues involved and the areas of confusion. =head2 An illustrative quotation from Lewis Carroll =for html <blockquote> "The name of the song is called 'Haddocks' Eyes.'" [said the White Knight.] "Oh, that's the name of the song, is it?" Alice said, trying to feel interested. "No, you don't understand," the Knight said, looking a little vexed. "That's what the name is called. The name really is 'The Aged, Aged Man.'" "Then I ought to have said 'That's what the song is called'?" Alice corrected herself. "No you oughtn't: that's another thing. The song is called 'Ways and Means' but that's only what it's I<called>, you know!" "Well, what is the song then?" said Alice, who was by this time completely bewildered. "I was coming to that," the Knight said. "The song really is 'A-sitting On a Gate': and the tune's my own invention." 
-- From Alice Through The Looking Glass, by Lewis Carroll =for html </blockquote> =head2 Two types of type Perl 6 draws an important distinction between 'variable type' and 'value type'. A variable is a binding between a name and a container. The variable type is the type of the container associated with the variable's name. A variable's 'value type' is the expected type of the value stored in the variable's container. As far as I can tell, Perl is weirder than the average programming language in this respect in that it allows the programmer to specify both sorts of type. In C for instance, a value doesn't know its own type, it's just an area of memory that is interpreted according to the type of the variable that it is accessed via (or according to the type it is cast into). Meanwhile, in lisp like languages, 'variables' are simply keys in a symbol table, and the values in that symbol table are untyped pointers to values which know their own type. Perl 6's symbol tables are rather more like Lisp symbol tables than C's, with the added wrinkle that the symbol table values are rather more sophisticated containers than simple generic pointers. This complexity arises for a couple of reasons: =over =item Tied variables. Instead of storing a variable's value in one of the core container types (Array, Hash, Scalar), it can be useful to use a custom container type to allow for 'magical' behaviour: my $FTSE is ShareIndex('FTSE'); print "$FTSE"; # FTSE 100 Index: 3916.70 (+27.50/+0.7%) at 2003042216:40 =item Context Context is really important to Perl. If you look at an array variable in a numeric context, then you get the number of items in the array; in list context, a list of all the items in the array; in a scalar context, a pointer to the array. This context dependent behaviour is best handled by the container object, possibly with the assistance of the contained object, but not always. =back Scalars turn out to be one of the more remarkable types of Perl containers. 
At their simplest they can be thought of as a container which can hold at most one 'atomic' thing. Perl 5 scalars have three(?) slots for Number, String and Reference values (On IRC, Dan tells me that Perl 6 scalars will probably have slots for String, Float, Integer, Boolean and Reference values). These different 'scalar value types' can, with certain restrictions be treated without regard to their 'actual' type: A Number in a string context will give a sensible string representation, a string in a number context will give an appropriate numeric value, but not every possible scalar value type can be sensibly viewed as any other type; if you try and use a number in a reference context, you're going to get an error for instance. For added fun, it's perfectly possible for a scalar variable to contain both a Number value and a String value (In Perl 5, L<Scalar::Util> provides a nice interface to this preexisting capability. On IRC, Dan suggests that the Perl 6ish way of doing this will probably be C<my $i = 4 but "Bibble!";>). =head2 What are value type declarations for? Some people see value type declarations as being important for programmer safety. They want to see a situation where: my Number $foo = some_function_returning_a_string(); or sub a_func (Number $param) { ... } a_func("A string"); will throw exceptions, preferably at compile time. Others want to see those same code fragments coerce any values assigned to them into the appropriate types (possibly with a warning) and see value type declarations simply as a way of letting the compiler do automatic optimization of code (if you have declared that a given variable will only contain, say, a number, you can (at least) get rid of a layer of indirection in accessing that value). Others don't really care one way or the other about whether or not to coerce, they just want to use value types in setting up multimethods. 
Still others don't really like the idea of declaring types at all, but do see value in ML like type inference for programmer safety reasons... Others want to let the programmer choose, and worry about how to implement something which will let that happen. I'm a 'let the programmer choose, and the compiler optimize what it can' kind of guy. =head2 Are types the same as objects? If types are the same as objects, do they all inherit from a common base class? If they do, what does the hierarchy look like? What about interfaces? Do they need to be explicitly declared or can they be inferred. If they can be inferred, what about the problem of: class Tree { method feed {...} method grow {...} method bark {...} ... } interface Canine { method feed {...} method grow {...} method bark {...} } class Borzoi { method feed {...} method grow {...} method bark {...} } ... multi treat($vet, Canine $critter ) {...} treat($some_vet, Tree.new); # Should this fail? Arrghh!!! Make the hurting stop! However, I don't care what Dan says, I want every type to have an associated class, and I want them all to inherit from some sort of common base class (at least conceptually, and, if I'm prepared to take the performance hit and jump through the hoops, actually. Sometimes you need to override Scalar's behaviour (or whatever)) but I don't think the inheritance trees that have been bandied about so far even come close to expressing the semantics we need. Expect a longish post to Perl 6 language on this at some point. Probably with (more or less) pseudo code. =head2 Another distinction to think about OO theory talks about value objects and reference objects. (I'm using 'object' here to try and get come conceptual distance from 'value type'). 
Here's an abstract example of what I mean my $a = new ValueObject: value => 10; my $b = $a; $b.set_value(20); print "$a $b"; # 10 20 my $c = new ReferenceObject: value => 10; my $d = $c; $c.set_value(20) print "$c $d"; # 20 20 =head2 Just when you thought you understood value types... Along come compound value types to mess with your head. Assuming a strict interpretation of value type declarations (assigning the 'wrong' type to a variable throws an error), consider the following: my @a of Int = (1, 2, 3); my @b = @a; my @c of Str; What happens to each of the following? If it's an error, does it happen at runtime or compile time? @c = @a; @c = @b; push @a, "String"; push @b, "String"; Are you sure about those? Now, what happens if you start with: my @a of Int = (1 .. Inf); my @b = @a; my @c of Str; And there's more thorny problems where they came from. =head2 Those thread links L<http://groups.google.com/groups?threadm=200304140058.h3E0wlpa001034%40bruce.csse.monash.edu.au> -- Types of literals L<http://groups.google.com/groups?threadm=20030411204244.GG561%40wall.org> -- Do we really need the dual type system L<http://groups.google.com/groups?threadm=200304142143.11432.afaus%40corp.vlex.com> -- User defined hierarchical types L<http://groups.google.com/groups?threadm=a05210606bac48f32d796%40%5B63.120.19.221%5D> -- Mind the difference between value types and reference types L<http://groups.google.com/groups?threadm=ygcof343oc6.fsf%40babylonia.flatirons.org> -- Static typing with Interfaces L<http://groups.google.com/groups?threadm=001D81EE-71D1-11D7-BA15-00050245244A%40cognitivity.com> -- Michael Lazzaro's superb summary of how containers and values interact. Not sure it's the whole story though... =head1 Acknowledgements, Announcements and Apologies Sorry it's late. I blame the perl6-language people. It has nothing whatsoever to do with weekend spent in Wales and a Bank Holiday Monday spent at an Easter egg hunt and barbecue at my aunt's. 
This has been one of the harder Perl 6 summaries to write, mostly because the language list has been dealing with a complicated subject and finding lots of interesting corners and ambiguities. Many thanks to Michael Lazzaro for his careful summation of his understanding of how things work which certainly clarified my thinking, to StE<eacute>phane Payrard for his sanity check of the types summary and to Dan Sugalski for a few answers on IRC about Scalar behaviour. If you've appreciated this summary, please consider one or more of the following options: =over =item * Send money to the Perl Foundation at L<http://donate.perl-foundation.org/> and help support the ongoing development of Perl. =item * Get involved in the Perl 6 process. The mailing lists are open to all. L<http://dev.perl.org/perl6/> and L<http://www.parrotcode.org/> are good starting points with links to the appropriate mailing lists. =item * Send feedback, flames, money, photographic and writing commissions, or a full set of Bertrand Harris Bronson's I<The Traditional Tunes of the Child Ballads> to L<p6summarizer@bofh.org.uk> =back
autarch/perlweb
docs/dev/perl6/list-summaries/2003/p6summary.2003-04-20.pod
Perl
apache-2.0
26,294
#! /usr/bin/perl -w
use utf8;
use strict;
use warnings;

# Generate every pairwise sample combination ("labelA&labelB") for the
# differential-expression step of the RNAseq pipeline.
#
# Input : one .lib configuration file containing lines of the form
#         "label = <name>" (one per sample).
# Output: all unordered pairs joined by ';' on STDOUT, e.g. "a&b;a&c;b&c".
die "
generate combination between samples in RNAseq pipeline
Usage: perl $0 <input.lib>
" if @ARGV != 1;

# Collect every sample label declared in the lib file.
my @tags;
while ( my $line = <> ) {
    push @tags, $1 if $line =~ /label\s+=\s+(\w+)/;
}

# Emit each unordered pair exactly once (i < j preserves input order).
my @out;
for my $i ( 0 .. $#tags ) {
    for my $j ( $i + 1 .. $#tags ) {
        push @out, "$tags[$i]&$tags[$j]";
    }
}

print join( ';', @out ), "\n";
BaconKwan/Perl_programme
utility/cal_DIFF_combination_in_RNAseq.pl
Perl
apache-2.0
399
=head1 LICENSE Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =cut =head1 CONTACT Please email comments or questions to the public Ensembl developers list at <http://lists.ensembl.org/mailman/listinfo/dev>. Questions may also be sent to the Ensembl help desk at <helpdesk.org>. =cut use strict; use warnings; #generic object for the dbSNP data. Contains the general methods to dump the data into the new Variation database. Any change in the methods # will need to overload the correspondent method in the subclass for the specie package dbSNP::GenericContig; use POSIX; use Bio::EnsEMBL::Utils::Argument qw(rearrange); use Bio::EnsEMBL::Utils::Exception qw(throw); use Bio::EnsEMBL::Utils::Sequence qw(reverse_comp); use ImportUtils qw(dumpSQL debug create_and_load load loadfile get_create_statement); use dbSNP::ImportTask; use Bio::EnsEMBL::Variation::Utils::dbSNP qw(get_alleles_from_pattern); use Progress; use DBI qw(:sql_types); use Fcntl qw( LOCK_SH LOCK_EX ); use List::Util qw ( min max ); our $FARM_BINARY = 'bsub'; our %FARM_PARAMS = ( 'default' => { 'script' => 'run_task.pl', 'max_concurrent_jobs' => 5, 'memory' => 8000, 'queue' => 1, 'wait_queue' => 0 }, 'allele_table' => { 'script' => 'run_task.pl', 'max_concurrent_jobs' => 10, ## reduced from 40 using same mysql server for both 'memory' => 2000, 'queue' => 1, 'wait_queue' => 0 }, 'allele_table_load' => { 'script' => 'run_task.pl', 'max_concurrent_jobs' 
=> 5, 'memory' => 4000, #was 8000 for human 'queue' => 2, 'wait_queue' => 0 }, ## individual_genotypes human 137: ## ran most of these max_concurrent_jobs=50 & memory=2000 ## 58 failed =>rerun with memory=8000 ## 3 failed =>rerun with memory=10000 ## mouse - 20/29 failed 4G limit - 5 needed 9.5G 'individual_genotypes' => { 'script' => 'run_task.pl', 'max_concurrent_jobs' => 20, 'memory' => 4000, 'queue' => 1, 'wait_queue' => 0 }, 'individual_genotypes_load' => { 'script' => 'run_task.pl', 'max_concurrent_jobs' => 5, 'memory' =>4000, #was 9000 for human 'queue' => 2, 'wait_queue' => 0 }, ); #The queues in ascending run time limit order our @FARM_QUEUES = ( 'small', 'normal', 'long', 'basement' ); #The maximum amount of farm memory that we will request our $MAX_FARM_MEMORY = 15900; our $FARM_MEMORY_INCREMENT = 4000; #creates the object and assign the attributes to it (connections, basically) sub new { my $caller = shift; my $class = ref($caller) || $caller; my ($dbm, $tmp_dir, $tmp_file, $limit, $mapping_file_dir, $dbSNP_BUILD_VERSION, $ASSEMBLY_VERSION, $GROUP_TERM,$GROUP_LABEL, $skip_routines, $scriptdir, $logh, $source_engine) = rearrange([qw(DBMANAGER TMPDIR TMPFILE LIMIT MAPPING_FILE_DIR DBSNP_VERSION ASSEMBLY_VERSION GROUP_TERM GROUP_LABEL SKIP_ROUTINES SCRIPTDIR LOG SOURCE_ENGINE )],@_); my $dbSNP = $dbm->dbSNP()->dbc(); my $dbCore = $dbm->dbCore()->dbc(); my $dbVar = $dbm->dbVar()->dbc(); my $dbInt = $dbm->dbInt()->dbc(); my $snp_dbname = $dbSNP->dbname(); my $species = $dbm->species(); my $shared_db = $dbm->dbSNP_shared(); my $registry_file = $dbm->registryfile(); $shared_db .="." if defined $source_engine && $source_engine =~/mssql/ ; debug(localtime() . 
"\tThe shared database is $shared_db"); return bless {'dbSNP' => $dbSNP, 'dbCore' => $dbCore, 'dbInt' => $dbInt, 'dbVar' => $dbVar, ##this is a dbconnection 'snp_dbname' => $snp_dbname, 'species' => $species, 'tmpdir' => $tmp_dir, 'tmpfile' => $tmp_file, 'limit' => $limit, 'mapping_file_dir' => $mapping_file_dir, 'dbSNP_version' => $dbSNP_BUILD_VERSION, 'dbSNP_share_db' => $shared_db, 'assembly_version' => $ASSEMBLY_VERSION, 'skip_routines' => $skip_routines, 'log' => $logh, 'registry_file' => $registry_file, 'scriptdir' => $scriptdir, 'dbm' => $dbm, 'group_term' => $GROUP_TERM, 'group_label' => $GROUP_LABEL, 'source_engine' => $source_engine, }, $class; } #main and only function in the object that dumps all dbSNP data sub dump_dbSNP{ my $self = shift; #Put the log filehandle in a local variable my $logh = $self->{'log'}; #the following steps need to be run when initial starting the job. If job failed for some reason and some steps below are already finished, then can comment them out my @subroutines = ( 'create_coredb', 'source_table', 'population_table', 'individual_table', 'variation_table', 'subsnp_synonyms', 'archive_rs_synonyms', 'dbSNP_annotations', 'pubmed_citations', 'parallelized_individual_genotypes', 'population_genotypes', 'parallelized_allele_table', # 'flanking_sequence_table', 'variation_feature', 'cleanup' ); #The GenericContig object has an array where routines that should be skipped can be specified. For now, add create_coredb and cleanup by default push(@{$self->{'skip_routines'}},('create_coredb', 'cleanup' )); # When resuming after a crash, put already finished modules into this array push(@{$self->{'skip_routines'}},()); my $resume; my $clock = Progress->new(); # Loop over the subroutines and run each one foreach my $subroutine (@subroutines) { #Check if this subroutine should be skipped if (grep($_ eq $subroutine,@{$self->{'skip_routines'}})) { debug(localtime() . "\tSkipping $subroutine"); print $logh localtime() . 
"\tSkipping $subroutine\n"; next; } $clock->checkpoint($subroutine); print $logh $clock->to_string($subroutine); $self->$subroutine($resume); print $logh $clock->duration(); } } sub run_on_farm { my $self = shift; my $jobname = shift; my $file_prefix = shift; my $task = shift; my $task_manager_file = shift; my $start = shift; my $end = shift; my @args = @_; #Put the log filehandle in a local variable my $logh = $self->{'log'}; my $param_key = $jobname; if (!exists($FARM_PARAMS{$jobname})) { warn("No farm resource parameters defined for job $jobname, will use default parameters"); $param_key = 'default'; } my $script = $FARM_PARAMS{$param_key}->{'script'}; my $max_jobs = $FARM_PARAMS{$param_key}->{'max_concurrent_jobs'}; my $memory = $FARM_PARAMS{$param_key}->{'memory'}; my $queue = $FARM_QUEUES[$FARM_PARAMS{$param_key}->{'queue'}]; my $wait_queue = $FARM_QUEUES[$FARM_PARAMS{$param_key}->{'wait_queue'}]; my $memory_long = $memory . '000'; my $logfile_prefix = $file_prefix . "_" . $jobname . "-"; my $array_options = ""; #If just the start was defined, it should be a list of subtasks that should be re-run if (defined($start) && defined($end)) { if ($start < $end) { $array_options = qq{[$start-$end]%$max_jobs}; } else { $array_options = qq{[$start]%$max_jobs}; } } elsif (defined($start)) { $array_options = "[" . join(",",@{$start}) . qq{]%$max_jobs}; } #Wrap the command to be executed into a shell script my $tempfile = $task . "_" . $self->{'tmpfile'}; my $task_command = qq{perl $self->{'scriptdir'}/$script -species $self->{'species'} -dbSNP_shared $self->{'dbSNP_share_db'} -registry_file $self->{'registry_file'} -task $task -file_prefix $file_prefix -task_management_file $task_manager_file -tempdir $self->{'tmpdir'} -tempfile $tempfile -source_engine $self->{source_engine} } . join(" ",@args); warn"Sending task_command $task_command\n"; my $script_wrapper = $file_prefix . 
'_command.sh'; open(CMD,'>',$script_wrapper); flock(CMD,LOCK_EX); print CMD qq{#!/usr/local/bin/bash\n}; print CMD qq{$task_command\n}; close(CMD); print $logh "Running $script with task management file: $task_manager_file\n"; print $logh Progress::location(); ### changed $memory_long to $memory for farm3 # my $bsub_cmd = qq{$FARM_BINARY -R'select[mem>$memory\] rusage[mem=$memory\]' -M$memory_long -q $queue -J'$jobname$array_options' -o $logfile_prefix\%J.%I.out -e $logfile_prefix\%J.%I.err bash $script_wrapper}; my $bsub_cmd = qq{$FARM_BINARY -R'select[mem>$memory\] rusage[mem=$memory\]' -M$memory -q $queue -J'$jobname$array_options' -o $logfile_prefix\%J.%I.out -e $logfile_prefix\%J.%I.err bash $script_wrapper}; warn "sending to farm: $bsub_cmd\n"; #Submit the job array to the farm my $submission = `$bsub_cmd`; my ($jobid) = $submission =~ m/^Job \<([0-9]+)\>/i; print $logh Progress::location(); warn "Need to wait for job id $jobid\nSenfin:$FARM_BINARY -J $jobid\_waiting -q $wait_queue -w'ended($jobid)' -K -o $file_prefix\_waiting.out sleep 1\n\n"; #Submit a job that depends on the job array so that the script will halt system(qq{$FARM_BINARY -J $jobid\_waiting -q $wait_queue -w'ended($jobid)' -K -o $file_prefix\_waiting.out sleep 1}); print $logh Progress::location(); #Check the error and output logs for each subtask. If the error file is empty, delete it. If not, warn that the task generated errors. If the output file doesn't say that it completed successfully, report the job as unseccessful and report which tasks that failed my $all_successful = 1; sleep(60); ## checking files before they are written on farm3 my %job_details; for (my $index = $start; $index <= $end; $index++) { my $outfile = "$logfile_prefix$jobid\.$index\.out"; my $errfile = "$logfile_prefix$jobid\.$index\.err"; # Is error file empty? if (-z $errfile) { unlink($errfile); } else { $job_details{$index}->{'generated_error'} = 1; } #Does the outfile say that the process exited successfully? 
Faster than querying bhist... (?) $job_details{$index}->{'success'} = 0; open(FH,'<',$outfile)||die "Failed to open $outfile : $!\n";; flock(FH,LOCK_SH); my $content = ""; while (<FH>) { $content .= "$_ "; } close(FH); if ($content =~ m/Successfully completed/i) { $job_details{$index}->{'success'} = 1; } # Else, did we hit the memory limit? elsif ($content =~ m/TERM_MEMLIMIT/) { $job_details{$index}->{'fail_reason'} = 'OUT_OF_MEMORY'; } # Else, time limit for the queue? elsif ($content =~ m/TERM_RUNLIMIT/) { $job_details{$index}->{'fail_reason'} = 'OUT_OF_TIME'; } # Else, we don't know why it failed else { $job_details{$index}->{'fail_reason'} = 'UNKNOWN'; } } print $logh Progress::location(); my $message = ""; if ((my $count = grep(!$job_details{$_}->{'success'},keys(%job_details)))) { $all_successful = 0; $message = qq{$count subtasks failed. You should re-run them before proceeding!\n}; } if ((my $count = grep($job_details{$_}->{'generated_error'},keys(%job_details)))) { $message .= qq{$count subtasks generated error messages, please check the logfiles!\n}; } my $result = { 'success' => $all_successful, 'jobid' => $jobid, 'subtask_details' => \%job_details, 'message' => $message }; return $result; } sub rerun_farm_job { my $self = shift; my $iteration = shift; my $jobname = shift; my $file_prefix = shift; my @args = @_; #Put the log filehandle in a local variable my $logh = $self->{'log'}; my $param_key = $jobname; if (!exists($FARM_PARAMS{$jobname})) { warn("No farm resource parameters defined for job $jobname, will use default parameters"); $param_key = 'default'; } my $max_time = 0; my $max_memory = 0; #This is a preparation step for re-running a farm job. 
What we do is to re-submit to a longer queue (if one is available) and request more memory my $current_queue = $FARM_QUEUES[$FARM_PARAMS{$param_key}->{'queue'}]; if (scalar(@FARM_QUEUES) > ($current_queue + 1)) { $FARM_PARAMS{$param_key}->{'queue'}++; } else { $max_time = 1; } # Increase the memory requirements in steps of $FARM_MEMORY_INCREMENT unless we're at the limit already if ($FARM_PARAMS{$param_key}->{'memory'} < $MAX_FARM_MEMORY) { $FARM_PARAMS{$param_key}->{'memory'} = min($MAX_FARM_MEMORY,$FARM_MEMORY_INCREMENT + $FARM_PARAMS{$param_key}->{'memory'}); } else { $max_memory = 1; } #If we have already maxed out the resources, it won't help running the job again return undef if ($max_time && $max_memory); return $self->run_on_farm($jobname,$file_prefix . "_submission",$iteration,@args); } sub create_coredb { my $self = shift; #Put the log filehandle in a local variable my $logh = $self->{'log'}; my $coredb_name = $self->{'dbCore'}->dbname(); $self->{'dbVar'}->do(qq{CREATE DATABASE $coredb_name}); print $logh Progress::location(); debug(localtime() . 
"\tmake sure create $coredb_name.coord_system"); my $csid_ref = $self->{'dbCore'}->selectall_arrayref(qq{SELECT coord_system_id from coord_system WHERE name = 'chromosome' and attrib = 'default_version'}); my $csid; if ($csid_ref->[0][0]) { $csid = $csid_ref->[0][0]; } $self->{'dbVar'}->do(qq{create table coord_system(coord_system_id int(10) unsigned,species_id int(10) unsigned default 1)}); print $logh Progress::location(); $self->{'dbVar'}->do(qq{insert into coord_system(coord_system_id) values($csid)}); print $logh Progress::location(); $self->{'dbVar'}->do(qq{RENAME TABLE coord_system TO $coredb_name.coord_system}); print $logh Progress::location(); } sub source_table { my $self = shift; my $source_name = shift; #get the version of the dbSNP release my $dbSNP_db_name = $self->{'snp_dbname'}; $dbSNP_db_name =~ s/dbSNP\_//; ## remove extra pre-fix on mysql dbs my ($species,$tax_id,$version) = $dbSNP_db_name =~ m/^(.+)?\_([0-9]+)\_([0-9]+)$/; my $url = 'http://www.ncbi.nlm.nih.gov/projects/SNP/'; if(defined $source_name && $source_name=~ /Archive/){ $self->{'dbVar'}->do(qq{INSERT IGNORE INTO source (source_id,name,version,description,url,somatic_status, data_types) VALUES (2, "$source_name",$version,"Former variants names imported from dbSNP", "$url", "mixed","variation_synonym")}); } else{ my $dbname = 'dbSNP'; $self->{'dbVar'}->do(qq{INSERT INTO source (source_id,name,version,description,url,somatic_status, data_types) VALUES (1,"$dbname",$version,"Variants (including SNPs and indels) imported from dbSNP", "$url", "mixed","variation")}); } } sub table_exists_and_populated { # check if a table is present in dbSNP, and that it has some data in it my $self = shift; my $table = shift; my $sql; if($self->{source_engine} =~/mssql|sqlserver/ ){ $sql = qq[SELECT OBJECT_ID('$table')]; } else{ $sql = qq[show tables like '$table']; } my ($obj_id) = $self->{'dbSNP'}->db_handle->selectrow_array($sql); if (defined $obj_id) { # the table exists my ($count) = 
$self->{'dbSNP'}->db_handle->selectrow_array(qq{SELECT COUNT(*) FROM $table}); if (defined $count && $count > 0) { # the table is populated debug(localtime() . "\t$table table exists and is populated"); return 1; } } debug(localtime() . "\t$table table either doesn't exist or is empty"); return 0; } ### This is no longer used - data comes from directly clinvar sub clin_sig { my $self = shift; return unless $self->table_exists_and_populated('SNPClinSig'); my $logh = $self->{'log'}; # dump the data from dbSNP and store it in a temporary clin_sig table # in the variation database print $logh Progress::location(); debug(localtime() . "\tDumping clinical significance"); my $stmt = qq{ SELECT cs.snp_id, csc.descrip FROM SNPClinSig cs, ClinSigCode csc WHERE cs.clin_sig_id = csc.code }; dumpSQL($self->{'dbSNP'}, $stmt); print $logh Progress::location(); debug(localtime() . "\tLoading clinical significance"); create_and_load( $self->{'dbVar'}, "clin_sig", "snp_id i*", "descrip"); # check that we know about all the possible values $stmt = qq{ SELECT count(descrip) FROM clin_sig WHERE descrip NOT IN ( SELECT a.value FROM attrib a, attrib_type att WHERE a.attrib_type_id = att.attrib_type_id AND att.code = 'dbsnp_clin_sig' ) }; my ($count) = $self->{'dbVar'}->db_handle->selectrow_array($stmt); if ($count > 0) { die "There are unexpected (probably new) clinical significance types, add them to the attrib table first"; } # update the variation table with the correct attrib_ids print $logh Progress::location(); debug(localtime() . 
"\tUpdating variation table with clinical significance"); $stmt = qq{ UPDATE variation v, clin_sig c, attrib a, attrib_type att SET v.clinical_significance_attrib_id = a.attrib_id WHERE att.code = 'dbsnp_clin_sig' AND a.attrib_type_id = att.attrib_type_id AND c.descrip = a.value AND v.name = CONCAT('rs', c.snp_id) }; $self->{'dbVar'}->do($stmt); # in case dbsnp also assign clin sigs to synonyms we also join to the variation_synonym table # (for build 135 they (redundantly) had both the original and the synonym rsID associated with # a clinical significance, but this may change in the future as it has for MAF and suspect # SNPs and it doesn't do any harm to run this statement) $stmt = qq{ UPDATE variation v, variation_synonym vs, clin_sig c, attrib a, attrib_type att SET v.clinical_significance_attrib_id = a.attrib_id WHERE att.code = 'dbsnp_clin_sig' AND a.attrib_type_id = att.attrib_type_id AND c.descrip = a.value AND vs.name = CONCAT('rs', c.snp_id) AND vs.variation_id = v.variation_id }; $self->{'dbVar'}->do($stmt); $self->{'dbVar'}->do(qq{DROP TABLE clin_sig}); } sub minor_allele_freq { my $self = shift; return unless $self->table_exists_and_populated('SNPAlleleFreq_TGP'); my $logh = $self->{'log'}; # dump the data from dbSNP and store in the temporary maf table in # the variation database [2hrs to dump 137] print $logh Progress::location(); debug(localtime() . "\tDumping global minor allele freqs"); my $shared = $self->{'dbSNP_share_db'}; my $stmt = qq{ SELECT af.snp_id, a.allele, af.freq, af.count, af.is_minor_allele FROM SNPAlleleFreq_TGP af, $shared\.Allele a WHERE af.allele_id = a.allele_id }; dumpSQL($self->{'dbSNP'}, $stmt); print $logh Progress::location(); debug(localtime() . 
"\tLoading global minor allele freqs"); create_and_load($self->{'dbVar'}, "maf", "snp_id i* not_null", "allele l", "freq f", "count i", "is_minor_allele i"); =head do the updates in post processing when writing new copies of the tables print $logh Progress::location(); # debug(localtime() . "\tUpdating variations with global minor allele frequencies"); my $variation_sql = qq{ UPDATE variation v, maf m SET v.minor_allele_freq = m.freq, v.minor_allele_count = m.count, v.minor_allele = m.allele WHERE v.snp_id = m.snp_id }; my $synonym_sql = qq{ UPDATE variation v, maf m, variation_synonym vs SET v.minor_allele_freq = m.freq, v.minor_allele_count = m.count, v.minor_allele = m.allele WHERE vs.name = CONCAT('rs', m.snp_id) AND v.variation_id = vs.variation_id }; my $get_max_sth = $self->{'dbVar'}->prepare(qq[select min(snp_id), max(snp_id) from maf ]); $get_max_sth->execute()||die; my $range = $get_max_sth->fetchall_arrayref(); my $batch_size = 100000; my $start = $range->[0]->[0]; my $max = $range->[0]->[1]; while ( $start < $max ){ my $end = $start + $batch_size; # update the variation table with the frequencies for only the minor alleles $self->{'dbVar'}->do($variation_sql . " AND m.is_minor_allele AND m.snp_id BETWEEN $start AND $end"); print $logh Progress::location(); # it seems dbSNP also store frequencies on synonyms but don't copy this across to the # merged refsnp for some reason, we want to do this though so we also join to the # variation_synonym table and copy across the data $self->{'dbVar'}->do($synonym_sql . " AND m.is_minor_allele AND m.snp_id BETWEEN $start AND $end"); # we also copy across data where the MAF = 0.5 which do not have the is_minor_allele # flag set, we will ensure this is set on the non-reference allele later in the post-processing # when we have the allele string from the variation_feature table to check which is the # reference allele $self->{'dbVar'}->do($variation_sql . 
" AND m.freq = 0.5 AND m.snp_id BETWEEN $start AND $end"); $self->{'dbVar'}->do($synonym_sql . " AND m.freq = 0.5 AND m.snp_id BETWEEN $start AND $end"); $start = $end +1 } =cut debug(localtime() . "\tComplete MAF update"); # we don't delete the maf temporary table because we need it for post-processing MAFs = 0.5 } sub suspect_snps { my $self = shift; return unless $self->table_exists_and_populated('SNPSuspect'); my $logh = $self->{'log'}; # dump the data into a temporary suspect table print $logh Progress::location(); debug(localtime() . "\tDumping suspect SNPs"); # create a table to store this data in the variation database, # we use a mysql SET column which means the dbSNP values which # are stored essentially as a bitfield will automatically be # assigned the correct reasons. # # XXX: If dbSNP change anything though this CREATE statement # will need to be changed accordingly. # my $var_table_sql = qq{ CREATE TABLE suspect ( snp_id INTEGER(10) NOT NULL DEFAULT 0, reason_code SET('Paralog','byEST','oldAlign','Para_EST','1kg_failed','','','','','','other') DEFAULT NULL, PRIMARY KEY (snp_id) ) }; $self->{'dbVar'}->do($var_table_sql); my $stmt = qq{ SELECT ss.snp_id, ss.reason_code FROM SNPSuspect ss }; dumpSQL($self->{'dbSNP'}, $stmt); print $logh Progress::location(); debug(localtime() . "\tLoading suspect SNPs"); load($self->{'dbVar'}, "suspect", "snp_id", "reason_code"); # fail the variations tagged as suspect by adding them to the failed_variation table # for the moment we just fail these all for the same reason (using the same # failed_description_id), we don't group them by dbSNP's reason_code, but we # keep this information in the suspect table in case we want to do this at # some point in the future print $logh Progress::location(); debug(localtime() . 
"\tFailing suspect variations"); $stmt = qq{ INSERT INTO failed_variation (variation_id, failed_description_id) SELECT v.variation_id, fd.failed_description_id FROM suspect s, variation v, failed_description fd WHERE fd.description = 'Flagged as suspect by dbSNP' AND v.snp_id = s.snp_id AND s.snp_id between ? and ? }; my $fail_rs_ins_sth = $self->{'dbVar'}->prepare($stmt); # $self->{'dbVar'}->do($stmt); # also fail any variants with synonyms, use INSERT IGNORE because dbSNP # redundantly fail both the refsnp and the synonym sometimes, and we have # a unique constraint on (variation_id, failed_description_id) debug(localtime() . "\tFailing suspect variations by synonym"); $stmt = qq{ INSERT IGNORE INTO failed_variation (variation_id, failed_description_id) SELECT vs.variation_id, fd.failed_description_id FROM suspect s, variation_synonym vs, failed_description fd WHERE fd.description = 'Flagged as suspect by dbSNP' AND vs.name = CONCAT('rs', s.snp_id) AND s.snp_id between ? and ? }; my $fail_old_rs_ins_sth = $self->{'dbVar'}->prepare($stmt); my $get_max_sth = $self->{'dbVar'}->prepare(qq[select min(snp_id), max(snp_id) from suspect ]); $get_max_sth->execute()||die; my $range = $get_max_sth->fetchall_arrayref(); my $batch_size = 100000; my $start = $range->[0]->[0]; my $max = $range->[0]->[1]; while ($start < $max){ my $end = $start + $batch_size; $fail_rs_ins_sth->execute($start, $end)||die; $fail_old_rs_ins_sth->execute($start, $end)||die; $start = $end + 1; } $self->{'dbVar'}->do(qq{DROP TABLE suspect}); } =head Flag named elements (like (Z6867)) in zebrafish) - not enough information provided for later QC or annotation =cut sub named_variants { my $self = shift; my $logh = $self->{'log'}; print $logh Progress::location(); debug(localtime() . 
"\tExtracting named variants"); ## check if named variants present for the release my $named_ext_stmt = qq[ select SNP.snp_id from SNP, $self->{'dbSNP_share_db'}.UniVariation uv, $self->{'dbSNP_share_db'}.SnpClassCode scc where scc.abbrev ='Named' and uv.univar_id = SNP.univar_id and scc.code = uv.subsnp_class ]; my $named_ext_sth = $self->{'dbSNP'}->prepare($named_ext_stmt); $named_ext_sth->execute(); my $named_rs_ids = $named_ext_sth->fetchall_arrayref(); return unless defined $named_rs_ids->[0]->[0]; ## get attrib id for sequence alteration my $attrib_ext_stmt = qq[ select attrib_id from attrib where value ='sequence_alteration' ]; my $attrib_ext_sth = $self->{'dbVar'}->prepare($attrib_ext_stmt); $attrib_ext_sth->execute() || die; my $attrib_id = $attrib_ext_sth->fetchall_arrayref(); die "attribs to be loaded\n\n" unless defined $attrib_id->[0]->[0]; ## update my $var_upd_stmt = qq[ update variation set class_attrib_id = ? where snp_id = ? ]; my $var_upd_sth = $self->{'dbVar'}->prepare($var_upd_stmt); debug(localtime() . "\tUpdating named variants"); foreach my $rs_id(@{$named_rs_ids}){ $var_upd_sth->execute( $attrib_id->[0]->[0], $rs_id->[0]) || die; } } ## extract pubmed ids linked to refsnps sub pubmed_citations{ my $self = shift; return unless $self->table_exists_and_populated('SNPPubmed'); my $logh = $self->{'log'}; print $logh Progress::location(); debug(localtime() . 
"\tExporting pubmed cited SNPs"); ## create tmp table & populate with rs ids & pmids $self->{'dbVar'}->do(qq[ create table tmp_pubmed ( snp_id varchar(255) not null, pubmed_id int(10) unsigned not null, key snp_idx (snp_id ) )]); my $pubmed_ins_sth = $self->{'dbVar'}->prepare(qq[ insert into tmp_pubmed (snp_id, pubmed_id) values (?,?)]); my $sth = $self->{'dbSNP'}->prepare(qq[ SELECT snp_id, pubmed_id from SNPPubmed ]); $sth->execute(); print $logh Progress::location(); while (my $l = $sth->fetchrow_arrayref()){ $pubmed_ins_sth->execute($l->[0], $l->[1]) ||die "Failed to enter pubmed_id for rs: $l->[0], PMID:$l->[1] \n"; } ## move data to correct structure my $pubmed_ext_sth = $self->{'dbVar'}->prepare(qq [ select variation.variation_id, tmp_pubmed.pubmed_id from variation,tmp_pubmed where variation.snp_id = tmp_pubmed.snp_id]); ## linked to PMID via study my $publication_ext_sth = $self->{'dbVar'}->prepare(qq[select publication_id from publication where pmid = ?]); my $publication_ins_sth = $self->{'dbVar'}->prepare(qq[insert into publication (pmid ) values (?)]); my $citation_ins_sth = $self->{'dbVar'}->prepare(qq[insert into variation_citation ( variation_id, publication_id ) values (?,?)]); $pubmed_ext_sth->execute()||die "Failed to extract pubmed data from tmp table\n";; my $data2 = $pubmed_ext_sth->fetchall_arrayref(); my %done; foreach my $l (@{$data2}){ next if $done{$l->[0]}{$l->[1]}; $done{$l->[0]}{$l->[1]} = 1; $publication_ext_sth->execute( $l->[1] )||die; my $publication_id = $publication_ext_sth->fetchall_arrayref(); unless ( defined $publication_id->[0]->[0]){ ## create one study per PMID $publication_ins_sth->execute( $l->[1]); $publication_ext_sth->execute( $l->[1] )||die; $publication_id = $publication_ext_sth->fetchall_arrayref(); } warn "problem adding new publication $l->[1]\n" unless defined $publication_id->[0]->[0]; $citation_ins_sth->execute($l->[0], $publication_id->[0]->[0])||die; } ## remove tmp table $self->{'dbVar'}->do(qq [ drop table 
tmp_pubmed ]); debug(localtime() . "\tCompleted pubmed cited SNPs"); } # filling of the variation table from SubSNP and SNP # creating of a link table variation_id --> subsnp_id sub variation_table { my $self = shift; #If this variable is set, variations with a subsnp_id below this integer will not be imported. This is useful for resuming when resuming an import that crashed at a particular subsnp_id. Also, any SQL statements preparing for the import will be skipped. my $resume_at_subsnp_id = -1; #Put the log filehandle in a local variable my $logh = $self->{'log'}; my $stmt; print $logh Progress::location(); debug(localtime() . "\tDumping RefSNPs"); $stmt = "SELECT "; if ($self->{'limit'}) { $stmt .= "TOP $self->{'limit'} "; } if( $self->{source_engine} =~/mssql/ ){ $stmt .= qq{ 1, 'rs'+LTRIM(STR(snp.snp_id)) AS sorting_id, CASE WHEN snp.validation_status = 0 THEN NULL ELSE snp.validation_status END, NULL, snp.snp_id FROM SNP snp WHERE exemplar_subsnp_id != 0 } ; } else{ $stmt .= qq{ 1, CONCAT('rs', CAST(snp.snp_id AS CHAR)) AS sorting_id, snp.validation_status, NULL, snp.snp_id FROM SNP snp WHERE exemplar_subsnp_id != 0 }; } if ($self->{'limit'}) { $stmt .= qq{ ORDER BY sorting_id ASC }; } dumpSQL($self->{'dbSNP'},$stmt) unless ($resume_at_subsnp_id > 0); debug(localtime() . "\tLoading RefSNPs into variation table"); ### time test this unless ($resume_at_subsnp_id > 0){ $self->{'dbVar'}->do( qq[ALTER TABLE variation add column snp_id int NOT NULL] ) ; $self->{'dbVar'}->do( qq[ALTER TABLE variation disable keys]); } load( $self->{'dbVar'}, "variation", "source_id", "name", "validation_status", "ancestral_allele", "snp_id" ) unless ($resume_at_subsnp_id > 0); $self->{'dbVar'}->do( "ALTER TABLE variation ADD INDEX snpidx( snp_id )" ) unless ($resume_at_subsnp_id > 0); debug(localtime() . "\tVariation table loaded"); $self->{'dbVar'}->do( qq[ALTER TABLE variation enable keys] ); debug(localtime() . "\tVariation table indexed"); debug(localtime() . 
"\tStarting subSNPhandle"); #create a subsnp_handle table $stmt = qq{ SELECT s.subsnp_id, b.handle FROM SubSNP s, Batch b WHERE s.batch_id = b.batch_id }; dumpSQL($self->{'dbSNP'},$stmt); load( $self->{'dbVar'}, "subsnp_handle", "subsnp_id", "handle"); debug(localtime() . "\tFinished subSNPhandle"); return unless $self->{'dbm'}->dbCore()->species =~ /homo/i ; debug(localtime() . "\tLooking for Somatic variants"); #Get somatic flag from dbSNP but only if the snp exclusively has somatic subsnps $stmt = qq{ SELECT sssl.snp_id FROM SubSNP ss JOIN SNPSubSNPLink sssl ON ( sssl.subsnp_id = ss.subsnp_id AND ss.SOMATIC_ind = 'Y') where not exists (select * from SubSNP ss2 JOIN SNPSubSNPLink sssl2 ON ( sssl2.subsnp_id = ss2.subsnp_id AND ss2.SOMATIC_ind = 'N') where sssl2.snp_id = sssl.snp_id ) and sssl.snp_id between ? and ? }; my $sth = $self->{'dbSNP'}->prepare($stmt); print $logh Progress::location(); $sth->execute(); print $logh Progress::location(); my $snp_id; $sth->bind_columns(\$snp_id); # Loop over the somatic SNPs and set the flag in the variation table $stmt = qq{ UPDATE variation SET somatic = 1 WHERE snp_id = ? }; my $up_sth = $self->{'dbVar'}->prepare($stmt); while ($sth->fetch()) { $up_sth->execute($snp_id); } $sth->finish(); $up_sth->finish(); print $logh Progress::location(); debug(localtime() . "\tFinished Somatic variants"); return; } # import any clinical significance, global minor allele frequencies or suspect SNPs # these subroutines all check if the table is present and populated before doing # anything so should work fine on species without the necessary tables sub dbSNP_annotations{ my $self = shift; ## This is no longer run - ClinVar export has more information #debug(localtime() . "\tStarting clin_sig"); #$self->clin_sig; debug(localtime() . "\tStarting MAF"); $self->minor_allele_freq; debug(localtime() . "\tStarting suspect SNP"); $self->suspect_snps; debug(localtime() . "\tStarting named variants"); $self->named_variants; debug(localtime() . 
"\tFinished variation table with dbSNP annotations"); return; } # extract ss id & strand relative to rs # this mapping is used extensively in the import process # but discarded for human when import id complete due to Mart problems sub subsnp_synonyms{ my $self = shift; my $logh = $self->{'log'}; # create a temp table of subSNP info debug(localtime() . "\tDumping SubSNPs"); my $stmt = "SELECT "; if ($self->{'limit'}) { $stmt .= "TOP $self->{'limit'} "; } $stmt .= qq{ subsnp.subsnp_id , subsnplink.snp_id, subsnplink.substrand_reversed_flag, b.moltype FROM SubSNP subsnp, SNPSubSNPLink subsnplink, Batch b WHERE subsnp.batch_id = b.batch_id AND subsnp.subsnp_id = subsnplink.subsnp_id }; if ($self->{'limit'}) { $stmt .= qq{ ORDER BY subsnp_id ASC }; } dumpSQL($self->{'dbSNP'},$stmt) ; create_and_load( $self->{'dbVar'}, "tmp_var_allele", "subsnp_id i* not_null", "refsnp_id v* not_null", "substrand_reversed_flag i", "moltype", "allele_id i"); print $logh Progress::location(); # load the synonym table with the subsnp identifiers debug(localtime() . "\tloading variation_synonym table with subsnps"); print $logh Progress::location(); # Subsnp_id interval to export at each go my $interval = 5e5; my $offset = 0; #Get the minimum and maximum subsnp_id. We will export rows based on subsnp_id rather than using limit and offset in order to avoid having the same subsnp_id split across two exports $stmt = qq{ SELECT MIN(tv.subsnp_id) AS mn, MAX(tv.subsnp_id) AS mx FROM tmp_var_allele tv }; my ($min_id,$max_id) = @{$self->{'dbVar'}->db_handle()->selectall_arrayref($stmt)->[0]}; print $logh Progress::location(); debug(localtime() . 
"\tDoing variation_synonym insert"); ## remove indexes for quicker loading $self->{'dbVar'}->do("ALTER TABLE variation_synonym disable keys "); $self->{'dbVar'}->do(qq{ALTER TABLE variation_synonym add column substrand_reversed_flag tinyint}); while ($offset < $max_id) { my $end = $offset + $interval; $self->{'dbVar'}->do( qq{ insert into variation_synonym (variation_id, subsnp_id, source_id, name, moltype, substrand_reversed_flag) (SELECT distinct v.variation_id, tv.subsnp_id, 1, CONCAT( 'ss', tv.subsnp_id), tv.moltype, tv.substrand_reversed_flag FROM tmp_var_allele tv USE INDEX (subsnp_id_idx, refsnp_id_idx), variation v USE INDEX (snpidx) WHERE tv.subsnp_id BETWEEN $offset AND $end AND v.snp_id = tv.refsnp_id ORDER BY tv.subsnp_id ASC) }); # Increase the offset $offset += ($interval + 1); } debug(localtime() . "\tIndexing variation_synonym table "); $self->{'dbVar'}->do("ALTER TABLE variation_synonym enable keys"); debug(localtime() . "\tCreating subsnp_map table "); ## create subsnp_map table $self->{'dbVar'}->do(qq[ CREATE TABLE subsnp_map ( variation_id int(11) unsigned NOT NULL, subsnp_id int(11) unsigned DEFAULT NULL) ]); $self->{'dbVar'}->do( qq[ insert into subsnp_map (variation_id, subsnp_id) select variation_id, subsnp_id from variation_synonym]); $self->{'dbVar'}->do(qq[ CREATE INDEX variation_idx on subsnp_map (variation_id) ]); print $logh Progress::location(); return; } # # dumps subSNPs # sub dump_subSNPs { my $self = shift; #Put the log filehandle in a local variable my $logh = $self->{'log'}; my $stmt = "SELECT "; if ($self->{'limit'}) { $stmt .= "TOP $self->{'limit'} "; } $stmt .= qq{ subsnp.subsnp_id AS sorting_id, subsnplink.snp_id, b.pop_id, ov.pattern, subsnplink.substrand_reversed_flag, b.moltype FROM SubSNP subsnp, SNPSubSNPLink subsnplink, $self->{'dbSNP_share_db'}.ObsVariation ov, Batch b WHERE subsnp.batch_id = b.batch_id AND subsnp.subsnp_id = subsnplink.subsnp_id AND ov.var_id = subsnp.variation_id }; if ($self->{'limit'}) { $stmt .= 
qq{ ORDER BY sorting_id ASC }; } my $sth = $self->{'dbSNP'}->prepare($stmt,{mysql_use_result => 1}); $sth->execute(); open ( FH, ">" . $self->{'tmpdir'} . "/" . $self->{'tmpfile'} ); my ($row); while($row = $sth->fetchrow_arrayref()) { my $prefix; my @alleles = split('/', $row->[3]); if ($row->[3] =~ /^(\(.*\))\d+\/\d+/) { $prefix = $1; } my @row = map {(defined($_)) ? $_ : '\N'} @$row; # split alleles into multiple rows foreach my $a (@alleles) { if ($prefix and $a !~ /\(.*\)/) {#incase (CA)12/13/14 CHANGE TO (CA)12/(CA)13/(CA)14 $a = $prefix.$a; #$prefix = ""; } $row[3] = $a; print FH join("\t", @row), "\n"; } } $sth->finish(); close FH; } # # loads the population table # # This subroutine produces identical results as the MySQL equivalence # sub population_table { my $self = shift; #Put the log filehandle in a local variable my $logh = $self->{'log'}; $self->{'dbVar'}->do("ALTER TABLE population ADD column pop_id int"); $self->{'dbVar'}->do("ALTER TABLE population ADD column pop_class_id int"); ## hold id of super pop in super pop print $logh Progress::location(); # load PopClassCode data as populations # - these are super_populations like 'MULTI-NATIONAL' and 'NORTH/EAST AFRICA & MIDDLE EAST' debug(localtime() . "\tDumping population class data"); my $stmt = qq{ SELECT RTRIM(pop_class), RTRIM(pop_class_id), RTRIM(pop_class_text) FROM $self->{'dbSNP_share_db'}.PopClassCode }; dumpSQL($self->{'dbSNP'},$stmt); load($self->{'dbVar'}, 'population', 'name', 'pop_class_id', 'description'); $self->{'dbVar'}->do(qq{ALTER TABLE population ADD INDEX pop_class_id (pop_class_id)}); print $logh Progress::location(); debug(localtime() . 
"\tDumping population data"); # load Population data as populations my $concat_syntax ; if($self->{source_engine} =~/mssql/){ $concat_syntax = qq[ p.handle+':'+p.loc_pop_id ]; } else{ $concat_syntax = qq[ CONCAT(p.handle,':',p.loc_pop_id) ]; } $stmt = qq{ SELECT DISTINCT $concat_syntax, p.pop_id, pc.pop_class_id, pl.line, pl.line_num FROM Population p LEFT JOIN $self->{'dbSNP_share_db'}.PopClass pc ON p.pop_id = pc.pop_id LEFT JOIN PopLine pl ON p.pop_id = pl.pop_id ORDER BY p.pop_id ASC, pc.pop_class_id ASC, pl.line_num ASC }; #table size is small, so no need to change dumpSQL($self->{'dbSNP'},$stmt); debug(localtime() . "\tLoading population data"); create_and_load( $self->{'dbVar'}, "tmp_pop", "name", "pop_id i*", "pop_class_id i*", "description l", "line_num i*" ); print $logh Progress::location(); #populate the population table with the populations $self->{'dbVar'}->do("SET SESSION group_concat_max_len = 10000"); $self->{'dbVar'}->do(qq{INSERT INTO population (name, pop_id,description) SELECT tp.name, tp.pop_id, GROUP_CONCAT(description ORDER BY tp.pop_class_id ASC, tp.line_num ASC) FROM tmp_pop tp GROUP BY tp.pop_id }); #table size is small, so no need to change print $logh Progress::location(); $self->{'dbVar'}->do(qq{ALTER TABLE population ADD INDEX pop_id (pop_id)}); print $logh Progress::location(); debug(localtime() . 
"\tLoading population_synonym table"); # build super/sub population relationships $self->{'dbVar'}->do(qq{INSERT INTO population_structure (super_population_id,sub_population_id) SELECT DISTINCT p1.population_id, p2.population_id FROM tmp_pop tp, population p1, population p2 WHERE tp.pop_class_id = p1.pop_class_id AND tp.pop_id = p2.pop_id}); print $logh Progress::location(); #load population_synonym table with dbSNP population id $self->{'dbVar'}->do(qq{INSERT INTO population_synonym (population_id,source_id,name) SELECT population_id, 1, pop_id FROM population WHERE pop_id is NOT NULL }); print $logh Progress::location(); $self->{'dbVar'}->do("DROP TABLE tmp_pop"); print $logh Progress::location(); } # loads the individual table # # pre-e!70 samples were merged on dbSNP ind_id and a submitted name chosen at random # post-e!70 samples are only merged if they have the same name and ind_id # gender and ped info are held on the ind_id # - logic exists to select the correct parent name and avoid multiple conventions within the same family sub individual_table { my $self = shift; #Put the log filehandle in a local variable my $logh = $self->{'log'}; debug(localtime() . 
"\tStarting on Individual data"); #decide which individual_type should this species be make sure it's correct when adding new species my $individual_type_id; if ($self->{'dbm'}->dbCore()->species =~ /homo|pan|anoph/i) { $individual_type_id = 3; } elsif ($self->{'dbm'}->dbCore()->species =~ /mus/i) { $individual_type_id = 1; } else { $individual_type_id = 2; } ## create temp table to hold dbSNP submitted_ind_id to simplify genotype loading $self->{'dbVar'}->do(qq{ CREATE TABLE tmp_ind ( submitted_ind_id int(10) unsigned not null, individual_id int(10) unsigned not null, primary key(submitted_ind_id), key individual_id_idx (individual_id)) }); ## get pedigree info - held at dbSNP cluster level my $ped = $self->get_ped_data(); ## get individual names, submitters and dbSNPcluster info ## merged hash is used to pick the right name format for parents when 2 name type for the same SunmittedInd are held [human only] my ($individuals, $merged) = $self->get_ind_data(); ## get population ids for pre-loaded populations my $pop_ids = $self->get_pop_ids(); ## prepare insert statements my $ind_ins_sth = $self->{'dbVar'}->prepare(qq[ INSERT INTO individual ( name, description, individual_type_id) values (?,?,?)]); my $ind_upd_sth = $self->{'dbVar'}->prepare(qq[ update individual set father_individual_id =?, mother_individual_id =?, gender =? where individual_id = ? ]); my $tmp_ins_sth = $self->{'dbVar'}->prepare(qq[ INSERT INTO tmp_ind (individual_id, submitted_ind_id) values (?,?)]); my $pop_ins_sth = $self->{'dbVar'}->prepare(qq[ INSERT INTO individual_population (individual_id, population_id) values (?,?) ]); my $syn_ins_sth = $self->{'dbVar'}->prepare(qq[ INSERT INTO individual_synonym (individual_id,source_id,name) values (?,?,?) 
]); ## insert individual data my %individual_id; my %done; my $n = 1000000; foreach my $ind (keys %$individuals){ ## not all SubmittedIndividual are currated to dbSNP Individuals $individuals->{$ind}{ind} = $n unless defined $individuals->{$ind}{ind} ; ## not clustered into Individual entry by dbSNP $n++; unless (defined $done{$individuals->{$ind}{name}}{$individuals->{$ind}{ind}} ){ ## insert individual $ind_ins_sth->execute( $individuals->{$ind}{name}, $individuals->{$ind}{des},$individual_type_id ); my $individual_id = $self->{'dbVar'}->db_handle->last_insert_id(undef, undef, 'individual', 'individual_id'); if(defined $individuals->{$ind}{ind} && $individuals->{$ind}{ind} < 1000000){ ## insert dbSNP synonym [not fakes] $syn_ins_sth->execute( $individual_id, 1, $individuals->{$ind}{ind}); } ## save individual_id based on name and dbSNP merged id (merging only these on import) $done{ $individuals->{$ind}{name} }{ $individuals->{$ind}{ind} } = $individual_id; } $tmp_ins_sth->execute($done{$individuals->{$ind}{name}}{$individuals->{$ind}{ind}}, $ind); ## save individual ids for ped look up $individual_id{$ind} = $done{$individuals->{$ind}{name}}{$individuals->{$ind}{ind}}; if(defined $individuals->{$ind}{pid}){ #warn "Adding ind_pop link $individuals->{$ind}{name} ($sample_id{$ind}) to $individuals->{$ind}{pid} ($pop_ids->{$individuals->{$ind}{pid}})\n"; if(defined $pop_ids->{$individuals->{$ind}{pid}}){ $pop_ins_sth->execute( $individual_id{$ind} , $pop_ids->{$individuals->{$ind}{pid}}); } else{ warn "No individual id for population id $individuals->{$ind}{pid} for individual $individuals->{$ind}{name}\n"; } } } ## insert individual data inc ped info my %ind_done;## entering once per merged individual foreach my $ind (keys %$individuals){ next if $ind_done{$individual_id{$ind}}; $ind_done{$individual_id{$ind}} = 1; my $merged_id = $individuals->{$ind}{ind}; ## for readablity my $gender; if(defined $ped->{$merged_id}{gender} && $ped->{$merged_id}{gender} =~/ale/ ){ 
$gender = $ped->{$merged_id}{gender}; } else{ $gender = "Unknown"; } my $mother_id = '\\N'; my $father_id = '\\N'; ## get submitted_ind_id and correct name from same population to link to (if available) if(defined $ped->{$merged_id}{father} && defined $merged->{ $ped->{$merged_id}{father} }{ $individuals->{$ind}{pid}}){ my $father_submitted_id = $merged->{ $ped->{$merged_id}{father} }{ $individuals->{$ind}{pid}}; if(defined $individual_id{$father_submitted_id}){ $father_id = $individual_id{$father_submitted_id}; }else{ warn "No ensembl id found for father : $father_submitted_id from child name $individuals->{$ind}{name}\n"; } } if(defined $ped->{$merged_id}{mother} && defined $merged->{ $ped->{$merged_id}{mother} }{ $individuals->{$ind}{pid}}){ my $mother_submitted_id = $merged->{ $ped->{$merged_id}{mother} }{ $individuals->{$ind}{pid}}; if(defined $individual_id{$mother_submitted_id}){ $mother_id = $individual_id{$mother_submitted_id}; }else{ warn "No ensembl id found for mother : $mother_submitted_id from child name $individuals->{$ind}{name}\n"; } } next unless (defined $gender || $father_id =~ /\d+/ || $mother_id =~ /\d+/); $ind_upd_sth->execute( $father_id, $mother_id, $gender, $individual_id{$ind}, ); } ## update population.size once all individuals loaded $self->update_population_size(); debug(localtime() . 
"\tIndividual data loaded"); print $logh Progress::location(); return; } ## export family and gender info for individual ## held at level of curate Individual rather than SubmittedIndividual sub get_ped_data{ my $self = shift; my %ped; my $all_ped_sth = $self->{'dbSNP'}->prepare(qq[ SELECT ind_id, pa_ind_id, ma_ind_id, sex FROM PedigreeIndividual ])||die "ERROR preparing ss_sth: $DBI::errstr\n"; $all_ped_sth->execute()||die "ERROR executing: $DBI::errstr\n"; my $ped = $all_ped_sth->fetchall_arrayref(); foreach my $l(@{$ped}){ if(defined $l->[3]){ $l->[3] =~ s/M/Male/; $l->[3] =~ s/F/Female/; } $ped{$l->[0]}{father} = $l->[1]; $ped{$l->[0]}{mother} = $l->[2]; $ped{$l->[0]}{gender} = $l->[3]; } return \%ped; } ## Extract submitted individual data sub get_ind_data{ my $self = shift; my %individuals; my %merged; my $all_ind_sth = $self->{'dbSNP'}->prepare(qq[ SELECT si.submitted_ind_id, si.loc_ind_id_upp, i.descrip, i.ind_id , si.pop_id FROM SubmittedIndividual si LEFT OUTER JOIN Individual i on (si.ind_id = i.ind_id) ])||die "ERROR preparing ss_sth: $DBI::errstr\n"; $all_ind_sth->execute()||die "ERROR executing: $DBI::errstr\n"; my $inds = $all_ind_sth->fetchall_arrayref(); foreach my $l(@{$inds}){ $individuals{$l->[0]}{name} = $l->[1]; if( defined $l->[2] && $l->[2] !~ /unknown source|byPopDesc/i ){ ## save individual description if useful $individuals{$l->[0]}{des} = $l->[2]; } if( defined $l->[3] ){ ## save dbSNP clustered individual id $individuals{$l->[0]}{ind} = $l->[3]; } if( defined $l->[4]) { ## save population id $individuals{$l->[0]}{pid} = $l->[4]; ## look up merged id/pop id => submitted_ind_id for ped linking $merged{$l->[3]}{$l->[4]} = $l->[0] if defined $l->[3] ; } } return (\%individuals, \%merged); } ## look up population_id for dbSNP pop_id to link individuals to pops sub get_pop_ids{ my $self = shift; my $pop_ext_sth = $self->{'dbVar'}->prepare(qq[ select population_id, pop_id from population where pop_id is not null ]); my %pop_id; 
    $pop_ext_sth->execute();
    my $pop_link = $pop_ext_sth->fetchall_arrayref();

    foreach my $l (@{$pop_link}){
        $pop_id{$l->[1]} = $l->[0];   # dbSNP pop_id => local population_id
    }
    return \%pop_id;
}

## count and store the number of individuals in a population
# Run once all individuals are loaded; counts individual_population rows per
# population and writes the result into population.size.
sub update_population_size{

    my $self = shift;

    my $size_ext_sth = $self->{'dbVar'}->prepare(qq[ select population.population_id, count(*) from population, individual_population where population.population_id = individual_population.population_id group by population.population_id ]);

    my $size_upd_sth = $self->{'dbVar'}->prepare(qq[ update population set size =? where population_id = ? ]);

    $size_ext_sth->execute()||die "Failed to extact individual counts for populations\n";
    my $sizes = $size_ext_sth->fetchall_arrayref();

    foreach my $l (@{$sizes}){
        $size_upd_sth->execute( $l->[1], $l->[0])||die "Failed to update individual counts for populations\n";
    }
    return;
}

# Export allele data from dbSNP and load it into the allele table, farming the
# extraction out as binned jobs. When $load_only is true the extraction phase
# is skipped (resume support) and only the load is attempted.
sub parallelized_allele_table {
  my $self = shift;
  my $load_only = shift;   # truthy => skip extraction, only (re)load

  #Put the log filehandle in a local variable
  my $logh = $self->{'log'};
  my $stmt;
  debug(localtime() . "\tStarting allele table");

  ## revert schema to use letter rather than number coded alleles
  update_allele_schema($self->{'dbVar'});

  # The tempfile to be used for loading
  my $file_prefix = $self->{'tmpdir'} . '/allele_table';
  my $loadfile = $file_prefix . '_loadfile.txt';

  # Sample file is used for caching the samples
  my $samplefile = $file_prefix . '_population_samples.txt';
  # Allele file is used for caching the alleles
  my $allelefile = $file_prefix . '_alleles.txt';

  # Do the extraction of the alleles unless we're resuming and will only load the alleles
  my $task_manager_file = $file_prefix . '_task_management.txt';
  my $jobindex;
  my $jobid;
  my $jobname;
  my $result;
  print $logh Progress::location();
  unless ($load_only) {
##=head hashed out to re-run failed job
    #First, get the population_id -> sample_id mapping and write it to the file. The subroutine can also get it but it's faster to get all at once since we expect many to be used.
$stmt = qq{ SELECT DISTINCT p.pop_id, p.population_id FROM population p WHERE p.pop_id IS NOT NULL AND p.pop_id > 0 }; my $sth = $self->{'dbVar'}->prepare($stmt); $sth->execute(); my @samples; while (my @row = $sth->fetchrow_array()) { push(@samples,@row); } my %s = (@samples); dbSNP::ImportTask::write_samples($samplefile,\%s); print $logh Progress::location(); #Process the alleles in chunks based on the SubSNP id. This number should be kept at a reasonable level, ideally so that we don't need to request more than 4GB memory on the farm. If so, we'll have access to the most machines. #The limitation is that the results from the dbSNP query is read into memory. If necessary, we can dump it to a file (if the results are sorted) ## improve binning for sparsely submitted species #hash out task file creation if re-running 1 failed job $jobindex = write_allele_task_file($self->{'dbSNP'}->db_handle(),$task_manager_file, $loadfile, $allelefile, $samplefile,$self->{limit}); # my $jobindex = 795; # my $start = 795; my $start = 1 ; debug(localtime() . "\tAt allele table - written export task file"); # Run the job on the farm $jobname = 'allele_table'; $result = $self->run_on_farm($jobname,$file_prefix,'allele_table',$task_manager_file,$start,$jobindex); $jobid = $result->{'jobid'}; debug(localtime() . "\tAt allele table - export jobs run on farm"); # Check if any subtasks generated errors if ((my @error_subtasks = grep($result->{'subtask_details'}{$_}{'generated_error'},keys(%{$result->{'subtask_details'}})))) { warn($result->{'message'}); print $logh Progress::location() . "\t " . $result->{'message'}; print $logh Progress::location() . "\tThe following subtasks generated errors for job $jobid:\n"; foreach my $index (@error_subtasks) { print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n}; } } # Check the result, if anything failed if (!$result->{'success'}) { warn($result->{'message'}); print $logh Progress::location() . "\t " . 
$result->{'message'};
      print $logh Progress::location() . "\tThe following subtasks failed for unknown reasons for job $jobid:\n";
      foreach my $index (grep($result->{'subtask_details'}{$_}{'fail_reason'} =~ m/UNKNOWN/,keys(%{$result->{'subtask_details'}}))) {
        print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
      }
      print $logh Progress::location() . "\tThe following subtasks failed because they ran out of resources for job $jobid:\n";
      # BUGFIX: was m/OUT_OF_[MEMORY|TIME]/ - square brackets build a character
      # class (any single one of M,E,O,R,Y,|,T,I), not alternation, so e.g.
      # OUT_OF_RANGE would also have matched. Use a real alternation group.
      foreach my $index (grep($result->{'subtask_details'}{$_}{'fail_reason'} =~ m/OUT_OF_(?:MEMORY|TIME)/,keys(%{$result->{'subtask_details'}}))) {
        print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
      }
    }

    # If we still have subtasks that fail, this needs to be resolved before proceeding
    die("Some subtasks are failing (see log output). This needs to be resolved before proceeding with the loading of genotypes!") unless ($result->{'success'});
  }

  debug(localtime() . " Creating single allele file to load ");
  ## merge all data files into one for loading
  die "Exiting: Number of allele files to merge not known - is load_only set?\n" unless defined $jobindex;

  my $new_load_file_name = $self->{'tmpdir'} . "/allele_load_file_new.txt";
  # BUGFIX: was 'open ... || die': high-precedence '||' bound to the filename
  # (always true), so the die could never fire. Low-precedence 'or' applies to
  # open()'s return value as intended.
  open my $new_load_file, ">", $new_load_file_name or die "Failed to open allele load file to write:$!\n";

  foreach my $n(1..$jobindex){
    my $cat_file = $self->{'tmpdir'} ."/allele_table_loadfile.txt_$n";
    # BUGFIX: three-arg open with an explicit read mode; the old two-arg form
    # also suffered the same '||' precedence problem as above.
    open my $subfile, '<', $cat_file or die "Failed to open $cat_file to read:$!\n";
    while(<$subfile>){print $new_load_file $_;}
    close $subfile;
  }
  close $new_load_file;

  ### start loading process - run as single job
  # BUGFIX in the die message: "Erro" typo, and "$self->{'dbVar'}::errstr"
  # interpolated the stringified handle followed by the literal '::errstr';
  # report $DBI::errstr instead (consistent with the rest of this module).
  $self->{'dbVar'}->do( qq[ LOAD DATA LOCAL INFILE "$new_load_file_name" INTO TABLE  allele( variation_id,subsnp_id,population_id,allele,frequency,count,frequency_submitter_handle )]) || die "Error loading allele data: $DBI::errstr \n";
  ## add indexes post load
  debug(localtime() . "\tAt allele table - data load complete");

  $self->{'dbVar'}->do( qq[ alter table allele enable keys]);

  debug(localtime() . 
"\tAt allele table - indexing complete"); #Finally, create the allele_string table needed for variation_feature ## change to store A/T rather than seperate rows per allele $stmt = qq{ SELECT snp.snp_id, uv.var_str FROM SNP snp JOIN $self->{'dbSNP_share_db'}.UniVariation uv ON ( uv.univar_id = snp.univar_id ) }; dumpSQL($self->{'dbSNP'},$stmt); print $logh Progress::location(); create_and_load($self->{'dbVar'},"tmp_allele_string","snp_name * not_null","allele"); print $logh Progress::location(); $stmt = qq{ CREATE TABLE allele_string SELECT v.variation_id AS variation_id, tas.allele AS allele_string FROM variation v, tmp_allele_string tas WHERE v.name = CONCAT( "rs", tas.snp_name ) }; $self->{'dbVar'}->do($stmt); print $logh Progress::location(); $stmt = qq{ ALTER TABLE allele_string ADD INDEX variation_idx (variation_id) }; $self->{'dbVar'}->do($stmt); print $logh Progress::location(); debug(localtime() . "\tEnding allele table"); } ## revert schema to use letter rather than number coded alleles sub update_allele_schema{ my $dbVar_dbh = shift; $dbVar_dbh->do( qq[ drop table allele ] ); $dbVar_dbh->do( qq [ CREATE TABLE allele ( allele_id int(10) unsigned NOT NULL AUTO_INCREMENT, variation_id int(10) unsigned NOT NULL, subsnp_id int(15) unsigned NOT NULL, allele varchar(25000) NOT NULL, frequency float DEFAULT NULL, population_id int(10) unsigned DEFAULT NULL, count int(10) unsigned DEFAULT NULL, frequency_submitter_handle int(10) DEFAULT NULL, PRIMARY KEY (allele_id), KEY subsnp_idx (subsnp_id), KEY variation_idx (variation_id)) ]); ## disable keys for quick loading $dbVar_dbh->do( qq[ alter table allele disable keys]); return; } sub write_allele_task_file{ my ($dbh, $task_manager_file, $loadfile, $allelefile, $samplefile, $limit) = @_; #warn "Starting allele_task file \n"; ### previously binning at 500,000, switched to 400,000 my ($first, $previous, $jobindex, $ssid); debug(localtime() . 
"\tAt write_allele_task_file - starting"); open(MGMT,'>',$task_manager_file) || die "Failed to open allele table task management file ($task_manager_file): $!\n";; my $stmt = "SELECT "; if ($limit) { $stmt .= "TOP $limit "; } $stmt .= qq[ ss.subsnp_id FROM SubSNP ss order by ss.subsnp_id ]; my $counter = 0; my $ss_extract_sth = $dbh->prepare($stmt); $ss_extract_sth->execute() ||die "Error extracting ss ids for allele_table binning\n"; $ss_extract_sth->bind_columns(\$ssid); debug(localtime() . "\tAt write_allele_task_file - executed"); while( $ss_extract_sth->fetchrow_arrayref()){ $counter++; unless(defined $first){ ### set up first bin $first = $ssid; # warn "setting first to $ssid\n"; $jobindex = 1; next; } if($counter >=100000){ ## end bin print MGMT qq{$jobindex $loadfile\_$jobindex $first $previous $allelefile $samplefile\n}; ## start new bin $first = $ssid; $counter = 1; $jobindex++; } else{ $previous = $ssid; } } ### write out last bin print MGMT qq{$jobindex $loadfile\_$jobindex $first $previous $allelefile $samplefile\n}; close MGMT; debug(localtime() . 
"\tFinished allele task file"); return ($jobindex); } # # loads the flanking sequence table # sub flanking_sequence_table { my $self = shift; #Put the log filehandle in a local variable my $logh = $self->{'log'}; my $stmt; $self->{'dbVar'}->do(qq{CREATE TABLE tmp_seq (variation_id int NOT NULL, subsnp_id int NOT NULL, seq_type int NOT NULL, line_num int, type enum ('5','3'), line varchar(255), revcom tinyint) MAX_ROWS = 100000000}); ## Create flanking sequence table as no longer part of production schema $self->{'dbVar'}->do(qq{ create table if not exists flanking_sequence ( variation_id int(10) unsigned not null, up_seq text, down_seq text, up_seq_region_start int, up_seq_region_end int, down_seq_region_start int, down_seq_region_end int, seq_region_id int(10) unsigned, seq_region_strand tinyint, primary key( variation_id ) ) MAX_ROWS = 100000000}); print $logh Progress::location(); # import both the 5prime and 3prime flanking sequence tables ## in human the flanking sequence tables have been partitioned if($self->{'dbm'}->dbCore()->species =~ /human|homo/i) { foreach my $type ('3','5') { foreach my $partition('p1_human','p2_human','p3_human','ins') { warn "Dumping $type flank from $partition\n"; debug("Dumping $type\_$partition flanking sequence"); ## TEST THIS $stmt = "SELECT "; if ($self->{'limit'}) { $stmt .= "TOP $self->{'limit'} "; } $stmt .= qq{ seq.subsnp_id AS sorting_id, seq.type, seq.line_num, seq.line, subsnplink.substrand_reversed_flag FROM SubSNPSeq$type\_$partition seq, SNP snp, SNPSubSNPLink subsnplink WHERE snp.exemplar_subsnp_id = seq.subsnp_id AND seq.subsnp_id = subsnplink.subsnp_id }; if ($self->{'limit'}) { $stmt .= qq{ ORDER BY sorting_id ASC }; } dumpSQL($self->{'dbSNP'}, $stmt); $self->{'dbVar'}->do(qq{CREATE TABLE tmp_seq_$type\_$partition ( subsnp_id int NOT NULL, seq_type int NOT NULL, line_num int, line varchar(255), revcom tinyint ) MAX_ROWS = 100000000 }); print $logh Progress::location(); load($self->{'dbVar'}, 
"tmp_seq_$type\_$partition", "subsnp_id", "seq_type", "line_num", "line"); print $logh Progress::location(); $self->{'dbVar'}->do("CREATE INDEX subsnp_id_idx on tmp_seq_$type\_$partition (subsnp_id)"); # merge the tables into a single tmp table $self->{'dbVar'}->do(qq{INSERT INTO tmp_seq (variation_id, subsnp_id, seq_type, line_num, type, line, revcom) SELECT vs.variation_id, ts.subsnp_id, ts.seq_type, ts.line_num, '$type', ts.line, ts.revcom FROM tmp_seq_$type\_$partition ts, variation_synonym vs WHERE vs.subsnp_id = ts.subsnp_id}); print $logh Progress::location(); #drop tmp table to free space $self->{'dbVar'}->do(qq{DROP TABLE tmp_seq_$type\_$partition}); print $logh Progress::location(); } } } ## other species no partitions else { foreach my $type ('3','5') { debug(localtime() . "\tDumping $type' flanking sequence"); $stmt = "SELECT "; if ($self->{'limit'}) { $stmt .= "TOP $self->{'limit'} "; } $stmt .= qq{ seq.subsnp_id AS sorting_id, seq.type, seq.line_num, seq.line, subsnplink.substrand_reversed_flag FROM SubSNPSeq$type seq, SNP snp, SNPSubSNPLink subsnplink WHERE snp.exemplar_subsnp_id = seq.subsnp_id AND seq.subsnp_id = subsnplink.subsnp_id }; if ($self->{'limit'}) { $stmt .= qq{ ORDER BY sorting_id ASC }; } dumpSQL($self->{'dbSNP'},$stmt); $self->{'dbVar'}->do(qq{CREATE TABLE tmp_seq_$type ( subsnp_id int NOT NULL, seq_type int NOT NULL, line_num int, line varchar(255), revcom tinyint, KEY subsnp_id_idx(subsnp_id) ) MAX_ROWS = 100000000 }); print $logh Progress::location(); load($self->{'dbVar'}, "tmp_seq_$type", "subsnp_id", "seq_type", "line_num", "line", "revcom"); print $logh Progress::location(); # merge the tables into a single tmp table $self->{'dbVar'}->do(qq{INSERT INTO tmp_seq (variation_id, subsnp_id, seq_type, line_num, type, line, revcom) SELECT vs.variation_id, ts.subsnp_id, ts.seq_type, ts.line_num, '$type', ts.line, ts.revcom FROM tmp_seq_$type ts, variation_synonym vs WHERE vs.subsnp_id = ts.subsnp_id}); print $logh Progress::location(); 
#drop tmp table to free space $self->{'dbVar'}->do(qq{DROP TABLE tmp_seq_$type}); print $logh Progress::location(); } } $self->{'dbVar'}->do("ALTER TABLE tmp_seq ADD INDEX idx (subsnp_id, type, seq_type, line_num)"); print $logh Progress::location(); my $sth = $self->{'dbVar'}->prepare(qq{SELECT ts.variation_id, ts.subsnp_id, ts.type, ts.line, ts.revcom FROM tmp_seq ts FORCE INDEX (idx) ORDER BY ts.subsnp_id, ts.type, ts.seq_type, ts.line_num},{mysql_use_result => 1}); $sth->execute(); my ($vid, $ssid, $type, $line, $revcom); $sth->bind_columns(\$vid, \$ssid, \$type, \$line, \$revcom); open(FH, ">" . $self->{'tmpdir'} . "/" . $self->{'tmpfile'}); my $upstream = ''; my $dnstream = ''; my $cur_vid; my $cur_revcom; #=head Not flipping & merging flanks as part of import process debug(localtime() . "\tRearranging flanking sequence data"); # dump sequences to file that can be imported all at once while($sth->fetch()) { if(defined($cur_vid) && $cur_vid != $vid) { # if subsnp in reverse orientation to refsnp, # reverse compliment flanking sequence if($cur_revcom) { ($upstream, $dnstream) = ($dnstream, $upstream); reverse_comp(\$upstream); reverse_comp(\$dnstream); } print FH join("\t", $cur_vid, $upstream, $dnstream), "\n"; $upstream = ''; $dnstream = ''; } $cur_vid = $vid; $cur_revcom = $revcom; if($type == 5) { $upstream .= $line; } else { $dnstream .= $line; } } # do not forget last row... if($cur_revcom) { ($upstream, $dnstream) = ($dnstream, $upstream); reverse_comp(\$upstream); reverse_comp(\$dnstream); } print FH join("\t", $cur_vid, $upstream, $dnstream), "\n"; $sth->finish(); close FH; #$self->{'dbVar'}->do("DROP TABLE tmp_seq"); print $logh Progress::location(); debug(localtime() . "\tLoading flanking sequence data"); # import the generated data load($self->{'dbVar'},"flanking_sequence","variation_id","up_seq","down_seq"); print $logh Progress::location(); unlink($self->{'tmpdir'} . "/" . 
$self->{'tmpfile'});
#=cut
  return;
}


# Populates variation_feature from dbSNP contig locations:
#  - dumps default-assembly seq_region data from the core database
#  - dumps SNP contig locations from the dbSNP SNPContigLoc/ContigInfo tables
#    (table names are version-dependent, resolved below)
#  - flags variations that have genotypes and inserts everything into
#    the variation_feature table
sub variation_feature {
  my $self = shift;

  #Put the log filehandle in a local variable
  my $logh = $self->{'log'};

  debug(localtime() . "\tDumping seq_region data");

  # Only seq_regions belonging to the default assembly version are exported
  dumpSQL($self->{'dbCore'}->db_handle, qq{SELECT sr.seq_region_id, sr.name, sr.coord_system_id FROM seq_region sr, coord_system cs WHERE sr.coord_system_id = cs.coord_system_id AND cs.attrib like "%default_version%"});

  debug(localtime() . "\tLoading seq_region data");
  load($self->{'dbVar'}, "seq_region", "seq_region_id", "name");
  print $logh Progress::location();

  debug(localtime() . "\tDumping SNPLoc data");

  my ($tablename1,$tablename2,$row);

  my $version = substr($self->{'dbSNP_version'} ,1);
  if( $version < 137){
    ## table rename for dbSNP - keeping this temporarily for backwards comparibility
    ## 201206 - dbSNP no longer using assembly version in table names
    ##        - leaving this temporarily for backwards compatibility
    # my ($assembly_version) = $self->{'assembly_version'} =~ /^[a-zA-Z]+(\d+)\.*.*$/;
    # override for platypus
    # $assembly_version = 1 if $self->{'dbm'}->dbCore()->species =~ /ornith/i;
    #$assembly_version = 3 if $self->{'dbm'}->dbCore()->species =~ /rerio/i;

    # Look up the versioned SNPContigLoc table name in the dbSNP database
    my $stmt = qq{ SELECT name FROM $self->{'snp_dbname'}..sysobjects WHERE name LIKE '$self->{'dbSNP_version'}\_SNPContigLoc\__' };
    my $sth = $self->{'dbSNP'}->prepare($stmt);
    $sth->execute();
    while($row = $sth->fetchrow_arrayref()) {
      # skip SNPContigLocusId-style tables that also match the pattern
      next if $row->[0] =~/Locus/;
      $tablename1 = $row->[0];
    }

    # Look up the versioned ContigInfo table name
    $stmt = qq{ SELECT name FROM $self->{'snp_dbname'}..sysobjects WHERE name LIKE '$self->{'dbSNP_version'}\_ContigInfo%' };
    my $sth1 = $self->{'dbSNP'}->prepare($stmt);
    $sth1->execute();
    while($row = $sth1->fetchrow_arrayref()) {
      $tablename2 = $row->[0];
    }
  }
  else{
    # From build 137 the table names are simply <version>_SNPContigLoc etc.
    $tablename1 = $self->{'dbSNP_version'} . "_SNPContigLoc" ;
    $tablename2 = $self->{'dbSNP_version'} . "_ContigInfo";
  }
  ##           SNPContigLoc              ContigInfo
  debug(localtime() . "\ttable_name1 is $tablename1 table_name2 is $tablename2");

  #note the contig based cordinate is 0 based, ie. start at 0, lc_ngbr+2, t1.rc_ngbr
  my $stmt = "SELECT ";
  if ($self->{'limit'}) {
    $stmt .= "TOP $self->{'limit'} ";
  }
  $stmt .= qq{ t1.snp_id AS sorting_id, t2.contig_acc, t1.lc_ngbr+2,t1.rc_ngbr, CASE WHEN t1.orientation = 1 THEN -1 ELSE 1 END, t1.aln_quality FROM $tablename1 t1, $tablename2 t2 WHERE t1.ctg_id=t2.ctg_id };
  if ($self->{'limit'}) {
    $stmt .= qq{ ORDER BY sorting_id ASC };
  }
  dumpSQL($self->{'dbSNP'},$stmt);

  debug(localtime() . "\tLoading SNPLoc data");
  create_and_load($self->{'dbVar'}, "tmp_contig_loc", "snp_id i* not_null", "contig * not_null", "start i", "end i", "strand i", "aln_quality d");
  print $logh Progress::location();

  debug(localtime() . "\tCreating genotyped variations");
  #creating the temporary table with the genotyped variations
  $self->{'dbVar'}->do(qq{CREATE TABLE tmp_genotyped_var SELECT DISTINCT variation_id FROM tmp_individual_genotype_single_bp});
  print $logh Progress::location();
  $self->{'dbVar'}->do(qq{CREATE UNIQUE INDEX variation_idx ON tmp_genotyped_var (variation_id)});
  print $logh Progress::location();
  $self->{'dbVar'}->do(qq{INSERT IGNORE INTO tmp_genotyped_var SELECT DISTINCT variation_id FROM individual_genotype_multiple_bp});
  print $logh Progress::location();

  debug(localtime() . "\tCreating tmp_variation_feature data");
  # Join contig locations to variations (via snp_id) and seq_regions (via contig name)
  dumpSQL($self->{'dbVar'},qq{SELECT v.variation_id, ts.seq_region_id, tcl.start, tcl.end, tcl.strand, v.name, v.source_id, v.validation_status, tcl.aln_quality, v.somatic, v.class_attrib_id FROM variation v, tmp_contig_loc tcl, seq_region ts WHERE v.snp_id = tcl.snp_id AND ts.name = tcl.contig});
  create_and_load($self->{'dbVar'},'tmp_variation_feature',"variation_id i* not_null","seq_region_id i", "seq_region_start i", "seq_region_end i", "seq_region_strand i", "variation_name", "source_id i not_null", "validation_status i", "aln_quality d", "somatic i", "class_attrib_id i");
  print $logh Progress::location();

  debug(localtime() . "\tDumping data into variation_feature table");
  # Variations present in tmp_genotyped_var get the 'genotyped' flag
  $self->{'dbVar'}->do(qq{INSERT INTO variation_feature (variation_id, seq_region_id,seq_region_start, seq_region_end, seq_region_strand,variation_name, flags, source_id, validation_status, alignment_quality, somatic, class_attrib_id) SELECT tvf.variation_id, tvf.seq_region_id, tvf.seq_region_start, tvf.seq_region_end, tvf.seq_region_strand,tvf.variation_name,IF(tgv.variation_id,'genotyped',NULL), tvf.source_id, tvf.validation_status, tvf.aln_quality, tvf.somatic, tvf.class_attrib_id FROM tmp_variation_feature tvf LEFT JOIN tmp_genotyped_var tgv ON tvf.variation_id = tgv.variation_id });
  print $logh Progress::location();

  # drop temporary working tables
  $self->{'dbVar'}->do("DROP TABLE tmp_contig_loc");
  $self->{'dbVar'}->do("DROP TABLE tmp_genotyped_var");
  $self->{'dbVar'}->do("DROP TABLE tmp_variation_feature");
}

#The task of getting the genotype will be chunked up and distributed to the farm in small pieces. Each result will be written to a loadfile that in the end
# will be used to populate the tmp_individual.. tables after everything has finished. We will chunk up the results by a) chromosome and b) individual.
# Because the number of genotypes per individual varies _drastically_ (1000 genomes data has lots of
# genotypes, most other data only a few) we need to determine the best way to chunk up the individuals
# in order to get an even distribution of the workload.
#
# Fixes applied in review (import behaviour itself unchanged):
#  - the "Found subind tables" log line relied on "join ',', @subind_tables . "\n"" which, due to
#    operator precedence, logged the array's element count instead of the table names
#  - "open ... || die" never fired: || bound to the (always true) filename, silently ignoring
#    open failures; replaced with low-precedence "or die"
#  - m/OUT_OF_[MEMORY|TIME]/ used a character class where an alternation group was intended;
#    replaced with m/OUT_OF_(?:MEMORY|TIME)/
#  - removed unused locals ($genotype_counts, $sth, @subtables) and dead commented-out code
sub parallelized_individual_genotypes {
  my $self = shift;
  my $load_only = shift;

  debug(localtime() . "\tStarting parallelized_individual_genotypes");
  my $genotype_table = 'tmp_individual_genotype_single_bp';
  my $multi_bp_gty_table = 'individual_genotype_multiple_bp';
  my $jobindex;

  #Get the create statement for tmp_individual_genotype_single_bp from master schema. We will need this to create the individual chromosome tables
  my $ind_gty_stmt = get_create_statement($genotype_table,$self->{'schema_file'});

  my $failure_recovery ; ## set to allow re-running of individual export jobs after memory failure

  #Put the log filehandle in a local variable
  my $logh = $self->{'log'};

  my $task_manager_file = 'individual_genotypes_task_management.txt';

  # Get the SubInd tables that may be split by chromosomes
  my $sql;
  if($self->{source_engine} =~/mssql|sqlserver/ ){
    $sql = qq[ SELECT name FROM $self->{'snp_dbname'}..sysobjects WHERE name LIKE 'SubInd%'SELECT OBJECT_ID('SubInd%')];
  }
  else{
    $sql = qq[show tables like 'SubInd%'];
  }
  my @subind_tables = map {$_->[0]} @{$self->{'dbSNP'}->db_handle()->selectall_arrayref($sql)};
  # FIX: parenthesise join() so the table names (not the array element count) are logged
  print $logh "Found subind tables for genotypes: " . join(",", @subind_tables) . "\n";

  # Prepared statement to find out if a table exists
  my $table2_ext_stmt = qq{ SHOW TABLES LIKE ? };
  my $table_sth = $self->{'dbVar'}->prepare($table2_ext_stmt);

  # Use one common file for alleles and one for samples.
  my $file_prefix = $self->{'tmpdir'} . '/individual_genotypes';
  #Multi-bp genotypes will be written to a separate loadfile
  my $multi_bp_gty_file = $file_prefix . '_multi_bp_gty';
  # Allele file is used for caching the alleles
  my $allele_file = $file_prefix . '_alleles.txt';
  # Sample file is used for caching the samples
  my $sample_file = $file_prefix . '_individual_samples.txt';

  # For each SubInd_ch.. table, determine the best chunking of the individuals. We aim to get
  # $target_rows number of rows to work on for each subtask but this will just be an approximate number.
  my $target_rows = 1e6; ## reduced from 10e6
  my %gty_tables;

  print $logh Progress::location() . "\tDividing the import task into suitable chunks\n";
  foreach my $subind_table (@subind_tables) {
    print $logh Progress::location() . "\t\tProcessing $subind_table\n";

    #The subtable to store the data for this subind table and the loadfile to use. The mapping file is used to temporarily store the subsnp_id -> variation_id mapping
    my $dst_table = "tmp_individual_genotype_single_bp\_$subind_table";
    my $loadfile = $file_prefix . '_' . $dst_table . '.txt';
    my $mapping_file = $file_prefix . '_subsnp_mapping_' . $subind_table . '.txt';
    $gty_tables{$subind_table} = [$dst_table,$loadfile,$mapping_file];

    # If the subtable already exists, warn about this but skip the iteration (perhaps we are resuming after a crash)
    $table_sth->execute($dst_table);
    print $logh Progress::location();
    if (defined($table_sth->fetchrow_arrayref())) {
      warn("Table $dst_table already exists in destination database. Will skip importing from $subind_table");
      delete $gty_tables{$subind_table};
      next;
    }
  }

  ### write task file
  unless($load_only){
    unless(defined $failure_recovery && $failure_recovery == 1){
      ## set up task file unless rerunning
      ($jobindex,$task_manager_file) = $self->create_parallelized_individual_genotypes_task_file(\%gty_tables, $target_rows);
      return unless defined $jobindex; ## there may be nothing to do
    }
    $task_manager_file = "individual_genotypes_task_management.txt";

    # Run the job on the farm
    print $logh Progress::location() . "\tSubmitting the importing of the genotypes to the farm\n";
    my $jobname = 'individual_genotypes';
    my $result = $self->run_on_farm($jobname,$file_prefix,'calculate_gtype',$task_manager_file,1,$jobindex);
    my $jobid = $result->{'jobid'};

    # Check if any subtasks generated errors
    if ((my @error_subtasks = grep($result->{'subtask_details'}{$_}{'generated_error'},keys(%{$result->{'subtask_details'}})))) {
      warn($result->{'message'});
      print $logh Progress::location() . "\t " . $result->{'message'};
      print $logh Progress::location() . "\tThe following subtasks generated errors for job $jobid:\n";
      foreach my $index (@error_subtasks) {
        print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
      }
    }
    # Check the result, if anything failed
    if (!$result->{'success'}) {
      warn($result->{'message'});
      print $logh Progress::location() . "\t " . $result->{'message'};
      print $logh Progress::location() . "\tThe following subtasks failed for unknown reasons for job $jobid:\n";
      foreach my $index (grep($result->{'subtask_details'}{$_}{'fail_reason'} =~ m/UNKNOWN/,keys(%{$result->{'subtask_details'}}))) {
        print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
      }
      print $logh Progress::location() . "\tThe following subtasks failed because they ran out of resources for job $jobid:\n";
      # FIX: alternation group, not character class
      foreach my $index (grep($result->{'subtask_details'}{$_}{'fail_reason'} =~ m/OUT_OF_(?:MEMORY|TIME)/,keys(%{$result->{'subtask_details'}}))) {
        print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
      }
    }
    # If we still have subtasks that fail, this needs to be resolved before proceeding
    die("Some subtasks are failing (see log output). This needs to be resolved before proceeding with the loading of genotypes!") unless ($result->{'success'});
  }

  ## merge seperate export files into table specific load files
  ## db storage of dump file => load file link ( expect loading of larger files to be more efficient)
  my $file_data_ext_sth = $self->{'dbVar'}->prepare(qq[SELECT sub_file, destination_file from tmp_indgeno_file]);
  $file_data_ext_sth->execute()||die "Problem extracting list of files to merge\n";
  my $file_list = $file_data_ext_sth->fetchall_arrayref();

  foreach my $pair (@{$file_list}){
    unless (-e $pair->[0] ){
      warn "No file of name $pair->[0] created\n" unless $pair->[0] =~/multi/; ## many bins have multi files missing - report only missing singles
      next;
    }
    # FIX: "or die" - with "|| die" the check bound to the filename and open errors were silently ignored
    open my $load_file, ">>", $pair->[1] or die "Failed to open $pair->[1] to write: $!\n";
    open my $subfile, "<", $pair->[0] or die "Failed to open $pair->[0] to read: $!\n";
    while(<$subfile>){print $load_file $_ ;}
    close $load_file;
    close $subfile;
  }
  print "Load files written\n";

  # Loop over the subtables and load each of them
  $jobindex = 0; ## reset for loading jobs
  $task_manager_file = $file_prefix . '_task_management_load.txt';
  open(MGMT,'>',$task_manager_file);
  foreach my $subind_table (keys(%gty_tables)) {
    my $dst_table = $gty_tables{$subind_table}->[0];
    my $loadfile = $gty_tables{$subind_table}->[1];

    # Create the sub table
    my $stmt = $ind_gty_stmt;
    $stmt =~ s/$genotype_table/$dst_table/;
    $self->{'dbVar'}->do($stmt);
    print $logh Progress::location();

    # If the loadfile doesn't exist, we can't load anything
    next unless (-e $loadfile);
    $jobindex++;
    print "Writing to load file $jobindex $loadfile $dst_table variation_id subsnp_id individual_id allele_1 allele_2\n";
    print MGMT qq{$jobindex $loadfile $dst_table variation_id subsnp_id individual_id allele_1 allele_2\n};
  }
  # Include the multiple bp genotype file here as well
  if (-e $multi_bp_gty_file) {
    $jobindex++;
    print MGMT qq{$jobindex $multi_bp_gty_file $multi_bp_gty_table variation_id subsnp_id individual_id allele_1 allele_2\n};
  }
  close(MGMT);

  # Run the job on the farm
  print $logh Progress::location() . "\tSubmitting loading of the genotypes to the farm\n";
  my $jobname = 'individual_genotypes_load';
  my $result = $self->run_on_farm($jobname,$file_prefix,'load_data_infile',$task_manager_file,1,$jobindex);
  my $jobid = $result->{'jobid'};

  # Check if any subtasks generated errors
  if ((my @error_subtasks = grep($result->{'subtask_details'}{$_}{'generated_error'},keys(%{$result->{'subtask_details'}})))) {
    warn($result->{'message'});
    print $logh Progress::location() . "\t " . $result->{'message'};
    print $logh Progress::location() . "\tThe following subtasks generated errors for job $jobid:\n";
    foreach my $index (@error_subtasks) {
      print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
    }
  }
  # Check the result, if anything failed
  if (!$result->{'success'}) {
    warn($result->{'message'});
    print $logh Progress::location() . "\t " . $result->{'message'};
    print $logh Progress::location() . "\tThe following subtasks failed for unknown reasons for job $jobid:\n";
    foreach my $index (grep($result->{'subtask_details'}{$_}{'fail_reason'} =~ m/UNKNOWN/,keys(%{$result->{'subtask_details'}}))) {
      print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
    }
    print $logh Progress::location() . "\tThe following subtasks failed because they ran out of resources for job $jobid:\n";
    # FIX: alternation group, not character class
    foreach my $index (grep($result->{'subtask_details'}{$_}{'fail_reason'} =~ m/OUT_OF_(?:MEMORY|TIME)/,keys(%{$result->{'subtask_details'}}))) {
      print $logh qq{\t\t$jobname\[$index\] ($jobid\[$index\])\n};
    }
  }
  # If we still have subtasks that fail, this needs to be resolved before proceeding
  die("Some subtasks are failing (see log output). This needs to be resolved before proceeding with the loading of genotypes!") unless ($result->{'success'});

  #Drop the tmp_individual.. table if it exists
  my $drop_tmp_stmt = qq{ DROP TABLE IF EXISTS $genotype_table };
  $self->{'dbVar'}->do($drop_tmp_stmt);
  print $logh Progress::location();

  # Merge all the subtables into the final table, or if there is just one subtable, rename it to the final table name
  print $logh Progress::location() . "\tMerging the genotype subtables into a big $genotype_table table\n";
  my $merge_subtables = join(",",map {$gty_tables{$_}->[0]} keys(%gty_tables));
  my $stmt;
  if (scalar(keys(%gty_tables)) > 1) {
    #Add an empty table where any subsequent inserts will end up
    my $extra_table = $genotype_table . '_extra';
    $stmt = $ind_gty_stmt;
    $stmt =~ s/$genotype_table/$extra_table/;
    debug(localtime() . "\tDoing $stmt in individual_genotypes");
    $self->{'dbVar'}->do($stmt);
    print $logh Progress::location();
    $merge_subtables .= ",$extra_table";
    $stmt = $ind_gty_stmt;
    $stmt .= " ENGINE=MERGE INSERT_METHOD=LAST UNION=($merge_subtables)";
  }
  else {
    $stmt = qq{ RENAME TABLE $merge_subtables TO $genotype_table };
  }
  $self->{'dbVar'}->do($stmt);
  print $logh Progress::location();

=head no longer needed as all go to the same compressed table

  #Move multiple bp genotypes into the multiple bp table
  $stmt = qq{ SELECT DISTINCT variation_id FROM $multi_bp_gty_table };
  dumpSQL($self->{'dbVar'},$stmt);
  print $logh Progress::location();
  create_and_load($self->{'dbVar'},"tmp_multiple_bp_gty_variations","variation_id i* not_null");
  print $logh Progress::location();
  $stmt = qq{ INSERT INTO $multi_bp_gty_table ( variation_id, subsnp_id, individual_id, allele_1, allele_2 ) SELECT s.variation_id, s.subsnp_id, s.individual_id, s.allele_1, s.allele_2 FROM tmp_multiple_bp_gty_variations t JOIN $genotype_table s ON ( s.variation_id = t.variation_id ) };
  $self->{'dbVar'}->do($stmt);
  print $logh Progress::location();
  $stmt = qq{ DELETE FROM s USING tmp_multiple_bp_gty_variations t JOIN $genotype_table s ON ( s.variation_id = t.variation_id ) };
  $self->{'dbVar'}->do($stmt);
  print $logh Progress::location();
  $stmt = qq{ DROP TABLE tmp_multiple_bp_gty_variations };
  $self->{'dbVar'}->do($stmt);
  print $logh Progress::location();

=cut

}

# Builds the task-management file that chunks the SubInd genotype export into farm jobs.
# Returns ($jobindex, $task_manager_file), or an empty list when there is nothing to do.
sub create_parallelized_individual_genotypes_task_file{
  my $self = shift;
  my $gty_tables = shift;
  my $target_rows = shift;

  my $logh = $self->{'log'};
  print $logh Progress::location();

  my $task_manager_file = 'individual_genotypes_task_management.txt';
  #Multi-bp genotypes will be written to a separate loadfile
  my $multi_bp_gty_file = $self->{'tmpdir'} . '/individual_genotypes_multi_bp_gty';

  # Store the data for the farm submission in a hash with the data as key. This way, the jobs will be "randomized" w.r.t.
# chromosomes and chunks when submitted so all processes shouldn't work on the same tables/files at the same time.
  my %job_data;

  debug(localtime() . "\tCreating parallelized_individual_genotypes task file");

  ## temp table to hold files to create & load
  $self->{'dbVar'}->do(qq[ DROP TABLE IF EXISTS tmp_indgeno_file ]);
  $self->{'dbVar'}->do(qq[ CREATE TABLE tmp_indgeno_file (sub_file varchar(255), destination_file varchar(255) ) ] );
  my $file_ins_sth = $self->{'dbVar'}->prepare(qq[insert into tmp_indgeno_file (sub_file ,destination_file) values (?,?)]);

  # Count genotype rows per submitted individual for each SubInd table
  foreach my $subind_table (keys(%$gty_tables)) {
    my $stmt = qq{ SELECT submitted_ind_id, COUNT(*) FROM $subind_table GROUP BY submitted_ind_id ORDER BY submitted_ind_id ASC };
    my $count_lines_sth = $self->{'dbSNP'}->prepare($stmt);
    $count_lines_sth->execute();
    print $logh Progress::location();
    my ($submitted_ind_id,$genotype_count);
    $count_lines_sth->bind_columns(\$submitted_ind_id,\$genotype_count);

    #Loop over the counts and when a large enough number of genotypes have been counted, split that off as a chunk to be submitted to the farm
    my $total_count = 0;
    my $start_id = -1;
    while ($count_lines_sth->fetch()) {
      $total_count += $genotype_count;
      # Set the start id if it's not specified
      $start_id = $submitted_ind_id if ($start_id < 0);
      #Break off the chunk if it's large enough
      if ($total_count >= $target_rows) {
        # chunk key is "subind_table first_ind_id last_ind_id"
        $job_data{qq{$subind_table $start_id $submitted_ind_id}}++;
        $total_count = 0;
        $start_id = -1;
      }
    }
    #Add the rest of the individual_ids if a new chunk has been started
    $job_data{qq{$subind_table $start_id $submitted_ind_id}}++ if ($start_id >= 0);
    print $logh Progress::location();
  }

  # If there are no genotypes at all to be imported, just return here
  return unless (scalar(keys(%job_data)));

  # Print the job parameters to a file
  my $jobindex = 0;
  open(MGMT,'>',$task_manager_file);
  foreach my $params (keys(%job_data)) {
    $jobindex++;
    my @arr = split(/\s+/,$params);
    print $logh "writing genotype task management file : $params\n";
    # record per-job dump file -> merged load file pairs for the later concatenation step
    $file_ins_sth->execute("$$gty_tables{$arr[0]}->[1]\_$jobindex", $$gty_tables{$arr[0]}->[1])|| die "Problem entering geno file info\n";
    $file_ins_sth->execute("$multi_bp_gty_file\_$jobindex", $multi_bp_gty_file)|| die "Problem entering geno file info\n";
    print MGMT qq{$jobindex $arr[0] $$gty_tables{$arr[0]}->[1]\_$jobindex $multi_bp_gty_file\_$jobindex $arr[1] $arr[2] $$gty_tables{$arr[0]}->[2]\n};
  }
  close(MGMT);

  return ($jobindex,$task_manager_file);
}

#
# loads population genotypes into the population_genotype table
#
sub population_genotypes {
  my $self = shift;

  #Put the log filehandle in a local variable
  my $logh = $self->{'log'};

  ## revert schema to letter rather than number coded genotype
  $self->{'dbVar'}->do( qq[drop table population_genotype]);
  $self->{'dbVar'}->do( qq[CREATE TABLE population_genotype ( population_genotype_id int(10) unsigned NOT NULL AUTO_INCREMENT, variation_id int(10) unsigned NOT NULL, subsnp_id int(15) unsigned DEFAULT NULL, allele_1 varchar(25000) DEFAULT NULL, allele_2 varchar(25000) DEFAULT NULL, frequency float DEFAULT NULL, population_id int(10) unsigned DEFAULT NULL, count int(10) unsigned DEFAULT NULL, PRIMARY KEY (population_genotype_id), KEY variation_idx (variation_id), KEY subsnp_idx (subsnp_id), KEY population_idx (population_id))]);

  my $stmt;
  # Build the reverse-complement allele lookup table unless it already exists
  my $allele_table_ref = $self->{'dbVar'}->db_handle->selectall_arrayref(qq{show tables like "tmp_rev_allele"});
  my $allele_table = $allele_table_ref->[0][0];
  if (! $allele_table) {
    debug(localtime() . "\tDumping allele data");
    $stmt = qq{ SELECT a1.allele_id, a1.allele, a2.allele FROM $self->{'dbSNP_share_db'}.Allele a1, $self->{'dbSNP_share_db'}.Allele a2 WHERE a1.rev_allele_id = a2.allele_id };
    dumpSQL($self->{'dbSNP'},$stmt);
    create_and_load($self->{'dbVar'}, "tmp_rev_allele", "allele_id i* not_null","allele * not_null", "rev_allele not_null");
    print $logh Progress::location();
  }

  debug(localtime() .
"\tDumping GtyFreqBySsPop and UniGty data");
  # Dump per-population genotype frequencies, decoding unigty ids to allele strings
  $stmt = "SELECT ";
  if ($self->{'limit'}) {
    $stmt .= "TOP $self->{'limit'} ";
  }
  $stmt .= qq{ gtfsp.subsnp_id AS sorting_id, gtfsp.pop_id, gtfsp.freq, ROUND(gtfsp.cnt,0), a1.allele, a2.allele FROM GtyFreqBySsPop gtfsp, $self->{'dbSNP_share_db'}.UniGty ug, $self->{'dbSNP_share_db'}.Allele a1, $self->{'dbSNP_share_db'}.Allele a2 WHERE gtfsp.unigty_id = ug.unigty_id AND ug.allele_id_1 = a1.allele_id AND ug.allele_id_2 = a2.allele_id };
  if ($self->{'limit'}) {
    $stmt .= qq{ ORDER BY sorting_id ASC };
  }
  dumpSQL($self->{'dbSNP'},$stmt);

  debug(localtime() . "\tloading population_genotype data");
  create_and_load($self->{'dbVar'}, "tmp_pop_gty", 'subsnp_id i* not_null', 'pop_id i* not_null', 'freq','count','allele_1', 'allele_2');
  print $logh Progress::location();

  # unique index used with INSERT IGNORE below to remove duplicates
  $self->{'dbVar'}->do(qq{CREATE UNIQUE INDEX pop_genotype_idx ON population_genotype(variation_id,subsnp_id,frequency,population_id,allele_1(5),allele_2(5))});

  ## human is too big to do in one go - breaking up crudely
  my $get_max_sth = $self->{'dbVar'}->prepare(qq[select max(variation_synonym_id) from variation_synonym]);
  $get_max_sth->execute()||die;
  my $max = $get_max_sth->fetchall_arrayref();
  # FIX: compare the fetched value, not the array reference itself (a reference compares
  # numerically as its address, which sent every dataset down the batched path)
  if(defined $max->[0]->[0] && $max->[0]->[0] > 2000000){
    pop_geno_batched($self, $max);
  }
  else{
    ## This is done to remove duplicates
    # subsnps flipped relative to the refsnp need their alleles reverse-complemented
    $self->{'dbVar'}->do(qq{INSERT IGNORE INTO population_genotype (variation_id,subsnp_id,allele_1, allele_2, frequency, population_id, count) SELECT vs.variation_id,vs.subsnp_id,tra1.rev_allele as allele_1,tra2.rev_allele as allele_2,tg.freq,p.population_id, tg.count FROM variation_synonym vs, tmp_pop_gty tg,tmp_rev_allele tra1,tmp_rev_allele tra2, population p WHERE vs.subsnp_id = tg.subsnp_id AND tg.allele_1 = tra1.allele AND tg.allele_2 = tra2.allele AND vs.substrand_reversed_flag = 1 AND p.pop_id = tg.pop_id});
    print $logh Progress::location();
    $self->{'dbVar'}->do(qq{INSERT IGNORE INTO population_genotype (variation_id,subsnp_id,allele_1, allele_2, frequency, population_id, count) SELECT vs.variation_id,vs.subsnp_id,tg.allele_1,tg.allele_2,tg.freq,p.population_id, tg.count FROM variation_synonym vs, tmp_pop_gty tg,population p WHERE vs.subsnp_id = tg.subsnp_id AND vs.substrand_reversed_flag = 0 AND p.pop_id = tg.pop_id });
  }
  print $logh Progress::location();

  $self->{'dbVar'}->do(qq{DROP INDEX pop_genotype_idx ON population_genotype});
  $self->{'dbVar'}->do("DROP TABLE tmp_pop_gty");
  debug(localtime() . "\tFinished population_genotype");
}

## crude break up of population_genotype import for larger data sets
# $max is the arrayref returned by fetchall_arrayref above; maximum id is $max->[0]->[0]
sub pop_geno_batched{
  my $self = shift;
  my $max = shift;

  my $logh = $self->{'log'};
  print $logh Progress::location();

  my $batch = 1000000;
  my $start = 1;
  debug(localtime() . "\tStarting binned pop geno");

  # FIX: "<=" so the final id is always covered (with "<" a maximum id landing exactly
  # on a batch boundary was never processed)
  while( $start <= $max->[0]->[0] ){
    my $end = $start + $batch ;
    print $logh Progress::location();
    ## This is done to remove duplicates
    $self->{'dbVar'}->do(qq{INSERT IGNORE INTO population_genotype (variation_id,subsnp_id,allele_1, allele_2, frequency, population_id, count) SELECT vs.variation_id,vs.subsnp_id,tra1.rev_allele as allele_1,tra2.rev_allele as allele_2,tg.freq,p.population_id, tg.count FROM variation_synonym vs, tmp_pop_gty tg,tmp_rev_allele tra1,tmp_rev_allele tra2, population p WHERE vs.subsnp_id = tg.subsnp_id AND tg.allele_1 = tra1.allele AND tg.allele_2 = tra2.allele AND vs.substrand_reversed_flag = 1 AND p.pop_id = tg.pop_id AND variation_synonym_id between $start and $end });
    $self->{'dbVar'}->do(qq{INSERT IGNORE INTO population_genotype (variation_id,subsnp_id,allele_1, allele_2, frequency, population_id, count) SELECT vs.variation_id,vs.subsnp_id,tg.allele_1,tg.allele_2,tg.freq,p.population_id, tg.count FROM variation_synonym vs, tmp_pop_gty tg,population p WHERE vs.subsnp_id = tg.subsnp_id AND vs.substrand_reversed_flag = 0 AND p.pop_id = tg.pop_id AND variation_synonym_id between $start and $end });
    $start = $end + 1;
  }
  print $logh "Completed batched population_genotype import\n";
  print $logh Progress::location();
debug(localtime() . "\tFinished binned pop geno");
}

## Extract old rs ids to add to synonym table
sub archive_rs_synonyms {
  my $self = shift;

  return unless $self->table_exists_and_populated('RsMergeArch');

  debug(localtime() . "\tLooking for old rs");

  ## Add source description only if old refSNP names found
  $self->source_table("Archive dbSNP");

  my $logh = $self->{'log'};
  print $logh Progress::location();

  # rs ids are stored numerically in dbSNP; prefix with 'rs' during export
  my $concat_sql;
  if($self->{source_engine} =~/mssql/ ){
    $concat_sql = qq[ SELECT 'rs' + CAST(rsHigh as VARCHAR(20)), 'rs' + CAST(rsCurrent as VARCHAR(20)) , orien2Current, 'rs' + CAST(rsLow as VARCHAR(20)) FROM RsMergeArch ] ;
  }
  else{
    $concat_sql = qq[ SELECT CONCAT('rs', CAST(rsHigh as CHAR) ), CONCAT('rs', CAST(rsCurrent as CHAR)) , orien2Current, CONCAT('rs', CAST(rsLow as CHAR) ) FROM RsMergeArch ];
  }

  # export old rs id from dbSNP
  dumpSQL($self->{'dbSNP'}, $concat_sql ) ;

  #loading it to variation database in temp rshist table
  create_and_load( $self->{'dbVar'}, "rsHist", "rsHigh * not_null", "rsCurrent * not_null","orien2Current not_null", "rsLow") ;
  print $logh Progress::location();

  debug(localtime() . "\tAdding old rs as synonyms");

  my $get_max_sth = $self->{'dbVar'}->prepare(qq[select min(snp_id), max(snp_id) from variation ]);
  $get_max_sth->execute()||die;
  my $range = $get_max_sth->fetchall_arrayref();

  my $batch_size = 100000;
  my $start = $range->[0]->[0];
  my $max = $range->[0]->[1];

  # append to synonym table with database 'Archive dbSNP' - slow query binned
  # FIX: "<=" so the final snp_id is always covered; with "<" a single-variation
  # database (min == max) archived nothing and a max landing on a bin boundary was
  # skipped. The defined guard keeps an empty variation table a no-op.
  while ( defined($start) && defined($max) && $start <= $max ){
    my $end = $start + $batch_size;
    $self->{'dbVar'}->do(qq{INSERT INTO variation_synonym (variation_id, source_id, name) (SELECT v.variation_id, 2, r.rsHigh FROM variation v, rsHist r WHERE v.name = r.rsCurrent AND v.snp_id between $start and $end) });
    ## take from rsLow if rsCurrent has no id
    if($self->{source_engine} =~/mssql/ ){
      $self->{'dbVar'}->do(qq{INSERT INTO variation_synonym (variation_id, source_id, name) (SELECT v.variation_id, 2, r.rsHigh FROM variation v, rsHist r WHERE v.name = r.rsLow AND r.rsCurrent = 'rs' AND v.snp_id between $start and $end) });
    }
    else{
      ## for missing data in mysql mirror
      $self->{'dbVar'}->do(qq{INSERT INTO variation_synonym (variation_id, source_id, name) (SELECT v.variation_id, 2, r.rsHigh FROM variation v, rsHist r WHERE v.name = r.rsLow AND r.rsCurrent is null AND v.snp_id between $start and $end) });
    }
    $start = $end +1;
  }

  ## clean up temp table
  $self->{'dbVar'}->do(qq[drop table rsHist]);

  debug(localtime() . "\tArchive rs synonyms done");
}

# cleans up some of the necessary temporary data structures after the
# import is complete
sub cleanup {
  my $self = shift;

  #Put the log filehandle in a local variable
  my $logh = $self->{'log'};

  debug(localtime() .
"\tIn cleanup...");

  #remove populations that are not present in the Individual or Allele table for the specie
  $self->{'dbVar'}->do('CREATE TABLE tmp_pop (population_id int PRIMARY KEY)'); #create a temporary table with unique populations
  print $logh Progress::location();
  $self->{'dbVar'}->do('INSERT IGNORE INTO tmp_pop SELECT distinct(population_id) FROM allele'); #add the populations from the alleles
  print $logh Progress::location();
  $self->{'dbVar'}->do('INSERT IGNORE INTO tmp_pop SELECT distinct(population_id) FROM population_genotype'); #add the populations from the population_genotype
  print $logh Progress::location();
  $self->{'dbVar'}->do('INSERT IGNORE INTO tmp_pop SELECT population_id FROM individual_population'); #add the populations from the individuals
  print $logh Progress::location();
  $self->{'dbVar'}->do(qq{INSERT IGNORE INTO tmp_pop SELECT super_population_id FROM population_structure ps, tmp_pop tp WHERE tp.population_id = ps.sub_population_id}); #add the populations from the super-populations
  print $logh Progress::location();

  ### delete population entries without alleles, population_genotypes or individuals
  my $sql = qq{DELETE FROM p USING population p LEFT JOIN tmp_pop tp ON p.population_id = tp.population_id WHERE tp.population_id is null };
  ### delete sample_synonym entries without alleles, population_genotypes or individuals
  my $sql_2 = qq{DELETE FROM ps USING population_synonym ps LEFT JOIN tmp_pop tp ON ps.population_id = tp.population_id WHERE tp.population_id is null };

  $self->{'dbVar'}->do($sql); #delete from population
  print $logh Progress::location();
  # populations not present
  $self->{'dbVar'}->do($sql_2); #delete from population_synonym
  print $logh Progress::location();

  $self->{'dbVar'}->do('DROP TABLE tmp_pop'); #and finally remove the temporary table
  print $logh Progress::location();

  # drop import-only working columns
  $self->{'dbVar'}->do('ALTER TABLE variation_synonym DROP COLUMN substrand_reversed_flag');
  print $logh Progress::location();
  $self->{'dbVar'}->do('ALTER TABLE population DROP COLUMN pop_class_id, DROP COLUMN pop_id');
  print $logh Progress::location();

  ## link between ensembl sample_id and dbSNP submitted_ind_id
  $self->{'dbVar'}->do('DROP TABLE tmp_ind');
  ## list of genotype files to create, check and load
  $self->{'dbVar'}->do('DROP TABLE tmp_indgeno_file');
  ## subsnp strand rs info from synonym creation
  $self->{'dbVar'}->do('DROP TABLE tmp_var_allele');
}

# numeric sort comparator for use with sort()
sub sort_num{
  return $a<=>$b;
}

1;
dbolser-ebi/ensembl-variation
scripts/import/dbSNP/GenericContig.pm
Perl
apache-2.0
106,452
package Paws::WorkDocs::DeactivateUser;
  use Moose;
  # Request parameters: the auth token travels in the "Authentication"
  # HTTP header; the user id is interpolated into the request URI.
  has AuthenticationToken => (is => 'ro', isa => 'Str', traits => ['ParamInHeader'], header_name => 'Authentication');
  has UserId => (is => 'ro', isa => 'Str', traits => ['ParamInURI'], uri_name => 'UserId', required => 1);

  use MooseX::ClassAttribute;

  # Wire-level call metadata consumed by the Paws request builder:
  # an HTTP DELETE against the user's activation sub-resource, with no
  # structured result payload (plain Paws::API::Response).
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'DeactivateUser');
  class_has _api_uri  => (isa => 'Str', is => 'ro', default => '/api/v1/users/{UserId}/activation');
  class_has _api_method  => (isa => 'Str', is => 'ro', default => 'DELETE');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::API::Response');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;

### main pod documentation begin ###

=head1 NAME

Paws::WorkDocs::DeactivateUser - Arguments for method DeactivateUser on Paws::WorkDocs

=head1 DESCRIPTION

This class represents the parameters used for calling the method DeactivateUser on the 
Amazon WorkDocs service. Use the attributes of this class
as arguments to method DeactivateUser.

You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DeactivateUser.

As an example:

  $service_obj->DeactivateUser(Att1 => $value1, Att2 => $value2, ...);

Values for attributes that are native types (Int, String, Float, etc) can passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.

=head1 ATTRIBUTES


=head2 AuthenticationToken => Str

Amazon WorkDocs authentication token. This field should not be set when
using administrative API actions, as in accessing the API using AWS
credentials.



=head2 B<REQUIRED> UserId => Str

The ID of the user.




=head1 SEE ALSO

This class forms part of L<Paws>, documenting arguments for method DeactivateUser in L<Paws::WorkDocs>

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
ioanrogers/aws-sdk-perl
auto-lib/Paws/WorkDocs/DeactivateUser.pm
Perl
apache-2.0
2,093
:- module(dummy2,_,[profiler,expander]). %:- module(dummy2,_,[]). :- use_module(library(aggregates)). choice1(x) :- display(choice1_x),nl. choice1(y) :- display(choice1_y),nl. choice1(z) :- display(choice1_z),nl. choice1(k) :- display(choice1_k),nl. choice1(l) :- display(choice1_l),nl. choice1(m) :- display(choice1_m),nl. choice2(_,x) :- display(choice2_x),nl. choice2(_,y) :- display(choice2_y),nl. choice2(_,z) :- display(choice2_z),nl. choice2(_,k) :- display(choice2_k),nl. choice2(_,l) :- display(choice2_l),nl. choice2(_,m) :- display(choice2_m),nl. list1 :- choice1(A), display(A),nl, fail. list1. list2 :- choice2(_,A), display(A),nl, fail. list2. main0 :- list1, list2, choice1(l), choice2(_,l). % findall(A,choice1(A),L), % display(L). :- use_module(library('profiler/profiler_utils')). main :- profile_start,profile_reset,main0,profile_stop,profile_dump.
leuschel/ecce
www/CiaoDE/ciao/contrib/profiler/examples/dummy2.pl
Perl
apache-2.0
886
package OpenXPKI::Server::Workflow::Validator::CertIdentifierExists;

use strict;
use warnings;
use Moose;
use Workflow::Exception qw( validation_error );
use OpenXPKI::Debug;
use Data::Dumper;
use OpenXPKI::Server::Context qw( CTX );

extends 'OpenXPKI::Server::Workflow::Validator';

# Default argument mapping used when the workflow definition does not
# supply an explicit "arg" list.
sub _preset_args {
    return [ qw(cert_identifier) ];
}

# Check that the given certificate identifier exists and, depending on
# the configured validator parameters, that it is an entity certificate,
# lives in the expected realm, or is registered in an alias group / as a
# token. Throws a validation_error (I18N key) on the first failed check;
# returns 1 on success.
sub _validate {
    my ( $self, $wf, $cert_identifier ) = @_;

    ##! 1: 'start'

    ##! 16: 'check identifier' . $cert_identifier
    # req_key is only set for entity certificates, which the
    # entity_only check below relies on.
    my $cert = CTX('dbi')->select_one(
        from => 'certificate',
        columns => [ 'pki_realm', 'req_key' ],
        where => { identifier => $cert_identifier },
    );

    if (!$cert) {
        ##! 16: 'unknown identifier ' . $cert_identifier
        validation_error("I18N_OPENXPKI_UI_VALIDATOR_CERT_IDENTIFIER_EXISTS_NO_SUCH_ID");
    }

    # Fall back to the session realm unless an explicit realm (or the
    # wildcard '_any') was configured on the validator.
    my $pki_realm = $self->param('pki_realm') || CTX('session')->data->pki_realm;

    if ($self->param('entity_only') && !$cert->{req_key}) {
        validation_error("I18N_OPENXPKI_UI_VALIDATOR_CERT_IDENTIFIER_EXISTS_NOT_AN_ENTITY");
    }

    my $group = $self->param('in_alias_group');
    # is_token maps a token type to its alias group via crypto.type.<name>.
    if ($self->param('is_token')) {
        $group = CTX('config')->get(['crypto','type', $self->param('is_token')]);
    }

    if ($group) {
        # Alias lookups are always scoped to the current session realm;
        # any configured pki_realm parameter is deliberately ignored here
        # (see POD).
        my $alias = CTX('dbi')->select_one(
            from => 'aliases',
            columns => [ 'alias' ],
            where => {
                identifier => $cert_identifier,
                group_id => $group,
                pki_realm => CTX('session')->data->pki_realm,
            },
        );
        validation_error("I18N_OPENXPKI_UI_VALIDATOR_CERT_IDENTIFIER_IS_NOT_IN_GROUP") unless($alias);
        CTX('log')->application()->trace("Found alias " . $alias->{alias} );

    } elsif (($cert->{pki_realm} ne $pki_realm) && ($pki_realm ne '_any')) {
        # No alias check requested: enforce the realm match instead.
        validation_error("I18N_OPENXPKI_UI_VALIDATOR_CERT_IDENTIFIER_EXISTS_NOT_IN_REALM");
    }

    CTX('log')->application()->trace("Found certificate, hash is " . Dumper $cert);

    return 1;
}

1;

__END__

=head1 NAME

OpenXPKI::Server::Workflow::Validator::CertIdentifierExists

=head1 SYNOPSIS

    class: OpenXPKI::Server::Workflow::Validator::CertIdentifierExists
    param:
       entity_only: 1
       pki_realm: _any
    arg:
      - $cert_identifier

=head1 DESCRIPTION

This validator checks whether a given certificate identifier exists. Based
on the parameters it can check weather the certificate is in a given realm
and if it is an entity certificate. Both parameters are optional.
Note that there is no check on the validity of the certificate.

To check if the certificate identifier is an register alias, you can set
I<is_token> or I<in_alias_group>. This requires that an entry in the alias
table exists with the given properties. Note that those flags expect the
alias to be registered in the current session realm and do not check the
realm of the certificate itself, any value given to I<pki_realm> is ignored.

=head2 Argument

=over

=item $cert_identifier

The certificate identifier

=back

=head2 Parameter

=over

=item pki_realm

Can be the name of a realm or the special word I<_any>. If not given,
default ist to check in the session realm only!

=item entity_only

If set, the certificate must be an entity certificate.

=item is_token

Expects the name of a token type as defined in crypto.type and checks if
the certificate has an registered alias matching this token type in the
current realm.

=item in_alias_group

Expects the name of an alias group and checks if the certificate has an
registered alias in this group.

=back
oliwel/openxpki
core/server/OpenXPKI/Server/Workflow/Validator/CertIdentifierExists.pm
Perl
apache-2.0
3,597
package Google::Ads::AdWords::v201809::StringAttribute; use strict; use warnings; __PACKAGE__->_set_element_form_qualified(1); sub get_xmlns { 'https://adwords.google.com/api/adwords/o/v201809' }; our $XML_ATTRIBUTE_CLASS; undef $XML_ATTRIBUTE_CLASS; sub __get_attr_class { return $XML_ATTRIBUTE_CLASS; } use base qw(Google::Ads::AdWords::v201809::Attribute); # Variety: sequence use Class::Std::Fast::Storable constructor => 'none'; use base qw(Google::Ads::SOAP::Typelib::ComplexType); { # BLOCK to scope variables my %Attribute__Type_of :ATTR(:get<Attribute__Type>); my %value_of :ATTR(:get<value>); __PACKAGE__->_factory( [ qw( Attribute__Type value ) ], { 'Attribute__Type' => \%Attribute__Type_of, 'value' => \%value_of, }, { 'Attribute__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string', 'value' => 'SOAP::WSDL::XSD::Typelib::Builtin::string', }, { 'Attribute__Type' => 'Attribute.Type', 'value' => 'value', } ); } # end BLOCK 1; =pod =head1 NAME Google::Ads::AdWords::v201809::StringAttribute =head1 DESCRIPTION Perl data type class for the XML Schema defined complexType StringAttribute from the namespace https://adwords.google.com/api/adwords/o/v201809. {@link Attribute} type that contains a string value. =head2 PROPERTIES The following properties may be accessed using get_PROPERTY / set_PROPERTY methods: =over =item * value =back =head1 METHODS =head2 new Constructor. The following data structure may be passed to new(): =head1 AUTHOR Generated by SOAP::WSDL =cut
googleads/googleads-perl-lib
lib/Google/Ads/AdWords/v201809/StringAttribute.pm
Perl
apache-2.0
1,635
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. use strict; use warnings; use AI::MXNet; use AI::MXNet::Gluon::Contrib::NN::BasicLayers; our $VERSION = '1.3'; 1;
rahul003/mxnet
perl-package/AI-MXNet-Gluon-Contrib/lib/AI/MXNet/Gluon/Contrib.pm
Perl
apache-2.0
900
package Sprawl::App::Command; use base 'App::Cmd::Command'; use strict; use warnings; use YAML::XS qw'LoadFile'; my $config_file = $ENV{HOME} . "/.sprawl.yml"; sub get_config { my $config = LoadFile($config_file); return $config; } sub assert_args_count { my ($self, $n, $args) = @_; if ($n == 0) { $self->usage_error('no arguments allowed') unless @$args == 0; } else { $self->usage_error('wrong number of args') unless @$args == $n; } } 1;
bdha/sprawl
lib/Sprawl/App/Command.pm
Perl
bsd-3-clause
469
# Copyright 2016 Dominique Revuz <dr@univ-mlv.fr>
author=Dominique Revuz
title=un exemple de fonction
text==
Veuillez écrire une fonction "binop" qui retourne le produit de ses deux paramètres.
==
code==
# Veuillez saisir votre code ici
==
# le grader suivant vérifie que la fonction binop est définie dans le code fourni par l'étudiant
# si le code lève une exception (de compilation par exemple) ou que la fonction ne
# retourne pas la bonne valeur c'est un échec de l'exercice
grader=@hiddenimport.py
plgitlogin/demo
python/exemples/exemple2.pl
Perl
cc0-1.0
517
package Google::Ads::AdWords::v201409::IdeaTextFilterSearchParameter;

# NOTE: SOAP::WSDL-generated complexType wrapper; the _factory call
# below wires attribute accessors, so its structure must not change.
use strict;
use warnings;


__PACKAGE__->_set_element_form_qualified(1);

sub get_xmlns { 'https://adwords.google.com/api/adwords/o/v201409' };

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201409::SearchParameter);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out object storage: one hash per XML element, keyed by object id.
my %SearchParameter__Type_of :ATTR(:get<SearchParameter__Type>);
my %included_of :ATTR(:get<included>);
my %excluded_of :ATTR(:get<excluded>);

# Arguments: element order, storage hashes, element types, XML names.
__PACKAGE__->_factory(
    [ qw(        SearchParameter__Type
        included
        excluded

    ) ],
    {
        'SearchParameter__Type' => \%SearchParameter__Type_of,
        'included' => \%included_of,
        'excluded' => \%excluded_of,
    },
    {
        'SearchParameter__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'included' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'excluded' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
    },
    {

        'SearchParameter__Type' => 'SearchParameter.Type',
        'included' => 'included',
        'excluded' => 'excluded',
    }
);

} # end BLOCK







1;


=pod

=head1 NAME

Google::Ads::AdWords::v201409::IdeaTextFilterSearchParameter

=head1 DESCRIPTION

Perl data type class for the XML Schema defined complexType
IdeaTextFilterSearchParameter from the namespace https://adwords.google.com/api/adwords/o/v201409.

A {@link SearchParameter} for {@code KEYWORD} {@link IdeaType}s that specifies a collection of strings by which the results should be constrained. This guarantees that each idea in the result will match at least one of the {@code included} values. For this {@link SearchParameter}, excluded items will always take priority over included ones. This can handle a maximum of 200 (included + excluded) elements. <p>This element is supported by following {@link IdeaType}s: KEYWORD. <p>This element is supported by following {@link RequestType}s: IDEAS, STATS. 




=head2 PROPERTIES

The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:

=over

=item * included


=item * excluded


=back


=head1 METHODS

=head2 new

Constructor. The following data structure may be passed to new():




=head1 AUTHOR

Generated by SOAP::WSDL

=cut
gitpan/GOOGLE-ADWORDS-PERL-CLIENT
lib/Google/Ads/AdWords/v201409/IdeaTextFilterSearchParameter.pm
Perl
apache-2.0
2,474
package Fixtures::Type;
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use Moose;
extends 'DBIx::Class::EasyFixture';
use namespace::autoclean;
# NOTE(review): sha1_hex does not appear to be used anywhere in this
# file - confirm against sibling fixtures before removing the import.
use Digest::SHA1 qw(sha1_hex);

# Fixture rows for the "type" table, keyed by type name.
#
# FIX: the original hash declared HTTP_LIVE twice (ids 11 and 12) and
# DNS_LIVE_NATNL twice (ids 26 and 27). Duplicate keys in a Perl hash
# literal are silently discarded - only the later entry survives - so
# the shadowed entries (ids 11 and 26) were dead code. They have been
# removed; the surviving entries (ids 12 and 27) are unchanged, which
# preserves the runtime contents of %definition_for exactly.
my %definition_for = (
	EDGE => {
		new   => 'Type',
		using => {
			id           => 1,
			name         => 'EDGE',
			description  => 'Edge Cache',
			use_in_table => 'server',
		},
	},
	MID => {
		new   => 'Type',
		using => {
			id           => 2,
			name         => 'MID',
			description  => 'Mid Tier Cache',
			use_in_table => 'server',
		},
	},
	ORG => {
		new   => 'Type',
		using => {
			id           => 3,
			name         => 'ORG',
			description  => 'Origin',
			use_in_table => 'server',
		},
	},
	CCR => {
		new   => 'Type',
		using => {
			id           => 4,
			name         => 'CCR',
			description  => 'Kabletown Content Router',
			use_in_table => 'server',
		},
	},
	EDGE_LOC => {
		new   => 'Type',
		using => {
			id           => 5,
			name         => 'EDGE_LOC',
			description  => 'Edge Cachegroup',
			use_in_table => 'cachegroup',
		},
	},
	MID_LOC => {
		new   => 'Type',
		using => {
			id           => 6,
			name         => 'MID_LOC',
			description  => 'Mid Cachegroup',
			# NOTE(review): description says "Mid Cachegroup" but
			# use_in_table is 'deliveryservice' (EDGE_LOC uses
			# 'cachegroup'); value kept as-is - confirm intent.
			use_in_table => 'deliveryservice',
		},
	},
	DNS => {
		new   => 'Type',
		using => {
			id           => 7,
			name         => 'DNS',
			description  => 'DNS Content Routing',
			use_in_table => 'deliveryservice',
		},
	},
	OTHER_CDN => {
		new   => 'Type',
		using => {
			id           => 8,
			name         => 'OTHER_CDN',
			description  => 'Other CDN (CDS-IS, Akamai, etc)',
			use_in_table => 'server',
		},
	},
	HTTP_NO_CACHE => {
		new   => 'Type',
		using => {
			id           => 9,
			name         => 'HTTP_NO_CACHE',
			description  => 'HTTP Content Routing, no caching',
			use_in_table => 'deliveryservice',
		},
	},
	# Surviving HTTP_LIVE entry (id 12); the shadowed id-11 duplicate
	# was removed (see note above).
	HTTP_LIVE => {
		new   => 'Type',
		using => {
			id           => 12,
			name         => 'HTTP_LIVE',
			description  => 'HTTP Content routing cache in RAM ',
			use_in_table => 'deliveryservice',
		},
	},
	RASCAL => {
		new   => 'Type',
		using => {
			id           => 14,
			name         => 'RASCAL',
			description  => 'Rascal health polling & reporting',
			use_in_table => 'server',
		},
	},
	HOST_REGEXP => {
		new   => 'Type',
		using => {
			id           => 19,
			name         => 'HOST_REGEXP',
			description  => 'Host header regular expression',
			use_in_table => 'regex',
		},
	},
	PATH_REGEXP => {
		new   => 'Type',
		using => {
			id           => 20,
			name         => 'PATH_REGEXP',
			description  => 'Path regular expression',
			use_in_table => 'regex',
		},
	},
	A_RECORD => {
		new   => 'Type',
		using => {
			id           => 21,
			name         => 'A_RECORD',
			description  => 'Static DNS A entry',
			use_in_table => 'staticdnsentry',
		}
	},
	AAAA_RECORD => {
		new   => 'Type',
		using => {
			id           => 22,
			name         => 'AAAA_RECORD',
			description  => 'Static DNS AAAA entry',
			use_in_table => 'staticdnsentry',
		}
	},
	CNAME_RECORD => {
		new   => 'Type',
		using => {
			id           => 23,
			name         => 'CNAME_RECORD',
			description  => 'Static DNS CNAME entry',
			use_in_table => 'staticdnsentry',
		}
	},
	HTTP_LIVE_NATNL => {
		new   => 'Type',
		using => {
			id           => 24,
			name         => 'HTTP_LIVE_NATNL',
			description  => 'HTTP Content routing, RAM cache, National',
			use_in_table => 'deliveryservice',
		}
	},
	REDIS => {
		new   => 'Type',
		using => {
			id           => 25,
			name         => 'REDIS',
			description  => 'Redis stats gateway',
			use_in_table => 'server',
		}
	},
	# Surviving DNS_LIVE_NATNL entry (id 27); the shadowed id-26
	# duplicate was removed (see note above).
	DNS_LIVE_NATNL => {
		new   => 'Type',
		using => {
			id           => 27,
			name         => 'DNS_LIVE_NATNL',
			description  => 'DNS Content routing, RAM cache, National',
			use_in_table => 'deliveryservice',
		}
	},
	LOCAL => {
		new   => 'Type',
		using => {
			id           => 28,
			name         => 'LOCAL',
			description  => 'Local User',
			use_in_table => 'tm_user',
		}
	},
	ACTIVE_DIRECTORY => {
		new   => 'Type',
		using => {
			id           => 29,
			name         => 'ACTIVE_DIRECTORY',
			description  => 'Active Directory User',
			use_in_table => 'tm_user',
		}
	},
	TOOLS_SERVER => {
		new   => 'Type',
		using => {
			id           => 30,
			name         => 'TOOLS_SERVER',
			description  => 'Ops hosts for management',
			use_in_table => 'server',
		}
	},
	RIAK => {
		new   => 'Type',
		using => {
			id           => 31,
			name         => 'RIAK',
			description  => 'riak type',
			use_in_table => 'server',
		}
	},
	INFLUXDB => {
		new   => 'Type',
		using => {
			id           => 32,
			name         => 'INFLUXDB',
			description  => 'influxdb type',
			use_in_table => 'server',
		}
	},
	RESOLVE4 => {
		new   => 'Type',
		using => {
			id           => 33,
			name         => 'RESOLVE4',
			description  => 'federation type resolve4',
			use_in_table => 'federation',
		}
	},
	RESOLVE6 => {
		new   => 'Type',
		using => {
			id           => 34,
			name         => 'RESOLVE6',
			description  => 'federation type resolve6',
			use_in_table => 'federation',
		},
	},
	ANY_MAP => {
		new   => 'Type',
		using => {
			id           => 35,
			name         => 'ANY_MAP',
			description  => 'any_map type',
			use_in_table => 'deliveryservice',
		}
	},
);

# get_definition($name)
# Returns the fixture definition hashref for the given type name, or
# undef when no such fixture exists.
sub get_definition {
	my ( $self, $name ) = @_;
	return $definition_for{$name};
}

# all_fixture_names()
# Returns the list of defined fixture names (unordered hash keys).
sub all_fixture_names {
	return keys %definition_for;
}

__PACKAGE__->meta->make_immutable;

1;
dewrich/traffic_control
traffic_ops/app/lib/Fixtures/Type.pm
Perl
apache-2.0
6,425
package DateTime::Format::Builder::Parser::Strptime;

=head1 NAME

DateTime::Format::Builder::Parser::Strptime - strptime based date parsing

=head1 SYNOPSIS

   my $parser = DateTime::Format::Builder->create_parser(
	strptime => '%e/%b/%Y:%H:%M:%S %z',
   );

=head1 SPECIFICATION

=over 4

=item *

B<strptime> takes as its argument a strptime string.
See L<DateTime::Format::Strptime> for more information
on valid patterns.

=back

=cut

use strict;
use vars qw( $VERSION @ISA );
use Params::Validate qw( validate SCALAR HASHREF );

$VERSION = '0.77';
use DateTime::Format::Builder::Parser::generic;
@ISA = qw( DateTime::Format::Builder::Parser::generic );

# The 'strptime' parameter is either a raw pattern string or a hashref
# of options passed through to DateTime::Format::Strptime->new.
__PACKAGE__->valid_params(
    strptime	=> {
	type	=> SCALAR|HASHREF, # straight pattern or options to DTF::Strptime
    },
);

# Builds a parser coderef: wraps a DateTime::Format::Strptime instance
# (stored on $self->{strptime}) inside the generic parser machinery,
# forwarding any on_match/on_fail/preprocess/postprocess callbacks.
sub create_parser
{
    my ($self, %args) = @_;

    # Arguments to DTF::Strptime
    my $pattern = $args{strptime};

    # Create our strptime parser
    require DateTime::Format::Strptime;
    my $strptime = DateTime::Format::Strptime->new(
	( ref $pattern ? %$pattern : ( pattern => $pattern ) ),
    );
    # May be called as a class method: promote to an instance so the
    # strptime object has somewhere to live.
    unless (ref $self)
    {
	$self = $self->new( %args );
    }
    $self->{strptime} = $strptime;

    # Create our parser
    return $self->generic_parser(
	( map { exists $args{$_} ? ( $_ => $args{$_} ) : () } qw(
	    on_match on_fail preprocess postprocess
	    ) ),
	label => $args{label},
    );
}

# Attempt the actual parse; returns a DateTime on success, undef on
# failure (the eval swallows DTF::Strptime's exceptions).
sub do_match
{
    my $self = shift;
    my $date = shift;
    local $^W; # bizarre bug: warnings are suppressed around the parse
    # Do the match!
    my $dt = eval { $self->{strptime}->parse_datetime( $date ) };
    return $@ ? undef : $dt;
}

# The match result already is the DateTime object; pass it through.
sub post_match
{
    return $_[2];
}

1;

__END__

=head1 THANKS

See L<the main module's section|DateTime::Format::Builder/"THANKS">.

=head1 SUPPORT

Support for this module is provided via the datetime@perl.org email
list. See http://lists.perl.org/ for more details.

Alternatively, log them via the CPAN RT system via the web or email:

    http://perl.dellah.org/rt/dtbuilder
    bug-datetime-format-builder@rt.cpan.org

This makes it much easier for me to track things and thus means
your problem is less likely to be neglected.

=head1 LICENCE AND COPYRIGHT

Copyright E<copy> Iain Truskett, 2003. All rights reserved.

This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself, either Perl version 5.000 or,
at your option, any later version of Perl 5 you may have available.

The full text of the licences can be found in the F<Artistic> and
F<COPYING> files included with this module, or in L<perlartistic> and
L<perlgpl> as supplied with Perl 5.8.1 and later.

=head1 AUTHOR

Iain Truskett <spoon@cpan.org>

=head1 SEE ALSO

C<datetime@perl.org> mailing list.

http://datetime.perl.org/

L<perl>, L<DateTime>,
L<DateTime::Format::Builder>

=cut
jfinkhaeuser/metalmittwoch
lib/DateTime/Format/Format/Builder/Parser/Strptime.pm
Perl
apache-2.0
2,819
package Google::Ads::AdWords::v201409::BudgetOrderService::ApiExceptionFault; use strict; use warnings; { # BLOCK to scope variables sub get_xmlns { 'https://adwords.google.com/api/adwords/billing/v201409' } __PACKAGE__->__set_name('ApiExceptionFault'); __PACKAGE__->__set_nillable(); __PACKAGE__->__set_minOccurs(); __PACKAGE__->__set_maxOccurs(); __PACKAGE__->__set_ref(); use base qw( SOAP::WSDL::XSD::Typelib::Element Google::Ads::AdWords::v201409::ApiException ); } 1; =pod =head1 NAME Google::Ads::AdWords::v201409::BudgetOrderService::ApiExceptionFault =head1 DESCRIPTION Perl data type class for the XML Schema defined element ApiExceptionFault from the namespace https://adwords.google.com/api/adwords/billing/v201409. A fault element of type ApiException. =head1 METHODS =head2 new my $element = Google::Ads::AdWords::v201409::BudgetOrderService::ApiExceptionFault->new($data); Constructor. The following data structure may be passed to new(): $a_reference_to, # see Google::Ads::AdWords::v201409::ApiException =head1 AUTHOR Generated by SOAP::WSDL =cut
gitpan/GOOGLE-ADWORDS-PERL-CLIENT
lib/Google/Ads/AdWords/v201409/BudgetOrderService/ApiExceptionFault.pm
Perl
apache-2.0
1,101
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

package centreon::common::force10::snmp::mode::hardware;

use base qw(centreon::plugins::templates::hardware);

use strict;
use warnings;

# Template hook: declares which sections take status-threshold overloads
# (psu/fan), which take numeric thresholds (temperature), the default
# status-to-severity mappings, and where the component submodules live.
sub set_system {
    my ($self, %options) = @_;

    $self->{regexp_threshold_overload_check_section_option} = '^(psu|fan)$';
    $self->{regexp_threshold_numeric_check_section_option} = '^(temperature)$';

    $self->{cb_hook2} = 'snmp_execute';

    # Default severity per SNMP component status string; can be
    # overridden at runtime via --threshold-overload.
    $self->{thresholds} = {
        fan => [
            ['up', 'OK'],
            ['absent', 'OK'],
            ['down', 'CRITICAL'],
        ],
        psu => [
            ['up', 'OK'],
            ['absent', 'OK'],
            ['down', 'CRITICAL'],
            ['normal', 'OK'],
            ['warning', 'WARNING'],
            ['critical', 'CRITICAL'],
            ['shutdown', 'CRITICAL'],
            ['notPresent', 'OK'],
            ['notFunctioning', 'CRITICAL'],
        ],
    };

    $self->{components_path} = 'centreon::common::force10::snmp::mode::components';
    $self->{components_module} = ['fan', 'psu', 'temperature'];
}

# Hook invoked by the hardware template: fetches all OID tables that the
# component modules registered in $self->{request} with one SNMP pass.
sub snmp_execute {
    my ($self, %options) = @_;

    $self->{snmp} = $options{snmp};
    $self->{results} = $self->{snmp}->get_multiple_table(oids => $self->{request});
}

# Constructor: standard plugin-mode boilerplate; no mode-specific
# options beyond those provided by the hardware template.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                });

    return $self;
}

1;

__END__

=head1 MODE

Check Hardware (Fan, Power Supply, Temperature).

=over 8

=item B<--component>

Which component to check (Default: '.*').
Can be: 'temperature', 'fan', 'psu'.

=item B<--filter>

Exclude some parts (comma seperated list) (Example: --filter=temperature --filter=psu)
Can also exclude specific instance: --filter=fan,1

=item B<--absent-problem>

Return an error if an entity is not 'present' (default is skipping) (comma seperated list)
Can be specific or global: --absent-problem=fan,1

=item B<--no-component>

Return an error if no compenents are checked.
If total (with skipped) is 0. (Default: 'critical' returns).

=item B<--threshold-overload>

Set to overload default threshold values (syntax: section,[instance,]status,regexp)
It used before default thresholds (order stays).
Example: --threshold-overload='psu,WARNING,^(?!(up)$)'

=item B<--warning>

Set warning threshold for temperatures (syntax: type,instance,threshold)
Example: --warning='temperature,.*,30'

=item B<--critical>

Set critical threshold for temperatures (syntax: type,instance,threshold)
Example: --critical='temperature,.*,40'

=back

=cut
bcournaud/centreon-plugins
centreon/common/force10/snmp/mode/hardware.pm
Perl
apache-2.0
3,462
package Paws::Pinpoint::DeleteAdmChannelResponse; use Moose; has ADMChannelResponse => (is => 'ro', isa => 'Paws::Pinpoint::ADMChannelResponse', required => 1); use MooseX::ClassAttribute; class_has _stream_param => (is => 'ro', default => 'ADMChannelResponse'); has _request_id => (is => 'ro', isa => 'Str'); 1; ### main pod documentation begin ### =head1 NAME Paws::Pinpoint::DeleteAdmChannelResponse =head1 ATTRIBUTES =head2 B<REQUIRED> ADMChannelResponse => L<Paws::Pinpoint::ADMChannelResponse> =head2 _request_id => Str =cut
ioanrogers/aws-sdk-perl
auto-lib/Paws/Pinpoint/DeleteAdmChannelResponse.pm
Perl
apache-2.0
555
package DBD::mysql::GetInfo; ######################################## # DBD::mysql::GetInfo # # # Generated by DBI::DBD::Metadata # $Author$ <-- the person to blame # $Revision$ # $Date$ use strict; use warnings; use DBD::mysql; # Beware: not officially documented interfaces... # use DBI::Const::GetInfoType qw(%GetInfoType); # use DBI::Const::GetInfoReturn qw(%GetInfoReturnTypes %GetInfoReturnValues); my $sql_driver = 'mysql'; # SQL_DRIVER_VER should be formatted as dd.dd.dddd my $dbdversion = $DBD::mysql::VERSION; $dbdversion .= '_00' if $dbdversion =~ /^\d+\.\d+$/; my $sql_driver_ver = sprintf("%02d.%02d.%04d", split(/[\._]/,$dbdversion)); my @Keywords = qw( BIGINT BLOB DEFAULT KEYS LIMIT LONGBLOB MEDIMUMBLOB MEDIUMINT MEDIUMTEXT PROCEDURE REGEXP RLIKE SHOW TABLES TINYBLOB TINYTEXT UNIQUE UNSIGNED ZEROFILL ); sub sql_keywords { return join ',', @Keywords; } sub sql_data_source_name { my $dbh = shift; return "dbi:$sql_driver:" . $dbh->{Name}; } sub sql_user_name { my $dbh = shift; # Non-standard attribute return $dbh->{CURRENT_USER}; } #################### # makefunc() # returns a ref to a sub that calls into XS to get # values for info types that must needs be coded in C sub makefunk ($) { my $type = shift; return sub {dbd_mysql_get_info(shift, $type)} } our %info = ( 20 => 'N', # SQL_ACCESSIBLE_PROCEDURES 19 => 'Y', # SQL_ACCESSIBLE_TABLES 0 => 0, # SQL_ACTIVE_CONNECTIONS 116 => 0, # SQL_ACTIVE_ENVIRONMENTS 1 => 0, # SQL_ACTIVE_STATEMENTS 169 => 127, # SQL_AGGREGATE_FUNCTIONS 117 => 0, # SQL_ALTER_DOMAIN 86 => 3, # SQL_ALTER_TABLE 10021 => makefunk 10021, # SQL_ASYNC_MODE 120 => 2, # SQL_BATCH_ROW_COUNT 121 => 2, # SQL_BATCH_SUPPORT 82 => 0, # SQL_BOOKMARK_PERSISTENCE 114 => 1, # SQL_CATALOG_LOCATION 10003 => 'Y', # SQL_CATALOG_NAME 41 => makefunk 41, # SQL_CATALOG_NAME_SEPARATOR 42 => makefunk 42, # SQL_CATALOG_TERM 92 => 29, # SQL_CATALOG_USAGE 10004 => '', # SQL_COLLATING_SEQUENCE 10004 => '', # SQL_COLLATION_SEQ 87 => 'Y', # SQL_COLUMN_ALIAS 22 => 0, # 
SQL_CONCAT_NULL_BEHAVIOR 53 => 259071, # SQL_CONVERT_BIGINT 54 => 0, # SQL_CONVERT_BINARY 55 => 259071, # SQL_CONVERT_BIT 56 => 259071, # SQL_CONVERT_CHAR 57 => 259071, # SQL_CONVERT_DATE 58 => 259071, # SQL_CONVERT_DECIMAL 59 => 259071, # SQL_CONVERT_DOUBLE 60 => 259071, # SQL_CONVERT_FLOAT 48 => 0, # SQL_CONVERT_FUNCTIONS # 173 => undef, # SQL_CONVERT_GUID 61 => 259071, # SQL_CONVERT_INTEGER 123 => 0, # SQL_CONVERT_INTERVAL_DAY_TIME 124 => 0, # SQL_CONVERT_INTERVAL_YEAR_MONTH 71 => 0, # SQL_CONVERT_LONGVARBINARY 62 => 259071, # SQL_CONVERT_LONGVARCHAR 63 => 259071, # SQL_CONVERT_NUMERIC 64 => 259071, # SQL_CONVERT_REAL 65 => 259071, # SQL_CONVERT_SMALLINT 66 => 259071, # SQL_CONVERT_TIME 67 => 259071, # SQL_CONVERT_TIMESTAMP 68 => 259071, # SQL_CONVERT_TINYINT 69 => 0, # SQL_CONVERT_VARBINARY 70 => 259071, # SQL_CONVERT_VARCHAR 122 => 0, # SQL_CONVERT_WCHAR 125 => 0, # SQL_CONVERT_WLONGVARCHAR 126 => 0, # SQL_CONVERT_WVARCHAR 74 => 1, # SQL_CORRELATION_NAME 127 => 0, # SQL_CREATE_ASSERTION 128 => 0, # SQL_CREATE_CHARACTER_SET 129 => 0, # SQL_CREATE_COLLATION 130 => 0, # SQL_CREATE_DOMAIN 131 => 0, # SQL_CREATE_SCHEMA 132 => 1045, # SQL_CREATE_TABLE 133 => 0, # SQL_CREATE_TRANSLATION 134 => 0, # SQL_CREATE_VIEW 23 => 2, # SQL_CURSOR_COMMIT_BEHAVIOR 24 => 2, # SQL_CURSOR_ROLLBACK_BEHAVIOR 10001 => 0, # SQL_CURSOR_SENSITIVITY 2 => \&sql_data_source_name, # SQL_DATA_SOURCE_NAME 25 => 'N', # SQL_DATA_SOURCE_READ_ONLY 119 => 7, # SQL_DATETIME_LITERALS 17 => 'MySQL', # SQL_DBMS_NAME 18 => makefunk 18, # SQL_DBMS_VER 170 => 3, # SQL_DDL_INDEX 26 => 2, # SQL_DEFAULT_TRANSACTION_ISOLATION 26 => 2, # SQL_DEFAULT_TXN_ISOLATION 10002 => 'N', # SQL_DESCRIBE_PARAMETER # 171 => undef, # SQL_DM_VER 3 => 137076632, # SQL_DRIVER_HDBC # 135 => undef, # SQL_DRIVER_HDESC 4 => 137076088, # SQL_DRIVER_HENV # 76 => undef, # SQL_DRIVER_HLIB # 5 => undef, # SQL_DRIVER_HSTMT 6 => 'libmyodbc3.so', # SQL_DRIVER_NAME 77 => '03.51', # SQL_DRIVER_ODBC_VER 7 => $sql_driver_ver, # SQL_DRIVER_VER 
136 => 0, # SQL_DROP_ASSERTION 137 => 0, # SQL_DROP_CHARACTER_SET 138 => 0, # SQL_DROP_COLLATION 139 => 0, # SQL_DROP_DOMAIN 140 => 0, # SQL_DROP_SCHEMA 141 => 7, # SQL_DROP_TABLE 142 => 0, # SQL_DROP_TRANSLATION 143 => 0, # SQL_DROP_VIEW 144 => 0, # SQL_DYNAMIC_CURSOR_ATTRIBUTES1 145 => 0, # SQL_DYNAMIC_CURSOR_ATTRIBUTES2 27 => 'Y', # SQL_EXPRESSIONS_IN_ORDERBY 8 => 63, # SQL_FETCH_DIRECTION 84 => 0, # SQL_FILE_USAGE 146 => 97863, # SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES1 147 => 6016, # SQL_FORWARD_ONLY_CURSOR_ATTRIBUTES2 81 => 11, # SQL_GETDATA_EXTENSIONS 88 => 3, # SQL_GROUP_BY 28 => 4, # SQL_IDENTIFIER_CASE #29 => sub {dbd_mysql_get_info(shift,$GetInfoType {SQL_IDENTIFIER_QUOTE_CHAR})}, 29 => makefunk 29, # SQL_IDENTIFIER_QUOTE_CHAR 148 => 0, # SQL_INDEX_KEYWORDS 149 => 0, # SQL_INFO_SCHEMA_VIEWS 172 => 7, # SQL_INSERT_STATEMENT 73 => 'N', # SQL_INTEGRITY 150 => 0, # SQL_KEYSET_CURSOR_ATTRIBUTES1 151 => 0, # SQL_KEYSET_CURSOR_ATTRIBUTES2 89 => \&sql_keywords, # SQL_KEYWORDS 113 => 'Y', # SQL_LIKE_ESCAPE_CLAUSE 78 => 0, # SQL_LOCK_TYPES 34 => 64, # SQL_MAXIMUM_CATALOG_NAME_LENGTH 97 => 0, # SQL_MAXIMUM_COLUMNS_IN_GROUP_BY 98 => 32, # SQL_MAXIMUM_COLUMNS_IN_INDEX 99 => 0, # SQL_MAXIMUM_COLUMNS_IN_ORDER_BY 100 => 0, # SQL_MAXIMUM_COLUMNS_IN_SELECT 101 => 0, # SQL_MAXIMUM_COLUMNS_IN_TABLE 30 => 64, # SQL_MAXIMUM_COLUMN_NAME_LENGTH 1 => 0, # SQL_MAXIMUM_CONCURRENT_ACTIVITIES 31 => 18, # SQL_MAXIMUM_CURSOR_NAME_LENGTH 0 => 0, # SQL_MAXIMUM_DRIVER_CONNECTIONS 10005 => 64, # SQL_MAXIMUM_IDENTIFIER_LENGTH 102 => 500, # SQL_MAXIMUM_INDEX_SIZE 104 => 0, # SQL_MAXIMUM_ROW_SIZE 32 => 0, # SQL_MAXIMUM_SCHEMA_NAME_LENGTH 105 => makefunk 105, # SQL_MAXIMUM_STATEMENT_LENGTH # 20000 => undef, # SQL_MAXIMUM_STMT_OCTETS # 20001 => undef, # SQL_MAXIMUM_STMT_OCTETS_DATA # 20002 => undef, # SQL_MAXIMUM_STMT_OCTETS_SCHEMA 106 => makefunk 106, # SQL_MAXIMUM_TABLES_IN_SELECT 35 => 64, # SQL_MAXIMUM_TABLE_NAME_LENGTH 107 => 16, # SQL_MAXIMUM_USER_NAME_LENGTH 10022 => makefunk 10022, # 
SQL_MAX_ASYNC_CONCURRENT_STATEMENTS 112 => 0, # SQL_MAX_BINARY_LITERAL_LEN 34 => 64, # SQL_MAX_CATALOG_NAME_LEN 108 => 0, # SQL_MAX_CHAR_LITERAL_LEN 97 => 0, # SQL_MAX_COLUMNS_IN_GROUP_BY 98 => 32, # SQL_MAX_COLUMNS_IN_INDEX 99 => 0, # SQL_MAX_COLUMNS_IN_ORDER_BY 100 => 0, # SQL_MAX_COLUMNS_IN_SELECT 101 => 0, # SQL_MAX_COLUMNS_IN_TABLE 30 => 64, # SQL_MAX_COLUMN_NAME_LEN 1 => 0, # SQL_MAX_CONCURRENT_ACTIVITIES 31 => 18, # SQL_MAX_CURSOR_NAME_LEN 0 => 0, # SQL_MAX_DRIVER_CONNECTIONS 10005 => 64, # SQL_MAX_IDENTIFIER_LEN 102 => 500, # SQL_MAX_INDEX_SIZE 32 => 0, # SQL_MAX_OWNER_NAME_LEN 33 => 0, # SQL_MAX_PROCEDURE_NAME_LEN 34 => 64, # SQL_MAX_QUALIFIER_NAME_LEN 104 => 0, # SQL_MAX_ROW_SIZE 103 => 'Y', # SQL_MAX_ROW_SIZE_INCLUDES_LONG 32 => 0, # SQL_MAX_SCHEMA_NAME_LEN 105 => 8192, # SQL_MAX_STATEMENT_LEN 106 => 31, # SQL_MAX_TABLES_IN_SELECT 35 => makefunk 35, # SQL_MAX_TABLE_NAME_LEN 107 => 16, # SQL_MAX_USER_NAME_LEN 37 => 'Y', # SQL_MULTIPLE_ACTIVE_TXN 36 => 'Y', # SQL_MULT_RESULT_SETS 111 => 'N', # SQL_NEED_LONG_DATA_LEN 75 => 1, # SQL_NON_NULLABLE_COLUMNS 85 => 2, # SQL_NULL_COLLATION 49 => 16777215, # SQL_NUMERIC_FUNCTIONS 9 => 1, # SQL_ODBC_API_CONFORMANCE 152 => 2, # SQL_ODBC_INTERFACE_CONFORMANCE 12 => 1, # SQL_ODBC_SAG_CLI_CONFORMANCE 15 => 1, # SQL_ODBC_SQL_CONFORMANCE 73 => 'N', # SQL_ODBC_SQL_OPT_IEF 10 => '03.80', # SQL_ODBC_VER 115 => 123, # SQL_OJ_CAPABILITIES 90 => 'Y', # SQL_ORDER_BY_COLUMNS_IN_SELECT 38 => 'Y', # SQL_OUTER_JOINS 115 => 123, # SQL_OUTER_JOIN_CAPABILITIES 39 => '', # SQL_OWNER_TERM 91 => 0, # SQL_OWNER_USAGE 153 => 2, # SQL_PARAM_ARRAY_ROW_COUNTS 154 => 3, # SQL_PARAM_ARRAY_SELECTS 80 => 3, # SQL_POSITIONED_STATEMENTS 79 => 31, # SQL_POS_OPERATIONS 21 => 'N', # SQL_PROCEDURES 40 => '', # SQL_PROCEDURE_TERM 114 => 1, # SQL_QUALIFIER_LOCATION 41 => '.', # SQL_QUALIFIER_NAME_SEPARATOR 42 => 'database', # SQL_QUALIFIER_TERM 92 => 29, # SQL_QUALIFIER_USAGE 93 => 3, # SQL_QUOTED_IDENTIFIER_CASE 11 => 'N', # SQL_ROW_UPDATES 39 => '', # 
SQL_SCHEMA_TERM 91 => 0, # SQL_SCHEMA_USAGE 43 => 7, # SQL_SCROLL_CONCURRENCY 44 => 17, # SQL_SCROLL_OPTIONS 14 => '\\', # SQL_SEARCH_PATTERN_ESCAPE 13 => makefunk 13, # SQL_SERVER_NAME 94 => 'ÇüéâäàåçêëèïîìÄÅÉæÆôöòûùÿÖÜáíóúñÑ', # SQL_SPECIAL_CHARACTERS 155 => 7, # SQL_SQL92_DATETIME_FUNCTIONS 156 => 0, # SQL_SQL92_FOREIGN_KEY_DELETE_RULE 157 => 0, # SQL_SQL92_FOREIGN_KEY_UPDATE_RULE 158 => 8160, # SQL_SQL92_GRANT 159 => 0, # SQL_SQL92_NUMERIC_VALUE_FUNCTIONS 160 => 0, # SQL_SQL92_PREDICATES 161 => 466, # SQL_SQL92_RELATIONAL_JOIN_OPERATORS 162 => 32640, # SQL_SQL92_REVOKE 163 => 7, # SQL_SQL92_ROW_VALUE_CONSTRUCTOR 164 => 255, # SQL_SQL92_STRING_FUNCTIONS 165 => 0, # SQL_SQL92_VALUE_EXPRESSIONS 118 => 4, # SQL_SQL_CONFORMANCE 166 => 2, # SQL_STANDARD_CLI_CONFORMANCE 167 => 97863, # SQL_STATIC_CURSOR_ATTRIBUTES1 168 => 6016, # SQL_STATIC_CURSOR_ATTRIBUTES2 83 => 7, # SQL_STATIC_SENSITIVITY 50 => 491519, # SQL_STRING_FUNCTIONS 95 => 0, # SQL_SUBQUERIES 51 => 7, # SQL_SYSTEM_FUNCTIONS 45 => 'table', # SQL_TABLE_TERM 109 => 0, # SQL_TIMEDATE_ADD_INTERVALS 110 => 0, # SQL_TIMEDATE_DIFF_INTERVALS 52 => 106495, # SQL_TIMEDATE_FUNCTIONS 46 => 3, # SQL_TRANSACTION_CAPABLE 72 => 15, # SQL_TRANSACTION_ISOLATION_OPTION 46 => 3, # SQL_TXN_CAPABLE 72 => 15, # SQL_TXN_ISOLATION_OPTION 96 => 0, # SQL_UNION 96 => 0, # SQL_UNION_STATEMENT 47 => \&sql_user_name, # SQL_USER_NAME 10000 => 1992, # SQL_XOPEN_CLI_YEAR ); 1; __END__
ray66rus/vndrv
local/lib/perl5/x86_64-linux-thread-multi/DBD/mysql/GetInfo.pm
Perl
apache-2.0
16,081
# Model methods for a user's equipment (server) application form:
# input validation plus simple per-user access control.
package NP::Model::UserEquipmentApplication;
use strict;
use warnings;
use NP::Util qw(convert_to_html);

# Validate the required free-text fields. Records any problems on the
# object (retrievable via validation_errors) and returns 1 when the
# application is valid, 0 otherwise.
sub validate {
    my $self   = shift;
    my $errors = {};
    for my $field (qw(contact_information application)) {
        # A field must be present and contain at least one
        # non-whitespace character.
        $errors->{$field} = 'Required field!'
          unless $self->$field and $self->$field =~ m/\S/;
    }
    $self->{_validation_errors} = $errors;
    return %$errors ? 0 : 1;
}

# Errors recorded by the last validate() call; {} when never validated.
sub validation_errors {
    my $self = shift;
    return $self->{_validation_errors} || {};
}

# Equipment admins may always edit; the applicant may edit only while
# the application is still in the 'New' state.
sub can_edit {
    my ($self, $user) = @_;
    return 0 unless $user;
    return 1 if $user->privileges->equipment_admin;
    return 1 if $self->status eq 'New' and $user->id == $self->user_id;

    # TODO: many<->many
    return 0;
}

# Equipment admins and the applicant may view the application.
sub can_view {
    my ($self, $user) = @_;
    return 0 unless $user;
    return 1 if $user->privileges->equipment_admin;
    return 1 if $user->id == $self->user_id;

    # TODO: many<->many
    return 0;
}

# HTML-formatted renderings of the free-text fields.
sub contact_information_html { return convert_to_html(shift->contact_information) }
sub application_html         { return convert_to_html(shift->application) }

1;
punitvara/ntppool
lib/NP/Model/UserEquipmentApplication.pm
Perl
apache-2.0
1,058
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Power-source usage mode for Emerson rack PDUs (LGP-PDU MIB): reports
# per-power-source total power, accumulated energy and neutral current,
# plus per-line load percentage and current.
package hardware::pdu::emerson::snmp::mode::psusage;

use base qw(centreon::plugins::templates::counter);

use strict;
use warnings;
use Digest::MD5 qw(md5_hex);

# Counter-template hook: declares the two instance groups ('ps' and 'line')
# and the metrics computed for each, including output/perfdata formats.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'ps', type => 1, cb_prefix_output => 'prefix_ps_output', message_multiple => 'All power sources are ok' },
        { name => 'line', type => 1, cb_prefix_output => 'prefix_line_output', message_multiple => 'All power source lines are ok' },
    ];

    # Per-power-source metrics. 'energy' uses diff => 1, so it needs the
    # statefile enabled in new() to compute deltas between runs.
    $self->{maps_counters}->{ps} = [
        { label => 'power', set => {
                key_values => [ { name => 'PwrTotal' }, { name => 'display' } ],
                output_template => 'Total input power : %s W',
                output_error_template => "total input power : %s",
                perfdatas => [
                    { label => 'power', value => 'PwrTotal_absolute', template => '%s',
                      unit => 'W', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            }
        },
        { label => 'energy', set => {
                key_values => [ { name => 'EnergyAccum', diff => 1 }, { name => 'display' } ],
                output_template => 'Total energy : %.3f kWh',
                output_error_template => "Total energy : %s",
                perfdatas => [
                    { label => 'energy', value => 'EnergyAccum_absolute', template => '%.3f',
                      unit => 'kWh', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            }
        },
        { label => 'current-neutral', set => {
                key_values => [ { name => 'EcNeutral' }, { name => 'display' } ],
                output_template => 'Current neutral : %s Amp AC RMS',
                output_error_template => "Current neutral : %s",
                perfdatas => [
                    { label => 'current_neutral', value => 'EcNeutral_absolute', template => '%s',
                      unit => 'AmpAcRMS', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            }
        },
    ];

    # Per-line (phase) metrics of each power source.
    $self->{maps_counters}->{line} = [
        { label => 'line-load', set => {
                key_values => [ { name => 'load' }, { name => 'display' } ],
                output_template => 'Load : %.2f %%',
                output_error_template => "Load : %s",
                perfdatas => [
                    { label => 'line_load', value => 'load_absolute', template => '%.2f',
                      unit => '%', min => 0, max => 100, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            }
        },
        { label => 'line-current', set => {
                key_values => [ { name => 'current' }, { name => 'display' } ],
                output_template => 'Current : %.2f A',
                output_error_template => "Current : %s",
                perfdatas => [
                    { label => 'line_current', value => 'current_absolute', template => '%.2f',
                      unit => 'A', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            }
        },
    ];
}

# Output prefix for a power source instance.
sub prefix_ps_output {
    my ($self, %options) = @_;

    return "Power source '" . $options{instance_value}->{display} . "' ";
}

# Output prefix for a power source line (phase) instance.
sub prefix_line_output {
    my ($self, %options) = @_;

    return "Power source line '" . $options{instance_value}->{display} . "' ";
}

sub new {
    my ($class, %options) = @_;
    # statefile => 1: required by the diff-based 'energy' counter.
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, statefile => 1);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                  "filter-name:s"       => { name => 'filter_name' },
                                });

    return $self;
}

# lgpPduPsLineEntryLine integer -> human-readable phase name.
my %map_phase = (1 => 'phase1', 2 => 'phase2', 3 => 'phase3');

# Power-source table columns (lgpPduPsEntry). Raw units noted per column;
# values are rescaled in manage_selection().
my $mapping = {
    lgpPduPsEntrySysAssignLabel => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.20.1.15' },
    lgpPduPsEntryEnergyAccum    => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.20.1.50' }, # 0.1 Kilowatt-Hour
    lgpPduPsEntryPwrTotal       => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.20.1.65' }, # Watt
    lgpPduPsEntryEcNeutral      => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.20.1.70' }, # 0.1 Amp-AC-RMS
};
# Power-source line table columns (lgpPduPsLineEntry).
my $mapping2 = {
    lgpPduPsLineEntryLine               => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.40.1.15', map => \%map_phase },
    lgpPduPsLineEntryEcHundredths       => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.40.1.22' }, # 0.01 A
    lgpPduPsLineEntryEcUsedBeforeAlarm  => { oid => '.1.3.6.1.4.1.476.1.42.3.8.30.40.1.39' }, # %
};
my $oid_lgpPduEntryUsrLabel = '.1.3.6.1.4.1.476.1.42.3.8.20.1.10';
my $oid_lgpPduPsEntry = '.1.3.6.1.4.1.476.1.42.3.8.30.20.1';
my $oid_lgpPduPsLineEntry = '.1.3.6.1.4.1.476.1.42.3.8.30.40.1';

# Walks the PDU/power-source/line SNMP tables and fills $self->{ps} and
# $self->{line} for the counter template. Raw SNMP values are rescaled to
# kWh / A here. Dies (option_exit) when no power source matches.
sub manage_selection {
    my ($self, %options) = @_;

    # Statefile key (for the diff-based energy counter) must vary with the
    # target host/port/mode and any filters applied.
    $self->{cache_name} = "pdu_emerson_" . $options{snmp}->get_hostname()  . '_' . $options{snmp}->get_port() . '_' . $self->{mode} . '_' .
        (defined($self->{option_results}->{filter_name}) ? md5_hex($self->{option_results}->{filter_name}) : md5_hex('all')) . '_' .
        (defined($self->{option_results}->{filter_counters}) ? md5_hex($self->{option_results}->{filter_counters}) : md5_hex('all'));

    $self->{ps} = {};
    $self->{results} = $options{snmp}->get_multiple_table(oids => [ { oid => $oid_lgpPduEntryUsrLabel },
                                                                    { oid => $oid_lgpPduPsEntry },
                                                                    { oid => $oid_lgpPduPsLineEntry } ],
                                                          nothing_quit => 1);
    foreach my $oid (keys %{$self->{results}->{$oid_lgpPduPsEntry}}) {
        # One entry per (PDU index, power source index) pair.
        next if ($oid !~ /^$mapping->{lgpPduPsEntryPwrTotal}->{oid}\.(\d+)\.(\d+)/);
        my ($pdu_index, $ps_index) = ($1, $2);
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_lgpPduPsEntry}, instance => $pdu_index . '.' . $ps_index);

        # Prefer user-assigned labels; fall back to numeric indexes.
        my $pdu_name = defined($self->{results}->{$oid_lgpPduEntryUsrLabel}->{$oid_lgpPduEntryUsrLabel . '.' . $pdu_index}) && $self->{results}->{$oid_lgpPduEntryUsrLabel}->{$oid_lgpPduEntryUsrLabel . '.' . $pdu_index} ne '' ?
            $self->{results}->{$oid_lgpPduEntryUsrLabel}->{$oid_lgpPduEntryUsrLabel . '.' . $pdu_index} : $pdu_index;
        my $ps_name = defined($result->{lgpPduPsEntrySysAssignLabel}) && $result->{lgpPduPsEntrySysAssignLabel} ne '' ?
            $result->{lgpPduPsEntrySysAssignLabel} : $ps_index;
        my $name = $pdu_name . '/' . $ps_name;
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $name !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping '" . $name . "': no matching filter.", debug => 1);
            next;
        }

        # Rescale: energy 0.1 kWh -> kWh, neutral current 0.1 A -> A.
        $self->{ps}->{$pdu_index . '.' . $ps_index} = { display => $name,
            EnergyAccum => $result->{lgpPduPsEntryEnergyAccum} * 0.1,
            PwrTotal => $result->{lgpPduPsEntryPwrTotal},
            EcNeutral => $result->{lgpPduPsEntryEcNeutral} * 0.1};
    }

    $self->{line} = {};
    foreach my $oid (keys %{$self->{results}->{$oid_lgpPduPsLineEntry}}) {
        # One entry per (PDU index, power source index, line index) triple.
        next if ($oid !~ /^$mapping2->{lgpPduPsLineEntryEcUsedBeforeAlarm}->{oid}\.(\d+)\.(\d+)\.(\d+)/);
        my ($pdu_index, $ps_index, $line_index) = ($1, $2, $3);
        my $result = $options{snmp}->map_instance(mapping => $mapping2, results => $self->{results}->{$oid_lgpPduPsLineEntry}, instance => $pdu_index . '.' . $ps_index . '.' . $line_index);

        # Skip lines whose parent power source was filtered out above.
        next if (!defined($self->{ps}->{$pdu_index . '.' . $ps_index}));

        # Rescale current: 0.01 A -> A.
        $self->{line}->{$pdu_index . '.' . $ps_index . '.' . $line_index} = {
            display => $self->{ps}->{$pdu_index . '.' . $ps_index}->{display} . '/' . $result->{lgpPduPsLineEntryLine},
            current => $result->{lgpPduPsLineEntryEcHundredths} * 0.01,
            load => $result->{lgpPduPsLineEntryEcUsedBeforeAlarm},
        };
    }

    if (scalar(keys %{$self->{ps}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "Cannot find power sources.");
        $self->{output}->option_exit();
    }
}

1;

__END__

=head1 MODE

Check power source usage.

=over 8

=item B<--filter-name>

Filter power source name (can be a regexp).

=item B<--filter-counters>

Only display some counters (regexp can be used).
Example: --filter-counters='^(power|energy)$'

=item B<--warning-*>

Threshold warning.
Can be: 'power', 'energy', 'current-neutral', 'line-load', 'line-current'.

=item B<--critical-*>

Threshold critical.
Can be: 'power', 'energy', 'current-neutral', 'line-load', 'line-current'.

=back

=cut
Shini31/centreon-plugins
hardware/pdu/emerson/snmp/mode/psusage.pm
Perl
apache-2.0
9,878
/*  Part of SWI-Prolog

    Author:        Jan Wielemaker
    E-mail:        J.Wielemaker@vu.nl
    WWW:           http://www.swi-prolog.org
    Copyright (c)  2019, VU University Amsterdam
    All rights reserved.

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions
    are met:

    1. Redistributions of source code must retain the above copyright
       notice, this list of conditions and the following disclaimer.

    2. Redistributions in binary form must reproduce the above copyright
       notice, this list of conditions and the following disclaimer in
       the documentation and/or other materials provided with the
       distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
    FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
    COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
    BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
    ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
    POSSIBILITY OF SUCH DAMAGE.
*/

:- module(setof,
          [ excess_vars/4               % +Term, +ExistVarTerm, +AddVarList, -VarList
          ]).
:- use_module(library(lists)).

%!  excess_vars(+Term, +ExistVarTerm, +AddVarList, -VarList)
%
%   Unify VarList with AddVarList followed  by   the  free variables of
%   Term. Variables occurring in ExistVarTerm count as existentially
%   quantified over the whole of Term and are therefore excluded, as
%   are variables quantified inside Term using the `VarTerm^SubTerm`
%   notation: any variable of VarTerm is treated as quantified in
%   SubTerm and never collected from it. Variables from AddVarList are
%   passed through unconditionally, even when they also appear
%   quantified. (In normal usage AddVarList is the empty list.)

excess_vars(Term, ExistVarTerm, AddVarList, VarList) :-
    '$free_variable_set'(ExistVarTerm^Term, _Stripped, FreeVars),
    append(AddVarList, FreeVars, VarList).
josd/eye
eye-wasm/swipl-wasm/home/library/dialect/xsb/setof.pl
Perl
mit
2,539
# This file is auto-generated by the Perl DateTime Suite time zone # code generator (0.07) This code generator comes with the # DateTime::TimeZone module distribution in the tools/ directory # # Generated from /tmp/rnClxBLdxJ/europe. Olson data version 2013a # # Do not edit this file directly. # package DateTime::TimeZone::Asia::Yekaterinburg; { $DateTime::TimeZone::Asia::Yekaterinburg::VERSION = '1.57'; } use strict; use Class::Singleton 1.03; use DateTime::TimeZone; use DateTime::TimeZone::OlsonDB; @DateTime::TimeZone::Asia::Yekaterinburg::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' ); my $spans = [ [ DateTime::TimeZone::NEG_INFINITY, # utc_start 60543071856, # utc_end 1919-07-14 23:57:36 (Mon) DateTime::TimeZone::NEG_INFINITY, # local_start 60543086400, # local_end 1919-07-15 04:00:00 (Tue) 14544, 0, 'LMT', ], [ 60543071856, # utc_start 1919-07-14 23:57:36 (Mon) 60888139200, # utc_end 1930-06-20 20:00:00 (Fri) 60543086256, # local_start 1919-07-15 03:57:36 (Tue) 60888153600, # local_end 1930-06-21 00:00:00 (Sat) 14400, 0, 'SVET', ], [ 60888139200, # utc_start 1930-06-20 20:00:00 (Fri) 62490596400, # utc_end 1981-03-31 19:00:00 (Tue) 60888157200, # local_start 1930-06-21 01:00:00 (Sat) 62490614400, # local_end 1981-04-01 00:00:00 (Wed) 18000, 0, 'SVET', ], [ 62490596400, # utc_start 1981-03-31 19:00:00 (Tue) 62506404000, # utc_end 1981-09-30 18:00:00 (Wed) 62490618000, # local_start 1981-04-01 01:00:00 (Wed) 62506425600, # local_end 1981-10-01 00:00:00 (Thu) 21600, 1, 'SVEST', ], [ 62506404000, # utc_start 1981-09-30 18:00:00 (Wed) 62522132400, # utc_end 1982-03-31 19:00:00 (Wed) 62506422000, # local_start 1981-09-30 23:00:00 (Wed) 62522150400, # local_end 1982-04-01 00:00:00 (Thu) 18000, 0, 'SVET', ], [ 62522132400, # utc_start 1982-03-31 19:00:00 (Wed) 62537940000, # utc_end 1982-09-30 18:00:00 (Thu) 62522154000, # local_start 1982-04-01 01:00:00 (Thu) 62537961600, # local_end 1982-10-01 00:00:00 (Fri) 21600, 1, 'SVEST', ], [ 62537940000, # utc_start 
1982-09-30 18:00:00 (Thu) 62553668400, # utc_end 1983-03-31 19:00:00 (Thu) 62537958000, # local_start 1982-09-30 23:00:00 (Thu) 62553686400, # local_end 1983-04-01 00:00:00 (Fri) 18000, 0, 'SVET', ], [ 62553668400, # utc_start 1983-03-31 19:00:00 (Thu) 62569476000, # utc_end 1983-09-30 18:00:00 (Fri) 62553690000, # local_start 1983-04-01 01:00:00 (Fri) 62569497600, # local_end 1983-10-01 00:00:00 (Sat) 21600, 1, 'SVEST', ], [ 62569476000, # utc_start 1983-09-30 18:00:00 (Fri) 62585290800, # utc_end 1984-03-31 19:00:00 (Sat) 62569494000, # local_start 1983-09-30 23:00:00 (Fri) 62585308800, # local_end 1984-04-01 00:00:00 (Sun) 18000, 0, 'SVET', ], [ 62585290800, # utc_start 1984-03-31 19:00:00 (Sat) 62601022800, # utc_end 1984-09-29 21:00:00 (Sat) 62585312400, # local_start 1984-04-01 01:00:00 (Sun) 62601044400, # local_end 1984-09-30 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62601022800, # utc_start 1984-09-29 21:00:00 (Sat) 62616747600, # utc_end 1985-03-30 21:00:00 (Sat) 62601040800, # local_start 1984-09-30 02:00:00 (Sun) 62616765600, # local_end 1985-03-31 02:00:00 (Sun) 18000, 0, 'SVET', ], [ 62616747600, # utc_start 1985-03-30 21:00:00 (Sat) 62632472400, # utc_end 1985-09-28 21:00:00 (Sat) 62616769200, # local_start 1985-03-31 03:00:00 (Sun) 62632494000, # local_end 1985-09-29 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62632472400, # utc_start 1985-09-28 21:00:00 (Sat) 62648197200, # utc_end 1986-03-29 21:00:00 (Sat) 62632490400, # local_start 1985-09-29 02:00:00 (Sun) 62648215200, # local_end 1986-03-30 02:00:00 (Sun) 18000, 0, 'SVET', ], [ 62648197200, # utc_start 1986-03-29 21:00:00 (Sat) 62663922000, # utc_end 1986-09-27 21:00:00 (Sat) 62648218800, # local_start 1986-03-30 03:00:00 (Sun) 62663943600, # local_end 1986-09-28 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62663922000, # utc_start 1986-09-27 21:00:00 (Sat) 62679646800, # utc_end 1987-03-28 21:00:00 (Sat) 62663940000, # local_start 1986-09-28 02:00:00 (Sun) 62679664800, # local_end 1987-03-29 02:00:00 (Sun) 
18000, 0, 'SVET', ], [ 62679646800, # utc_start 1987-03-28 21:00:00 (Sat) 62695371600, # utc_end 1987-09-26 21:00:00 (Sat) 62679668400, # local_start 1987-03-29 03:00:00 (Sun) 62695393200, # local_end 1987-09-27 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62695371600, # utc_start 1987-09-26 21:00:00 (Sat) 62711096400, # utc_end 1988-03-26 21:00:00 (Sat) 62695389600, # local_start 1987-09-27 02:00:00 (Sun) 62711114400, # local_end 1988-03-27 02:00:00 (Sun) 18000, 0, 'SVET', ], [ 62711096400, # utc_start 1988-03-26 21:00:00 (Sat) 62726821200, # utc_end 1988-09-24 21:00:00 (Sat) 62711118000, # local_start 1988-03-27 03:00:00 (Sun) 62726842800, # local_end 1988-09-25 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62726821200, # utc_start 1988-09-24 21:00:00 (Sat) 62742546000, # utc_end 1989-03-25 21:00:00 (Sat) 62726839200, # local_start 1988-09-25 02:00:00 (Sun) 62742564000, # local_end 1989-03-26 02:00:00 (Sun) 18000, 0, 'SVET', ], [ 62742546000, # utc_start 1989-03-25 21:00:00 (Sat) 62758270800, # utc_end 1989-09-23 21:00:00 (Sat) 62742567600, # local_start 1989-03-26 03:00:00 (Sun) 62758292400, # local_end 1989-09-24 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62758270800, # utc_start 1989-09-23 21:00:00 (Sat) 62773995600, # utc_end 1990-03-24 21:00:00 (Sat) 62758288800, # local_start 1989-09-24 02:00:00 (Sun) 62774013600, # local_end 1990-03-25 02:00:00 (Sun) 18000, 0, 'SVET', ], [ 62773995600, # utc_start 1990-03-24 21:00:00 (Sat) 62790325200, # utc_end 1990-09-29 21:00:00 (Sat) 62774017200, # local_start 1990-03-25 03:00:00 (Sun) 62790346800, # local_end 1990-09-30 03:00:00 (Sun) 21600, 1, 'SVEST', ], [ 62790325200, # utc_start 1990-09-29 21:00:00 (Sat) 62806050000, # utc_end 1991-03-30 21:00:00 (Sat) 62790343200, # local_start 1990-09-30 02:00:00 (Sun) 62806068000, # local_end 1991-03-31 02:00:00 (Sun) 18000, 0, 'SVET', ], [ 62806050000, # utc_start 1991-03-30 21:00:00 (Sat) 62821778400, # utc_end 1991-09-28 22:00:00 (Sat) 62806068000, # local_start 1991-03-31 02:00:00 (Sun) 
62821796400, # local_end 1991-09-29 03:00:00 (Sun) 18000, 1, 'SVEST', ], [ 62821778400, # utc_start 1991-09-28 22:00:00 (Sat) 62831455200, # utc_end 1992-01-18 22:00:00 (Sat) 62821792800, # local_start 1991-09-29 02:00:00 (Sun) 62831469600, # local_end 1992-01-19 02:00:00 (Sun) 14400, 0, 'SVET', ], [ 62831455200, # utc_start 1992-01-18 22:00:00 (Sat) 62837488800, # utc_end 1992-03-28 18:00:00 (Sat) 62831473200, # local_start 1992-01-19 03:00:00 (Sun) 62837506800, # local_end 1992-03-28 23:00:00 (Sat) 18000, 0, 'YEKT', ], [ 62837488800, # utc_start 1992-03-28 18:00:00 (Sat) 62853210000, # utc_end 1992-09-26 17:00:00 (Sat) 62837510400, # local_start 1992-03-29 00:00:00 (Sun) 62853231600, # local_end 1992-09-26 23:00:00 (Sat) 21600, 1, 'YEKST', ], [ 62853210000, # utc_start 1992-09-26 17:00:00 (Sat) 62868949200, # utc_end 1993-03-27 21:00:00 (Sat) 62853228000, # local_start 1992-09-26 22:00:00 (Sat) 62868967200, # local_end 1993-03-28 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 62868949200, # utc_start 1993-03-27 21:00:00 (Sat) 62884674000, # utc_end 1993-09-25 21:00:00 (Sat) 62868970800, # local_start 1993-03-28 03:00:00 (Sun) 62884695600, # local_end 1993-09-26 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 62884674000, # utc_start 1993-09-25 21:00:00 (Sat) 62900398800, # utc_end 1994-03-26 21:00:00 (Sat) 62884692000, # local_start 1993-09-26 02:00:00 (Sun) 62900416800, # local_end 1994-03-27 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 62900398800, # utc_start 1994-03-26 21:00:00 (Sat) 62916123600, # utc_end 1994-09-24 21:00:00 (Sat) 62900420400, # local_start 1994-03-27 03:00:00 (Sun) 62916145200, # local_end 1994-09-25 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 62916123600, # utc_start 1994-09-24 21:00:00 (Sat) 62931848400, # utc_end 1995-03-25 21:00:00 (Sat) 62916141600, # local_start 1994-09-25 02:00:00 (Sun) 62931866400, # local_end 1995-03-26 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 62931848400, # utc_start 1995-03-25 21:00:00 (Sat) 62947573200, # utc_end 1995-09-23 21:00:00 (Sat) 
62931870000, # local_start 1995-03-26 03:00:00 (Sun) 62947594800, # local_end 1995-09-24 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 62947573200, # utc_start 1995-09-23 21:00:00 (Sat) 62963902800, # utc_end 1996-03-30 21:00:00 (Sat) 62947591200, # local_start 1995-09-24 02:00:00 (Sun) 62963920800, # local_end 1996-03-31 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 62963902800, # utc_start 1996-03-30 21:00:00 (Sat) 62982046800, # utc_end 1996-10-26 21:00:00 (Sat) 62963924400, # local_start 1996-03-31 03:00:00 (Sun) 62982068400, # local_end 1996-10-27 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 62982046800, # utc_start 1996-10-26 21:00:00 (Sat) 62995352400, # utc_end 1997-03-29 21:00:00 (Sat) 62982064800, # local_start 1996-10-27 02:00:00 (Sun) 62995370400, # local_end 1997-03-30 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 62995352400, # utc_start 1997-03-29 21:00:00 (Sat) 63013496400, # utc_end 1997-10-25 21:00:00 (Sat) 62995374000, # local_start 1997-03-30 03:00:00 (Sun) 63013518000, # local_end 1997-10-26 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63013496400, # utc_start 1997-10-25 21:00:00 (Sat) 63026802000, # utc_end 1998-03-28 21:00:00 (Sat) 63013514400, # local_start 1997-10-26 02:00:00 (Sun) 63026820000, # local_end 1998-03-29 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63026802000, # utc_start 1998-03-28 21:00:00 (Sat) 63044946000, # utc_end 1998-10-24 21:00:00 (Sat) 63026823600, # local_start 1998-03-29 03:00:00 (Sun) 63044967600, # local_end 1998-10-25 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63044946000, # utc_start 1998-10-24 21:00:00 (Sat) 63058251600, # utc_end 1999-03-27 21:00:00 (Sat) 63044964000, # local_start 1998-10-25 02:00:00 (Sun) 63058269600, # local_end 1999-03-28 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63058251600, # utc_start 1999-03-27 21:00:00 (Sat) 63077000400, # utc_end 1999-10-30 21:00:00 (Sat) 63058273200, # local_start 1999-03-28 03:00:00 (Sun) 63077022000, # local_end 1999-10-31 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63077000400, # utc_start 1999-10-30 21:00:00 (Sat) 
63089701200, # utc_end 2000-03-25 21:00:00 (Sat) 63077018400, # local_start 1999-10-31 02:00:00 (Sun) 63089719200, # local_end 2000-03-26 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63089701200, # utc_start 2000-03-25 21:00:00 (Sat) 63108450000, # utc_end 2000-10-28 21:00:00 (Sat) 63089722800, # local_start 2000-03-26 03:00:00 (Sun) 63108471600, # local_end 2000-10-29 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63108450000, # utc_start 2000-10-28 21:00:00 (Sat) 63121150800, # utc_end 2001-03-24 21:00:00 (Sat) 63108468000, # local_start 2000-10-29 02:00:00 (Sun) 63121168800, # local_end 2001-03-25 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63121150800, # utc_start 2001-03-24 21:00:00 (Sat) 63139899600, # utc_end 2001-10-27 21:00:00 (Sat) 63121172400, # local_start 2001-03-25 03:00:00 (Sun) 63139921200, # local_end 2001-10-28 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63139899600, # utc_start 2001-10-27 21:00:00 (Sat) 63153205200, # utc_end 2002-03-30 21:00:00 (Sat) 63139917600, # local_start 2001-10-28 02:00:00 (Sun) 63153223200, # local_end 2002-03-31 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63153205200, # utc_start 2002-03-30 21:00:00 (Sat) 63171349200, # utc_end 2002-10-26 21:00:00 (Sat) 63153226800, # local_start 2002-03-31 03:00:00 (Sun) 63171370800, # local_end 2002-10-27 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63171349200, # utc_start 2002-10-26 21:00:00 (Sat) 63184654800, # utc_end 2003-03-29 21:00:00 (Sat) 63171367200, # local_start 2002-10-27 02:00:00 (Sun) 63184672800, # local_end 2003-03-30 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63184654800, # utc_start 2003-03-29 21:00:00 (Sat) 63202798800, # utc_end 2003-10-25 21:00:00 (Sat) 63184676400, # local_start 2003-03-30 03:00:00 (Sun) 63202820400, # local_end 2003-10-26 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63202798800, # utc_start 2003-10-25 21:00:00 (Sat) 63216104400, # utc_end 2004-03-27 21:00:00 (Sat) 63202816800, # local_start 2003-10-26 02:00:00 (Sun) 63216122400, # local_end 2004-03-28 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 
63216104400, # utc_start 2004-03-27 21:00:00 (Sat) 63234853200, # utc_end 2004-10-30 21:00:00 (Sat) 63216126000, # local_start 2004-03-28 03:00:00 (Sun) 63234874800, # local_end 2004-10-31 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63234853200, # utc_start 2004-10-30 21:00:00 (Sat) 63247554000, # utc_end 2005-03-26 21:00:00 (Sat) 63234871200, # local_start 2004-10-31 02:00:00 (Sun) 63247572000, # local_end 2005-03-27 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63247554000, # utc_start 2005-03-26 21:00:00 (Sat) 63266302800, # utc_end 2005-10-29 21:00:00 (Sat) 63247575600, # local_start 2005-03-27 03:00:00 (Sun) 63266324400, # local_end 2005-10-30 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63266302800, # utc_start 2005-10-29 21:00:00 (Sat) 63279003600, # utc_end 2006-03-25 21:00:00 (Sat) 63266320800, # local_start 2005-10-30 02:00:00 (Sun) 63279021600, # local_end 2006-03-26 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63279003600, # utc_start 2006-03-25 21:00:00 (Sat) 63297752400, # utc_end 2006-10-28 21:00:00 (Sat) 63279025200, # local_start 2006-03-26 03:00:00 (Sun) 63297774000, # local_end 2006-10-29 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63297752400, # utc_start 2006-10-28 21:00:00 (Sat) 63310453200, # utc_end 2007-03-24 21:00:00 (Sat) 63297770400, # local_start 2006-10-29 02:00:00 (Sun) 63310471200, # local_end 2007-03-25 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63310453200, # utc_start 2007-03-24 21:00:00 (Sat) 63329202000, # utc_end 2007-10-27 21:00:00 (Sat) 63310474800, # local_start 2007-03-25 03:00:00 (Sun) 63329223600, # local_end 2007-10-28 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63329202000, # utc_start 2007-10-27 21:00:00 (Sat) 63342507600, # utc_end 2008-03-29 21:00:00 (Sat) 63329220000, # local_start 2007-10-28 02:00:00 (Sun) 63342525600, # local_end 2008-03-30 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63342507600, # utc_start 2008-03-29 21:00:00 (Sat) 63360651600, # utc_end 2008-10-25 21:00:00 (Sat) 63342529200, # local_start 2008-03-30 03:00:00 (Sun) 63360673200, # local_end 
2008-10-26 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63360651600, # utc_start 2008-10-25 21:00:00 (Sat) 63373957200, # utc_end 2009-03-28 21:00:00 (Sat) 63360669600, # local_start 2008-10-26 02:00:00 (Sun) 63373975200, # local_end 2009-03-29 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63373957200, # utc_start 2009-03-28 21:00:00 (Sat) 63392101200, # utc_end 2009-10-24 21:00:00 (Sat) 63373978800, # local_start 2009-03-29 03:00:00 (Sun) 63392122800, # local_end 2009-10-25 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63392101200, # utc_start 2009-10-24 21:00:00 (Sat) 63405406800, # utc_end 2010-03-27 21:00:00 (Sat) 63392119200, # local_start 2009-10-25 02:00:00 (Sun) 63405424800, # local_end 2010-03-28 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63405406800, # utc_start 2010-03-27 21:00:00 (Sat) 63424155600, # utc_end 2010-10-30 21:00:00 (Sat) 63405428400, # local_start 2010-03-28 03:00:00 (Sun) 63424177200, # local_end 2010-10-31 03:00:00 (Sun) 21600, 1, 'YEKST', ], [ 63424155600, # utc_start 2010-10-30 21:00:00 (Sat) 63436856400, # utc_end 2011-03-26 21:00:00 (Sat) 63424173600, # local_start 2010-10-31 02:00:00 (Sun) 63436874400, # local_end 2011-03-27 02:00:00 (Sun) 18000, 0, 'YEKT', ], [ 63436856400, # utc_start 2011-03-26 21:00:00 (Sat) DateTime::TimeZone::INFINITY, # utc_end 63436878000, # local_start 2011-03-27 03:00:00 (Sun) DateTime::TimeZone::INFINITY, # local_end 21600, 0, 'YEKT', ], ]; sub olson_version { '2013a' } sub has_dst_changes { 30 } sub _max_year { 2023 } sub _new_instance { return shift->_init( @_, spans => $spans ); } 1;
liuyangning/WX_web
xampp/perl/vendor/lib/DateTime/TimeZone/Asia/Yekaterinburg.pm
Perl
mit
16,842
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

use 5.10.0;
use strict;
use warnings;

use Thrift;
use Thrift::Socket;
use IO::Socket::UNIX;

package Thrift::UnixSocket;

# A Thrift transport over a Unix domain socket.  Inherits all connection
# management from Thrift::Socket and only overrides socket creation.
use base qw( Thrift::Socket );

use version 0.77; our $VERSION = version->declare("$Thrift::VERSION");

#
# Constructor.
# Takes a unix domain socket filename.
# See Thrift::Socket for base class parameters.
# @param[in]  path   path to unix socket file
# @example    my $sock = Thrift::UnixSocket->new($path);
#
sub new
{
    my $classname = shift;
    my $self      = $classname->SUPER::new();
    $self->{path} = shift;
    return bless($self, $classname);
}

#
# Create and connect the underlying IO::Socket::UNIX stream socket.
# Dies with a Thrift::TTransportException (NOT_OPEN) on failure, after
# optionally reporting the error through the configured debug handler.
#
sub __open
{
    my $self = shift;
    my $sock = IO::Socket::UNIX->new(
        Type => IO::Socket::SOCK_STREAM,
        Peer => $self->{path})
    || do {
        my $error = 'UnixSocket: Could not connect to ' .
            $self->{path} . ' (' . $! . ')';
        if ($self->{debug}) {
            $self->{debugHandler}->($error);
        }
        # FIX: use a direct method call to construct the exception.  The
        # original used indirect object syntax ("die new Thrift::TTransportException(...)"),
        # which is deprecated and ambiguous to the Perl parser.
        die Thrift::TTransportException->new($error,
            Thrift::TTransportException::NOT_OPEN);
    };
    return $sock;
}

1;
johnbelamaric/themis
vendor/github.com/apache/thrift/lib/perl/lib/Thrift/UnixSocket.pm
Perl
apache-2.0
1,850
=pod =head1 NAME BIO_new_CMS - CMS streaming filter BIO =head1 SYNOPSIS #include <openssl/cms.h> BIO *BIO_new_CMS(BIO *out, CMS_ContentInfo *cms); =head1 DESCRIPTION BIO_new_CMS() returns a streaming filter BIO chain based on B<cms>. The output of the filter is written to B<out>. Any data written to the chain is automatically translated to a BER format CMS structure of the appropriate type. =head1 NOTES The chain returned by this function behaves like a standard filter BIO. It supports non blocking I/O. Content is processed and streamed on the fly and not all held in memory at once: so it is possible to encode very large structures. After all content has been written through the chain BIO_flush() must be called to finalise the structure. The B<CMS_STREAM> flag must be included in the corresponding B<flags> parameter of the B<cms> creation function. If an application wishes to write additional data to B<out> BIOs should be removed from the chain using BIO_pop() and freed with BIO_free() until B<out> is reached. If no additional data needs to be written BIO_free_all() can be called to free up the whole chain. Any content written through the filter is used verbatim: no canonical translation is performed. It is possible to chain multiple BIOs to, for example, create a triple wrapped signed, enveloped, signed structure. In this case it is the applications responsibility to set the inner content type of any outer CMS_ContentInfo structures. Large numbers of small writes through the chain should be avoided as this will produce an output consisting of lots of OCTET STRING structures. Prepending a BIO_f_buffer() buffering BIO will prevent this. =head1 BUGS There is currently no corresponding inverse BIO: i.e. one which can decode a CMS structure on the fly. =head1 RETURN VALUES BIO_new_CMS() returns a BIO chain when successful or NULL if an error occurred. The error can be obtained from ERR_get_error(3). 
=head1 SEE ALSO L<ERR_get_error(3)>, L<CMS_sign(3)>, L<CMS_encrypt(3)> =head1 HISTORY BIO_new_CMS() was added to OpenSSL 1.0.0 =head1 COPYRIGHT Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved. Licensed under the OpenSSL license (the "License"). You may not use this file except in compliance with the License. You can obtain a copy in the file LICENSE in the source distribution or at L<https://www.openssl.org/source/license.html>. =cut
openweave/openweave-core
third_party/openssl/openssl/doc/crypto/BIO_new_CMS.pod
Perl
apache-2.0
2,418
#!/usr/bin/env perl
# vim:ts=4:sw=4:expandtab
# © 2012 Michael Stapelberg
# Licensed under BSD license, see http://code.i3wm.org/i3/tree/LICENSE
#
# Append this line to your i3 config file:
#     exec_always ~/per-workspace-layout.pl
#
# Then, change the %layouts hash like you want your workspaces to be set up.
# This script requires i3 >= v4.4 for the extended workspace event.

use strict;
use warnings;
use AnyEvent;
use AnyEvent::I3;
use v5.10;

# Workspace name => layout to apply when that workspace gains focus while
# still empty.
my %layouts = (
    '4' => 'tabbed',
    '5' => 'stacked',
);

my $i3 = i3();

$i3->connect->recv()
    or die "Could not connect to i3: $!";

# Handler for the extended workspace event: when focus moves to an empty
# workspace listed in %layouts, switch that workspace to its layout.
my $on_workspace = sub {
    my ($msg) = @_;

    return unless $msg->{change} eq 'focus';

    # The 'current' key only exists with the extended workspace event.
    die "Your version of i3 is too old. You need >= v4.4"
        unless exists($msg->{current});

    my $ws = $msg->{current};

    # If the workspace already has children, don’t change the layout.
    return if scalar @{$ws->{nodes}} > 0;

    my $con_id = $ws->{id};
    my $wanted = $layouts{$ws->{name}};
    return unless defined $wanted;

    $i3->command(qq|[con_id="$con_id"] layout | . $wanted);
};

my $success = $i3->subscribe({
    workspace => $on_workspace,
    _error    => sub {
        my ($msg) = @_;
        say "AnyEvent::I3 error: $msg";
        say "Exiting.";
        exit 1;
    },
})->recv->{success};

die "Could not subscribe to the workspace event: $!" unless $success;

# Run forever.
AnyEvent->condvar->recv
FauxFaux/i3
contrib/per-workspace-layout.pl
Perl
bsd-3-clause
1,505
#!/usr/bin/perl # # PerlKit-0.1 - http://www.t0s.org # # browse.pl: Browse and download files from a webserver use strict; my ($path, %FORM); $|=1; # Get parameters %FORM = parse_parameters($ENV{'QUERY_STRING'}); if(defined $FORM{'path'}) { $path = $FORM{'path'}; } else { $path = "/"; } if(-f $path) { # Download selected file print "Content-Type: application/octet-stream\r\n"; print "\r\n"; open(FILE, "< $path") || print "Could not open file\n"; while(<FILE>) { print; } close(FILE); exit; } print "Content-Type: text/html\r\n"; print "\r\n"; print '<HTML> <body> <form action="" method="GET"> <input type="text" name="path" size=45 value="' . $path . '"> <input type="submit" value="List"> </form> Directory ' . $path . ' contents: <p> <font face="courier"> <table>'; if(defined $FORM{'path'}) { opendir(DIR, $path) || print "Could not open directory"; foreach (sort(readdir(DIR))) { print get_fileinfo($path, $_). "\n"; } closedir(DIR); } print "</table></font>"; sub parse_parameters ($) { my %ret; my $input = shift; foreach my $pair (split('&', $input)) { my ($var, $value) = split('=', $pair, 2); if($var) { $value =~ s/\+/ /g ; $value =~ s/%(..)/pack('c',hex($1))/eg; $ret{$var} = $value; } } return %ret; } sub get_fileinfo ($$) { my $ret; my ($dir,$filename) = @_; my $file = $dir . "/" . $filename; $file=~s/\/+/\//g; $ret = "<tr>"; $ret .= "<td>"; if(-d $file) { $file=~s/\/[^\/]+\/\.\./\//g; $ret .= "<a href=\"?path=$file\">$filename</a>"; } else { $ret .= "$filename <a href=\"?path=$file\">[D]</a>" ; } $ret .= "</td>"; my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, $atime,$mtime,$ctime,$blksize,$blocks) = stat($file); $ret .= "<td width=30'>&nbsp;</td>"; $ret .= "<td>$size</td>"; $ret .= "<td>". getpwuid($uid) ."</td>"; $ret .= "<td>". getgrgid($gid) ."</td>"; $ret .= "</tr>"; return $ret; }
JackStouffer/Violent-Python
wordlist/fuzzdb/web-backdoors/pl-cgi/list.pl
Perl
mit
1,978
#!/usr/bin/perl -w
use strict;

# A perl script for converting a methylFreq file to a UCSC BED file.
# USAGE: ./methylFreq2BED.pl sampleID [minDepth] < sample_methylFreq.txt
# sample_methylFreq.txt: the methylFreq file generated by bisReadMapper.pl
# Format of the BED file:
# Column 1: chromosome
# Column 2: chromosome_start
# Column 3: chromosome_end
# Column 4: methylated/total read counts
# Column 5: score, int(1000 * methylation level)
# Column 6: strand, please ignore since the reads from both strands were combined
#
# Written by Kun Zhang (kzhang@bioeng.ucsd.edu), last modified 10/07/2010

# Command-line arguments with defaults.
my $sampleID = $ARGV[0];
$sampleID = "Sample" if (!$sampleID);
my $minDepth = $ARGV[1];
$minDepth = 10 if (!$minDepth);

# Red..green color ramp.  NOTE(review): currently unused -- the output
# derives the BED score from the methylation level instead of emitting an
# itemRgb column; kept for reference.
my @palette = ("0,240,0", "30,210,0", "60,180,0", "90,150,0",
               "120,120,0", "150,90,0", "180,60,0", "210,0,0");

# Aggregated counts keyed by "chrom:pos":
#   {C}  => methylated (C) read count
#   {CT} => total C+T read count (coverage)
my %methylTable;

# Read a methylFreq stream from STDIN, accumulate per-site C and C+T
# counts, then emit the BED track on STDOUT.
# (FIX: removed the empty prototypes "sub main()" -- prototypes do not
# validate arguments and alter parsing; also removed the unused $strand
# and $methylLevel locals from the original.)
sub main {
    while (my $line = <STDIN>) {
        chomp($line);
        my @fields = split(/\t/, $line);

        # Allele calls start at column 5 as (base, count) pairs.
        my %alleleCounts;
        my $CT_counts;
        for (my $i = 5; $i < scalar(@fields); $i += 2) {
            $alleleCounts{ $fields[$i] } = $fields[ $i + 1 ];
            $CT_counts += $fields[ $i + 1 ] if ($fields[$i] =~ /[CT]/);
        }

        # Skip sites with no C/T evidence, a zero/empty reported depth
        # (FIX: the original divided by $fields[3] unconditionally and
        # would die on a zero-depth or malformed line), or <90% C+T
        # concordance with the reported depth.
        next if (!$CT_counts || !$fields[3] || $CT_counts / $fields[3] < 0.9);

        my $index = $fields[0] . ":" . $fields[1];
        $alleleCounts{'C'} = 0 if (!$alleleCounts{'C'});
        $methylTable{$index}->{'C'}  += $alleleCounts{'C'};
        $methylTable{$index}->{'CT'} += $CT_counts;
    }
    report_methylFreqBED();
}

# Print the UCSC track header and one BED line per site whose coverage
# reaches $minDepth.  Output format is unchanged from the original.
sub report_methylFreqBED {
    print "track name=\"", $sampleID, "\" description=\"$sampleID Methylation level\" visibility=2 useScore=1 itemRgb=\"On\"\n";
    foreach my $index (sort keys(%methylTable)) {
        next if ($methylTable{$index}->{'CT'} < $minDepth);
        my ($chr, $chr_pos) = split(/:/, $index);
        print "$chr\t", $chr_pos - 1, "\t", $chr_pos, "\t",
            "\'", $methylTable{$index}->{'C'}, "/", $methylTable{$index}->{'CT'}, "\'\t",
            int(1000 * $methylTable{$index}->{'C'} / $methylTable{$index}->{'CT'}),
            "\t+\n";
    }
}

main();
bmreilly/bspp_rrbs_comparison_project
methylFreq2BED.pl
Perl
mit
2,189
#!/usr/bin/perl
#
# Strip diacritics from every line of the files given on the command
# line, writing the normalised text to STDOUT.  The actual work is done
# by remove_diacritics() from the project's CorpusNormalisationFr module.
#
use strict;
use warnings;

use Cwd 'abs_path';
use File::Basename;
use lib dirname( abs_path(__FILE__) )."/../../lib";
use CorpusNormalisationFr;

while (my $f = shift(@ARGV)) {
    # FIX: three-arg open with a lexical filehandle.  The original two-arg
    # bareword form (open(F, $f)) would treat a leading '<', '>' or '|' in
    # the filename as an open mode -- a classic injection hazard.
    # (Also removed the unused $text accumulator from the original.)
    open(my $fh, '<', $f) or die("unable to open $f .\n");
    while (my $line = <$fh>) {
        print remove_diacritics($line);
    }
    close($fh);
}
glecorve/irisa-text-normalizer
bin/fr/remove-diacritics.pl
Perl
mit
327
=pod

=head1 NAME

DSA_set_default_method, DSA_get_default_method,
DSA_set_method, DSA_new_method, DSA_OpenSSL - select DSA method

=head1 SYNOPSIS

 #include <openssl/dsa.h>
 #include <openssl/engine.h>

 void DSA_set_default_method(const DSA_METHOD *meth);

 const DSA_METHOD *DSA_get_default_method(void);

 int DSA_set_method(DSA *dsa, const DSA_METHOD *meth);

 DSA *DSA_new_method(ENGINE *engine);

 DSA_METHOD *DSA_OpenSSL(void);

=head1 DESCRIPTION

A B<DSA_METHOD> specifies the functions that OpenSSL uses for DSA
operations. By modifying the method, alternative implementations
such as hardware accelerators may be used. IMPORTANT: See the NOTES section for
important information about how these DSA API functions are affected by the use
of B<ENGINE> API calls.

Initially, the default DSA_METHOD is the OpenSSL internal implementation,
as returned by DSA_OpenSSL().

DSA_set_default_method() makes B<meth> the default method for all DSA
structures created later. B<NB>: This is true only whilst no ENGINE has
been set as a default for DSA, so this function is no longer recommended.

DSA_get_default_method() returns a pointer to the current default
DSA_METHOD. However, the meaningfulness of this result is dependent on
whether the ENGINE API is being used, so this function is no longer
recommended.

DSA_set_method() selects B<meth> to perform all operations using the key
B<dsa>. This will replace the DSA_METHOD used by the DSA key and if the
previous method was supplied by an ENGINE, the handle to that ENGINE will
be released during the change. It is possible to have DSA keys that only
work with certain DSA_METHOD implementations (eg. from an ENGINE module
that supports embedded hardware-protected keys), and in such cases
attempting to change the DSA_METHOD for the key can have unexpected
results.

DSA_new_method() allocates and initializes a DSA structure so that B<engine>
will be used for the DSA operations.
If B<engine> is NULL, the default engine for DSA operations is used, and if no default ENGINE is set, the DSA_METHOD controlled by DSA_set_default_method() is used. =head1 THE DSA_METHOD STRUCTURE struct { /* name of the implementation */ const char *name; /* sign */ DSA_SIG *(*dsa_do_sign)(const unsigned char *dgst, int dlen, DSA *dsa); /* pre-compute k^-1 and r */ int (*dsa_sign_setup)(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp, BIGNUM **rp); /* verify */ int (*dsa_do_verify)(const unsigned char *dgst, int dgst_len, DSA_SIG *sig, DSA *dsa); /* compute rr = a1^p1 * a2^p2 mod m (May be NULL for some implementations) */ int (*dsa_mod_exp)(DSA *dsa, BIGNUM *rr, BIGNUM *a1, BIGNUM *p1, BIGNUM *a2, BIGNUM *p2, BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *in_mont); /* compute r = a ^ p mod m (May be NULL for some implementations) */ int (*bn_mod_exp)(DSA *dsa, BIGNUM *r, BIGNUM *a, const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx); /* called at DSA_new */ int (*init)(DSA *DSA); /* called at DSA_free */ int (*finish)(DSA *DSA); int flags; char *app_data; /* ?? */ } DSA_METHOD; =head1 RETURN VALUES DSA_OpenSSL() and DSA_get_default_method() return pointers to the respective B<DSA_METHOD>s. DSA_set_default_method() returns no value. DSA_set_method() returns non-zero if the provided B<meth> was successfully set as the method for B<dsa> (including unloading the ENGINE handle if the previous method was supplied by an ENGINE). DSA_new_method() returns NULL and sets an error code that can be obtained by L<ERR_get_error(3)> if the allocation fails. Otherwise it returns a pointer to the newly allocated structure. =head1 NOTES As of version 0.9.7, DSA_METHOD implementations are grouped together with other algorithmic APIs (eg. RSA_METHOD, EVP_CIPHER, etc) in B<ENGINE> modules. If a default ENGINE is specified for DSA functionality using an ENGINE API function, that will override any DSA defaults set using the DSA API (ie. DSA_set_default_method()). 
For this reason, the ENGINE API is the recommended way to control default implementations for use in DSA and other cryptographic algorithms. =head1 SEE ALSO L<dsa(3)>, L<DSA_new(3)> =head1 HISTORY DSA_set_default_method(), DSA_get_default_method(), DSA_set_method(), DSA_new_method() and DSA_OpenSSL() were added in OpenSSL 0.9.4. DSA_set_default_openssl_method() and DSA_get_default_openssl_method() replaced DSA_set_default_method() and DSA_get_default_method() respectively, and DSA_set_method() and DSA_new_method() were altered to use B<ENGINE>s rather than B<DSA_METHOD>s during development of the engine version of OpenSSL 0.9.6. For 0.9.7, the handling of defaults in the ENGINE API was restructured so that this change was reversed, and behaviour of the other functions resembled more closely the previous behaviour. The behaviour of defaults in the ENGINE API now transparently overrides the behaviour of defaults in the DSA API without requiring changing these function prototypes. =cut
vbloodv/blood
extern/openssl.orig/doc/crypto/DSA_set_method.pod
Perl
mit
5,303
#!/usr/bin/perl

# XML Extractor
# This script reads the given log files line by line
# and produces output with the XMLs found

use strict;
use warnings;

use constant {
    # just a personal preference for boolean
    true  => 1,
    false => 0
};

# Print usage information.
sub help {
    print <<HEREDOC;
#####################################################################

getxml - XML Extractor

If you have a file full of xmls and other stuff (typically logs),
getxml script can help you get the XML lines alone.

Usage: getxml root-tag-name input-file

For instance, to extract below XML file
<start><item><itemno>1</itemno><name>foo</name><item></start>

getxml start /your/log/file.log

You can also redirect the output to a file to save XMLs.

getxml start /your/log/file.log > xmls.out

#####################################################################
HEREDOC
}

# Report a fatal error with program name and errno context.
sub croak { die "$0: @_: $!\n" } # error handling

# Scan one file and print every complete <tag>...</tag> document found,
# each joined onto a single output line.  Unbalanced open/close tags are
# silently discarded (see TODOs below).
sub process_file {
    my $tag  = shift;
    my $file = shift;

    my $match = false;
    my $xml   = '';

    # FIX: three-arg open with a lexical filehandle.  The original
    # two-arg bareword form (open FILE, $file) would interpret a filename
    # starting with '<', '>' or '|' as an open mode.
    # NOTE(review): $tag is interpolated into the match regexes below;
    # if tag names may contain regex metacharacters, wrap as \Q$tag\E.
    open my $fh, '<', $file or croak("Failed opening $file");

    while (my $line = <$fh>) { # each line of file
        if ($line =~ m/<$tag>/) { # if <tag> presents in a line
            if (!$match) { # If we're not in the middle of another match
                $match = true; # We've got a starting match
            }
            else {
                # We're already in the middle of another match.
                # It's a mismatch; once <tag> is found, there should be a
                # </tag> before the next <tag>.  Discard and proceed.
                $xml   = '';    # Empty the temp variable
                $match = false; # reset matching
                # We're suppressing the mismatch for now
                # TODO: Report list of mismatched XMLs
            }
        }

        if ($match) { # We're in the middle of a match
            # removes unnecessary line breaks; chomp isn't platform friendly
            $line =~ s/\r?\n$//;
            $xml .= $line; # Add found XML to the temp variable
        }

        if ($line =~ m/<\/$tag>/) { # if </tag> presents in a line
            if (!$match) {
                # A </tag> without a preceding <tag> is a mismatch too.
                # We're suppressing the mismatch for now
                # TODO: Report list of mismatched XMLs
            }
            else {
                # We're done with a XML.
                print "$xml\n"; # Output the whole XML found
            }
            # Either way, reset the match so we can look for another
            $xml   = '';
            $match = false;
        }
    }

    # FIX: close through the lexical handle and check the result.
    close $fh or croak("Failed closing $file");
}

# Entry point: first argument is the root tag name (or 'h'/nothing for
# help), the remaining arguments are input files.
sub cat {
    my $tag = shift || 'h'; # Display help info if no input was supplied

    if ($tag eq 'h') {
        help;
        exit;
    }

    while (my $file = shift) { # each file received from command line
        process_file($tag, $file);
    }
}

cat @ARGV;
sanspace/perl-utility-scripts
getxml.pl
Perl
mit
3,266
use strict;
use utf8;

package Dorq::internals;

# Utility functions for the Dorq DSL: UTF-8 helpers, conversion of Dorq
# wrapper objects to native Perl structures, and deep cloning.

BEGIN
{
    require Data::Dumper;
    $Data::Dumper::Terse   = 1;
    $Data::Dumper::Deparse = 1;
};

use Scalar::Util 'blessed';
use Encode ( 'is_utf8', 'decode', 'encode' );

# Decode a byte string to a character string, unless it is already
# flagged as UTF-8.
sub du
{
    my $s = shift;
    return ( is_utf8( $s ) ? $s : decode( 'UTF-8', $s ) );
}

# Encode a character string to UTF-8 bytes; byte strings pass through
# unchanged.  The downgrade drops the UTF-8 flag when the result is
# still flagged.
sub eu
{
    my $s = shift;
    my $rv = is_utf8( $s ) ? encode( 'UTF-8', $s ) : $s;
    if( is_utf8( $rv ) )
    {
        utf8::downgrade( $rv );
    }
    return $rv;
}

# Recursively unwrap a Dorq::link/var/hash/array/type object into plain
# Perl data; anything else is returned as-is.
sub convert_Dorq_object_to_native_perl
{
    my $o = shift;
    if( blessed $o )
    {
        if( $o -> isa( 'Dorq::link' ) )
        {
            $o = $o -> val();
        }
        if( $o -> isa( 'Dorq::var' ) )
        {
            $o = $o -> val();
        }
        if( $o -> isa( 'Dorq::hash' ) )
        {
            return &Dorq::internals::convert_Dorq_hash_to_native_perl( $o );
        }
        if( $o -> isa( 'Dorq::array' ) )
        {
            return &Dorq::internals::convert_Dorq_array_to_native_perl( $o );
        }
        if( $o -> isa( 'Dorq::type' ) )
        {
            return $o -> val();
        }
    }
    return $o;
}

# Convert a Dorq::hash (whose values are references to wrapped objects)
# into a plain hashref of native Perl values.
sub convert_Dorq_hash_to_native_perl
{
    my $in = shift;
    my %out = ();
    foreach my $key ( keys %{ $in -> val() } )
    {
        $out{ $key } = &Dorq::internals::convert_Dorq_object_to_native_perl( ${ $in -> { 'hash' } -> { $key } } );
    }
    return \%out;
}

# Convert a Dorq::array (whose elements are references to wrapped
# objects) into a plain arrayref of native Perl values.
sub convert_Dorq_array_to_native_perl
{
    my $in = shift -> val();
    my @out = ();
    my $cnt = scalar( @$in );
    for( my $i = 0; $i < $cnt; ++$i )
    {
        $out[ $i ] = &Dorq::internals::convert_Dorq_object_to_native_perl( ${ $in -> [ $i ] } );
    }
    return \@out;
}

# Deep-clone an arbitrary Perl structure.  $limit guards against runaway
# recursion (dies with a dump of the offending node past 10000 levels).
# Dorq::object instances delegate to their own clone(); other blessed
# references are returned unchanged.
sub clone
{
    my ( $i, $limit ) = @_;
    if( ++$limit > 10000 )
    {
        require Data::Dumper;
        die Data::Dumper::Dumper( $i );
    }
    if( blessed $i )
    {
        if( $i -> isa( 'Dorq::object' ) )
        {
            return $i -> clone( $limit );
        }
        return $i;
    }
    if( ref( $i ) eq 'HASH' )
    {
        return &Dorq::internals::clone_hash( $i, $limit );
    }
    if( ref( $i ) eq 'ARRAY' )
    {
        return &Dorq::internals::clone_array( $i, $limit );
    }
    if( ref( $i ) eq 'SCALAR' )
    {
        return &Dorq::internals::clone_scalar( $i, $limit );
    }
    return $i;
}

# Deep-clone a hashref.
sub clone_hash
{
    my ( $i, $limit ) = @_;
    my %o = ();
    # BUG FIX: the original iterated "foreach my $key ( %$i )", which
    # flattens the hash in list context to (key, value, key, value, ...)
    # and therefore also treated every VALUE as a key, injecting bogus
    # keys (mapped to undef clones) into the copy.  Iterate keys only.
    foreach my $key ( keys %$i )
    {
        $o{ $key } = &Dorq::internals::clone( $i -> { $key }, $limit );
    }
    return \%o;
}

# Deep-clone an arrayref.
sub clone_array
{
    my ( $i, $limit ) = @_;
    my @o = ();
    my $c = scalar( @$i );
    for( my $j = 0; $j < $c; ++$j )
    {
        $o[ $j ] = &Dorq::internals::clone( $i -> [ $j ], $limit );
    }
    return \@o;
}

# Deep-clone a scalar ref (returns a ref to a clone of the referent).
sub clone_scalar
{
    my ( $i, $limit ) = @_;
    my $o = &Dorq::internals::clone( $$i, $limit );
    return \$o;
}

-1; # still a true value, so require/use succeeds (kept from the original)
kainwinterheart/dorq-dsl
Dorq/internals.pm
Perl
mit
2,488
#!/usr/bin/env perl

use strict;
use warnings;

# Extract the hairpin precursor entries of one species from a local
# miRBase ./hairpin.fa file and fold each with RNAfold, appending all
# RNAfold output to <species>_hairpin_precursors.fa.folds.
#
# USAGE: ./makeFoldsFileForSpecificGenome.pl <species-prefix>

my $species = $ARGV[0];
die "USAGE: $0 <species-prefix>\n" unless defined $species;

# FIX: the original opened these handles unchecked, and the hairpin.fa
# open even contained a stray comma: open(HAIRPIN_FH, ,"<", ...).
open(my $folds_fh, ">", $species . "_hairpin_precursors.fa.folds")
    or die "Cannot open output folds file: $!\n";
open(my $hairpin_fh, "<", "./hairpin.fa")
    or die "Cannot open ./hairpin.fa: $!\n";

my $found_valid_entry = 0;   # currently inside an entry of the target species?
my $hairpin_seq       = "";  # sequence buffered for the current entry
my $tmp_fh;                  # handle for tmpRNAfoldInput.fa, opened per entry

# Finish the currently buffered entry: write its sequence to the temp
# FASTA file, close it (FIX: the original never closed/flushed the temp
# handle before running RNAfold, so buffered data could be missing from
# the file RNAfold read), run RNAfold on it, and append the result.
sub fold_current_entry {
    print {$tmp_fh} "$hairpin_seq\n";
    close($tmp_fh);
    my $rnafold_output = `RNAfold < tmpRNAfoldInput.fa`;
    print {$folds_fh} $rnafold_output;
}

while (my $line = <$hairpin_fh>) {
    chomp $line;
    if ($line =~ /^>/) {
        # New FASTA header: first fold any entry buffered so far.
        if ($hairpin_seq ne "") {
            fold_current_entry();
            $found_valid_entry = 0;
            $hairpin_seq       = "";
        }
        if ($line =~ /^>$species/) {
            $line =~ s/stem-loop//;
            open($tmp_fh, ">", "tmpRNAfoldInput.fa")
                or die "Cannot open tmpRNAfoldInput.fa: $!\n";
            $found_valid_entry = 1;
            print {$tmp_fh} "$line\n";
        }
    }
    elsif ($found_valid_entry) {
        $hairpin_seq .= $line;
    }
}
close($hairpin_fh);

# Fold the final entry if the file ended while one was buffered.
# FIX: the original ran this block unconditionally, printing to a
# never-opened handle (and folding a stale temp file) whenever no entry
# of the requested species was found.
if ($hairpin_seq ne "") {
    fold_current_entry();
}

close($folds_fh);
dvitsios/mirMod-pipeline
mirbase_local_factory/makeFoldsFileForSpecificGenome.pl
Perl
mit
1,266
package Chromosome::0Sum::10Genes;

use v5.10;
use strict;
use warnings;

use Moo;

=head1 NAME

Chromosome::0Sum::10Genes - 0Sum Chromosome with 10 genes.

=cut

# Compose the shared 0Sum chromosome behaviour from the role.
with 'Chromosome::0Sum';

# Moo builder for a 'size' attribute: this variant has 10 genes.
# NOTE(review): assumes the Chromosome::0Sum role declares the 'size'
# attribute with builder => '_build_size' -- confirm in the role.
sub _build_size { 10 }

1;
meis/2048GA
lib/Chromosome/0Sum/10Genes.pm
Perl
mit
215
# <@LICENSE>
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to you under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </@LICENSE>

package Mail::SpamAssassin::Locker::Flock;

use strict;
use warnings;
use bytes;

use Mail::SpamAssassin;
use Mail::SpamAssassin::Locker;
use Mail::SpamAssassin::Util;
use Mail::SpamAssassin::Logger;
use File::Spec;
use IO::File;
use Fcntl qw(:DEFAULT :flock);

our @ISA = qw(Mail::SpamAssassin::Locker);

###########################################################################

sub new {
  my $class = shift;
  my $self = $class->SUPER::new(@_);
  return $self;
}

###########################################################################
# Attempt to create a file lock, using NFS-UNsafe locking techniques.

# Take an exclusive flock(2) lock on "$path.mutex", waiting at most
# $max_retries seconds (enforced with a SIGALRM timer rather than a
# sleep/retry loop).  Returns 1 on success, 0 on timeout.  The open
# filehandle is stashed in $self->{lock_fhs}: closing it would drop the
# lock, so it must stay open until safe_unlock().
sub safe_lock {
  my ($self, $path, $max_retries, $mode) = @_;
  my $acquired = 0;

  $max_retries ||= 30;
  $mode ||= 0600;
  $mode = oct $mode;

  my $lock_file = "$path.mutex";

  # Create the mutex file with exactly the requested permission bits.
  my $saved_umask = umask(~$mode);
  my $fh = IO::File->new();
  unless ($fh->open($lock_file, O_RDWR|O_CREAT)) {
    umask $saved_umask;   # just in case
    die "locker: safe_lock: cannot create lockfile $lock_file: $!\n";
  }
  umask $saved_umask;     # we've created the file, so reset umask
  dbg("locker: safe_lock: created $lock_file");

  # use a SIGALRM-based timer -- more efficient than second-by-second
  # sleeps
  my $alarm_cleared   = 0;
  my $previous_alarm  = 0;
  eval {
    local $SIG{ALRM} = sub { die "alarm\n" };

    dbg("locker: safe_lock: trying to get lock on $path with $max_retries timeout");

    # max_retries is basically seconds! so use it for the timeout
    $previous_alarm = alarm $max_retries;

    # HELLO!?! IO::File doesn't have a flock() method?!
    if (flock($fh, LOCK_EX)) {
      alarm $previous_alarm;
      $alarm_cleared = 1;       # avoid calling alarm(0) twice

      dbg("locker: safe_lock: link to $lock_file: link ok");
      $acquired = 1;

      # just to be nice: let people know when it was locked
      $fh->print("$$\n");
      $fh->flush();

      # keep the FD around - we need to keep the lockfile open or the lock
      # is unlocked!
      $self->{lock_fhs} ||= {};
      $self->{lock_fhs}->{$path} = $fh;
    }
  };
  my $eval_error = $@;
  alarm $previous_alarm unless $alarm_cleared;  # if we die'd, reset here

  if ($eval_error) {
    if ($eval_error =~ /alarm/) {
      dbg("locker: safe_lock: timed out after $max_retries seconds");
    }
    else {
      die "locker: safe_lock: $eval_error";
    }
  }

  return $acquired;
}

###########################################################################

# Release the lock taken by safe_lock() and close its filehandle.  The
# mutex file itself is deliberately never unlinked (see the race note).
sub safe_unlock {
  my ($self, $path) = @_;

  my $fh = exists $self->{lock_fhs} ? $self->{lock_fhs}->{$path} : undef;
  unless (defined $fh) {
    dbg("locker: safe_unlock: no lock handle for $path - already unlocked?");
    return;
  }
  delete $self->{lock_fhs}->{$path};

  flock($fh, LOCK_UN);
  $fh->close();

  dbg("locker: safe_unlock: unlocked $path.mutex");

  # do NOT unlink!  this would open a race, whereby:
  #
  # procA: ....unlock   (unlocked lockfile)
  # procB: lock         (gets lock on lockfile)
  # procA: unlink       (deletes lockfile)
  #    (procB's lock is now deleted as well!)
  # procC: create, lock (gets lock on new file)
  #
  # both procB and procC would then think they had locks, and both
  # would write to the database file.  this is bad.
  #
  # unlink ("$path.mutex");
  #
  # side-effect: we leave a .mutex file around.  but hey!
}

###########################################################################

# Re-write our PID into the held lockfile to mark the lock as still in
# active use.
sub refresh_lock {
  my ($self, $path) = @_;

  return unless $path;

  if (!exists $self->{lock_fhs} || !defined $self->{lock_fhs}->{$path}) {
    warn "locker: refresh_lock: no lock handle for $path\n";
    return;
  }

  my $fh = $self->{lock_fhs}->{$path};
  $fh->print("$$\n");
  $fh->flush();

  dbg("locker: refresh_lock: refresh $path.mutex");
}

###########################################################################

1;
carlgao/lenga
images/lenny64-peon/usr/share/perl5/Mail/SpamAssassin/Locker/Flock.pm
Perl
mit
4,724
#! /usr/bin/perl # ABOUT: # # This is the Shadowcat Catalyst installer. Its purpose is to make it easier # and quicker to get started with Catalyst development. In order to use it, # make sure you have perl 5.8.1+, a make and a compiler, (nmake and dev-c++ # are good on windows), a configured CPAN.pm and Module::Build installed. Then # run this script by executing # # /path/to/perl cat-install # # and go for a tea break. If anything breaks, please send the full build log # and the output of perl -V to <cat-install@shadowcatsystems.co.uk> and we'll # see what we can do. This script is still BETA though, so don't be too # surprised if something *does* break. # # See http://www.shadowcatsystems.co.uk/projects/catalyst/ for more information # on the Catalyst project and Shadowcat's involvement with it. # # COPYRIGHT: # # This software is Copyright (c) 2006 Shadowcat Systems Limited # <mst@shadowcatsystems.co.uk> # # LICENSE: # # This work is made available to you under the terms of Version 2 of # the GNU General Public License. A copy of this license can be obtained # from www.gnu.org, or by writing to the Free Software Foundation, Inc., # 675 Mass Ave, Cambridge, MA 02139, USA. # # This work is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. 
package CPAN::Override;

# Accept the default answer to every ExtUtils::MakeMaker build-time prompt
# (PERL_MM_USE_DEFAULT), so the install can run unattended.
$ENV{PERL_MM_USE_DEFAULT} = 1;

# Wrap CPAN::Distribution->new: when a CPAN::Override::Distribution::<Name>
# subclass exists for the distribution being constructed, re-bless the object
# into it so the customised make() methods below take effect.
my $original_new = CPAN::Distribution->can('new');

my $wrapped_new = sub {
    my $dist = $original_new->(@_);

    # Turn e.g. "A/AB/ABW/Template-Toolkit-2.14.tar.gz" into the package
    # parts ('Template', 'Toolkit') by taking the last path component,
    # splitting on '-' and dropping the trailing version part.
    my @name_parts = split('-', (split('/', $dist->normalize))[-1]);
    pop(@name_parts);

    my $override_package = join('::', 'CPAN::Override::Distribution', @name_parts);
    bless($dist, $override_package) if $override_package->isa('CPAN::Distribution');

    return $dist;
};

{
    no warnings 'redefine';
    *CPAN::Distribution::new = \&$wrapped_new;
}

package CPAN::Override::Distribution::Template::Toolkit;
use base qw/CPAN::Distribution/;
use strict;
use warnings;

# Build Template Toolkit non-interactively: pre-answer all of its TT_* build
# questions (optional extras disabled) via extra Makefile.PL arguments.
sub make {
    my $self = shift;

    my %tt_answers = (
        DOCS     => 'n',
        SPLASH   => 'n',
        EXAMPLES => 'n',
        EXTRAS   => 'n',
        DBI      => 'n',
        LATEX    => 'n',
        QUIET    => 'n',
        ACCEPT   => 'y',
    );
    my $extra_args = join(' ', map { "TT_$_=" . $tt_answers{$_} } keys %tt_answers);

    # Only override makepl_arg for the duration of this one build.
    local $CPAN::Config->{makepl_arg} = $CPAN::Config->{makepl_arg} . ' ' . $extra_args;
    $self->SUPER::make(@_);
}

package CPAN::Override::Distribution::Module::Install;
use base qw/CPAN::Distribution/;

# Strip the interactive auto_install/prerequisites prompts out of
# Module::Install-based Makefile.PLs before building (plus a line-ending and
# ExtUtils::ParseXS tweak on Win32).
sub make {
    my $self = shift;

    $self->get;
    my $build_dir = $self->dir;
    chdir($build_dir) && (!-f 'Makefile') && do {
        my $perl      = $self->perl;
        my $edit_code = 's/^auto_install_now.*//; s/.*prerequisites_policy.*//;';
        if ($^O eq 'MSWin32') {
            $edit_code .= ' s/^.*ExtUtils::ParseXS.*$//;';
            system(qq!$perl -p -i.bak -e "s/\n/\r\n/;" Makefile.PL!);
        }
        system(qq!$perl -p -i.bak -e "${edit_code}" Makefile.PL!);
    };

    return $self->SUPER::make(@_);
}

1;

package main;

use CPAN;

if ($^O eq 'MSWin32') {
    # On ActivePerl, pull prebuilt PPM packages first; the order is a manual
    # dependency ordering, so keep it as-is.
    foreach my $ppm_package (qw(
        Test-Simple Spiffy Test-Base YAML UNIVERSAL-require
        Module-Pluggable-Fast Class-Accessor Test-use-ok Sub-Uplevel
        Test-Exception UNIVERSAL-isa UNIVERSAL-can Test-MockObject
        Data-Visitor Test-Tester Test-NoWarnings Scalar-List-Utils
        Tree-Simple Tree-Simple-VisitorFactory URI Compress-Zlib
        HTML-Tagset HTML-Parser libwww-perl Class-Data-Inheritable
        File-Modified Module-Install HTTP-Request-AsCGI HTTP-Body
        Text-SimpleTable MIME-Types AppConfig Template-Toolkit Path-Class
        File-Copy-Recursive Class-Inspector Catalyst
    )) {
        system("ppm install $ppm_package");
    }
    install('Test::MockObject');
}

install('Catalyst');
yuweijun/learning-programming
language-perl/cat_install.pl
Perl
mit
4,526
#!/usr/bin/perl

# Filter for routino tagging rules: copies stdin to stdout, injecting the
# riding-profile <output> rules just before every closing </way> and
# </relation> tag.

# Block emitted before each </way>.
my $way_rules = <<'XML';
 <!-- Special case for riding -->

 <if>
 <output k="foot" v="no"/>
 <output k="wheelchair" v="no"/>
 <output k="moped" v="no"/>
 <output k="motorbike" v="no"/>
 <output k="motorcar" v="no"/>
 <output k="goods" v="no"/>
 <output k="hgv" v="no"/>
 <output k="psv" v="no"/>

 <output k="bridge" v="no"/>
 <output k="tunnel" v="no"/>

 <output k="footroute" v="no"/>
 </if>

XML

# Block emitted before each </relation>.
my $relation_rules = <<'XML';
 <!-- Special case for riding -->

 <if>
 <output k="footroute" v="no"/>
 </if>

XML

while (my $line = <STDIN>) {
    print $way_rules      if $line =~ m%</way>%;
    print $relation_rules if $line =~ m%</relation>%;
    print $line;
}
MinnPost/minnpost-nice-ride
data-processing/routino-2.2/xml/scripts/ride.pl
Perl
mit
1,139
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Data object for a Google Ads structured snippet feed item
# (header plus a list of snippet values).
package Google::Ads::GoogleAds::V8::Common::StructuredSnippetFeedItem;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructor. Copies only the two known fields from the argument hash ref.
sub new {
  my ($class, $args) = @_;

  my $self = {
    header => $args->{header},
    values => $args->{values},
  };

  # Drop the fields that were never assigned so the serialized JSON payload
  # stays concise.
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
googleads/google-ads-perl
lib/Google/Ads/GoogleAds/V8/Common/StructuredSnippetFeedItem.pm
Perl
apache-2.0
1,061
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Plugin mode: global performance statistics (bandwidth, IOPS, latency) of a
# Pure Storage array, fetched over SNMP, built on the counter template
# framework (thresholds and perfdata are handled by the base class).
package storage::purestorage::snmp::mode::stats;

use base qw(centreon::plugins::templates::counter);

use strict;
use warnings;

# Declare the six counters of this mode. All live in a single scalar
# ("type => 0") 'global' group; each entry maps one SNMP value to an output
# template and a perfdata definition.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'global', type => 0, message_separator => ' - ' },
    ];
    $self->{maps_counters}->{global} = [
        { label => 'read-bandwidth', set => {
                key_values => [ { name => 'pureArrayReadBandwidth' }, ],
                output_change_bytes => 2,
                output_template => 'Read Bandwith : %s %s/s',
                perfdatas => [
                    { label => 'read_bandwidth', value => 'pureArrayReadBandwidth_absolute', template => '%.2f',
                      min => 0, unit => 'b/s' },
                ],
            }
        },
        { label => 'write-bandwidth', set => {
                key_values => [ { name => 'pureArrayWriteBandwidth' }, ],
                output_change_bytes => 2,
                output_template => 'Write Bandwith : %s %s/s',
                perfdatas => [
                    { label => 'write_bandwidth', value => 'pureArrayWriteBandwidth_absolute', template => '%.2f',
                      min => 0, unit => 'b/s' },
                ],
            }
        },
        { label => 'read-iops', set => {
                key_values => [ { name => 'pureArrayReadIOPS' } ],
                output_template => 'Read IOPs : %s',
                perfdatas => [
                    { label => 'read_iops', value => 'pureArrayReadIOPS_absolute', template => '%s',
                      unit => 'iops', min => 0 },
                ],
            }
        },
        { label => 'write-iops', set => {
                key_values => [ { name => 'pureArrayWriteIOPS' } ],
                output_template => 'Write IOPs : %s',
                perfdatas => [
                    { label => 'write_iops', value => 'pureArrayWriteIOPS_absolute', template => '%s',
                      unit => 'iops', min => 0 },
                ],
            }
        },
        { label => 'read-latency', set => {
                key_values => [ { name => 'pureArrayReadLatency' } ],
                output_template => 'Read Latency : %s us/op',
                perfdatas => [
                    { label => 'read_latency', value => 'pureArrayReadLatency_absolute', template => '%s',
                      unit => 'us/op', min => 0 },
                ],
            }
        },
        { label => 'write-latency', set => {
                key_values => [ { name => 'pureArrayWriteLatency' } ],
                output_template => 'Write Latency : %s us/op',
                perfdatas => [
                    { label => 'write_latency', value => 'pureArrayWriteLatency_absolute', template => '%s',
                      unit => 'us/op', min => 0 },
                ],
            }
        },
    ];
}

# Constructor: no mode-specific options beyond what the template provides.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => { });

    return $self;
}

# OID map for the purePerformance subtree (.1.3.6.1.4.1.40482.4); a single
# instance '0' holds all six scalar values.
my $mapping = {
    pureArrayReadBandwidth  => { oid => '.1.3.6.1.4.1.40482.4.1' },
    pureArrayWriteBandwidth => { oid => '.1.3.6.1.4.1.40482.4.2' },
    pureArrayReadIOPS       => { oid => '.1.3.6.1.4.1.40482.4.3' },
    pureArrayWriteIOPS      => { oid => '.1.3.6.1.4.1.40482.4.4' },
    pureArrayReadLatency    => { oid => '.1.3.6.1.4.1.40482.4.5' },
    pureArrayWriteLatency   => { oid => '.1.3.6.1.4.1.40482.4.6' },
};
my $oid_purePerformance = '.1.3.6.1.4.1.40482.4';

# Walk the performance subtree and populate $self->{global} for the counter
# framework.
sub manage_selection {
    my ($self, %options) = @_;

    my $snmp_result = $options{snmp}->get_table(oid => $oid_purePerformance, nothing_quit => 1);
    my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => '0');

    # Multiply by 8 so the bandwidth values match the 'b/s' (bits per second)
    # perfdata unit declared above. NOTE(review): assumes the agent reports
    # bytes/s -- confirm against the Pure Storage MIB.
    $result->{pureArrayReadBandwidth} *= 8;
    $result->{pureArrayWriteBandwidth} *= 8;

    $self->{global} = { %$result };
}

1;

__END__

=head1 MODE

Check statistics performance.

=over 8

=item B<--filter-counters>

Only display some counters (regexp can be used).
Example: --filter-counters='bandwidth'

=item B<--warning-*>

Threshold warning.
Can be: 'read-bandwidth', 'write-bandwidth', 'read-iops', 'write-iops',
'read-latency', 'write-latency'.

=item B<--critical-*>

Threshold critical.
Can be: 'read-bandwidth', 'write-bandwidth', 'read-iops', 'write-iops',
'read-latency', 'write-latency'.

=back

=cut
Sims24/centreon-plugins
storage/purestorage/snmp/mode/stats.pm
Perl
apache-2.0
5,339
#!/usr/bin/perl

=head1 DESCRIPTION

this script is used to find chip-seq correlation within a peak range
input files are:
chip-seq_BedGraph_file

=cut

use List::Util qw(first max maxstr min minstr reduce shuffle sum);

# NOTE(review): legacy script -- no 'use strict'/'use warnings'; almost all
# variables (including @x/@y used by pearson_correlation) are globals shared
# between the main loop and the sub below.

# NOTE(review): string 'ne' used for a numeric comparison; works for these
# small integers but '!=' expresses the intent.
if ($#ARGV ne 3) {
    print "command line: perl peak_correlation.pl peak.xls even.bedGraph odd.bedGrap merge.bedGraph\n";
    exit;
}

$peakfile = $ARGV[0];
$chip_file1 = $ARGV[1];
$chip_file2 = $ARGV[2];
$chip_file3 = $ARGV[3];
$outfile = "$peakfile.corr.xls";
$window_width_thrd = 2000;   # flanking width / max half-window around a summit (bp)
$res = 50;                   # window (bin) resolution in bp
#$num_windows = $half_width/$res;
$flag_log = 1;               # log2-transform bin values before correlating

# dividing wiggle files to different chromosome
# First pass over the peak file: remember the header line and collect the set
# of chromosomes seen in column 1.
# NOTE(review): 2-arg open with bareword handles throughout, and no error
# checks -- a missing input file silently yields empty data.
open (in, "<$peakfile");
while ($line=<in>) {
    if ($line=~/^\#/) {
    }
    elsif ($line eq "\n") {
    }
    elsif ($line=~/start/) {
        chomp $line;
        $header = $line;
    }
    else {
        chomp $line;
        @data = split /[\t+\s+]/, $line;
        $chr{$data[0]} = 1;
    }
}
close in;
@chrs = keys %chr;
print "@chrs\n";

open (out, ">$outfile");
print out "$header\tcorrelation\tfold_differences\taver_coverage\n";

#@chrs = ("chr2L");
foreach $chr (@chrs) {
    # Reset per-chromosome state. NOTE(review): the 'undef' lines clear the
    # package globals, then the 'my' declarations create new lexicals that
    # shadow them for the rest of this block.
    undef %flag;
    undef @exp1;
    undef @exp2;
    undef @exp3;
    my @exp1 = ();
    my @exp2 = ();
    my @exp3 = ();
    print "$chr\n";

    # Mark every base within window_width_thrd of a peak on this chromosome,
    # so the bedGraph passes below only keep coverage inside peak regions.
    open (in, "<$peakfile");
    while ($line=<in>) {
        if (!(($line=~/^\#/) or ($line=~/start/))) {
            chomp $line;
            @data = split /[\t+\s+]/, $line;
            if ($data[0] eq $chr) {
                $start = $data[1] - $window_width_thrd;
                $end = $data[2] + $window_width_thrd;
                for ($i=$start; $i<=$end; $i++) {
                    $flag{$i} = 1;
                }
            }
        }
    }
    close in;

    # Load per-base coverage from the first bedGraph into @exp1 (flagged
    # positions only); negative values are clamped to 0.
    open (in, "<$chip_file1");
    $count = 0;
    while ($line=<in>) {
        chomp $line;
        @data = split /[\s+\t+]/, $line;
        if ($data[0] eq $chr) {
            $count ++;
            if ($count%100000 eq 0) {
                print "$chr\t$count\t$line\n";
            }
            for ($j=$data[1]; $j<$data[2]; $j++) {
                if ($flag{$j}) {
                    $exp1[$j] = $data[3];
                    if ($exp1[$j] < 0) {
                        $exp1[$j] = 0;
                    }
                }
            }
        }
    }
    close in;

    # Same for the second bedGraph into @exp2.
    open (in, "<$chip_file2");
    $count = 0;
    while ($line=<in>) {
        chomp $line;
        @data = split /[\s+\t+]/, $line;
        if ($data[0] eq $chr) {
            $count ++;
            if ($count%100000 eq 0) {
                print "$chr\t$count\t$line\n";
            }
            for ($j=$data[1]; $j<$data[2]; $j++) {
                if ($flag{$j}) {
                    $exp2[$j] = $data[3];
                    if ($exp2[$j] < 0) {
                        $exp2[$j] = 0;
                    }
                }
            }
        }
    }
    close in;

    # Same for the merged bedGraph into @exp3 (used only for coverage).
    open (in, "<$chip_file3");
    $count = 0;
    while ($line=<in>) {
        chomp $line;
        @data = split /[\s+\t+]/, $line;
        if ($data[0] eq $chr) {
            $count ++;
            if ($count%100000 eq 0) {
                print "$chr\t$count\t$line\n";
            }
            for ($j=$data[1]; $j<$data[2]; $j++) {
                if ($flag{$j}) {
                    $exp3[$j] = $data[3];
                    if ($exp3[$j] < 0) {
                        $exp3[$j] = 0;
                    }
                }
            }
        }
    }
    close in;

    # Second pass over the peak file: for each peak on this chromosome, bin
    # the two tracks into $res-bp windows, correlate them, and write one
    # output row per peak.
    open (in, "<$peakfile");
    while ($line=<in>) {
        if (!(($line=~/^\#/) or ($line=~/start/))) {
            @x = ();
            @y = ();
            chomp $line;
            @data = split /[\t+\s+]/, $line;
            if ($data[0] eq $chr) {
                $row_count ++;
                if ($row_count%100 eq 0) {
                    print "$row_count\n";
                }
                $peak_start = $data[1];
                $peak_end = $data[2];
                $peak_length = $data[3];
                $peak_summit = $data[1] + $data[4];
                # Long peaks: fixed window centred on the summit; short
                # peaks: use the peak itself.
                if ($peak_length > $window_width_thrd) {
                    $start = $peak_summit - $window_width_thrd;
                    $num_windows = int(2*$window_width_thrd/$res);
                    $window_size = 2*$window_width_thrd + 1;
                }
                else {
                    $start = $peak_start;
                    $num_windows = int($peak_length/$res);
                    $window_size = $peak_length + 1;
                }
                $s_x = 0;
                $s_y = 0;
                $s_z = 0;
                for ($i=1; $i<=$num_windows; $i++) {
                    $w_start = ($i-1)*$res + 1 + $start;
                    $w_end = $i*$res + $start;
                    $value1 = 0;
                    $value2 = 0;
                    $value3 = 0;
                    @sub_array1 = @exp1[$w_start..$w_end];
                    @sub_array2 = @exp2[$w_start..$w_end];
                    @sub_array3 = @exp3[$w_start..$w_end];
                    $value1 = sum(@sub_array1);
                    $value2 = sum(@sub_array2);
                    $value3 = sum(@sub_array3);
                    # Floor at 1 so the log2 below is defined and >= 0.
                    if ($value1<=1) { $value1 = 1; }
                    if ($value2<=1) { $value2 = 1; }
                    if ($value3<=1) { $value3 = 1; }
                    $s_x = $s_x + $value1;
                    $s_y = $s_y + $value2;
                    $s_z = $s_z + $value3;
                    if ($flag_log) {
                        $value1 = log($value1)/log(2);
                        $value2 = log($value2)/log(2);
                    }
                    if ($value1 or $value2) {
                        push (@x, $value1);
                        push (@y, $value2);
                    }
                }
                $aver_coverage = $s_z/$window_size;
#                $s_x = sum(@x);
#                $s_y = sum(@y);
                # Fold difference between the two tracks (always >= 1).
                if ($s_x and $s_y) {
                    if ($s_x>=$s_y) {
                        $s_ratio = $s_x/$s_y;
                    }
                    else {
                        $s_ratio = $s_y/$s_x;
                    }
                }
                else {
                    $s_ratio = "inf";
                }
#                print "$x\n$y\n";
                # Sets the global $corr from the globals @x/@y.
                &pearson_correlation;
                print out "$line\t$corr\t$s_ratio\t$aver_coverage\n";
            }
        }
    }
}
close out;

# Pearson correlation of the global arrays @x and @y; result left in the
# global $corr (0 when a variance term is non-positive).
sub pearson_correlation {
    # NOTE(review): 'last' inside a sub does not return -- it exits the
    # caller's enclosing loop (deprecated behaviour); 'return' is intended.
    # Also 'ne' is a string comparison of the two array indices.
    if ($#x ne $#y) {
        print "array x and y are not equal size\n";
        last;
    }
    my $n = $#x + 1;
    my $sum_x = 0;
    my $sum_y = 0;
    my $sum_xy = 0;
    my $sum_yy = 0;
    my $sum_xx = 0;
    for (my $i=0; $i<=$#x; $i++) {
        $sum_x = $sum_x + $x[$i];
        $sum_y = $sum_y + $y[$i];
        $sum_xy = $sum_xy + $x[$i]*$y[$i];
        $sum_xx = $sum_xx + $x[$i]*$x[$i];
        $sum_yy = $sum_yy + $y[$i]*$y[$i];
    }
    if ((($n*$sum_xx-$sum_x*$sum_x)>0) and (($n*$sum_yy-$sum_y*$sum_y)>0)) {
        $corr = ($n*$sum_xy-$sum_x*$sum_y)/sqrt($n*$sum_xx-$sum_x*$sum_x)/sqrt($n*$sum_yy-$sum_y*$sum_y);
    }
    else {
        $corr = 0;
    }
    return;
}
bdo311/chirpseq-analysis
peak_correlation.pl
Perl
apache-2.0
5,435
=head1 LICENSE

Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2018] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=cut

=head1 CONTACT

Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.

Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.

=cut

# eHive process: seeds one dataflow job (branch 2) per chromosome that has
# remapping entries in the population's vcf_variation table.
package InitRemapping;

use strict;
use warnings;

use FileHandle;
use Bio::EnsEMBL::Registry;

use base ('Bio::EnsEMBL::Hive::Process');

# Read the chromosome list, keep only chromosomes with mappings, and stash
# them in the 'input' param for write_output.
sub run {
    my $self = shift;
    my $registry_file = $self->param('registry_file');
    my $species = $self->param('species');
    my $chroms_list = $self->param('chroms_list');

    my $registry = 'Bio::EnsEMBL::Registry';
    $registry->load_all($registry_file);
    # Raw DBI handle of the variation database for this species.
    my $dbh = $registry->get_DBAdaptor($species, 'variation')->dbc->db_handle;

    my @chroms = @{get_chroms($chroms_list)};

    my @input = ();
    foreach my $chrom (@chroms) {
        if ($self->has_mappings($dbh, $chrom)) {
            $self->warning("Has mappings $chrom");
            push @input, {
                chrom => $chrom,
            };
        }
    }
    $self->param('input', \@input);
}

# Flow one job per selected chromosome down branch 2.
sub write_output {
    my $self = shift;
    $self->dataflow_output_id($self->param('input'), 2);
}

# Plain function (not a method): slurp chromosome names, one per line, from
# the given file and return them as an array ref.
sub get_chroms {
    my $file = shift;
    my @chroms = ();
    my $fh = FileHandle->new($file, 'r');
    while (<$fh>) {
        chomp;
        push @chroms, $_;
    }
    $fh->close;
    return \@chroms;
}

# True (returns a variation_id) if the population's vcf_variation table has
# at least one mapped row for the given old seq_region name.
sub has_mappings {
    my $self = shift;
    my $dbh = shift;
    my $seq_region_name = shift;
    my $population = lc $self->param('population');
    # NOTE(review): the table name is interpolated from the 'population'
    # param (identifiers cannot be bound placeholders); assumes the param is
    # pipeline-controlled, not user input -- confirm.
    my $sth = $dbh->prepare(qq{
        SELECT variation_id FROM vcf_variation_$population WHERE seq_region_name_old = ? AND variation_id IS NOT NULL LIMIT 1;
    }, {mysql_use_result => 1});
    $sth->execute($seq_region_name);
    my @row = $sth->fetchrow_array;
    $sth->finish();
    return $row[0];
}

1;
at7/work
production/goat/RemappingVCF/InitRemapping.pm
Perl
apache-2.0
2,439
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Custom backend for the hddtemp plugin: lists disk drives (lsblk) and reads
# their temperatures (hddtemp), either locally or over SSH when --hostname
# is given.
package apps::hddtemp::custom::cli;

use strict;
use warnings;
use centreon::plugins::ssh;
use centreon::plugins::misc;

# Constructor: registers the CLI options of this backend and keeps the
# output/ssh helpers. Requires 'output' and 'options' in %options.
sub new {
    my ($class, %options) = @_;
    my $self = {};
    bless $self, $class;

    if (!defined($options{output})) {
        print "Class Custom: Need to specify 'output' argument.\n";
        exit 3;
    }
    if (!defined($options{options})) {
        $options{output}->add_option_msg(short_msg => "Class Custom: Need to specify 'options' argument.");
        $options{output}->option_exit();
    }

    if (!defined($options{noptions})) {
        $options{options}->add_options(arguments => {
            'hostname:s'                => { name => 'hostname' },
            'timeout:s'                 => { name => 'timeout', default => 45 },
            'command-drives:s'          => { name => 'command_drives' },
            'command-path-drives:s'     => { name => 'command_path_drives' },
            'command-options-drives:s'  => { name => 'command_options_drives' },
            'command-hddtemp:s'         => { name => 'command_hddtemp' },
            'command-path-hddtemp:s'    => { name => 'command_path_hddtemp' },
            'command-options-hddtemp:s' => { name => 'command_options_hddtemp' },
            'sudo:s'                    => { name => 'sudo' }
        });
    }
    $options{options}->add_help(package => __PACKAGE__, sections => 'CLI OPTIONS', once => 1);

    $self->{output} = $options{output};
    $self->{ssh} = centreon::plugins::ssh->new(%options);

    return $self;
}

# Framework hook: store the parsed option results.
sub set_options {
    my ($self, %options) = @_;

    $self->{option_results} = $options{option_results};
}

# Framework hook: nothing to default here.
sub set_defaults {}

# Validate options; SSH options are only checked when a hostname is set
# (i.e. remote mode).
sub check_options {
    my ($self, %options) = @_;

    if (defined($self->{option_results}->{hostname}) && $self->{option_results}->{hostname} ne '') {
        $self->{ssh}->check_options(option_results => $self->{option_results});
    }

    return 0;
}

# Run 'lsblk -I 8 -d -o NAME -p -n' (or the user-supplied override) locally
# or via SSH, and return a hash ref keyed by device path (one key per output
# line), each value an empty hash to be filled by get_drives_information().
sub list_drives {
    my ($self, %options) = @_;

    my $stdout;
    if (defined($self->{option_results}->{hostname}) && $self->{option_results}->{hostname} ne '') {
        ($stdout) = $self->{ssh}->execute(
            hostname => $self->{option_results}->{hostname},
            command => defined($self->{option_results}->{command_drives}) && $self->{option_results}->{command_drives} ne '' ? $self->{option_results}->{command_drives} : 'lsblk',
            command_path => $self->{option_results}->{command_path_drives},
            command_options => defined($self->{option_results}->{command_options_drives}) && $self->{option_results}->{command_options_drives} ne '' ? $self->{option_results}->{command_options_drives} : '-I 8 -d -o NAME -p -n',
            timeout => $self->{option_results}->{timeout}
        );
    } else {
        ($stdout) = centreon::plugins::misc::execute(
            output => $self->{output},
            options => { timeout => $self->{option_results}->{timeout} },
            command => defined($self->{option_results}->{command_drives}) && $self->{option_results}->{command_drives} ne '' ? $self->{option_results}->{command_drives} : 'lsblk',
            command_path => $self->{option_results}->{command_path_drives},
            command_options => defined($self->{option_results}->{command_options_drives}) && $self->{option_results}->{command_options_drives} ne '' ? $self->{option_results}->{command_options_drives} : '-I 8 -d -o NAME -p -n'
        );
    }

    $self->{output}->output_add(long_msg => "command response: $stdout", debug => 1);
    my $drives = {};
    $drives->{$_} = {} foreach (split /\n/, $stdout);

    return $drives;
}

# Run hddtemp over the listed drives and classify each drive's line of
# output into a status (ok/unknown/driveSleep/noSensor/notApplicable/error),
# storing the temperature (Celsius) when available.
sub get_drives_information {
    my ($self, %options) = @_;

    my $drives = $self->list_drives();
    my $cmd_options = '-u C ' . join(' ', keys %$drives);

    my ($stdout, $exit_code);
    if (defined($self->{option_results}->{hostname}) && $self->{option_results}->{hostname} ne '') {
        ($stdout, $exit_code) = $self->{ssh}->execute(
            hostname => $self->{option_results}->{hostname},
            sudo => $self->{option_results}->{sudo},
            command => defined($self->{option_results}->{command_hddtemp}) && $self->{option_results}->{command_hddtemp} ne '' ? $self->{option_results}->{command_hddtemp} : 'hddtemp',
            command_path => $self->{option_results}->{command_path_hddtemp},
            command_options => defined($self->{option_results}->{command_options_hddtemp}) && $self->{option_results}->{command_options_hddtemp} ne '' ? $self->{option_results}->{command_options_hddtemp} : $cmd_options,
            timeout => $self->{option_results}->{timeout},
            no_quit => 1
        );
    } else {
        # NOTE(review): unlike the SSH branch, the local default redirects
        # stderr to /dev/null -- permission errors then fall through to the
        # 'error' status below instead of appearing in $stdout.
        ($stdout, $exit_code) = centreon::plugins::misc::execute(
            output => $self->{output},
            options => { timeout => $self->{option_results}->{timeout} },
            sudo => $self->{option_results}->{sudo},
            command => defined($self->{option_results}->{command_hddtemp}) && $self->{option_results}->{command_hddtemp} ne '' ? $self->{option_results}->{command_hddtemp} : 'hddtemp',
            command_path => $self->{option_results}->{command_path_hddtemp},
            command_options => defined($self->{option_results}->{command_options_hddtemp}) && $self->{option_results}->{command_options_hddtemp} ne '' ? $self->{option_results}->{command_options_hddtemp} : $cmd_options . ' 2> /dev/null',
            no_quit => 1,
        );
    }

    # exit values can be: 0/1. Need root permissions.
    if ($exit_code != 0 && $exit_code != 1) {
        $self->{output}->add_option_msg(short_msg => sprintf('command execution error [exit code: %s]', $exit_code));
        $self->{output}->option_exit();
    }

    # OK:
    #    /dev/sda: SanDisk ...: 32 C
    # ERROR:
    #    message on stderr. So if we don't catch stderr and we have nothing, surely error. for example:
    #       /dev/sda: open: Permission denied
    # UNKNOWN:
    #    /dev/sda: SanDisk ...: no sensor
    # SLEEP:
    #    /dev/sda: SanDisk ...: drive is sleeping
    # NOSENSOR:
    #    /dev/sda: SanDisk ...: drive supported, but it doesn't have a temperature sensor
    # NOT_APPLICABLE:
    #    /dev/sda: SanDisk ...: misc message
    foreach my $name (keys %$drives) {
        if ($stdout =~ /^$name:.*?:\s+(\d+).*?C/m) {
            $drives->{$name}->{status} = 'ok';
            $drives->{$name}->{temperature_unit} = 'C';
            $drives->{$name}->{temperature} = $1;
        } elsif ($stdout =~ /^$name:.*?:\s+(.*)$/m) {
            my $message = $1;
            $drives->{$name}->{status} = 'notApplicable';
            $drives->{$name}->{status} = 'unknown' if ($message =~ /no sensor/i);
            $drives->{$name}->{status} = 'driveSleep' if ($message =~ /drive is sleeping/i);
            $drives->{$name}->{status} = 'noSensor' if ($message =~ /drive supported, but it doesn't have a temperature sensor/i);
        } else {
            # No output line at all for this drive (e.g. stderr-only failure).
            $drives->{$name}->{status} = 'error';
        }
    }

    return $drives;
}

1;

__END__

=head1 NAME

ssh

=head1 SYNOPSIS

my ssh

=head1 CLI OPTIONS

=over 8

=item B<--hostname>

Hostname to query (ssh mode).

=item B<--timeout>

Timeout in seconds for the command (Default: 45).

=item You can override command for drives listing. By default, we use 'lsblk -I 8 -d -o NAME -p -n':

=over 16

=item B<--command-drives>

Command to get information. Used it you have output in a file.

=item B<--command-path-drives>

Command path.

=item B<--command-options-drives>

Command options.

=back

=item You can override command hddtemp used. By default, we use 'hddtemp -u C /dev/sda /dev/sdb ...' built with the result of drives command:

=over 16

=item B<--command-hddtemp>

Command to get information. Used it you have output in a file.

=item B<--command-path-hddtemp>

Command path.

=item B<--command-options-hddtemp>

Command options.

=item B<--sudo>

Sudo hddtemp command.

=back

=back

=head1 DESCRIPTION

B<custom>.

=cut
Tpo76/centreon-plugins
apps/hddtemp/custom/cli.pm
Perl
apache-2.0
8,740
#!/usr/bin/env perl
# Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

=head1 CONTACT

Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.

Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.

=cut

# Driver script: for every chromosome-level seq_region of the species, submit
# one LSF job (bsub) running repeats_filter.pl on that seq_region.

# NOTE(review): strictures are deliberately commented out in this legacy
# import script; most variables below are package globals.
# use strict;
#use DBH;

use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Variation::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use Data::Dumper;
use Bio::SeqIO;
use FindBin qw( $Bin );
use Getopt::Long;
use ImportUtils qw(dumpSQL debug create_and_load load);

# try to use named options here and write a sub usage() function
# eg -host -user -pass -port -snp_dbname -core_dbname etc
# optional chromosome name or genomic sequence file
# optional more than one genomic sequence file
# optional a directory or sequence files (for unknown placing)

# NOTE(review): $seq_region_id declared here is never used -- the while loop
# below declares its own lexical with the same name.
our ($species, $seq_region_id, $TMP_DIR, $TMP_FILE);

GetOptions('species=s' => \$species,
	   'tmpdir=s'  => \$ImportUtils::TMP_DIR,
	   'tmpfile=s' => \$ImportUtils::TMP_FILE,
	  );

# NOTE(review): 'my $x ||= EXPR' assigns EXPR since the fresh lexical is
# undef; there is no way to override the registry file from the command line.
my $registry_file ||= $Bin . "/ensembl.registry";

$TMP_DIR  = $ImportUtils::TMP_DIR;
$TMP_FILE = $ImportUtils::TMP_FILE;
$species ||= 'mouse';

Bio::EnsEMBL::Registry->load_all( $registry_file );

my $cdb = Bio::EnsEMBL::Registry->get_DBAdaptor($species,'core');
my $vdb = Bio::EnsEMBL::Registry->get_DBAdaptor($species,'variation');
my $dbCore = $cdb->dbc->db_handle;
my $dbVar = $vdb->dbc->db_handle;

my $slice_adaptor = $cdb->get_SliceAdaptor();

# Alternative (commented) query selected all toplevel seq_regions; the active
# one is pinned to chromosomes of assembly NCBIM35 (mouse).
#my $sthc = $dbCore->prepare(qq{select sr.seq_region_id from seq_region_attrib sra, attrib_type at, seq_region sr where sra.attrib_type_id=at.attrib_type_id and at.code="toplevel" and sr.seq_region_id = sra.seq_region_id });
my $sthc = $dbCore->prepare(qq{select sr.seq_region_id from seq_region sr, coord_system cs where cs.coord_system_id=sr.coord_system_id and cs.name = "chromosome" and cs.version = "NCBIM35" });
$sthc->execute();

# One bsub submission per chromosome; long queue, 2 GB memory reservation,
# stderr/stdout captured under $TMP_DIR.
while (my ($seq_region_id) = $sthc->fetchrow_array()) {
  my $call = "bsub -q long -R'select[mem>2000] rusage[mem=2000]' -e $TMP_DIR/repeat_filter_err -o $TMP_DIR/repeat_filter_out $Bin/repeats_filter.pl -species $species -tmpdir $TMP_DIR -tmpfile $TMP_FILE -seq_region_id $seq_region_id";
  system($call);
}
dbolser-ebi/ensembl-variation
scripts/import/bsub_repeat_filter.pl
Perl
apache-2.0
3,092
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Mode that lists AWS CloudWatch metrics (optionally filtered by namespace
# and metric name), with service-discovery support.
package cloud::aws::cloudwatch::mode::listmetrics;

use base qw(centreon::plugins::mode);

use strict;
use warnings;

# Constructor: register the two optional filter options of this mode.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'namespace:s' => { name => 'namespace' },
        'metric:s'    => { name => 'metric' },
    });

    return $self;
}

# Standard option validation via the base class.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);
}

# Fetch the metric list from the custom backend, applying the filters.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{metrics} = $options{custom}->cloudwatch_list_metrics(
        namespace => $self->{option_results}->{namespace},
        metric    => $self->{option_results}->{metric}
    );
}

# Render [{Name => ..., Value => ...}, ...] as
# "Name=n1,Value=v1,Name=n2,Value=v2".
sub get_dimensions_str {
    my ($self, %options) = @_;

    return join(
        ',',
        map { "Name=$_->{Name},Value=$_->{Value}" } @{$options{dimensions}}
    );
}

# Plain (non-discovery) output: one long_msg line per metric.
sub run {
    my ($self, %options) = @_;

    $self->manage_selection(%options);
    foreach my $metric (@{$self->{metrics}}) {
        $self->{output}->output_add(
            long_msg => sprintf(
                "[Namespace = %s][Dimensions = %s][Metric = %s]",
                $metric->{Namespace},
                $self->get_dimensions_str(dimensions => $metric->{Dimensions}),
                $metric->{MetricName}
            )
        );
    }

    $self->{output}->output_add(
        severity  => 'OK',
        short_msg => 'List metrics:'
    );
    $self->{output}->display(nolabel => 1, force_ignore_perfdata => 1, force_long_output => 1);
    $self->{output}->exit();
}

# Declare the discovery output fields.
sub disco_format {
    my ($self, %options) = @_;

    $self->{output}->add_disco_format(elements => ['namespace', 'metric', 'dimensions']);
}

# Emit one discovery entry per metric.
sub disco_show {
    my ($self, %options) = @_;

    $self->manage_selection(%options);
    foreach my $metric (@{$self->{metrics}}) {
        $self->{output}->add_disco_entry(
            namespace  => $metric->{Namespace},
            metric     => $metric->{MetricName},
            dimensions => $self->get_dimensions_str(dimensions => $metric->{Dimensions}),
        );
    }
}

1;

__END__

=head1 MODE

List cloudwatch metrics.

=over 8

=item B<--namespace>

Set cloudwatch namespace.

=item B<--metric>

Set cloudwatch metric.

=back

=cut
Tpo76/centreon-plugins
cloud/aws/cloudwatch/mode/listmetrics.pm
Perl
apache-2.0
3,028
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# 
#      http://www.apache.org/licenses/LICENSE-2.0
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

=pod

=head1 NAME

Bio::EnsEMBL::Analysis::Tools::Blastz - Ensembl specific blastz output parser

=head1 SYNOPSIS

open F,"blastz_output_file";
my $fh = \*F;
my $BlastzParser = new Bio::EnsEMBL::Analysis::Tools::Blastz(-fh => $fh);

or

my $blastz_output_file = "blastz_output_file";
my $BlastzParser = new Bio::EnsEMBL::Analysis::Tools::Blastz(-file => $blastz_output_file);

while (defined (my $alignment = $BlastzParser->nextAlignment)) {
  print $alignment->percent_id," ",$alignment->score," ",$alignment->cigar_string,"\n";
  print $alignment->start," ",$alignment->end," ",$alignment->strand,"\n";
  print $alignment->hstart," ",$alignment->hend," ",$alignment->hstrand,"\n";
}
close F;

The constructor only needs a filehandle opened on a blastz output file.
The nextAlignment method returns a Bio::EnsEMBL::DnaDnaAlignFeature object
corresponding to the next HSP-like alignment.

=head1 CONTACT

Ensembl development mailing list <http://lists.ensembl.org/mailman/listinfo/dev>

Abel Ureta-Vidal <abel@ebi.ac.uk>

=head1 APPENDIX

The rest of the documentation deals with each of the object methods.
Internal methods are usually preceded by a _ =cut package Bio::EnsEMBL::Analysis::Tools::Blastz; use warnings ; use strict; use Bio::EnsEMBL::DnaDnaAlignFeature; use Bio::EnsEMBL::Utils::Argument qw(rearrange); our @ISA = qw(Bio::EnsEMBL::Root); sub new { my ($class,@args) = @_; my $self = bless {}, $class; $self->{'_fh'} = undef; # filehandle on results file $self->{'_file'} = undef; # path for a results file $self->{'_eof'} = 0; # indicate if end of file and fh closed $self->{'_parsing_initialized'} = 0; $self->{'_command_line'} = ""; $self->{'_matrix'} = ""; $self->{'_options'} = ""; $self->{'_alignment_reported_before'} = {}; my ($fh,$file) = rearrange([qw(FH FILE)], @args); if ((defined $fh && defined $file) || !(defined $fh || defined $file)){ $self->throw("Must pass in either fh or file argument"); } if (defined $fh) { $self->{'_fh'} = $fh; } else { $self->file($file); open F, $self->file; $self->{'_fh'} = \*F; } return $self->_initialize ? $self : undef; } sub _initialize { my ($self) = @_; return undef if ($self->eof); my $fh = $self->fh; my $initialized = 0; while (defined (my $line = <$fh>)) { $initialized = 1; next if ($line =~ /^\#:lav$/); last if ($line =~ /^\}$/); # d stanza if ($line =~ /^d\s\{$/) { my $command_line = <$fh>; chomp $line; $command_line =~ s/^\s+\"//; $self->command_line($command_line); next; } if ($line =~ /^.*,.*$/) { $line =~ s/\"//g; $self->options($self->options.$line); } else { $self->matrix($self->matrix.$line); } } return $self->_parsing_initialized( $initialized ); } =head2 nextAlignmemt Args : none Example : $alignment = $Blastz->nextAligment Descritpion : return the next HSP-like alignment Returntype : array of Bio::EnsEMBL::DnaDnaAlignFeature Exceptions : none Caller : general =cut sub nextAlignment { my ($self) = @_; return undef if ($self->eof); my $fh = $self->fh; my $l_line_fault = 0; while (defined (my $line = <$fh>)) { next if ($line =~ /^\#:lav$/); if ($line =~ /^\#:eof$/) { close $self->fh; $self->eof(1); return 
undef; } # s stanza : get there sequence length and strand if ($line =~ /^s\s+\{$/) { # on query $line = <$fh>; if ($line =~ /^\s*\"\S+\"\s+(\d+)\s+(\d+)\s+(\d+)\s+\d+$/) { my($start,$end,$strand) = ($1,$2,$3); $self->length($end-$start+1); $self->strand(1) if ($strand == 0); $self->strand(-1) if ($strand == 1); } # on database $line = <$fh>; if ($line =~ /^\s*\"\S+\"\s+(\d+)\s+(\d+)\s+(\d+)\s+\d+$/) { my($hstart,$hend,$hstrand) = ($1,$2,$3); $self->hlength($hend-$hstart+1); $self->hstrand(1) if ($hstrand == 0); $self->hstrand(-1) if ($hstrand == 1); } <$fh>; # skip } line next; } # h stanza : get there seqname and hseqname if ($line =~ /^h\s+\{$/) { # on query $line = <$fh>; if ($line =~ /^\s+\">(\S+)\s*.*\"$/) { my $seqname = $1; $self->seqname($seqname); } # on database $line = <$fh>; if ($line =~ /^\s+\">(\S+)\s*.*\"$/) { my $hseqname = $1; $self->hseqname($hseqname); } <$fh>; # skip } line next; } # a stanza : get there a alignment, with score, percent_id and positions if ($line =~ /^a\s+\{$/) { my ($score,$sum_match_bases,$sum_block_length) ; my @feature_pairs; while (defined ($line = <$fh>)) { last if ($line =~ /^\}$/); if ($line =~ /^\s+s\s+(\d+)$/) { $score = $1; next; } next if ($line =~ /^\s+b\s+\d+\s+\d+$/); next if ($line =~ /^\s+e\s+\d+\s+\d+$/); if ($line =~ /^\s+l\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)$/) { my ($start,$hstart,$end,$hend,$percid) = ($1,$2,$3,$4,$5); if ($start > $end || $hstart > $hend) { # this is a blastz bug that has been reported to the author. 
No bug fix yet, and # probably not for a long time # so in the meantime, a code hack tries to recover a well format alignment # see below $l_line_fault = 1; next; }; my $block_length = $end - $start + 1; $sum_match_bases += $percid*$block_length/100; $sum_block_length += $block_length; if ($self->strand == -1) { $start = $self->length - $end + 1; $end = $start + $block_length - 1; } if ($self->hstrand == -1) { $hstart = $self->hlength - $hend + 1; $hend = $hstart + $block_length - 1; } if (scalar @feature_pairs == 0 || $l_line_fault == 0) { $l_line_fault = 0; } if ($l_line_fault) { warn("Dealing with a faulty l line\n"); # code hack to fix faulty l lines where start>end or hstart>hend that are ignored. # We extend the previous gap-free piece until it hits the closest # query or target sequence in the next piece # The perc_id and score for this arranged featurepair will not be consistent # but then again, it is a bug in blastz not in the parser... my $f = pop @feature_pairs; my $diff; if ($self->strand == 1) { $diff = $start - $f->end - 1; } else { $diff = $f->start - $end - 1; } if ($self->hstrand == 1 && $diff > $hstart - $f->hend - 1) { $diff = $hstart - $f->hend - 1; } elsif ($diff > $f->hstart - $hend - 1) { $diff = $f->hstart - $hend - 1; } $f->end($f->end + $diff) if ($self->strand == 1); $f->start($f->start - $diff) if ($self->strand == -1); $f->hend($f->hend + $diff) if ($self->hstrand == 1); $f->hstart($f->hstart - $diff) if ($self->hstrand == -1); push @feature_pairs, $f; $l_line_fault = 0; } my $feature_pair = new Bio::EnsEMBL::FeaturePair; $feature_pair->seqname($self->seqname); $feature_pair->start($start); $feature_pair->end($end); $feature_pair->strand($self->strand); $feature_pair->hseqname($self->hseqname); $feature_pair->hstart($hstart); $feature_pair->hend($hend); $feature_pair->hstrand($self->hstrand); $feature_pair->score($score); push @feature_pairs,$feature_pair; } } # calculating the average of percentage identity over the whole HSP-like # 
not including indels,it probably should... my $average_pecent_id = int($sum_match_bases/$sum_block_length*100); foreach my $feature_pair (@feature_pairs) { $feature_pair->percent_id($average_pecent_id); } my $alignment = new Bio::EnsEMBL::DnaDnaAlignFeature(-features => \@feature_pairs); my $key = ""; map {$key .= $alignment->$_ . "_"} qw(seqname start end strand hseqname hstart hend hstrand score cigar_string); if (defined $self->{'_alignment_reported_before'}{$key}) { next; } $self->{'_alignment_reported_before'}{$key} = 1; return $alignment; } } } =head2 fh Arg [1] : filehandle $filehandle (optional) Example : $Blastz->fh($filehandle) Descritpion : get/set the filehandle value and return it Returntype : filehandle Exceptions : thrown if $filehandle is not the GLOB reference Caller : general =cut sub fh { my ($self,$value) = @_; if(defined $value) { if (ref($value) eq "GLOB") { $self->{'_fh'} = $value; } else { $self->throw("value for fh method should be a filehandle\n"); } } return $self->{'_fh'}; } =head2 file Arg [1] : string $filename_path (optional) Example : $Blastz->file($filename_path) Descritpion : get/set the filename_path value and return it Returntype : string Exceptions : thrown if $filename_path is not found Caller : general =cut sub file { my ($self,$value) = @_; if(defined $value) { if (-e $value) { $self->{'_file'} = $value; } else { $self->throw("file $value not found\n"); } } return $self->{'_file'}; } sub eof { my ($self,$value) = @_; if(defined $value) { $self->{'_eof'} = $value; } return $self->{'_eof'}; } =head2 command_line Arg [1] : string $commandline (optional) command line used to obtain the blastz output which is parsed Example : $Blastz->commandline($commandline) Descritpion : get/set the commandline value and return it Returntype : string Exceptions : none Caller : general =cut sub command_line { my ($self,$value) = @_; if (defined $value) { $self->{'_command_line'} = $value; } return $self->{'_command_line'}; } =head2 matrix Arg 
[1] : string $matrix (optional) matrix used to obtain the blastz output which is parsed Example : $Blastz->matrix($matrix) Descritpion : get/set the matrix value and return it Returntype : string Exceptions : none Caller : general =cut sub matrix { my ($self,$value) = @_; if (defined $value) { $self->{'_matrix'} = $value; } return $self->{'_matrix'}; } =head2 options Arg [1] : string $options (optional) options used to obtain the blastz output which is parsed Example : $Blastz->options($options) Descritpion : get/set the options value and return it Returntype : string Exceptions : none Caller : general =cut sub options { my ($self,$value) = @_; if (defined $value) { $self->{'_options'} = $value; } return $self->{'_options'}; } sub _parsing_initialized { my ($self,$value) = @_; if (defined $value) { $self->{'_parsing_initialized'} = $value; } return $self->{'_parsing_initialized'}; } =head2 seqname Arg [1] : string $seqname (optional) name of the query sequence Example : $Blastz->seqname($seqname) Descritpion : get/set the seqname value and return it Returntype : string Exceptions : none Caller : general =cut sub seqname { my ($self,$value) = @_; if (defined $value) { $self->{'_seqname'} = $value; } return $self->{'_seqname'}; } =head2 hseqname Arg [1] : string $hseqname (optional) name of the database sequence Example : $Blastz->hseqname($hseqname) Descritpion : get/set the hseqname value and return it Returntype : string Exceptions : none Caller : general =cut sub hseqname { my ($self,$value) = @_; if (defined $value) { $self->{'_hseqname'} = $value; } return $self->{'_hseqname'}; } =head2 length Arg [1] : int $length (optional) sequence length of the query sequence Example : $Blastz->length($length) Descritpion : get/set the length value and return it Returntype : int Exceptions : none Caller : general =cut sub length { my ($self,$value) = @_; if (defined $value) { $self->{'_length'} = $value; } return $self->{'_length'}; } =head2 hlength Arg [1] : int $length 
(optional) sequence length of the database sequence Example : $Blastz->hlength($length) Descritpion : get/set the hlength value and return it Returntype : int Exceptions : none Caller : general =cut sub hlength { my ($self,$value) = @_; if (defined $value) { $self->{'_hlength'} = $value; } return $self->{'_hlength'}; } =head2 strand Arg [1] : int $strand (optional) strand of the query sequence Example : $Blastz->strand($strand) Descritpion : get/set the strand value and return it Returntype : int Exceptions : none Caller : general =cut sub strand { my ($self,$value) = @_; if (defined $value) { $self->{'_strand'} = $value; } return $self->{'_strand'}; } =head2 hstrand Arg [1] : int $strand (optional) strand of the query sequence Example : $Blastz->hstrand($strand) Descritpion : get/set the hstrand value and return it Returntype : int Exceptions : none Caller : general =cut sub hstrand { my ($self,$value) = @_; if (defined $value) { $self->{'_hstrand'} = $value; } return $self->{'_hstrand'}; } 1;
mn1/ensembl-analysis
modules/Bio/EnsEMBL/Analysis/Tools/Blastz.pm
Perl
apache-2.0
14,272
package VMOMI::ArrayOfDistributedVirtualSwitchHostMemberPnicSpec;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# No ancestors of its own beyond the ComplexType base.
our @class_ancestors = ();

# Member tuples consumed by the VMOMI serializer.
our @class_members = (
    ['DistributedVirtualSwitchHostMemberPnicSpec', 'DistributedVirtualSwitchHostMemberPnicSpec', 1, 1],
);

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $pkg = shift;
    # Inherited members first, then this class's own.
    return ($pkg->SUPER::get_class_members(), @class_members);
}

1;
stumpr/p5-vmomi
lib/VMOMI/ArrayOfDistributedVirtualSwitchHostMemberPnicSpec.pm
Perl
apache-2.0
504
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Monitor Mitel 3300 ICP zone access point (ZAP) call-admission counters
# over SNMP: admitted calls, rejected calls and the rejection ratio.
package network::mitel::3300icp::snmp::mode::zapcalls;

use base qw(centreon::plugins::templates::counter);

use strict;
use warnings;
use Digest::MD5 qw(md5_hex);

# Declare the per-ZAP counters exposed by this mode.  'diff => 1' makes the
# admitted/rejected counters rate-like between two runs (statefile backed).
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'zap', type => 1, cb_prefix_output => 'prefix_zap_output', message_multiple => 'All zone access points are ok' },
    ];
    $self->{maps_counters}->{zap} = [
        { label => 'admitted', set => {
                key_values => [ { name => 'mitelBWMCumCACAdmissions', diff => 1 }, { name => 'display' } ],
                output_template => 'Admitted calls: %s',
                perfdatas => [
                    { label => 'admitted', value => 'mitelBWMCumCACAdmissions', template => '%s',
                      min => 0, unit => 'calls', label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        { label => 'rejected', set => {
                key_values => [ { name => 'mitelBWMCumCACRejections', diff => 1 }, { name => 'display' } ],
                output_template => 'Rejected calls: %s',
                perfdatas => [
                    { label => 'rejected', value => 'mitelBWMCumCACRejections', template => '%s',
                      min => 0, unit => 'calls', label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        { label => 'rejection-ratio', set => {
                key_values => [ { name => 'mitelBWMCumCACRejectionRatio' }, { name => 'display' } ],
                output_template => 'Rejection ratio: %s%%',
                perfdatas => [
                    { label => 'rejection_ratio', value => 'mitelBWMCumCACRejectionRatio', template => '%s',
                      min => 0, max => 100, unit => '%', label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
    ];
}

# Prefix each counter line with the ZAP name.
sub prefix_zap_output {
    my ($self, %options) = @_;

    return "Zone access point '" . $options{instance_value}->{display} . "' ";
}

# Constructor: enables the statefile (needed for 'diff' counters) and adds
# the --filter-name option.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, statefile => 1);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        "filter-name:s" => { name => 'filter_name' },
    });

    return $self;
}

sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);
}

# FIX: this declaration was terminated with a comma instead of a semicolon,
# silently chaining it to the next statement via the comma operator.
my $oid_mitelBWMCumZAPLabel = '.1.3.6.1.4.1.1027.4.1.1.2.5.1.1.2.1.4';
my $mapping = {
    mitelBWMCumCACAdmissions     => { oid => '.1.3.6.1.4.1.1027.4.1.1.2.5.1.1.2.1.5' },
    mitelBWMCumCACRejections     => { oid => '.1.3.6.1.4.1.1027.4.1.1.2.5.1.1.2.1.6' },
    mitelBWMCumCACRejectionRatio => { oid => '.1.3.6.1.4.1.1027.4.1.1.2.5.1.1.2.1.7' },
};

# Walk the ZAP label table, apply --filter-name, then fetch the three
# counters for each retained instance.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{zap} = {};
    my $snmp_result = $options{snmp}->get_table(oid => $oid_mitelBWMCumZAPLabel, nothing_quit => 1);
    foreach my $oid (keys %{$snmp_result}) {
        next if ($oid !~ /^$oid_mitelBWMCumZAPLabel\.(.*)$/);
        my $instance = $1;
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $snmp_result->{$oid} !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping '" . $snmp_result->{$oid} . "'.", debug => 1);
            next;
        }

        $self->{zap}->{$instance} = { display => $snmp_result->{$oid} };
    }

    $options{snmp}->load(oids => [ $mapping->{mitelBWMCumCACAdmissions}->{oid}, $mapping->{mitelBWMCumCACRejections}->{oid},
                                   $mapping->{mitelBWMCumCACRejectionRatio}->{oid} ],
                         instances => [ keys %{$self->{zap}} ], instance_regexp => '^(.*)$');
    $snmp_result = $options{snmp}->get_leef(nothing_quit => 1);

    foreach (keys %{$self->{zap}}) {
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $_);
        foreach my $name (keys %{$mapping}) {
            $self->{zap}->{$_}->{$name} = $result->{$name};
        }
    }

    if (scalar(keys %{$self->{zap}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No zone access points found.");
        $self->{output}->option_exit();
    }

    # Statefile key: host + port + mode + filter (hashed), so different
    # filters do not share cached counter values.
    $self->{cache_name} = "mitel_3300icp_" . $options{snmp}->get_hostname() . '_' . $options{snmp}->get_port() . '_' . $self->{mode} . '_' .
        (defined($self->{option_results}->{filter_name}) ? md5_hex($self->{option_results}->{filter_name}) : md5_hex('all'));
}

1;

__END__

=head1 MODE

Check zone access points calls.

=over 8

=item B<--filter-name>

Filter by zone access points name (can be a regexp).

=item B<--warning-*>

Threshold warning.
Can be: 'admitted', 'rejected', 'rejection-ratio' (%).

=item B<--critical-*>

Threshold critical.
Can be: 'admitted', 'rejected', 'rejection-ratio' (%).

=back

=cut
centreon/centreon-plugins
network/mitel/3300icp/snmp/mode/zapcalls.pm
Perl
apache-2.0
5,929
package Paws::CloudDirectory::BatchListAttachedIndices;

use Moose;

has TargetReference => (
    is       => 'ro',
    isa      => 'Paws::CloudDirectory::ObjectReference',
    required => 1,
);
has MaxResults => ( is => 'ro', isa => 'Int' );
has NextToken  => ( is => 'ro', isa => 'Str' );

1;

### main pod documentation begin ###

=head1 NAME

Paws::CloudDirectory::BatchListAttachedIndices

=head1 USAGE

This class represents one of two things:

=head3 Arguments in a call to a service

Use the attributes of this class as arguments to methods. You shouldn't
make instances of this class. Each attribute should be used as a named
argument in the calls that expect this type of object.

As an example, if Att1 is expected to be a
Paws::CloudDirectory::BatchListAttachedIndices object:

  $service_obj->Method(Att1 => { MaxResults => $value, ..., TargetReference => $value });

=head3 Results returned from an API call

Use accessors for each attribute. If Att1 is expected to be a
Paws::CloudDirectory::BatchListAttachedIndices object:

  $result = $service_obj->Method(...);
  $result->Att1->MaxResults

=head1 DESCRIPTION

Lists indices attached to an object inside a BatchRead operation. For
more information, see ListAttachedIndices and
BatchReadRequest$Operations.

=head1 ATTRIBUTES

=head2 B<REQUIRED> TargetReference => L<Paws::CloudDirectory::ObjectReference>

A reference to the object that has indices attached.

=head2 MaxResults => Int

The maximum number of results to retrieve.

=head2 NextToken => Str

The pagination token.

=head1 SEE ALSO

This class forms part of L<Paws>, describing an object used in
L<Paws::CloudDirectory>

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
auto-lib/Paws/CloudDirectory/BatchListAttachedIndices.pm
Perl
apache-2.0
1,815
package Paws::Glue::CodeGenEdge;

use Moose;

has Source          => ( is => 'ro', isa => 'Str', required => 1 );
has Target          => ( is => 'ro', isa => 'Str', required => 1 );
has TargetParameter => ( is => 'ro', isa => 'Str' );

1;

### main pod documentation begin ###

=head1 NAME

Paws::Glue::CodeGenEdge

=head1 USAGE

This class represents one of two things:

=head3 Arguments in a call to a service

Use the attributes of this class as arguments to methods. You shouldn't
make instances of this class. Each attribute should be used as a named
argument in the calls that expect this type of object.

As an example, if Att1 is expected to be a Paws::Glue::CodeGenEdge
object:

  $service_obj->Method(Att1 => { Source => $value, ..., TargetParameter => $value });

=head3 Results returned from an API call

Use accessors for each attribute. If Att1 is expected to be a
Paws::Glue::CodeGenEdge object:

  $result = $service_obj->Method(...);
  $result->Att1->Source

=head1 DESCRIPTION

Represents a directional edge in a directed acyclic graph (DAG).

=head1 ATTRIBUTES

=head2 B<REQUIRED> Source => Str

The ID of the node at which the edge starts.

=head2 B<REQUIRED> Target => Str

The ID of the node at which the edge ends.

=head2 TargetParameter => Str

The target of the edge.

=head1 SEE ALSO

This class forms part of L<Paws>, describing an object used in
L<Paws::Glue>

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
ioanrogers/aws-sdk-perl
auto-lib/Paws/Glue/CodeGenEdge.pm
Perl
apache-2.0
1,561
=head1 LICENSE

Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=cut

=head1 CONTACT

  Please email comments or questions to the public Ensembl
  developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.

  Questions may also be sent to the Ensembl help desk at
  <http://www.ensembl.org/Help/Contact>.

=cut

=head1 NAME

Bio::EnsEMBL::DBSQL::MetaContainer - Encapsulates all access to core
database meta information

=head1 SYNOPSIS

  my $meta_container =
    $registry->get_adaptor( 'Human', 'Core', 'MetaContainer' );

  my @mapping_info =
    @{ $meta_container->list_value_by_key('assembly.mapping') };

  my $scientific_name = $meta_container->get_scientific_name();

=head1 DESCRIPTION

  An object that encapsulates specific access to core db meta data

=head1 METHODS

=cut

package Bio::EnsEMBL::DBSQL::MetaContainer;

use strict;
use warnings;

use Bio::EnsEMBL::Utils::Exception qw/deprecate/;
use Bio::Species;

use base qw/Bio::EnsEMBL::DBSQL::BaseMetaContainer/;

# add well known meta info get-functions below

=head2 get_production_name

  Args          : none
  Example       : $species = $meta_container->get_production_name();
  Description   : Obtains the name of the species in a form usable as, for
                  example, a table name, file name etc.
  Returntype    : string
  Exceptions    : none
  Status        : Stable

=cut

sub get_production_name {
  my ($self) = @_;
  return $self->single_value_by_key('species.production_name');
}

=head2 get_display_name

  Args          : none
  Example       : $species = $meta_container->get_display_name();
  Description   : Obtains the name of the species in a form usable as, for
                  example, a short label in a GUI.
  Returntype    : string
  Exceptions    : none
  Status        : Stable

=cut

sub get_display_name {
  my ($self) = @_;
  return $self->single_value_by_key('species.display_name');
}

=head2 get_common_name

  Args          : none
  Example       : $species = $meta_container->get_common_name();
  Description   : Obtains the common name of the species.
  Returntype    : string
  Exceptions    : none
  Status        : Stable

=cut

sub get_common_name {
  my ($self) = @_;
  return $self->single_value_by_key('species.common_name');
}

=head2 get_scientific_name

  Args          : none
  Example       : $species = $meta_container->get_scientific_name();
  Description   : Obtains the full scientific name of the species.
  Returntype    : string
  Exceptions    : none
  Status        : Stable

=cut

sub get_scientific_name {
  my ($self) = @_;
  return $self->single_value_by_key('species.scientific_name');
}

=head2 get_division

  Args          : none
  Example       : $div = $meta_container->get_division();
  Description   : Obtains the Ensembl Genomes division to which the species
                  belongs.
  Returntype    : string
  Exceptions    : none
  Status        : Stable

=cut

sub get_division {
  my ($self) = @_;
  return $self->single_value_by_key('species.division');
}

=head2 get_taxonomy_id

  Arg [1]     : none
  Example     : $tax_id = $meta_container->get_taxonomy_id();
  Description : Retrieves the taxonomy id from the database meta table
  Returntype  : string
  Exceptions  : none
  Caller      : ?
  Status      : Stable

=cut

sub get_taxonomy_id {
  my ($self) = @_;
  return $self->single_value_by_key('species.taxonomy_id', 1);
}

=head2 get_genebuild

  Arg [1]     : none
  Example     : $tax_id = $meta_container->get_genebuild();
  Description : Retrieves the genebuild from the database meta table
  Returntype  : string
  Exceptions  : none
  Caller      : ?
  Status      : Stable

=cut

sub get_genebuild {
  my ($self) = @_;
  return $self->single_value_by_key('genebuild.start_date', 1);
}

=head2 get_classification

  Example     : $classification = $meta_container->get_classification();
  Description : Retrieves the classification held in the backing database
                minus any species specific levels. This means that the
                first element in the array will be subfamily/family level
                ascending to superkingdom
  Returntype  : ArrayRef[String]
  Exceptions  : none
  Caller      : ?
  Status      : Stable

=cut

sub get_classification {
  my ($self) = @_;
  my $classification = $self->list_value_by_key('species.classification');
  # Work on a copy so the cached meta value is left untouched.
  my $copy = [@{$classification}];
  # Drop the first, species-specific element (e.g. "Homo sapiens").
  splice(@{$copy}, 0, 1);
  return $copy;
}

1;
danstaines/ensembl
modules/Bio/EnsEMBL/DBSQL/MetaContainer.pm
Perl
apache-2.0
5,057
package Paws::Organizations::CreateOrganizationalUnit;

use Moose;

has Name     => ( is => 'ro', isa => 'Str', required => 1 );
has ParentId => ( is => 'ro', isa => 'Str', required => 1 );

use MooseX::ClassAttribute;

class_has _api_call => (
    isa     => 'Str',
    is      => 'ro',
    default => 'CreateOrganizationalUnit',
);
class_has _returns => (
    isa     => 'Str',
    is      => 'ro',
    default => 'Paws::Organizations::CreateOrganizationalUnitResponse',
);
class_has _result_key => ( isa => 'Str', is => 'ro' );

1;

### main pod documentation begin ###

=head1 NAME

Paws::Organizations::CreateOrganizationalUnit - Arguments for method CreateOrganizationalUnit on Paws::Organizations

=head1 DESCRIPTION

This class represents the parameters used for calling the method
CreateOrganizationalUnit on the AWS Organizations service. Use the
attributes of this class as arguments to method
CreateOrganizationalUnit.

You shouldn't make instances of this class. Each attribute should be
used as a named argument in the call to CreateOrganizationalUnit.

As an example:

  $service_obj->CreateOrganizationalUnit(Att1 => $value1, Att2 => $value2, ...);

Values for attributes that are native types (Int, String, Float, etc)
can passed as-is (scalar values). Values for complex Types (objects)
can be passed as a HashRef. The keys and values of the hashref will be
used to instance the underlying object.

=head1 ATTRIBUTES

=head2 B<REQUIRED> Name => Str

The friendly name to assign to the new OU.

=head2 B<REQUIRED> ParentId => Str

The unique identifier (ID) of the parent root or OU in which you want
to create the new OU.

The regex pattern for a parent ID string requires one of the following:

=over

=item *

Root: a string that begins with "r-" followed by from 4 to 32
lower-case letters or digits.

=item *

Organizational unit (OU): a string that begins with "ou-" followed by
from 4 to 32 lower-case letters or digits (the ID of the root that the
OU is in) followed by a second "-" dash and from 8 to 32 additional
lower-case letters or digits.

=back

=head1 SEE ALSO

This class forms part of L<Paws>, documenting arguments for method
CreateOrganizationalUnit in L<Paws::Organizations>

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
ioanrogers/aws-sdk-perl
auto-lib/Paws/Organizations/CreateOrganizationalUnit.pm
Perl
apache-2.0
2,353
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Hardware component checker for BlueArc system drives: walks the SNMP
# sysDrive table and reports the status of each drive (keyed by WWN).
package centreon::common::bluearc::snmp::mode::components::sysdrive;

use strict;
use warnings;

# Integer status codes from the sysDriveStatus SNMP column mapped to
# human-readable states.
my %map_status = (
    1 => 'online',
    2 => 'corrupt',
    3 => 'failed',
    4 => 'notPresent',
    5 => 'disconnected',
    6 => 'offline',
    7 => 'initializing',
    8 => 'formatting',
    9 => 'unknown',
);

# Columns of interest: drive WWN (used as the instance label) and status.
my $mapping = {
    sysDriveWWN    => { oid => '.1.3.6.1.4.1.11096.6.1.1.1.3.4.2.1.2' },
    sysDriveStatus => { oid => '.1.3.6.1.4.1.11096.6.1.1.1.3.4.2.1.4', map => \%map_status },
};
my $oid_sysDriveEntry = '.1.3.6.1.4.1.11096.6.1.1.1.3.4.2.1';

# Queue the whole sysDrive table for the bulk SNMP request made by the
# hardware template before check() is called.
sub load {
    my ($self) = @_;

    push @{$self->{request}}, { oid => $oid_sysDriveEntry };
}

# Iterate over the fetched table, honour section/instance filters, count
# drives and raise a non-OK status when a drive's state maps to one via
# the mode's severity configuration.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking system drives");
    $self->{components}->{sysdrive} = {name => 'sysdrives', total => 0, skip => 0};
    return if ($self->check_filter(section => 'sysdrive'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_sysDriveEntry}})) {
        # Only react once per row, on the status column.
        next if ($oid !~ /^$mapping->{sysDriveStatus}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_sysDriveEntry}, instance => $instance);

        # Instance filtering is done on the WWN, not the numeric SNMP index.
        next if ($self->check_filter(section => 'sysdrive', instance => $result->{sysDriveWWN}));

        $self->{components}->{sysdrive}->{total}++;
        $self->{output}->output_add(long_msg => sprintf("system drive '%s' status is '%s' [instance: %s].",
                                    $result->{sysDriveWWN}, $result->{sysDriveStatus},
                                    $result->{sysDriveWWN}
                                    ));
        my $exit = $self->get_severity(section => 'sysdrive', value => $result->{sysDriveStatus});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity =>  $exit,
                                        short_msg => sprintf("System drive '%s' status is '%s'", $result->{sysDriveWWN}, $result->{sysDriveStatus}));
        }
    }
}

1;
Tpo76/centreon-plugins
centreon/common/bluearc/snmp/mode/components/sysdrive.pm
Perl
apache-2.0
2,931
#!/usr/bin/perl
use strict;
use warnings;

# Read lines from STDIN, count how often each distinct line occurs, and
# print the counts sorted by line text.

my %words;
# FIX: the original looped on `while (chomp($word = <STDIN>))`, which uses
# chomp's return value (number of chars removed) as the loop condition.
# That exits early on a final line lacking a newline (chomp returns 0) and
# warns on EOF (chomp on undef).  Loop on definedness instead.
while (defined(my $word = <STDIN>)) {
    chomp $word;
    $words{$word}++;    # autovivifies to 0 then increments; replaces the if/else
}

foreach my $word (sort keys %words) {
    printf "'%s' appears %d times\n", $word, $words{$word};
}
alexhilton/miscellaneous
perl/exercise/ex0602.pl
Perl
apache-2.0
307
=head1 LICENSE

Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=cut

# ORM object mapped onto the tools database's 'result' table.
package ORM::EnsEMBL::DB::Tools::Object::Result;

use strict;
use warnings;

use parent qw(ORM::EnsEMBL::DB::Tools::Object);

# Column metadata is auto-discovered from the table definition.
__PACKAGE__->meta->setup(
  table             => 'result',
  auto_initialize   => []
);

# Treat the 'result_data' column as a serialized datastructure column
# (trusted => 1: its content is deserialized without extra validation).
__PACKAGE__->meta->datastructure_columns({'name' => 'result_data', 'trusted' => 1});

1;
Ensembl/ensembl-orm
modules/ORM/EnsEMBL/DB/Tools/Object/Result.pm
Perl
apache-2.0
1,011
package VMOMI::HostLowLevelProvisioningManagerSnapshotLayoutSpec;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Ancestor chain, nearest parent first.
our @class_ancestors = ('DynamicData');

# Member tuples consumed by the VMOMI serializer (undef type = builtin).
our @class_members = (
    ['id',          undef, 0, ],
    ['srcFilename', undef, 0, ],
    ['dstFilename', undef, 0, ],
    ['disk', 'HostLowLevelProvisioningManagerDiskLayoutSpec', 1, 1],
);

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $pkg = shift;
    # Inherited members first, then this class's own.
    return ($pkg->SUPER::get_class_members(), @class_members);
}

1;
stumpr/p5-vmomi
lib/VMOMI/HostLowLevelProvisioningManagerSnapshotLayoutSpec.pm
Perl
apache-2.0
579
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Resource representing a keyword attached to a keyword plan ad group.
package Google::Ads::GoogleAds::V9::Resources::KeywordPlanAdGroupKeyword;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

sub new {
  my ($class, $args) = @_;

  # Copy the known fields from the argument hash into the new object.
  my $self = {
    map { $_ => $args->{$_} }
      qw(cpcBidMicros id keywordPlanAdGroup matchType negative resourceName text)
  };

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
googleads/google-ads-perl
lib/Google/Ads/GoogleAds/V9/Resources/KeywordPlanAdGroupKeyword.pm
Perl
apache-2.0
1,326
package VMOMI::AlreadyAuthenticatedSessionEvent;
use parent 'VMOMI::SessionEvent';

use strict;
use warnings;

# Ancestor chain, nearest superclass first, as consumed by the VMOMI
# (de)serialization machinery.
our @class_ancestors = (
    'SessionEvent',
    'Event',
    'DynamicData',
);

# This event type introduces no members of its own.
our @class_members = ();

# Return the ancestor class names for this type.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return inherited member descriptors followed by this class's own (none).
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
stumpr/p5-vmomi
lib/VMOMI/AlreadyAuthenticatedSessionEvent.pm
Perl
apache-2.0
436
#!/usr/local/ActivePerl-5.8/bin/perl
#
# Create a placeholder Host configuration record for Linux, print it for
# operator inspection, and serialize it to
# $perfhome/etc/configs/Linux/host.ser.
#
# The Host class and the $perfhome global are supplied by HostConfig.pl.

use strict;
use warnings;

require "../HostConfig.pl";

# Set by HostConfig.pl in package main; declared so strict mode accepts it.
# NOTE(review): assumes HostConfig.pl assigns $main::perfhome — confirm.
our $perfhome;

# Build a new host record with placeholder values; the real hardware
# details are filled in later by the collection scripts.
my $host = Host->new(
    OS        => "Linux",
    IP        => "0.0.0.0",
    Owner     => "nobody",
    cpuNum    => "null",
    cpuModel  => "null",
    cpuSpeed  => "null",
    memTotal  => "null",
    swapTotal => "null",
    osVer     => "null",
    kernelVer => "null",
);

# BUG FIX: the original wrote print("Host: ref($host)\n"), which interpolates
# $host inside the quotes but never calls ref() — it printed the literal text
# ref(Host=HASH(0x...)). Pass ref($host) as a separate print argument instead.
print "Host: ", ref($host), "\n";

# Echo each field via its accessor; intermediate scalars were redundant.
print "OS: ",             $host->getOS(),         "\n";
print "IP: ",             $host->getIP(),         "\n";
print "Owner: ",          $host->getOwner(),      "\n";
print "Last Update: ",    $host->getLastUpdate(), "\n";
print "CPU Number: ",     $host->getCpuNum(),     "\n";
print "CPU Model: ",      $host->getCpuModel(),   "\n";
print "CPU Speed: ",      $host->getCpuSpeed(),   "\n";
print "Mem Total: ",      $host->getMemTotal(),   "\n";
print "Swap Total: ",     $host->getSwapTotal(),  "\n";
print "OS Version: ",     $host->getOsVer(),      "\n";
print "Kernel Version: ", $host->getKernelVer(),  "\n";

# Persist the record; die with a diagnostic if serialization fails.
$host->store("$perfhome/etc/configs/Linux/host.ser")
    or die("can't store host.ser?\n");
ktenzer/perfstat
misc/serialize/create/Linux/host.pl
Perl
apache-2.0
1,194
=encoding utf8 =head1 NAME Crypt::PKCS11::CK_ECDH2_DERIVE_PARAMS - Perl interface to PKCS #11 CK_ECDH2_DERIVE_PARAMS structure =head1 SYNOPSIS use Crypt::PKCS11::CK_ECDH2_DERIVE_PARAMS; my $obj = Crypt::PKCS11::CK_ECDH2_DERIVE_PARAMS->new; $obj->set...; $obj->get...; =head1 DESCRIPTION This is the Perl interface for the C structure CK_ECDH2_DERIVE_PARAMS in PKCS #11. See PKCS #11 documentation for more information about the structure and what it is used for. =head1 METHODS =over 4 =item $obj = Crypt::PKCS11::CK_ECDH2_DERIVE_PARAMS->new Returns a new Crypt::PKCS11::CK_ECDH2_DERIVE_PARAMS object. =item $rv = $obj->get_kdf($kdf) Retrieve the value B<kdf> from the structure into C<$kdf>. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $kdf = $obj->kdf Returns the value B<kdf> from the structure or undef on error. =item $rv = $obj->set_kdf($kdf) Set the value B<kdf> in the structure. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $rv = $obj->get_pSharedData($pSharedData) Retrieve the value B<pSharedData> from the structure into C<$pSharedData>. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $pSharedData = $obj->pSharedData Returns the value B<pSharedData> from the structure or undef on error. =item $rv = $obj->set_pSharedData($pSharedData) Set the value B<pSharedData> in the structure. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $rv = $obj->get_pPublicData($pPublicData) Retrieve the value B<pPublicData> from the structure into C<$pPublicData>. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $pPublicData = $obj->pPublicData Returns the value B<pPublicData> from the structure or undef on error. =item $rv = $obj->set_pPublicData($pPublicData) Set the value B<pPublicData> in the structure. Returns C<CKR_OK> on success otherwise a CKR describing the error. 
=item $rv = $obj->get_hPrivateData($hPrivateData) Retrieve the value B<hPrivateData> from the structure into C<$hPrivateData>. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $hPrivateData = $obj->hPrivateData Returns the value B<hPrivateData> from the structure or undef on error. =item $rv = $obj->set_hPrivateData($hPrivateData) Set the value B<hPrivateData> in the structure. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $rv = $obj->get_pPublicData2($pPublicData2) Retrieve the value B<pPublicData2> from the structure into C<$pPublicData2>. Returns C<CKR_OK> on success otherwise a CKR describing the error. =item $pPublicData2 = $obj->pPublicData2 Returns the value B<pPublicData2> from the structure or undef on error. =item $rv = $obj->set_pPublicData2($pPublicData2) Set the value B<pPublicData2> in the structure. Returns C<CKR_OK> on success otherwise a CKR describing the error. =back =head1 PRIVATE METHODS These are the private methods used within the module and should not be used elsewhere. =over 4 =item $bytes = $obj->toBytes Return the structure represented as bytes or undef on error. =item $rv = $obj->fromBytes($bytes) Sets the structure from a representation in bytes. Returns C<CKR_OK> on success otherwise a CKR describing the error. =back =head1 NOTE Derived from the RSA Security Inc. PKCS #11 Cryptographic Token Interface (Cryptoki) =head1 AUTHOR Jerry Lundström <lundstrom.jerry@gmail.com> =head1 REPORTING BUGS Report bugs at https://github.com/mkinstab/p5-Crypt-PKCS11/issues . =head1 LICENSE Copyright (c) 2015-2016 Jerry Lundström <lundstrom.jerry@gmail.com> Copyright (c) 2016 make install AB Copyright (c) 2015 .SE (The Internet Infrastructure Foundation) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
dotse/p5-Crypt-PKCS11
lib/Crypt/PKCS11/CK_ECDH2_DERIVE_PARAMS.pod
Perl
bsd-2-clause
5,032