code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::postgres::mode::backends;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: build the mode object and register this mode's CLI options.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    # Option set is unchanged; registered through an intermediate hashref.
    my $arguments = {
        "warning:s"  => { name => 'warning', },
        "critical:s" => { name => 'critical', },
        "exclude:s"  => { name => 'exclude', },
        "noidle"     => { name => 'noidle', },
    };
    $options{options}->add_options(arguments => $arguments);

    return $self;
}
# Validate the --warning/--critical thresholds after generic option parsing;
# exits with an option error if either threshold string is malformed.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    # Both thresholds get the identical validation treatment.
    for my $label (qw(warning critical)) {
        next if $self->{perfdata}->threshold_validate(
            label => $label, value => $self->{option_results}->{$label});
        $self->{output}->add_option_msg(
            short_msg => "Wrong $label threshold '" . $self->{option_results}->{$label} . "'.");
        $self->{output}->option_exit();
    }
}
# Count client backends per database and compare the percentage of
# max_connections in use against the warning/critical thresholds.
# Emits one perfdata series per database ('connections_<name>').
sub run {
    my ($self, %options) = @_;
    # $options{sql} = sqlmode object
    $self->{sql} = $options{sql};
    $self->{sql}->connect();

    # Optionally filter out idle backends; the column holding that state
    # changed in PostgreSQL 9.2 (current_query -> state).
    my $noidle = '';
    if (defined($self->{option_results}->{noidle})) {
        if ($self->{sql}->is_version_minimum(version => '9.2')) {
            $noidle = " AND state <> 'idle'";
        } else {
            $noidle = " AND current_query <> '<IDLE>'";
        }
    }

    my $query = "SELECT COUNT(datid) AS current,
(SELECT setting AS mc FROM pg_settings WHERE name = 'max_connections') AS mc,
d.datname
FROM pg_database d
LEFT JOIN pg_stat_activity s ON (s.datid = d.oid $noidle)
GROUP BY d.datname
ORDER BY d.datname";
    $self->{sql}->query(query => $query);

    $self->{output}->output_add(severity => 'OK',
                                short_msg => "All client database connections are ok.");

    my $database_check = 0;
    my $result = $self->{sql}->fetchall_arrayref();
    foreach my $row (@{$result}) {
        # NOTE(review): databases that do NOT match --exclude are the ones
        # skipped, i.e. the option behaves as an inclusion filter despite its
        # name — confirm intended semantics before changing.
        if (defined($self->{option_results}->{exclude}) && $$row[2] !~ /$self->{option_results}->{exclude}/) {
            # Fixed unbalanced quoting: the message previously opened with a
            # single quote but closed with a double quote.
            $self->{output}->output_add(long_msg => "Skipping database '" . $$row[2] . "'");
            next;
        }
        $database_check++;

        my $used = $$row[0];
        my $max_connections = $$row[1];
        my $database_name = $$row[2];
        my $prct_used = ($used * 100) / $max_connections;
        my $exit_code = $self->{perfdata}->threshold_check(value => $prct_used, threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
        $self->{output}->output_add(long_msg => sprintf("Database '%s': %.2f%% client connections limit reached (%d of max. %d)",
                                                        $database_name, $prct_used, $used, $max_connections));
        # Only promote to the short message when the status is not OK.
        if (!$self->{output}->is_status(value => $exit_code, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit_code,
                                        short_msg => sprintf("Database '%s': %.2f%% client connections limit reached (%d of max. %d)",
                                                             $database_name, $prct_used, $used, $max_connections));
        }
        $self->{output}->perfdata_add(label => 'connections_' . $database_name,
                                      value => $used,
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning', total => $max_connections, cast_int => 1),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical', total => $max_connections, cast_int => 1),
                                      min => 0, max => $max_connections);
    }

    # Nothing surviving the filter usually means a permission problem or an
    # over-aggressive --exclude pattern.
    if ($database_check == 0) {
        $self->{output}->output_add(severity => 'UNKNOWN',
                                    short_msg => 'No database checked. (permission or a wrong exclude filter)');
    }
    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check the current number of connections for one or more databases
=over 8
=item B<--warning>
Threshold warning in percent.
=item B<--critical>
Threshold critical in percent.
=item B<--exclude>
Filter databases.
=item B<--noidle>
Idle connections are not counted.
=back
=cut
| wilfriedcomte/centreon-plugins | database/postgres/mode/backends.pm | Perl | apache-2.0 | 5,623 |
# Perl-side stub for the ByteBuf C struct.  The actual implementation lives
# in the __H__/__C__ sections after __END__ and is compiled through the
# CClass base-class machinery.
package KinoSearch1::Util::ByteBuf;
use strict;
use warnings;
use KinoSearch1::Util::ToolSet;
use base qw( KinoSearch1::Util::CClass );
1;
__END__
__H__
#ifndef H_KINOSEARCH_UTIL_BYTEBUF
#define H_KINOSEARCH_UTIL_BYTEBUF 1
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"
#include "KinoSearch1UtilCarp.h"
#include "KinoSearch1UtilMemManager.h"
typedef struct bytebuf {
char *ptr;
I32 size; /* number of valid chars */
I32 cap; /* allocated bytes, including any null termination */
U32 flags;
} ByteBuf;
ByteBuf* Kino1_BB_new(I32);
ByteBuf* Kino1_BB_new_string(char*, I32);
ByteBuf* Kino1_BB_new_view(char*, I32);
ByteBuf* Kino1_BB_clone(ByteBuf*);
void Kino1_BB_assign_view(ByteBuf*, char*, I32);
void Kino1_BB_assign_string(ByteBuf*, char*, I32);
void Kino1_BB_cat_string(ByteBuf*, char*, I32);
void Kino1_BB_grow(ByteBuf*, I32);
I32 Kino1_BB_compare(ByteBuf*, ByteBuf*);
void Kino1_BB_destroy(ByteBuf*);
#endif /* include guard */
__C__
#include "KinoSearch1UtilByteBuf.h"
#define KINO_BB_VIEW 0x1
/* Return a pointer to a new ByteBuf capable of holding a string of [size]
 * bytes.  Though the ByteBuf's size member is set, none of the allocated
 * memory is initialized.  One extra byte is reserved for a null terminator,
 * and cap records the full allocation (size + 1).
 */
ByteBuf*
Kino1_BB_new(I32 size) {
ByteBuf *bb;
/* allocate the struct and an uninitialized buffer of size + 1 bytes */
Kino1_New(0, bb, 1, ByteBuf);
Kino1_New(0, bb->ptr, size + 1, char);
/* assign; flags == 0 means this ByteBuf owns its buffer (not a view) */
bb->size = size;
bb->cap = size + 1;
bb->flags = 0;
return bb;
}
/* Return a pointer to a new ByteBuf which holds a copy of the passed in
 * string.  The copy is null-terminated, and the ByteBuf owns its buffer.
 */
ByteBuf*
Kino1_BB_new_string(char *ptr, I32 size) {
ByteBuf *bb;
/* allocate struct plus size + 1 bytes (room for the terminator) */
Kino1_New(0, bb, 1, ByteBuf);
Kino1_New(0, bb->ptr, size + 1, char);
/* copy the caller's bytes into the fresh buffer */
Copy(ptr, bb->ptr, size, char);
/* assign */
bb->size = size;
bb->cap = size + 1;
bb->ptr[size] = '\0'; /* null terminate */
bb->flags = 0;
return bb;
}
/* Return a pointer to a new "view" ByteBuf, offering a perspective on the
 * passed in string.  A view does not own ptr: it must never free or grow it,
 * which is why cap is 0 and the KINO_BB_VIEW flag is set.
 */
ByteBuf*
Kino1_BB_new_view(char *ptr, I32 size) {
ByteBuf *bb;
/* allocate only the struct -- the character data belongs to the caller */
Kino1_New(0, bb, 1, ByteBuf);
/* assign */
bb->ptr = ptr;
bb->size = size;
bb->cap = 0;
bb->flags = 0 | KINO_BB_VIEW;
return bb;
}
/* Return an owning copy of [bb] -- the clone is always a "real" ByteBuf,
 * even when [bb] is a "view".  NULL in, NULL out. */
ByteBuf*
Kino1_BB_clone(ByteBuf *bb) {
    return bb == NULL
        ? NULL
        : Kino1_BB_new_string(bb->ptr, bb->size);
}
/* Assign the ptr and size members to the passed in values.  Downgrade the
 * ByteBuf to a "view" ByteBuf and free any existing owned memory.
 */
void
Kino1_BB_assign_view(ByteBuf *bb, char*ptr, I32 size) {
    /* Downgrade the ByteBuf to a view, freeing ptr only if we still own it.
     * Bug fix: the old test was `!bb->flags & KINO_BB_VIEW`, which applies
     * `!` before `&`; with any flag bit other than KINO_BB_VIEW set it
     * evaluated to 0, so the owned buffer leaked and the view flag was never
     * set.  (Compare the correct form already used in Kino1_BB_destroy.) */
    if (!(bb->flags & KINO_BB_VIEW)) {
        Kino1_Safefree(bb->ptr);
        bb->flags |= KINO_BB_VIEW;
    }
    /* assign */
    bb->ptr = ptr;
    bb->size = size;
}
/* Copy the passed-in string into the ByteBuf.  Allocate more memory if
 * necessary.  NOTE(review): unlike Kino1_BB_new_string, this does not write
 * a null terminator after the copied bytes -- confirm callers don't rely on
 * one.
 */
void
Kino1_BB_assign_string(ByteBuf *bb, char* ptr, I32 size) {
/* grow() dies if bb is a view, so ownership is guaranteed below */
Kino1_BB_grow(bb, size);
Copy(ptr, bb->ptr, size, char);
bb->size = size;
}
/* Append [size] bytes from [ptr] onto the end of the ByteBuf, growing the
 * allocation as needed.
 */
void
Kino1_BB_cat_string(ByteBuf *bb, char* ptr, I32 size) {
    I32 total;
    total = bb->size + size;
    Kino1_BB_grow(bb, total);
    /* copy the new bytes just past the current contents */
    Copy(ptr, (bb->ptr + bb->size), size, char);
    bb->size = total;
}
/* Assign more memory to the ByteBuf, if it doesn't already have enough room
 * to hold a string of [size] bytes.  Cannot shrink the allocation.  Dies if
 * called on a "view" ByteBuf, which doesn't own its buffer.
 */
void
Kino1_BB_grow(ByteBuf *bb, I32 new_size) {
    if (bb->flags & KINO_BB_VIEW)
        Kino1_confess("grow called on 'view' ByteBuf");
    /* bail out if the buffer's already at least as big as required */
    if (bb->cap > new_size)
        return;
    Kino1_Renew(bb->ptr, (new_size + 1), char);
    /* Bug fix: cap must record the full allocation including the null byte
     * (new_size + 1), matching the struct invariant set by the constructors.
     * Recording only new_size made the `cap > new_size` shortcut fail on a
     * repeated grow to the same size, triggering a needless realloc. */
    bb->cap = new_size + 1;
}
/* Free the ByteBuf.  The character buffer is released only when the ByteBuf
 * owns it (i.e. it is not a "view").  Safe to call with NULL.
 */
void
Kino1_BB_destroy(ByteBuf *bb) {
if (bb == NULL)
return;
if (!(bb->flags & KINO_BB_VIEW))
Kino1_Safefree(bb->ptr);
Kino1_Safefree(bb);
}
/* Lexically compare two ByteBufs: memcmp over the shared prefix, then break
 * ties by length (shorter sorts first).  Returns <0, 0, or >0. */
I32
Kino1_BB_compare(ByteBuf *a, ByteBuf *b) {
    I32 shorter;
    I32 result;
    shorter = (a->size < b->size) ? a->size : b->size;
    result  = memcmp(a->ptr, b->ptr, shorter);
    if (result == 0 && a->size != b->size) {
        result = (a->size < b->size) ? -1 : 1;
    }
    return result;
}
__POD__
==begin devdocs
==head1 NAME
KinoSearch1::Util::ByteBuf - stripped down scalar
==head1 DESCRIPTION
The ByteBuf is a C struct that's essentially a growable string of char. It's
like a stripped down scalar that can only deal with strings. It knows its own
size and capacity, so it can contain arbitrary binary data.
"View" ByteBufs don't own their own strings.
==head1 COPYRIGHT
Copyright 2005-2010 Marvin Humphrey
==head1 LICENSE, DISCLAIMER, BUGS, etc.
See L<KinoSearch1> version 1.01.
==end devdocs
==cut
| gitpan/KinoSearch1 | lib/KinoSearch1/Util/ByteBuf.pm | Perl | apache-2.0 | 5,131 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::hp::proliant::snmp::mode::components::fcaexternalacc;
use strict;
use warnings;
my %map_accel_status = (
1 => 'other',
2 => 'invalid',
3 => 'enabled',
4 => 'tmpDisabled',
5 => 'permDisabled',
);
my %map_accel_condition = (
1 => 'other',
2 => 'ok',
3 => 'degraded',
4 => 'failed',
);
my %map_accelbattery_condition = (
1 => 'other',
2 => 'ok',
3 => 'recharging',
4 => 'failed',
5 => 'degraded',
6 => 'not present',
);
# In 'CPQFCA-MIB.mib'
my $mapping = {
cpqFcaAccelStatus => { oid => '.1.3.6.1.4.1.232.16.2.2.2.1.3', map => \%map_accel_status },
};
my $mapping2 = {
cpqFcaAccelCondition => { oid => '.1.3.6.1.4.1.232.16.2.2.2.1.9', map => \%map_accel_condition },
};
my $mapping3 = {
cpqFcaAccelBatteryStatus => { oid => '.1.3.6.1.4.1.232.16.2.2.2.1.6', map => \%map_accelbattery_condition },
};
my $oid_cpqFcaAccelStatus = '.1.3.6.1.4.1.232.16.2.2.2.1.3';
my $oid_cpqFcaAccelCondition = '.1.3.6.1.4.1.232.16.2.2.2.1.9';
my $oid_cpqFcaAccelBatteryStatus = '.1.3.6.1.4.1.232.16.2.2.2.1.6';
# Queue the three accelerator OID tables on the shared SNMP request list so
# check() can read them from $self->{results}.
sub load {
    my (%options) = @_;
    push @{$options{request}},
        { oid => $oid_cpqFcaAccelStatus },
        { oid => $oid_cpqFcaAccelCondition },
        { oid => $oid_cpqFcaAccelBatteryStatus };
}
# Inspect every fca external accelerator board found in the walked SNMP
# tables and alert on board condition and battery condition separately.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking fca external accelerator boards");
    $self->{components}->{fcaexternalacc} = {name => 'fca external accelerator boards', total => 0, skip => 0};
    return if ($self->check_exclude(section => 'fcaexternalacc'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_cpqFcaAccelCondition}})) {
        # Bug fix: cpqFcaAccelCondition is defined in $mapping2, not $mapping.
        # $mapping->{cpqFcaAccelCondition} was undef, so the pattern collapsed
        # to /^\.(.*)$/ and captured the whole OID tail as the "instance".
        next if ($oid !~ /^$mapping2->{cpqFcaAccelCondition}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_cpqFcaAccelStatus}, instance => $instance);
        my $result2 = $self->{snmp}->map_instance(mapping => $mapping2, results => $self->{results}->{$oid_cpqFcaAccelCondition}, instance => $instance);
        my $result3 = $self->{snmp}->map_instance(mapping => $mapping3, results => $self->{results}->{$oid_cpqFcaAccelBatteryStatus}, instance => $instance);

        next if ($self->check_exclude(section => 'fcaexternalacc', instance => $instance));
        $self->{components}->{fcaexternalacc}->{total}++;

        $self->{output}->output_add(long_msg => sprintf("fca external accelerator boards '%s' [status: %s, battery status: %s] condition is %s.",
                                    $instance,
                                    $result->{cpqFcaAccelStatus}, $result3->{cpqFcaAccelBatteryStatus},
                                    $result2->{cpqFcaAccelCondition}));

        # Alert on the board's overall condition...
        my $exit = $self->get_severity(section => 'fcaexternalacc', value => $result2->{cpqFcaAccelCondition});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("fca external accelerator boards '%s' is %s",
                                           $instance, $result2->{cpqFcaAccelCondition}));
        }
        # ...and independently on its battery condition.
        $exit = $self->get_severity(section => 'fcaexternalaccbattery', value => $result3->{cpqFcaAccelBatteryStatus});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("fca external accelerator boards '%s' battery is %s",
                                           $instance, $result3->{cpqFcaAccelBatteryStatus}));
        }
    }
}
1; | s-duret/centreon-plugins | hardware/server/hp/proliant/snmp/mode/components/fcaexternalacc.pm | Perl | apache-2.0 | 4,592 |
# Immutable key/value pair describing metadata on an AWS CloudHSM resource.
# Both attributes are required, read-only strings (see POD below).
package Paws::CloudHSM::Tag;
use Moose;
has Key => (is => 'ro', isa => 'Str', required => 1);
has Value => (is => 'ro', isa => 'Str', required => 1);
1;
### main pod documentation begin ###
=head1 NAME
Paws::CloudHSM::Tag
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::CloudHSM::Tag object:
$service_obj->Method(Att1 => { Key => $value, ..., Value => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::CloudHSM::Tag object:
$result = $service_obj->Method(...);
$result->Att1->Key
=head1 DESCRIPTION
A key-value pair that identifies or specifies metadata about an AWS
CloudHSM resource.
=head1 ATTRIBUTES
=head2 B<REQUIRED> Key => Str
The key of the tag.
=head2 B<REQUIRED> Value => Str
The value of the tag.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::CloudHSM>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/CloudHSM/Tag.pm | Perl | apache-2.0 | 1,389 |
package AlchemyAPI_ConceptParams;
use 5.008000;
use strict;
use warnings;
require Exporter;
use AutoLoader qw(AUTOLOAD);
use base qw( AlchemyAPI_BaseParams );
use Error qw(:try);
use URI::Escape;
#our @ISA = "AlchemyAPI_BaseParams";
# Items to export into callers namespace by default. Note: do not export
# names by default without a very good reason. Use EXPORT_OK instead.
# Do not simply export all your public functions/methods/constants.
# This allows declaration use AlchemyAPI ':all';
# If you do not need this, moving things directly into @EXPORT or @EXPORT_OK
# will save memory.
our %EXPORT_TAGS = ( 'all' => [ qw(
) ] );
our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
our @EXPORT = qw(
);
our $VERSION = '0.10';
use constant {
SOURCE_TEXT_CLEANED_OR_RAW => 'cleaned_or_raw',
SOURCE_TEXT_CLEANED => 'cleaned',
SOURCE_TEXT_RAW => 'raw',
SOURCE_TEXT_CQUERY => 'cquery',
SOURCE_TEXT_XPATH => 'xpath'
};
# Construct a parameter object with every option unset, the retrieval limit
# disabled (-1), and XML as the default output mode.
sub new() {
    my $class = shift;
    my %defaults = (
        _maxRetrieve    => -1,
        _showSourceText => undef,
        _sourceText     => undef,
        _linkedData     => undef,
        _cQuery         => undef,
        _xPath          => undef,
        _outputMode     => AlchemyAPI_BaseParams::OUTPUT_MODE_XML,
    );
    my $self = { %defaults };
    return bless $self, $class;
}
# --- Accessors ---------------------------------------------------------
# Plain getter/setter pairs for the underscore-prefixed fields initialized
# in new().  The validating setters throw Error::Simple on bad input.

# Maximum number of concepts to retrieve; -1 (the default) means no limit.
sub SetMaxRetrieve {
my($self, $maxRetrieve) = @_;
$self->{_maxRetrieve} = $maxRetrieve;
}
sub GetMaxRetrieve {
my($self) = @_;
return $self->{_maxRetrieve};
}
# NOTE(review): the 0/1 checks below use numeric ==, so any non-numeric
# string compares equal to 0 (with a warning) and is accepted -- confirm
# whether a string eq check was intended.
sub SetLinkedData {
my($self, $linkedData) = @_;
if( 0 == $linkedData || 1 == $linkedData ) {
$self->{_linkedData} = $linkedData;
}
else {
throw Error::Simple( "Error: Cannot set LinkedData to ".$linkedData);
}
}
sub GetLinkedData {
my($self) = @_;
return $self->{_linkedData};
}
sub SetShowSourceText {
my($self, $showSourceText) = @_;
if( 0 == $showSourceText || 1 == $showSourceText ) {
$self->{_showSourceText} = $showSourceText;
}
else {
throw Error::Simple( "Error: Cannot set ShowSourceText to ".$showSourceText);
}
}
sub GetShowSourceText {
my($self) = @_;
return $self->{_showSourceText};
}
# Only the five SOURCE_TEXT_* constants declared above are legal values.
sub SetSourceText {
my($self, $sourceText) = @_;
if( SOURCE_TEXT_CLEANED_OR_RAW eq $sourceText || SOURCE_TEXT_CLEANED eq $sourceText || SOURCE_TEXT_RAW eq $sourceText ||
SOURCE_TEXT_CQUERY eq $sourceText || SOURCE_TEXT_XPATH eq $sourceText ) {
$self->{_sourceText} = $sourceText;
}
else {
throw Error::Simple( "Error: Cannot set SourceText to ".$sourceText);
}
}
sub GetSourceText {
my($self) = @_;
return $self->{_sourceText};
}
# cquery/xpath are free-form; they are URI-escaped at serialization time.
sub SetCQuery {
my($self, $cQuery) = @_;
$self->{_cQuery} = $cQuery;
}
sub GetCQuery {
my($self) = @_;
return $self->{_cQuery};
}
sub SetXPath {
my($self, $xPath) = @_;
$self->{_xPath} = $xPath;
}
sub GetXPath {
my($self) = @_;
return $self->{_xPath};
}
# Serialize every option that has been set onto the base class's query
# string; cquery/xpath values are URI-escaped before being appended.
sub GetParameterString {
    my ($self) = @_;
    my $params = $self->SUPER::GetParameterString();

    $params .= "&maxRetrieve=" . $self->{_maxRetrieve}
        unless $self->{_maxRetrieve} == -1;
    $params .= "&linkedData=" . $self->{_linkedData}
        if defined $self->{_linkedData};
    $params .= "&showSourceText=" . $self->{_showSourceText}
        if defined $self->{_showSourceText};
    $params .= "&sourceText=" . $self->{_sourceText}
        if defined $self->{_sourceText};
    $params .= "&cquery=" . uri_escape($self->{_cQuery})
        if defined $self->{_cQuery};
    $params .= "&xpath=" . uri_escape($self->{_xPath})
        if defined $self->{_xPath};

    return $params;
}
1;
__END__
| AlchemyAPI/alchemyapi_perl | module/lib/AlchemyAPI_ConceptParams.pm | Perl | apache-2.0 | 3,623 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2018] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
Bio::EnsEMBL::Analysis::Tools::Otter::DBSQL::DnaAlignFeatureAdaptor - Adaptor for DnaAlignFeatures
=head1 SYNOPSIS
$dafa = $registry->get_adaptor( 'Human', 'Core', 'DnaAlignFeature' );
my @features = @{ $dafa->fetch_all_by_Slice($slice) };
$dafa->store(@features);
=head1 DESCRIPTION
This is an adaptor responsible for the retrieval and storage of
DnaDnaAlignFeatures from the Otter database. This adaptor inherits most of its
functionality from the DnaAlignFeatureAdaptor, BaseAlignFeatureAdaptor and BaseFeatureAdaptor
superclasses.
The module inherits all methods from DnaAlignFeatureAdaptor EXCEPT that
it overrides the _objs_from_sth method to include dna_align_feature_history
and there is a new method, fetch_all_by_Slice_attach_daf_history.
=head1 CONTACT
Post questions to the EnsEMBL development list <http://lists.ensembl.org/mailman/listinfo/dev>
=cut
package Bio::EnsEMBL::Analysis::Tools::Otter::DBSQL::DnaAlignFeatureAdaptor;
use warnings ;
use vars qw(@ISA);
use strict;
use Bio::EnsEMBL::DBSQL::DnaAlignFeatureAdaptor;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
@ISA = qw(Bio::EnsEMBL::DBSQL::DnaAlignFeatureAdaptor);
# Currently a straight alias for fetch_all_by_Slice(); the overridden
# _objs_from_sth() below already attaches a dna_align_feature_history object
# to every feature it builds, so no extra work is done here.
sub fetch_all_by_Slice_attach_daf_history {
# need to modify this method so that we fetch the dna_align_feature_history too
my ($self, $slice, $logic_name) = @_;
return $self->fetch_all_by_Slice($slice, $logic_name);
}
=head2 _objs_from_sth
Arg [1] : DBI statement handle $sth
an exectuted DBI statement handle generated by selecting
the columns specified by _columns() from the table specified
by _table()
Example : @dna_dna_align_feats = $self->_obj_from_hashref
Description: PROTECTED implementation of superclass abstract method.
Creates DnaDnaAlignFeature objects from a DBI hashref
Returntype : listref of Bio::EnsEMBL::DnaDnaAlignFeatures
Exceptions : none
Caller : Bio::EnsEMBL::BaseFeatureAdaptor::generic_fetch
Status : Stable
=cut
sub _objs_from_sth {
my ($self, $sth, $mapper, $dest_slice) = @_;
my @features;
# Per-id caches so each analysis/slice is fetched from its adaptor once.
my %analysis_hash;
my %slice_hash;
my %sr_name_hash;
my %sr_cs_hash;
my($dna_align_feature_id, $seq_region_id, $analysis_id, $seq_region_start,
$seq_region_end, $seq_region_strand, $hit_start, $hit_end, $hit_name,
$hit_strand, $cigar_line, $evalue, $perc_ident, $score,
$external_db_id, $hcoverage, $extra_data,
$external_db_name, $external_display_db_name);
$sth->bind_columns(
\$dna_align_feature_id, \$seq_region_id, \$analysis_id, \$seq_region_start,
\$seq_region_end, \$seq_region_strand, \$hit_start, \$hit_end, \$hit_name,
\$hit_strand, \$cigar_line, \$evalue, \$perc_ident, \$score,
\$external_db_id, \$hcoverage, \$extra_data,
\$external_db_name, \$external_display_db_name);
my $sa = $dest_slice ? $dest_slice->adaptor() : $self->db()->get_SliceAdaptor();;
my $aa = $self->db->get_AnalysisAdaptor();
# NOTE(review): at this point no row has been fetched yet, so the three
# bound variables passed here are still undef -- presumably the adaptor
# getter ignores its arguments; confirm against its implementation.
my $dafha = $self->db->get_DnaAlignFeatureHistoryAdaptor($seq_region_id,$analysis_id,$dna_align_feature_id);
my $asm_cs;
my $cmp_cs;
my $asm_cs_vers;
my $asm_cs_name;
my $cmp_cs_vers;
my $cmp_cs_name;
if($mapper) {
$asm_cs = $mapper->assembled_CoordSystem();
$cmp_cs = $mapper->component_CoordSystem();
$asm_cs_name = $asm_cs->name();
$asm_cs_vers = $asm_cs->version();
$cmp_cs_name = $cmp_cs->name();
$cmp_cs_vers = $cmp_cs->version();
}
my $dest_slice_start;
my $dest_slice_end;
my $dest_slice_strand;
my $dest_slice_length;
my $dest_slice_sr_name;
my $dest_slice_seq_region_id;
if($dest_slice) {
$dest_slice_start = $dest_slice->start();
$dest_slice_end = $dest_slice->end();
$dest_slice_strand = $dest_slice->strand();
$dest_slice_length = $dest_slice->length();
$dest_slice_sr_name = $dest_slice->seq_region_name();
$dest_slice_seq_region_id = $dest_slice->get_seq_region_id();
}
FEATURE: while($sth->fetch()) {
#get the analysis object
my $analysis = $analysis_hash{$analysis_id} ||=
$aa->fetch_by_dbID($analysis_id);
# Otter-specific addition: pair each feature with its history record.
my $dna_align_feature_history = $dafha->fetch_by_DnaAlignFeature_info($dna_align_feature_id, $seq_region_id, $analysis_id);
#get the slice object
my $slice = $slice_hash{"ID:".$seq_region_id};
if(!$slice) {
$slice = $sa->fetch_by_seq_region_id($seq_region_id);
$slice_hash{"ID:".$seq_region_id} = $slice;
$sr_name_hash{$seq_region_id} = $slice->seq_region_name();
$sr_cs_hash{$seq_region_id} = $slice->coord_system();
}
my $sr_name = $sr_name_hash{$seq_region_id};
my $sr_cs = $sr_cs_hash{$seq_region_id};
#
# remap the feature coordinates to another coord system
# if a mapper was provided
#
if($mapper) {
($seq_region_id,$seq_region_start,$seq_region_end,$seq_region_strand) =
$mapper->fastmap($sr_name, $seq_region_start, $seq_region_end,
$seq_region_strand, $sr_cs);
#skip features that map to gaps or coord system boundaries
next FEATURE if(!defined($seq_region_id));
#get a slice in the coord system we just mapped to
# NOTE(review): both branches below are identical; the coord-system test
# looks vestigial -- confirm before simplifying.
if($asm_cs == $sr_cs || ($cmp_cs != $sr_cs && $asm_cs->equals($sr_cs))) {
$slice = $slice_hash{"ID:".$seq_region_id} ||=
$sa->fetch_by_seq_region_id($seq_region_id);
} else {
$slice = $slice_hash{"ID:".$seq_region_id} ||=
$sa->fetch_by_seq_region_id($seq_region_id);
}
}
#
# If a destination slice was provided convert the coords
# If the dest_slice starts at 1 and is foward strand, nothing needs doing
#
if($dest_slice) {
if($dest_slice_start != 1 || $dest_slice_strand != 1) {
if($dest_slice_strand == 1) {
$seq_region_start = $seq_region_start - $dest_slice_start + 1;
$seq_region_end = $seq_region_end - $dest_slice_start + 1;
} else {
# Reverse-strand destination: flip coordinates and strand.
my $tmp_seq_region_start = $seq_region_start;
$seq_region_start = $dest_slice_end - $seq_region_end + 1;
$seq_region_end = $dest_slice_end - $tmp_seq_region_start + 1;
$seq_region_strand *= -1;
}
#throw away features off the end of the requested slice
if($seq_region_end < 1 || $seq_region_start > $dest_slice_length ||
( $dest_slice_seq_region_id ne $seq_region_id )) {
next FEATURE;
}
}
$slice = $dest_slice;
}
# Finally, create the new DnaAlignFeature.
# not that we can't use create_fast
# and also we must write our keys as eg. -slice and not as eg. 'slice'
# or it will confuse Feature.pm and pass incorrect values
push( @features,
$self->_create_feature(
'Bio::EnsEMBL::Analysis::Tools::Otter::DnaAlignFeature', {
-slice => $slice,
-start => $seq_region_start,
-end => $seq_region_end,
-strand => $seq_region_strand,
-hseqname => $hit_name,
-hstart => $hit_start,
-hend => $hit_end,
-hstrand => $hit_strand,
-score => $score,
-p_value => $evalue,
-percent_id => $perc_ident,
-cigar_string => $cigar_line,
-analysis => $analysis,
-adaptor => $self,
-dbID => $dna_align_feature_id,
-external_db_id => $external_db_id,
-hcoverage => $hcoverage,
-extra_data => $extra_data ? $self->get_dumped_data($extra_data) : '',
-dbname => $external_db_name,
-db_display_name => $external_display_db_name,
-dna_align_feature_history => $dna_align_feature_history,
} ) );
}
return \@features;
}
1;
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Tools/Otter/DBSQL/DnaAlignFeatureAdaptor.pm | Perl | apache-2.0 | 9,173 |
# A collection of expressions that all evaluate to true in Perl -- including
# the string "false", since any non-empty string other than "0" is true.
# The statements double as the file's required true return value.
1;
666;
"false";
"Steve Peters, Master Of True Value Finding, was here.";
| jmcveigh/komodo-tools | scripts/perl/simulate_hostile_environments_in_your_tests/fun_true_values.pl | Perl | bsd-2-clause | 74 |
package KorAP::XML::Annotation::LWC::Dependency;
use KorAP::XML::Annotation::Base;
use strict;
use warnings;
# Convert LWC dependency annotations into Krill index relation terms.
# Registers a token callback on the foundry/layer pair; for each token's
# relations it emits forward ('>:lwc/d:LABEL') and backward ('<:lwc/d:LABEL')
# terms with binary payloads encoding the partner positions.
# Returns 1 on success, nothing if add_tokendata fails.
sub parse {
my $self = shift;
# Relation data
$$self->add_tokendata(
foundry => 'lwc',
layer => 'dependency',
cb => sub {
my ($stream, $source, $tokens) = @_;
# Get MultiTermToken from stream for source
my $mtt = $stream->pos($source->get_pos);
# Serialized information from token
my $content = $source->get_hash;
# Get relation information; normalize a single rel into a one-element list
my $rel = $content->{rel};
$rel = [$rel] unless ref $rel eq 'ARRAY';
my $mt;
# Iterate over relations
foreach (@$rel) {
my $label = $_->{-label};
#my $target = $stream->tui($source->pos);
my $from = $_->{span}->{-from};
my $to = $_->{span}->{-to};
# Target: first try to resolve the span as a single token
my $target = $tokens->token($from, $to);
# Relation is term-to-term with a found target!
if ($target) {
# Unary means, it refers to itself!
# pti 32 = term-to-term; payload carries the partner token position.
$mt = $mtt->add_by_term('>:lwc/d:' . $label);
$mt->set_pti(32); # term-to-term relation
$mt->set_payload(
'<i>' . $target->get_pos # . # right part token position
# '<s>0' . # $source_term->tui . # left part tui
# '<s>0' # . $target_term->tui # right part tui
);
# Mirror relation on the target token, pointing back at the source.
$mt = $stream->pos($target->get_pos)
->add_by_term('<:lwc/d:' . $label);
$mt->set_pti(32); # term-to-term relation
$mt->set_payload(
'<i>' . $source->get_pos # . # left part token position
# '<s>0' . # $source_term->tui . # left part tui
# '<s>0' # . $target_term->tui # right part tui
);
}
# Relation is possibly term-to-element
# with a found target!
elsif ($target = $tokens->span($from, $to)) {
# pti 33/34 = term-to-element / element-to-term; payloads carry the
# element's offsets and positions as documented in the Krill format.
$mt = $mtt->add_by_term('>:lwc/d:' . $label);
$mt->set_pti(33); # term-to-element relation
$mt->set_payload(
'<i>' . $target->get_o_start . # end position
'<i>' . $target->get_o_end . # end position
'<i>' . $target->get_p_start . # right part start position
'<i>' . $target->get_p_end # . # right part end position
# '<s>0' . # $source_term->tui . # left part tui
# '<s>0' # . $target_span->tui # right part tui
);
$mt = $stream->pos($target->get_p_start)
->add_by_term('<:lwc/d:' . $label);
$mt->set_pti(34); # element-to-term relation
$mt->set_payload(
'<i>' . $target->get_o_start . # end position
'<i>' . $target->get_o_end . # end position
'<i>' . $target->get_p_end . # right part end position
'<i>' . $source->get_pos # . # left part token position
# '<s>0' . # $source_term->tui . # left part tui
# '<s>0' # . $target_span->tui # right part tui
);
}
else {
# Neither a token nor a span matched the relation's span: log and skip.
use Data::Dumper;
$$self->log->warn('Relation currently not supported: ' . Dumper($content));
};
};
}) or return;
return 1;
};
# Index layer descriptor contributed by this annotation module.
sub layer_info {
    return ['lwc/d=rels'];
}
1;
| KorAP/KorAP-XML-Krill | lib/KorAP/XML/Annotation/LWC/Dependency.pm | Perl | bsd-2-clause | 3,239 |
#!/usr/bin/perl -w
use strict;
use warnings;
use utf8;
use Encode;
use IO::Socket::UNIX;
use XML::LibXML;
use vars qw($VERSION %IRSSI);
use User::pwent;
use Data::Dumper;
use Irssi::TextUI;
use IO::Handle;
use POSIX;
use Log::Log4perl qw(:easy);
#Log::Log4perl->easy_init($INFO);
my $conf = q(
log4perl.logger = DEBUG, FileApp
log4perl.appender.FileApp = Log::Log4perl::Appender::File
log4perl.appender.FileApp.filename = irc2you-irssi-client.log
log4perl.appender.FileApp.utf8 = 1
log4perl.appender.FileApp.layout = PatternLayout
log4perl.appender.FileApp.layout.ConversionPattern = %p{1} %d> %m%n
);
Log::Log4perl->init( \$conf );
$VERSION = '0.01';
%IRSSI = (
authors => 'Tigge',
contact => 'tiggex@gmail.com',
name => 'sender',
description => 'Sends stuff...',
license => 'None',
);
# Logging helpers wrapping the Log::Log4perl root logger.
# NOTE(review): dbg() logs at ERROR level despite its name, and debug() is
# the only one that skips Encode::encode_utf8 -- confirm both are intended.
sub dbg {
my ($string) = @_;
Log::Log4perl::get_logger()->error(Encode::encode_utf8($string));
}
sub debug {
my ($string) = @_;
Log::Log4perl::get_logger()->debug($string);
}
sub info {
my ($string) = @_;
Log::Log4perl::get_logger()->info(Encode::encode_utf8($string));
}
# Log at WARN level (UTF-8 encoded).  Note this sub shadows Perl's built-in
# warn() within this script.
sub warn {
    my ($string) = @_;
    # Bug fix: Log::Log4perl loggers expose warn(), not warning(); the old
    # ->warning(...) call died with "Can't locate object method" the first
    # time this helper was used.
    Log::Log4perl::get_logger()->warn(Encode::encode_utf8($string));
}
# Connect to the irc2you daemon over its UNIX socket; abort script load if
# the daemon is not running.  (Switched from indirect-object `new Class`
# syntax to the unambiguous arrow form; behavior is identical.)
my $socket = IO::Socket::UNIX->new(Type => SOCK_STREAM, Peer => "/tmp/irc2you_socket") or die "Error $!\n";

# Pipe over which the forked receiver hands incoming data back to irssi.
my ($reader, $writer);
pipe($reader, $writer);
$writer->autoflush(1);

# NOTE(review): fork() returns undef on failure, which also satisfies
# `<= 0` here, so a failed fork would run the receive loop in-process --
# confirm whether a defined() check is wanted.
my $pid = fork();
if ($pid <= 0) {
    # Child: run the blocking receive loop, then exit without triggering
    # irssi/perl END blocks.
    info("Starting receiver thread");
    close($reader);
    receivethread();
    info("Exiting receiver thread");
    close($writer);
    POSIX::_exit(1);
}
# Parent: let irssi reap the child and watch the pipe for incoming messages.
close($writer);
Irssi::pidwait_add($pid);
my $pipe_tag = Irssi::input_add(fileno($reader), Irssi::INPUT_READ, \&received, $reader);

# Per-target ring of recent messages, used to build notification context.
my %context_buffer = ();
# Buffer one incoming PRIVMSG for its target so notifications can include
# recent context.  Keeps at most irc2you_context_rows + 1 entries per target.
sub push_buffer {
my ($server, $data, $nick, $address) = @_;
# $data is "TARGET :text" -- split once on the first " :".
my ($targ, $text) = split(/ :/, $data, 2);
debug("Got privmsg event." );
my $numRows = Irssi::settings_get_int('irc2you_context_rows');
if(not defined $context_buffer{$targ}) {
debug("First message for channel $targ");
$context_buffer{$targ} = [];
} else {
debug("$targ buffer already defined");
# Drop the oldest entry once the buffer exceeds the configured size.
if((scalar @{$context_buffer{$targ}}) > $numRows) {
shift(@{$context_buffer{$targ}});
}
}
# Adds the newest element at the end of the array
# Array size is one more than 'irc2you_context_rows' to keep count correct
# after removing the hilight from the context
# NOTE(review): assigning at fixed index $numRows leaves undef holes until
# the buffer fills up; create_context() skips those holes -- confirm intended.
@{$context_buffer{$targ}}[$numRows]={msg=>$text,from=>$nick,username=>$address,timestamp=>time()};
debug("Buffered value: $text");
}
# Build an <attached> DOM element recording whether the GNU screen session
# hosting irssi (if any) is currently attached. When not running inside
# screen at all, we report "true".
sub create_attached {
    my ($doc) = @_;
    debug("Determining if attached");
    my $listing = `screen -list`;
    $listing = '' unless defined $listing;    # avoid undef warnings
    my $parent_pid = getppid();
    my $node = $doc->createElement('attached');
    my $state;
    if ($listing =~ m/$parent_pid.*\(Attached\)/) {
        debug("attached");
        $state = "true";
    }
    elsif ($listing =~ m/$parent_pid.*\(Detached\)/) {
        $state = "false";
    }
    else {
        $state = "true";
        debug("Not running in screen. Setting attached to true");
    }
    $node->appendText($state);
    debug("Done Determining if attached");
    return $node;
}
# Build a <context> DOM element containing the buffered recent messages
# for $targ, one <context_item> per message with sender metadata attached.
sub create_context {
    my ($doc, $targ) = @_;
    debug("Creating message context");
    my $ctx = $doc->createElement('context');
    for my $entry (@{ $context_buffer{$targ} }) {
        # The buffer may contain undef padding slots; skip them.
        next unless defined $entry;
        debug("Creating message context item");
        my $item = $doc->createElement('context_item');
        $item->appendText($entry->{msg});
        $item->setAttribute("from",      $entry->{from});
        $item->setAttribute("timestamp", $entry->{timestamp});
        $item->setAttribute("username",  $entry->{username});
        $ctx->appendChild($item);
    }
    debug("Done Creating message context");
    return $ctx;
}
# Re-entrancy guard: our own debug prints would re-trigger "print text".
my $working;
# Signal handler for "print text": when a hilight or private message is
# printed, build a <notification> XML document (sender, message, channel,
# away/attached state, recent context) and send it to the daemon socket.
sub sender {
    my ($text_dest, $text, $stripped_text) = @_;
    return if $working;
    $working = 1;
    dbg($text_dest);
    my $serv = $text_dest->{server};
    my $targ = $text_dest->{target};
    if (!$targ) {
        $working = 0;
        return;
    }
    if ($targ =~ m/#/) {
        info("hilight in channel");
    } else {
        info("hilight from person");
    }
    info("target: '" . $targ . "'");
    if (($text_dest->{level} & (Irssi::MSGLEVEL_HILIGHT() | Irssi::MSGLEVEL_MSGS())) && ($text_dest->{level} & Irssi::MSGLEVEL_NOHILIGHT()) == 0) {
        my $doc = XML::LibXML::Document->new('1.0', "utf-8");
        my $element = $doc->createElement("notification");
        # Printed lines look like "<nick> message"; extract the nick.
        if ($stripped_text =~ m/^<\s*(.*)\s*>/) {
            my $sender = $doc->createElement('sender');
            $sender->appendText(Encode::encode_utf8($1));
            $element->appendChild($sender);
        }
        my $mess = $doc->createElement('message');
        # BUG FIX: the context buffer for $targ may be missing or empty
        # (e.g. a hilight printed before any privmsg was buffered); the
        # original dereferenced it unconditionally. Fall back to the
        # stripped printed text.
        my $last_msg = $context_buffer{$targ} ? $context_buffer{$targ}[-1] : undef;
        my $body = defined $last_msg ? $last_msg->{msg} : $stripped_text;
        $mess->appendText(Encode::encode_utf8($body));
        $element->appendChild($mess);
        my $chan = $doc->createElement('channel');
        $chan->appendText(Encode::encode_utf8($targ));
        $element->appendChild($chan);
        my $away = $doc->createElement('away');
        $away->appendText($serv->{usermode_away} == 1 ? "true" : "false");
        $element->appendChild($away);
        $element->appendChild(create_attached($doc));
        $element->appendChild(create_context($doc, $targ));
        # BUG FIX: the original declared "my $text" here, shadowing the
        # signal argument of the same name; use a distinct variable.
        my $payload = $element->toString();
        debug("sending message: '$payload'");
        print $socket $payload;
        debug("message sent");
        print $socket "\n";
    }
    $working = 0;
}
# Invoked by irssi's event loop whenever the forked receiver child writes
# a line to the pipe. An undefined read means the child closed its end of
# the pipe, so the watcher is torn down; otherwise the line is parsed as
# an XML command and relayed as a /msg.
sub received {
    my $reader = shift;
    my $text = <$reader>;
    if (!defined($text)) {
        # EOF: the receiver child exited; stop watching the pipe.
        close($reader);
        Irssi::input_remove($pipe_tag);
    } else {
        info("received something on reader pipe");
        info("got: '$text'");
        info("$pid");
        my $dom = XML::LibXML->load_xml(string => $text);
        my $rn = $dom->getDocumentElement();
        my $chn = $rn->getElementsByTagName("channel")->[0]->to_literal;
        my $mes = $rn->getElementsByTagName("message")->[0]->to_literal;
        my $server = Irssi::active_server();
        # NOTE(review): $chn/$mes come from the daemon unsanitized; a
        # crafted payload could inject further irssi commands - verify.
        Irssi::Server::command($server, "/msg $chn $mes");
        info("sent '/msg $chn $mes'");
    }
}
# Runs in the forked child: blocks reading lines from the daemon socket
# and forwards each complete line to the parent through the pipe.
sub receivethread {
    info("receive thread started");
    until (eof($socket)) {
        my $line = <$socket>;
        die "readline failed: $!" unless defined $line;
        info("got line from socket '$line'");
        print {$writer} $line;
    }
    info("receivethread killed");
    return;
}
# Signal handler for "gui exit": shut down both directions of the daemon
# socket so the receiver child sees EOF and exits.
sub terminate {
    info("Irssi is being terminated");
    shutdown($socket, 2);    # 2 = stop both reading and writing
}
info("Started irc2you irssi client");
# Number of recent messages kept per target for notification context.
Irssi::settings_add_int('irc2you', 'irc2you_context_rows', 4);
# Wire up the irssi signal handlers defined above.
Irssi::signal_add("print text", 'sender');
Irssi::signal_add("event privmsg", 'push_buffer');
Irssi::signal_add("gui exit", 'terminate');
| stenbacka/irc2you | irc.clients/irssi/irc2you.pl | Perl | bsd-3-clause | 7,603 |
#!%PERL%
# Copyright (c) vhffs project and its contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
# 3. Neither the name of vhffs nor the names of its contributors
#    may be used to endorse or promote products derived from this
#    software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
use strict;
use utf8;
use lib '%VHFFS_LIB_DIR%';
use Vhffs::Robots::Svn;
# Process pending SVN repository jobs: create, then delete, then modify
# every repository waiting in the corresponding state. The robot lock
# prevents two instances from running concurrently.
my $vhffs = Vhffs->new;    # direct method call, not indirect-object "new Vhffs"
exit 1 unless defined $vhffs;

Vhffs::Robots::lock( $vhffs, 'svn' );

# State constant => handler, executed in this order; this replaces three
# copy-pasted getall/foreach blocks.
my @jobs = (
    [ Vhffs::Constants::WAITING_FOR_CREATION,     \&Vhffs::Robots::Svn::create ],
    [ Vhffs::Constants::WAITING_FOR_DELETION,     \&Vhffs::Robots::Svn::delete ],
    [ Vhffs::Constants::WAITING_FOR_MODIFICATION, \&Vhffs::Robots::Svn::modify ],
);

for my $job (@jobs) {
    my ($state, $handler) = @{$job};
    my $repos = Vhffs::Services::Svn::getall( $vhffs, $state );
    $handler->($_) for @{$repos};
}

Vhffs::Robots::unlock( $vhffs, 'svn' );
exit 0;
| najamelan/vhffs-4.5 | vhffs-robots/src/svn.pl | Perl | bsd-3-clause | 2,223 |
#!/usr/bin/perl -w
use strict;
require LWP::UserAgent;
require HTML::Parser;
require DBI;
require CLHousingSearch;
# Craigslist site to search and MySQL connection settings.
# NOTE(review): database credentials are hard-coded here; consider moving
# them to a configuration file kept out of version control.
my $SITE = "sfbay";
my $HOST = "meiwu";
my $DRIVER = "mysql";
my $USER = "cl_housing";
my $PASSWORD = "password";
my $DATABASE = "cl_housing";
my $TABLE = "posts";
# Configure the search; the commented lines show the optional filters.
my $clhs = CLHousingSearch->new();
$clhs->site($SITE);
#$clhs->area("sby");
#$clhs->minAsk(0);
#$clhs->maxAsk(2000);
#$clhs->bedrooms(2);
#$clhs->neighborhood(35);
#print $clhs->uri()."\n";
# Old-style DBI DSN of the form "DBI:driver:database:host".
my $dsn = "DBI:$DRIVER:$DATABASE:$HOST";
my $dbh = DBI->connect($dsn, $USER, $PASSWORD);
if(!$dbh)
{
    die("Error connecting to database.\n");
}
# Returns a true value when $field was fully parsed by the scraper, or
# undef when it still contains the "(UNPARSED)" marker.
sub test_unparsed_field
{
    my ($field) = @_;
    return $field =~ /\(UNPARSED\)/ ? undef : 1;
}
# Escapes a value for interpolation into a double-quoted SQL string
# literal and wraps it in the quotes.
# BUG FIX: the original escaped only double quotes; a value containing a
# backslash produced a corrupted (and injectable) literal in MySQL.
# Backslashes are escaped first so we do not double-escape the ones we add.
# NOTE(review): string-built SQL remains fragile; prefer DBI placeholders.
sub escape_and_wrap
{
    my ($str) = @_;
    $str =~ s/\\/\\\\/g;
    $str =~ s/"/\\"/g;
    return "\"".$str."\"";
}
# Inserts one search result into the posts table.
# SECURITY FIX: uses DBI placeholders instead of string-interpolated SQL;
# the listing text is scraped from the web (untrusted) and the previous
# hand-rolled quoting was injectable.
sub harvest
{
    my ($result) = @_;
    my $sth = $dbh->prepare(
        "INSERT INTO $TABLE (url, rent, bedrooms, neighborhood, description) ".
        "VALUES (?, ?, ?, ?, ?)");
    $sth->execute(@{$result}{qw(url rent bedrooms neighborhood description)});
}
# Recreate the posts table from scratch on every run, then walk the
# search results, storing only rows whose fields all parsed cleanly.
$dbh->do("DROP TABLE $TABLE;");
$dbh->do("CREATE TABLE $TABLE (url VARCHAR(1000), rent INT NOT NULL, ".
    "bedrooms INT NOT NULL, neighborhood VARCHAR(1000), ".
    "description VARCHAR(10000));");
my $results;
$clhs->search();
# results() appears to return successive batches and a false value once
# exhausted - TODO confirm against CLHousingSearch's documentation.
while(($results = $clhs->results()))
{
    my $result;
    for $result(@$results)
    {
        # Skip any listing with an "(UNPARSED)" marker in a needed field.
        if(test_unparsed_field($result->{url}) &&
            test_unparsed_field($result->{rent}) &&
            test_unparsed_field($result->{bedrooms}) &&
            test_unparsed_field($result->{neighborhood}) &&
            test_unparsed_field($result->{description}))
        {
            harvest($result);
        }
    }
}
$dbh->disconnect();
| sheheitthey/CLHousingSearch | harvest.pl | Perl | bsd-3-clause | 2,065 |
package Paws::EMR::DescribeStepOutput;
use Moose;
# Step details for the requested step identifier (see POD below).
has Step => (is => 'ro', isa => 'Paws::EMR::Step');
# AWS request id associated with this response.
has _request_id => (is => 'ro', isa => 'Str');
### main pod documentation begin ###
=head1 NAME
Paws::EMR::DescribeStepOutput
=head1 ATTRIBUTES
=head2 Step => L<Paws::EMR::Step>
The step details for the requested step identifier.
=head2 _request_id => Str
=cut
1; | ioanrogers/aws-sdk-perl | auto-lib/Paws/EMR/DescribeStepOutput.pm | Perl | apache-2.0 | 385 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
Lucy - Apache Lucy search engine library.
=head1 VERSION
0.4.0
=head1 SYNOPSIS
First, plan out your index structure, create the index, and add documents:
# indexer.pl
use Lucy::Index::Indexer;
use Lucy::Plan::Schema;
use Lucy::Analysis::EasyAnalyzer;
use Lucy::Plan::FullTextType;
# Create a Schema which defines index fields.
my $schema = Lucy::Plan::Schema->new;
my $easyanalyzer = Lucy::Analysis::EasyAnalyzer->new(
language => 'en',
);
my $type = Lucy::Plan::FullTextType->new(
analyzer => $easyanalyzer,
);
$schema->spec_field( name => 'title', type => $type );
$schema->spec_field( name => 'content', type => $type );
# Create the index and add documents.
my $indexer = Lucy::Index::Indexer->new(
schema => $schema,
index => '/path/to/index',
create => 1,
);
while ( my ( $title, $content ) = each %source_docs ) {
$indexer->add_doc({
title => $title,
content => $content,
});
}
$indexer->commit;
Then, search the index:
# search.pl
use Lucy::Search::IndexSearcher;
my $searcher = Lucy::Search::IndexSearcher->new(
index => '/path/to/index'
);
my $hits = $searcher->hits( query => "foo bar" );
while ( my $hit = $hits->next ) {
print "$hit->{title}\n";
}
=head1 DESCRIPTION
The Apache Lucy search engine library delivers high-performance, modular
full-text search.
=head2 Features
=over
=item *
Extremely fast. A single machine can handle millions of documents.
=item *
Scalable to multiple machines.
=item *
Incremental indexing (addition/deletion of documents to/from an existing
index).
=item *
Configurable near-real-time index updates.
=item *
Unicode support.
=item *
Support for boolean operators AND, OR, and AND NOT; parenthetical groupings;
prepended +plus and -minus.
=item *
Algorithmic selection of relevant excerpts and highlighting of search terms
within excerpts.
=item *
Highly customizable query and indexing APIs.
=item *
Customizable sorting.
=item *
Phrase matching.
=item *
Stemming.
=item *
Stoplists.
=back
=head2 Getting Started
L<Lucy::Simple> provides a stripped down API which may suffice for many
tasks.
L<Lucy::Docs::Tutorial> demonstrates how to build a basic CGI search
application.
The tutorial spends most of its time on these five classes:
=over
=item *
L<Lucy::Plan::Schema> - Plan out your index.
=item *
L<Lucy::Plan::FieldType> - Define index fields.
=item *
L<Lucy::Index::Indexer> - Manipulate index content.
=item *
L<Lucy::Search::IndexSearcher> - Search an index.
=item *
L<Lucy::Analysis::EasyAnalyzer> - A one-size-fits-all parser/tokenizer.
=back
=head2 Delving Deeper
L<Lucy::Docs::Cookbook> augments the tutorial with more advanced
recipes.
For creating complex queries, see L<Lucy::Search::Query> and its
subclasses L<TermQuery|Lucy::Search::TermQuery>,
L<PhraseQuery|Lucy::Search::PhraseQuery>,
L<ANDQuery|Lucy::Search::ANDQuery>,
L<ORQuery|Lucy::Search::ORQuery>,
L<NOTQuery|Lucy::Search::NOTQuery>,
L<RequiredOptionalQuery|Lucy::Search::RequiredOptionalQuery>,
L<MatchAllQuery|Lucy::Search::MatchAllQuery>, and
L<NoMatchQuery|Lucy::Search::NoMatchQuery>, plus
L<Lucy::Search::QueryParser>.
For distributed searching, see L<LucyX::Remote::SearchServer>,
L<LucyX::Remote::SearchClient>, and L<LucyX::Remote::ClusterSearcher>.
=head2 Backwards Compatibility Policy
Lucy will spin off stable forks into new namespaces periodically. The first
will be named "Lucy1". Users who require strong backwards compatibility
should use a stable fork.
The main namespace, "Lucy", is an API-unstable development branch (as hinted
at by its 0.x.x version number). Superficial interface changes happen
frequently. Hard file format compatibility breaks which require reindexing
are rare, as we generally try to provide continuity across multiple releases,
but we reserve the right to make such changes.
=head1 CLASS METHODS
The Lucy module itself does not have a large interface, providing only a
single public class method.
=head2 error
my $instream = $folder->open_in( file => 'foo' ) or die Clownfish->error;
Access a shared variable which is set by some routines on failure. It will
always be either a L<Clownfish::Err> object or undef.
=head1 SUPPORT
The Apache Lucy homepage, where you'll find links to our mailing lists and so
on, is L<http://lucy.apache.org>. Please direct support questions
to the Lucy users mailing list.
=head1 BUGS
Not thread-safe.
Some exceptions leak memory.
If you find a bug, please inquire on the Lucy users mailing list about it,
then report it on the Lucy issue tracker once it has been confirmed:
L<https://issues.apache.org/jira/browse/LUCY>.
=head1 COPYRIGHT
Apache Lucy is distributed under the Apache License, Version 2.0, as
described in the file C<LICENSE> included with the distribution.
=cut
| kidaa/lucy | perl/lib/Lucy.pod | Perl | apache-2.0 | 5,794 |
# Generated by SOAP::WSDL from the AdWords v201406 WSDL (see POD below);
# regenerate rather than hand-edit.
package Google::Ads::AdWords::v201406::CompetitionSearchParameter::Level;
use strict;
use warnings;
# XML namespace this simpleType belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/o/v201406'};
# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);
1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
CompetitionSearchParameter.Level from the namespace https://adwords.google.com/api/adwords/o/v201406.
An enumeration of possible values to be used in conjunction with the {@link CompetitionSearchParameter} to specify the granularity of competition to be filtered.
This class is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like its base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201406/CompetitionSearchParameter/Level.pm | Perl | apache-2.0 | 1,263 |
#############################################################################
# Pod/Parser.pm -- package which defines a base class for parsing POD docs.
#
# Copyright (C) 1996-2000 by Bradford Appleton. All rights reserved.
# This file is part of "PodParser". PodParser is free software;
# you can redistribute it and/or modify it under the same terms
# as Perl itself.
#############################################################################
package Pod::Parser;
use strict;
## These "variables" are used as local "glob aliases" for performance
## NOTE(review): "use vars" is obsolete in modern Perl (prefer "our");
## kept here for compatibility with the perl 5.005 floor required below.
use vars qw($VERSION @ISA %myData %myOpts @input_stack);
$VERSION = '1.63'; ## Current version of this package
require 5.005; ## requires this Perl version or later
#############################################################################
=head1 NAME
Pod::Parser - base class for creating POD filters and translators
=head1 SYNOPSIS
use Pod::Parser;
package MyParser;
@ISA = qw(Pod::Parser);
sub command {
my ($parser, $command, $paragraph, $line_num) = @_;
## Interpret the command and its text; sample actions might be:
if ($command eq 'head1') { ... }
elsif ($command eq 'head2') { ... }
## ... other commands and their actions
my $out_fh = $parser->output_handle();
my $expansion = $parser->interpolate($paragraph, $line_num);
print $out_fh $expansion;
}
sub verbatim {
my ($parser, $paragraph, $line_num) = @_;
## Format verbatim paragraph; sample actions might be:
my $out_fh = $parser->output_handle();
print $out_fh $paragraph;
}
sub textblock {
my ($parser, $paragraph, $line_num) = @_;
## Translate/Format this block of text; sample actions might be:
my $out_fh = $parser->output_handle();
my $expansion = $parser->interpolate($paragraph, $line_num);
print $out_fh $expansion;
}
sub interior_sequence {
my ($parser, $seq_command, $seq_argument) = @_;
## Expand an interior sequence; sample actions might be:
return "*$seq_argument*" if ($seq_command eq 'B');
return "`$seq_argument'" if ($seq_command eq 'C');
return "_${seq_argument}_'" if ($seq_command eq 'I');
## ... other sequence commands and their resulting text
}
package main;
## Create a parser object and have it parse file whose name was
## given on the command-line (use STDIN if no files were given).
$parser = new MyParser();
$parser->parse_from_filehandle(\*STDIN) if (@ARGV == 0);
for (@ARGV) { $parser->parse_from_file($_); }
=head1 REQUIRES
perl5.005, Pod::InputObjects, Exporter, Symbol, Carp
=head1 EXPORTS
Nothing.
=head1 DESCRIPTION
B<NOTE: This module is considered legacy; modern Perl releases (5.18 and
higher) are going to remove Pod-Parser from core and use L<Pod-Simple>
for all things POD.>
B<Pod::Parser> is a base class for creating POD filters and translators.
It handles most of the effort involved with parsing the POD sections
from an input stream, leaving subclasses free to be concerned only with
performing the actual translation of text.
B<Pod::Parser> parses PODs, and makes method calls to handle the various
components of the POD. Subclasses of B<Pod::Parser> override these methods
to translate the POD into whatever output format they desire.
=head1 QUICK OVERVIEW
To create a POD filter for translating POD documentation into some other
format, you create a subclass of B<Pod::Parser> which typically overrides
just the base class implementation for the following methods:
=over 2
=item *
B<command()>
=item *
B<verbatim()>
=item *
B<textblock()>
=item *
B<interior_sequence()>
=back
You may also want to override the B<begin_input()> and B<end_input()>
methods for your subclass (to perform any needed per-file and/or
per-document initialization or cleanup).
If you need to perform any preprocessing of input before it is parsed
you may want to override one or more of B<preprocess_line()> and/or
B<preprocess_paragraph()>.
Sometimes it may be necessary to make more than one pass over the input
files. If this is the case you have several options. You can make the
first pass using B<Pod::Parser> and override your methods to store the
intermediate results in memory somewhere for the B<end_pod()> method to
process. You could use B<Pod::Parser> for several passes with an
appropriate state variable to control the operation for each pass. If
your input source can't be reset to start at the beginning, you can
store it in some other structure as a string or an array and have that
structure implement a B<getline()> method (which is all that
B<parse_from_filehandle()> uses to read input).
Feel free to add any member data fields you need to keep track of things
like current font, indentation, horizontal or vertical position, or
whatever else you like. Be sure to read L<"PRIVATE METHODS AND DATA">
to avoid name collisions.
For the most part, the B<Pod::Parser> base class should be able to
do most of the input parsing for you and leave you free to worry about
how to interpret the commands and translate the result.
Note that all we have described here in this quick overview is the
simplest most straightforward use of B<Pod::Parser> to do stream-based
parsing. It is also possible to use the B<Pod::Parser::parse_text> function
to do more sophisticated tree-based parsing. See L<"TREE-BASED PARSING">.
=head1 PARSING OPTIONS
A I<parse-option> is simply a named option of B<Pod::Parser> with a
value that corresponds to a certain specified behavior. These various
behaviors of B<Pod::Parser> may be enabled/disabled by setting
or unsetting one or more I<parse-options> using the B<parseopts()> method.
The set of currently accepted parse-options is as follows:
=over 3
=item B<-want_nonPODs> (default: unset)
Normally (by default) B<Pod::Parser> will only provide access to
the POD sections of the input. Input paragraphs that are not part
of the POD-format documentation are not made available to the caller
(not even using B<preprocess_paragraph()>). Setting this option to a
non-empty, non-zero value will allow B<preprocess_paragraph()> to see
non-POD sections of the input as well as POD sections. The B<cutting()>
method can be used to determine if the corresponding paragraph is a POD
paragraph, or some other input paragraph.
=item B<-process_cut_cmd> (default: unset)
Normally (by default) B<Pod::Parser> handles the C<=cut> POD directive
by itself and does not pass it on to the caller for processing. Setting
this option to a non-empty, non-zero value will cause B<Pod::Parser> to
pass the C<=cut> directive to the caller just like any other POD command
(and hence it may be processed by the B<command()> method).
B<Pod::Parser> will still interpret the C<=cut> directive to mean that
"cutting mode" has been (re)entered, but the caller will get a chance
to capture the actual C<=cut> paragraph itself for whatever purpose
it desires.
=item B<-warnings> (default: unset)
Normally (by default) B<Pod::Parser> recognizes a bare minimum of
pod syntax errors and warnings and issues diagnostic messages
for errors, but not for warnings. (Use B<Pod::Checker> to do more
thorough checking of POD syntax.) Setting this option to a non-empty,
non-zero value will cause B<Pod::Parser> to issue diagnostics for
the few warnings it recognizes as well as the errors.
=back
Please see L<"parseopts()"> for a complete description of the interface
for the setting and unsetting of parse-options.
=cut
#############################################################################
#use diagnostics;
use Pod::InputObjects;
use Carp;
use Exporter;
BEGIN {
if ($] < 5.006) {
require Symbol;
import Symbol;
}
}
@ISA = qw(Exporter);
#############################################################################
=head1 RECOMMENDED SUBROUTINE/METHOD OVERRIDES
B<Pod::Parser> provides several methods which most subclasses will probably
want to override. These methods are as follows:
=cut
##---------------------------------------------------------------------------
=head1 B<command()>
$parser->command($cmd,$text,$line_num,$pod_para);
This method should be overridden by subclasses to take the appropriate
action when a POD command paragraph (denoted by a line beginning with
"=") is encountered. When such a POD directive is seen in the input,
this method is called and is passed:
=over 3
=item C<$cmd>
the name of the command for this POD paragraph
=item C<$text>
the paragraph text for the given POD paragraph command.
=item C<$line_num>
the line-number of the beginning of the paragraph
=item C<$pod_para>
a reference to a C<Pod::Paragraph> object which contains further
information about the paragraph command (see L<Pod::InputObjects>
for details).
=back
B<Note> that this method I<is> called for C<=pod> paragraphs.
The base class implementation of this method simply treats the raw POD
command as normal block of paragraph text (invoking the B<textblock()>
method with the command paragraph).
=cut
sub command {
    my ($self, $command, $paragraph, $line_num, $pod_para) = @_;
    ## Default behaviour: hand the raw command paragraph to textblock(),
    ## i.e. treat it as an ordinary block of paragraph text.
    $self->textblock($pod_para->raw_text(), $line_num, $pod_para);
}
##---------------------------------------------------------------------------
=head1 B<verbatim()>
$parser->verbatim($text,$line_num,$pod_para);
This method may be overridden by subclasses to take the appropriate
action when a block of verbatim text is encountered. It is passed the
following parameters:
=over 3
=item C<$text>
the block of text for the verbatim paragraph
=item C<$line_num>
the line-number of the beginning of the paragraph
=item C<$pod_para>
a reference to a C<Pod::Paragraph> object which contains further
information about the paragraph (see L<Pod::InputObjects>
for details).
=back
The base class implementation of this method simply prints the textblock
(unmodified) to the output filehandle.
=cut
sub verbatim {
    my ($self, $paragraph, $line_num, $pod_para) = @_;
    ## Default behaviour: copy the verbatim paragraph to the output
    ## filehandle without modification or interpolation.
    print { $self->{_OUTPUT} } $paragraph;
}
##---------------------------------------------------------------------------
=head1 B<textblock()>
$parser->textblock($text,$line_num,$pod_para);
This method may be overridden by subclasses to take the appropriate
action when a normal block of POD text is encountered (although the base
class method will usually do what you want). It is passed the following
parameters:
=over 3
=item C<$text>
the block of text for the a POD paragraph
=item C<$line_num>
the line-number of the beginning of the paragraph
=item C<$pod_para>
a reference to a C<Pod::Paragraph> object which contains further
information about the paragraph (see L<Pod::InputObjects>
for details).
=back
In order to process interior sequences, subclasses implementations of
this method will probably want to invoke either B<interpolate()> or
B<parse_text()>, passing it the text block C<$text>, and the corresponding
line number in C<$line_num>, and then perform any desired processing upon
the returned result.
The base class implementation of this method simply prints the text block
as it occurred in the input stream).
=cut
sub textblock {
    my ($self, $paragraph, $line_num, $pod_para) = @_;
    ## Default behaviour: expand interior sequences via interpolate()
    ## and print the expanded result to the output filehandle.
    print { $self->{_OUTPUT} } $self->interpolate($paragraph, $line_num);
}
##---------------------------------------------------------------------------
=head1 B<interior_sequence()>
$parser->interior_sequence($seq_cmd,$seq_arg,$pod_seq);
This method should be overridden by subclasses to take the appropriate
action when an interior sequence is encountered. An interior sequence is
an embedded command within a block of text which appears as a command
name (usually a single uppercase character) followed immediately by a
string of text which is enclosed in angle brackets. This method is
passed the sequence command C<$seq_cmd> and the corresponding text
C<$seq_arg>. It is invoked by the B<interpolate()> method for each interior
sequence that occurs in the string that it is passed. It should return
the desired text string to be used in place of the interior sequence.
The C<$pod_seq> argument is a reference to a C<Pod::InteriorSequence>
object which contains further information about the interior sequence.
Please see L<Pod::InputObjects> for details if you need to access this
additional information.
Subclass implementations of this method may wish to invoke the
B<nested()> method of C<$pod_seq> to see if it is nested inside
some other interior-sequence (and if so, which kind).
The base class implementation of the B<interior_sequence()> method
simply returns the raw text of the interior sequence (as it occurred
in the input) to the caller.
=cut
sub interior_sequence {
    my ($self, $seq_cmd, $seq_arg, $pod_seq) = @_;
    ## Just return the raw text of the interior sequence
    ## (i.e. by default, sequences like B<...> are left unexpanded).
    return $pod_seq->raw_text();
}
#############################################################################
=head1 OPTIONAL SUBROUTINE/METHOD OVERRIDES
B<Pod::Parser> provides several methods which subclasses may want to override
to perform any special pre/post-processing. These methods do I<not> have to
be overridden, but it may be useful for subclasses to take advantage of them.
=cut
##---------------------------------------------------------------------------
=head1 B<new()>
my $parser = Pod::Parser->new();
This is the constructor for B<Pod::Parser> and its subclasses. You
I<do not> need to override this method! It is capable of constructing
subclass objects as well as base class objects, provided you use
any of the following constructor invocation styles:
my $parser1 = MyParser->new();
my $parser2 = new MyParser();
my $parser3 = $parser2->new();
where C<MyParser> is some subclass of B<Pod::Parser>.
Using the syntax C<MyParser::new()> to invoke the constructor is I<not>
recommended, but if you insist on being able to do this, then the
subclass I<will> need to override the B<new()> constructor method. If
you do override the constructor, you I<must> be sure to invoke the
B<initialize()> method of the newly blessed object.
Using any of the above invocations, the first argument to the
constructor is always the corresponding package name (or object
reference). No other arguments are required, but if desired, an
associative array (or hash-table) my be passed to the B<new()>
constructor, as in:
my $parser1 = MyParser->new( MYDATA => $value1, MOREDATA => $value2 );
my $parser2 = new MyParser( -myflag => 1 );
All arguments passed to the B<new()> constructor will be treated as
key/value pairs in a hash-table. The newly constructed object will be
initialized by copying the contents of the given hash-table (which may
have been empty). The B<new()> constructor for this class and all of its
subclasses returns a blessed reference to the initialized object (hash-table).
=cut
sub new {
    ## Works as both a class-method and instance-method constructor:
    ## derive the target class from whatever we were invoked on.
    my ($this, %params) = @_;
    my $class = ref($this) || $this;
    ## Seed the object hash with any caller-supplied key/value pairs,
    ## then bless it and run per-object initialization.
    my $self = bless { %params }, $class;
    $self->initialize();
    return $self;
}
##---------------------------------------------------------------------------
=head1 B<initialize()>
$parser->initialize();
This method performs any necessary object initialization. It takes no
arguments (other than the object instance of course, which is typically
copied to a local variable named C<$self>). If subclasses override this
method then they I<must> be sure to invoke C<$self-E<gt>SUPER::initialize()>.
=cut
sub initialize {
    ## No-op hook: subclasses override this to set up per-object state.
    ## Overrides must invoke $self->SUPER::initialize() (see POD above).
    #my $self = shift;
    #return;
}
##---------------------------------------------------------------------------
=head1 B<begin_pod()>
$parser->begin_pod();
This method is invoked at the beginning of processing for each POD
document that is encountered in the input. Subclasses should override
this method to perform any per-document initialization.
=cut
sub begin_pod {
    ## No-op hook: invoked once at the start of each POD document;
    ## subclasses override it for per-document initialization.
    #my $self = shift;
    #return;
}
##---------------------------------------------------------------------------
=head1 B<begin_input()>
$parser->begin_input();
This method is invoked by B<parse_from_filehandle()> immediately I<before>
processing input from a filehandle. The base class implementation does
nothing, however, subclasses may override it to perform any per-file
initializations.
Note that if multiple files are parsed for a single POD document
(perhaps the result of some future C<=include> directive) this method
is invoked for every file that is parsed. If you wish to perform certain
initializations once per document, then you should use B<begin_pod()>.
=cut
sub begin_input {
    ## No-op hook: invoked by parse_from_filehandle() before reading each
    ## input file; subclasses override it for per-file initialization.
    #my $self = shift;
    #return;
}
##---------------------------------------------------------------------------
=head1 B<end_input()>
$parser->end_input();
This method is invoked by B<parse_from_filehandle()> immediately I<after>
processing input from a filehandle. The base class implementation does
nothing, however, subclasses may override it to perform any per-file
cleanup actions.
Please note that if multiple files are parsed for a single POD document
(perhaps the result of some kind of C<=include> directive) this method
is invoked for every file that is parsed. If you wish to perform certain
cleanup actions once per document, then you should use B<end_pod()>.
=cut
## Hook: invoked just after input from a filehandle has been processed.
## No-op in the base class; subclasses override for per-file cleanup.
sub end_input {
    return;
}
##---------------------------------------------------------------------------
=head1 B<end_pod()>
$parser->end_pod();
This method is invoked at the end of processing for each POD document
that is encountered in the input. Subclasses should override this method
to perform any per-document finalization.
=cut
## Hook: invoked at the end of each POD document.  No-op in the base
## class; subclasses override for per-document finalization.
sub end_pod {
    return;
}
##---------------------------------------------------------------------------
=head1 B<preprocess_line()>
$textline = $parser->preprocess_line($text, $line_num);
This method should be overridden by subclasses that wish to perform
any kind of preprocessing for each I<line> of input (I<before> it has
been determined whether or not it is part of a POD paragraph). The
parameter C<$text> is the input line; and the parameter C<$line_num> is
the line number of the corresponding text line.
The value returned should correspond to the new text to use in its
place. If the empty string or an undefined value is returned then no
further processing will be performed for this line.
Please note that the B<preprocess_line()> method is invoked I<before>
the B<preprocess_paragraph()> method. After all (possibly preprocessed)
lines in a paragraph have been assembled together and it has been
determined that the paragraph is part of the POD documentation from one
of the selected sections, then B<preprocess_paragraph()> is invoked.
The base class implementation of this method returns the given text.
=cut
## Identity line-preprocessor.  Subclasses may override this to rewrite
## each raw input line before paragraph assembly; the base class simply
## hands the line back unchanged.  Returning '' or undef from an override
## suppresses further processing of the line.
sub preprocess_line {
    my ($self, $raw_line, $lineno) = @_;
    return $raw_line;
}
##---------------------------------------------------------------------------
=head1 B<preprocess_paragraph()>
$textblock = $parser->preprocess_paragraph($text, $line_num);
This method should be overridden by subclasses that wish to perform any
kind of preprocessing for each block (paragraph) of POD documentation
that appears in the input stream. The parameter C<$text> is the POD
paragraph from the input file; and the parameter C<$line_num> is the
line number for the beginning of the corresponding paragraph.
The value returned should correspond to the new text to use in its
place. If the empty string is returned or an undefined value is
returned, then the given C<$text> is ignored (not processed).
This method is invoked after gathering up all the lines in a paragraph
and after determining the cutting state of the paragraph,
but before trying to further parse or interpret them. After
B<preprocess_paragraph()> returns, the current cutting state (which
is returned by C<$self-E<gt>cutting()>) is examined. If it evaluates
to true then input text (including the given C<$text>) is cut (not
processed) until the next POD directive is encountered.
Please note that the B<preprocess_line()> method is invoked I<before>
the B<preprocess_paragraph()> method. After all (possibly preprocessed)
lines in a paragraph have been assembled together and either it has been
determined that the paragraph is part of the POD documentation from one
of the selected sections or the C<-want_nonPODs> option is true,
then B<preprocess_paragraph()> is invoked.
The base class implementation of this method returns the given text.
=cut
## Identity paragraph-preprocessor.  Subclasses may override this to
## rewrite each assembled paragraph before parsing; the base class simply
## hands the text back unchanged.  Returning '' or undef from an override
## causes the paragraph to be ignored.
sub preprocess_paragraph {
    my ($self, $para_text, $lineno) = @_;
    return $para_text;
}
#############################################################################
=head1 METHODS FOR PARSING AND PROCESSING
B<Pod::Parser> provides several methods to process input text. These
methods typically won't need to be overridden (and in some cases they
can't be overridden), but subclasses may want to invoke them to exploit
their functionality.
=cut
##---------------------------------------------------------------------------
=head1 B<parse_text()>
$ptree1 = $parser->parse_text($text, $line_num);
$ptree2 = $parser->parse_text({%opts}, $text, $line_num);
$ptree3 = $parser->parse_text(\%opts, $text, $line_num);
This method is useful if you need to perform your own interpolation
of interior sequences and can't rely upon B<interpolate> to expand
them in simple bottom-up order.
The parameter C<$text> is a string or block of text to be parsed
for interior sequences; and the parameter C<$line_num> is the
line number corresponding to the beginning of C<$text>.
B<parse_text()> will parse the given text into a parse-tree of "nodes."
and interior-sequences. Each "node" in the parse tree is either a
text-string, or a B<Pod::InteriorSequence>. The result returned is a
parse-tree of type B<Pod::ParseTree>. Please see L<Pod::InputObjects>
for more information about B<Pod::InteriorSequence> and B<Pod::ParseTree>.
If desired, an optional hash-ref may be specified as the first argument
to customize certain aspects of the parse-tree that is created and
returned. The set of recognized option keywords are:
=over 3
=item B<-expand_seq> =E<gt> I<code-ref>|I<method-name>
Normally, the parse-tree returned by B<parse_text()> will contain an
unexpanded C<Pod::InteriorSequence> object for each interior-sequence
encountered. Specifying B<-expand_seq> tells B<parse_text()> to "expand"
every interior-sequence it sees by invoking the referenced function
(or named method of the parser object) and using the return value as the
expanded result.
If a subroutine reference was given, it is invoked as:
&$code_ref( $parser, $sequence )
and if a method-name was given, it is invoked as:
$parser->method_name( $sequence )
where C<$parser> is a reference to the parser object, and C<$sequence>
is a reference to the interior-sequence object.
[I<NOTE>: If the B<interior_sequence()> method is specified, then it is
invoked according to the interface specified in L<"interior_sequence()">].
=item B<-expand_text> =E<gt> I<code-ref>|I<method-name>
Normally, the parse-tree returned by B<parse_text()> will contain a
text-string for each contiguous sequence of characters outside of an
interior-sequence. Specifying B<-expand_text> tells B<parse_text()> to
"preprocess" every such text-string it sees by invoking the referenced
function (or named method of the parser object) and using the return value
as the preprocessed (or "expanded") result. [Note that if the result is
an interior-sequence, then it will I<not> be expanded as specified by the
B<-expand_seq> option; Any such recursive expansion needs to be handled by
the specified callback routine.]
If a subroutine reference was given, it is invoked as:
&$code_ref( $parser, $text, $ptree_node )
and if a method-name was given, it is invoked as:
$parser->method_name( $text, $ptree_node )
where C<$parser> is a reference to the parser object, C<$text> is the
text-string encountered, and C<$ptree_node> is a reference to the current
node in the parse-tree (usually an interior-sequence object or else the
top-level node of the parse-tree).
=item B<-expand_ptree> =E<gt> I<code-ref>|I<method-name>
Rather than returning a C<Pod::ParseTree>, pass the parse-tree as an
argument to the referenced subroutine (or named method of the parser
object) and return the result instead of the parse-tree object.
If a subroutine reference was given, it is invoked as:
&$code_ref( $parser, $ptree )
and if a method-name was given, it is invoked as:
$parser->method_name( $ptree )
where C<$parser> is a reference to the parser object, and C<$ptree>
is a reference to the parse-tree object.
=back
=cut
## Parse $text into a Pod::ParseTree whose nodes are plain strings and
## Pod::InteriorSequence objects (for B<...>, I<...>, etc.).  An optional
## leading hash-ref may request expansion callbacks:
##   -expand_seq   => code-ref/method-name, applied to each completed sequence
##   -expand_text  => code-ref/method-name, applied to each text chunk
##   -expand_ptree => code-ref/method-name, applied to the final tree
## Returns the parse-tree, or the -expand_ptree callback's result.
## Unterminated sequences are reported via errorsub()/carp and closed
## implicitly at end-of-text.
sub parse_text {
my $self = shift;
local $_ = '';
## Get options and set any defaults
my %opts = (ref $_[0]) ? %{ shift() } : ();
my $expand_seq = $opts{'-expand_seq'} || undef;
my $expand_text = $opts{'-expand_text'} || undef;
my $expand_ptree = $opts{'-expand_ptree'} || undef;
my $text = shift;
my $line = shift;
my $file = $self->input_file();
my $cmd = "";
## Convert method calls into closures, for our convenience
my $xseq_sub = $expand_seq;
my $xtext_sub = $expand_text;
my $xptree_sub = $expand_ptree;
if (defined $expand_seq and $expand_seq eq 'interior_sequence') {
## If 'interior_sequence' is the method to use, we have to pass
## more than just the sequence object, we also need to pass the
## sequence name and text.
$xseq_sub = sub {
my ($sself, $iseq) = @_;
my $args = join('', $iseq->parse_tree->children);
return $sself->interior_sequence($iseq->name, $args, $iseq);
};
}
## A plain (non-ref) name means "call that method on $self"
ref $xseq_sub or $xseq_sub = sub { shift()->$expand_seq(@_) };
ref $xtext_sub or $xtext_sub = sub { shift()->$expand_text(@_) };
ref $xptree_sub or $xptree_sub = sub { shift()->$expand_ptree(@_) };
## Keep track of the "current" interior sequence, and maintain a stack
## of "in progress" sequences.
##
## NOTE that we push our own "accumulator" at the very beginning of the
## stack. It's really a parse-tree, not a sequence; but it implements
## the methods we need so we can use it to gather-up all the sequences
## and strings we parse. Thus, by the end of our parsing, it should be
## the only thing left on our stack and all we have to do is return it!
##
my $seq = Pod::ParseTree->new();
my @seq_stack = ($seq);
my ($ldelim, $rdelim) = ('', '');
## Iterate over all sequence starts text (NOTE: split with
## capturing parens keeps the delimiters)
## Tokens alternate between plain text and sequence-start markers such
## as "B<" or "B<<\n"; the capture group preserves the markers.
$_ = $text;
my @tokens = split /([A-Z]<(?:<+(?:\r?\n|[ \t]))?)/;
while ( @tokens ) {
$_ = shift @tokens;
## Look for the beginning of a sequence
if ( /^([A-Z])(<(?:<+(?:\r?\n|[ \t]))?)$/ ) {
## Push a new sequence onto the stack of those "in-progress"
## $ldelim is the delimiter without its trailing whitespace;
## $rdelim is its mirror image (all '<' become '>').
my $ldelim_orig;
($cmd, $ldelim_orig) = ($1, $2);
($ldelim = $ldelim_orig) =~ s/\s+$//;
($rdelim = $ldelim) =~ tr/</>/;
$seq = Pod::InteriorSequence->new(
-name => $cmd,
-ldelim => $ldelim_orig, -rdelim => $rdelim,
-file => $file, -line => $line
);
(@seq_stack > 1) and $seq->nested($seq_stack[-1]);
push @seq_stack, $seq;
}
## Look for sequence ending
elsif ( @seq_stack > 1 ) {
## Make sure we match the right kind of closing delimiter
my ($seq_end, $post_seq) = ('', '');
if ( ($ldelim eq '<' and /\A(.*?)(>)/s)
or /\A(.*?)(\s+$rdelim)/s )
{
## Found end-of-sequence, capture the interior and the
## closing the delimiter, and put the rest back on the
## token-list
$post_seq = substr($_, length($1) + length($2));
($_, $seq_end) = ($1, $2);
(length $post_seq) and unshift @tokens, $post_seq;
}
if (length) {
## In the middle of a sequence, append this text to it, and
## don't forget to "expand" it if that's what the caller wanted
$seq->append($expand_text ? &$xtext_sub($self,$_,$seq) : $_);
## Re-append the delimiter so the newline check at the bottom
## of the loop sees any newline it contains.
$_ .= $seq_end;
}
if (length $seq_end) {
## End of current sequence, record terminating delimiter
$seq->rdelim($seq_end);
## Pop it off the stack of "in progress" sequences
pop @seq_stack;
## Append result to its parent in current parse tree
$seq_stack[-1]->append($expand_seq ? &$xseq_sub($self,$seq)
: $seq);
## Remember the current cmd-name and left-delimiter
if(@seq_stack > 1) {
$cmd = $seq_stack[-1]->name;
$ldelim = $seq_stack[-1]->ldelim;
$rdelim = $seq_stack[-1]->rdelim;
} else {
$cmd = $ldelim = $rdelim = '';
}
}
}
elsif (length) {
## In the middle of a sequence, append this text to it, and
## don't forget to "expand" it if that's what the caller wanted
$seq->append($expand_text ? &$xtext_sub($self,$_,$seq) : $_);
}
## Keep track of line count
## NOTE: adds at most 1 per token even if the token spans several
## newlines, so $line can drift for multi-line tokens.
$line += /\n/;
## Remember the "current" sequence
$seq = $seq_stack[-1];
}
## Handle unterminated sequences
my $errorsub = (@seq_stack > 1) ? $self->errorsub() : undef;
while (@seq_stack > 1) {
($cmd, $file, $line) = ($seq->name, $seq->file_line);
$ldelim = $seq->ldelim;
($rdelim = $ldelim) =~ tr/</>/;
## Move any whitespace in front of the delimiter text for display
$rdelim =~ s/^(\S+)(\s*)$/$2$1/;
pop @seq_stack;
my $errmsg = "*** ERROR: unterminated ${cmd}${ldelim}...${rdelim}".
" at line $line in file $file\n";
## Dispatch the error: code-ref, named method, or fall back to carp()
(ref $errorsub) and &{$errorsub}($errmsg)
or (defined $errorsub) and $self->$errorsub($errmsg)
or carp($errmsg);
$seq_stack[-1]->append($expand_seq ? &$xseq_sub($self,$seq) : $seq);
$seq = $seq_stack[-1];
}
## Return the resulting parse-tree
my $ptree = (pop @seq_stack)->parse_tree;
return $expand_ptree ? &$xptree_sub($self, $ptree) : $ptree;
}
##---------------------------------------------------------------------------
=head1 B<interpolate()>
$textblock = $parser->interpolate($text, $line_num);
This method translates all text (including any embedded interior sequences)
in the given text string C<$text> and returns the interpolated result. The
parameter C<$line_num> is the line number corresponding to the beginning
of C<$text>.
B<interpolate()> merely invokes a private method to recursively expand
nested interior sequences in bottom-up order (innermost sequences are
expanded first). If there is a need to expand nested sequences in
some alternate order, use B<parse_text> instead.
=cut
## Expand every interior sequence in $text in bottom-up order (innermost
## first) via interior_sequence(), and return the flattened result as a
## single string.  $line_num is the line number where $text begins.
sub interpolate {
    my ($self, $text, $line_num) = @_;
    my $tree = $self->parse_text(
        { -expand_seq => 'interior_sequence' }, $text, $line_num);
    return join('', $tree->children());
}
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head1 B<parse_paragraph()>
$parser->parse_paragraph($text, $line_num);
This method takes the text of a POD paragraph to be processed, along
with its corresponding line number, and invokes the appropriate method
(one of B<command()>, B<verbatim()>, or B<textblock()>).
For performance reasons, this method is invoked directly without any
dynamic lookup; Hence subclasses may I<not> override it!
=end __PRIVATE__
=cut
## Classify one assembled paragraph and dispatch it to command(),
## verbatim(), or textblock().  Also maintains the "cutting" state
## (non-POD text is skipped until the next directive), runs the
## preprocess_paragraph() hook, honors Pod::Select section selection,
## and emits the optional whitespace warning.  Returns a true value when
## the paragraph was (or should be treated as) handled.
## Documented as non-overridable for performance: callers invoke it
## directly, without dynamic method lookup.
sub parse_paragraph {
my ($self, $text, $line_num) = @_;
local *myData = $self; ## alias to avoid deref-ing overhead
local *myOpts = ($myData{_PARSEOPTS} ||= {}); ## get parse-options
local $_;
## See if we want to preprocess nonPOD paragraphs as well as POD ones.
my $wantNonPods = $myOpts{'-want_nonPODs'};
## Update cutting status
## Any paragraph starting with "=" or "==" followed by a non-space
## re-enters POD mode.
$myData{_CUTTING} = 0 if $text =~ /^={1,2}\S/;
## Perform any desired preprocessing if we wanted it this early
$wantNonPods and $text = $self->preprocess_paragraph($text, $line_num);
## Ignore up until next POD directive if we are cutting
return if $myData{_CUTTING};
## Now we know this is block of text in a POD section!
##-----------------------------------------------------------------
## This is a hook (hack ;-) for Pod::Select to do its thing without
## having to override methods, but also without Pod::Parser assuming
## $self is an instance of Pod::Select (if the _SELECTED_SECTIONS
## field exists then we assume there is an is_selected() method for
## us to invoke (calling $self->can('is_selected') could verify this
## but that is more overhead than I want to incur)
##-----------------------------------------------------------------
## Ignore this block if it isn't in one of the selected sections
if (exists $myData{_SELECTED_SECTIONS}) {
$self->is_selected($text) or return ($myData{_CUTTING} = 1);
}
## If we haven't already, perform any desired preprocessing and
## then re-check the "cutting" state
unless ($wantNonPods) {
$text = $self->preprocess_paragraph($text, $line_num);
return 1 unless ((defined $text) and (length $text));
return 1 if ($myData{_CUTTING});
}
## Look for one of the three types of paragraphs
my ($pfx, $cmd, $arg, $sep) = ('', '', '', '');
my $pod_para = undef;
if ($text =~ /^(={1,2})(?=\S)/) {
## Looks like a command paragraph. Capture the command prefix used
## ("=" or "=="), as well as the command-name, its paragraph text,
## and whatever sequence of characters was used to separate them
## (split with a captured separator and LIMIT 2 yields exactly
## the command name, the separator run, and the remainder).
$pfx = $1;
$_ = substr($text, length $pfx);
($cmd, $sep, $text) = split /(\s+)/, $_, 2;
$sep = '' unless defined $sep;
$text = '' unless defined $text;
## If this is a "cut" directive then we don't need to do anything
## except return to "cutting" mode.
if ($cmd eq 'cut') {
$myData{_CUTTING} = 1;
return unless $myOpts{'-process_cut_cmd'};
}
}
## Save the attributes indicating how the command was specified.
$pod_para = new Pod::Paragraph(
-name => $cmd,
-text => $text,
-prefix => $pfx,
-separator => $sep,
-file => $myData{_INFILE},
-line => $line_num
);
# ## Invoke appropriate callbacks
# if (exists $myData{_CALLBACKS}) {
# ## Look through the callback list, invoke callbacks,
# ## then see if we need to do the default actions
# ## (invoke_callbacks will return true if we do).
# return 1 unless $self->invoke_callbacks($cmd, $text, $line_num, $pod_para);
# }
# If the last paragraph ended in whitespace, and we're not between verbatim blocks, carp
if ($myData{_WHITESPACE} and $myOpts{'-warnings'}
and not ($text =~ /^\s+/ and ($myData{_PREVIOUS}||"") eq "verbatim")) {
my $errorsub = $self->errorsub();
my $line = $line_num - 1;
my $errmsg = "*** WARNING: line containing nothing but whitespace".
" in paragraph at line $line in file $myData{_INFILE}\n";
## Dispatch the warning: code-ref, named method, or fall back to carp()
(ref $errorsub) and &{$errorsub}($errmsg)
or (defined $errorsub) and $self->$errorsub($errmsg)
or carp($errmsg);
}
if (length $cmd) {
## A command paragraph
$self->command($cmd, $text, $line_num, $pod_para);
$myData{_PREVIOUS} = $cmd;
}
elsif ($text =~ /^\s+/) {
## Indented text - must be a verbatim paragraph
$self->verbatim($text, $line_num, $pod_para);
$myData{_PREVIOUS} = "verbatim";
}
else {
## Looks like an ordinary block of text
$self->textblock($text, $line_num, $pod_para);
$myData{_PREVIOUS} = "textblock";
}
# Update the whitespace for the next time around
# (records whether the paragraph ends in a line of pure whitespace,
# used above to warn about whitespace-only separator lines)
#$myData{_WHITESPACE} = $text =~ /^[^\S\r\n]+\Z/m ? 1 : 0;
$myData{_WHITESPACE} = $text =~ /^[^\S\r\n]+\r*\Z/m ? 1 : 0;
return 1;
}
##---------------------------------------------------------------------------
=head1 B<parse_from_filehandle()>
$parser->parse_from_filehandle($in_fh,$out_fh);
This method takes an input filehandle (which is assumed to already be
opened for reading) and reads the entire input stream looking for blocks
(paragraphs) of POD documentation to be processed. If no first argument
is given the default input filehandle C<STDIN> is used.
The C<$in_fh> parameter may be any object that provides a B<getline()>
method to retrieve a single line of input text (hence, an appropriate
wrapper object could be used to parse PODs from a single string or an
array of strings).
Using C<$in_fh-E<gt>getline()>, input is read line-by-line and assembled
into paragraphs or "blocks" (which are separated by lines containing
nothing but whitespace). For each block of POD documentation
encountered it will invoke a method to parse the given paragraph.
If a second argument is given then it should correspond to a filehandle where
output should be sent (otherwise the default output filehandle is
C<STDOUT> if no output filehandle is currently in use).
B<NOTE:> For performance reasons, this method caches the input stream at
the top of the stack in a local variable. Any attempts by clients to
change the stack contents during processing, while in the midst of executing
this method, I<will not affect> the input stream used by the current
invocation of this method.
This method does I<not> usually need to be overridden by subclasses.
=cut
## Read the entire input stream from $in_fh (default STDIN), assemble
## lines into paragraphs separated by whitespace-only lines, and hand
## each paragraph to parse_paragraph().  An optional leading hash-ref of
## options is accepted (only -cutting is consulted here).  $out_fh, if
## given, becomes the current output handle via _push_input_stream().
sub parse_from_filehandle {
my $self = shift;
my %opts = (ref $_[0] eq 'HASH') ? %{ shift() } : ();
my ($in_fh, $out_fh) = @_;
$in_fh = \*STDIN unless ($in_fh);
local *myData = $self; ## alias to avoid deref-ing overhead
local *myOpts = ($myData{_PARSEOPTS} ||= {}); ## get parse-options
local $_;
## Put this stream at the top of the stack and do beginning-of-input
## processing. NOTE that $in_fh might be reset during this process.
my $topstream = $self->_push_input_stream($in_fh, $out_fh);
(exists $opts{-cutting}) and $self->cutting( $opts{-cutting} );
## Initialize line/paragraph
my ($textline, $paragraph) = ('', '');
my ($nlines, $plines) = (0, 0);
## Use <$fh> instead of $fh->getline where possible (for speed)
## Only real filehandles (globs, FileHandle/IO::* objects, or tied
## handles) can use the diamond operator; other objects must provide
## a getline() method.
$_ = ref $in_fh;
my $tied_fh = (/^(?:GLOB|FileHandle|IO::\w+)$/ or tied $in_fh);
## Read paragraphs line-by-line
while (defined ($textline = $tied_fh ? <$in_fh> : $in_fh->getline)) {
$textline = $self->preprocess_line($textline, ++$nlines);
next unless ((defined $textline) && (length $textline));
if ((! length $paragraph) && ($textline =~ /^==/)) {
## '==' denotes a one-line command paragraph
$paragraph = $textline;
$plines = 1;
$textline = '';
} else {
## Append this line to the current paragraph
$paragraph .= $textline;
++$plines;
}
## See if this line is blank and ends the current paragraph.
## If it isn't, then keep iterating until it is.
next unless (($textline =~ /^[^\S\r\n]*[\r\n]*$/)
&& (length $paragraph));
## Now process the paragraph
## (direct sub call, not a method call, for speed -- documented as
## non-overridable)
parse_paragraph($self, $paragraph, ($nlines - $plines) + 1);
$paragraph = '';
$plines = 0;
}
## Don't forget about the last paragraph in the file
if (length $paragraph) {
parse_paragraph($self, $paragraph, ($nlines - $plines) + 1)
}
## Now pop the input stream off the top of the input stack.
$self->_pop_input_stream();
}
##---------------------------------------------------------------------------
=head1 B<parse_from_file()>
$parser->parse_from_file($filename,$outfile);
This method takes a filename and does the following:
=over 2
=item *
opens the input and output files for reading
(creating the appropriate filehandles)
=item *
invokes the B<parse_from_filehandle()> method passing it the
corresponding input and output filehandles.
=item *
closes the input and output files.
=back
If the special input filename "", "-" or "<&STDIN" is given then the STDIN
filehandle is used for input (and no open or close is performed). If no
input filename is specified then "-" is implied. Filehandle references,
or objects that support the regular IO operations (like C<E<lt>$fhE<gt>>
or C<$fh-E<gt>getline>) are also accepted; the handles must already be
opened.
If a second argument is given then it should be the name of the desired
output file. If the special output filename "-" or ">&STDOUT" is given
then the STDOUT filehandle is used for output (and no open or close is
performed). If the special output filename ">&STDERR" is given then the
STDERR filehandle is used for output (and no open or close is
performed). If no output filehandle is currently in use and no output
filename is specified, then "-" is implied.
Alternatively, filehandle references or objects that support the regular
IO operations (like C<print>, e.g. L<IO::String>) are also accepted;
the object must already be opened.
This method does I<not> usually need to be overridden by subclasses.
=cut
## Resolve $infile/$outfile (filenames, '', '-', '<&STDIN', '>&STDOUT',
## '>&STDERR', or filehandle/IO-object references) into a pair of open
## handles, invoke parse_from_filehandle() on them, then close whatever
## this method itself opened.  An optional leading hash-ref of parse
## options is forwarded to parse_from_filehandle().  Croaks on
## unsupported reference types and on open/close failures.
## Interface is unchanged from the original implementation; the only
## behavioral change is using three-arg open (see below).
sub parse_from_file {
    my $self = shift;
    my %opts = (ref $_[0] eq 'HASH') ? %{ shift() } : ();
    my ($infile, $outfile) = @_;
    my ($in_fh, $out_fh);
    if ($] < 5.006) {
        ## Ancient perls can't autovivify lexical filehandles
        ($in_fh, $out_fh) = (gensym(), gensym());
    }
    my ($close_input, $close_output) = (0, 0);
    local *myData = $self;
    local *_;
    ## Is $infile a filename or a (possibly implied) filehandle
    if (defined $infile && ref $infile) {
        if (ref($infile) =~ /^(SCALAR|ARRAY|HASH|CODE|REF)$/) {
            croak "Input from $1 reference not supported!\n";
        }
        ## Must be a filehandle-ref (or else assume its a ref to an object
        ## that supports the common IO read operations).
        $myData{_INFILE} = ${$infile};
        $in_fh = $infile;
    }
    elsif (!defined($infile) || !length($infile) || ($infile eq '-')
        || ($infile =~ /^<&(?:STDIN|0)$/i))
    {
        ## Not a filename, just a string implying STDIN
        $infile ||= '-';
        $myData{_INFILE} = '<standard input>';
        $in_fh = \*STDIN;
    }
    else {
        ## We have a filename, open it for reading.
        ## FIX: three-arg open so the filename is taken literally instead
        ## of being interpolated into a mode string (the previous two-arg
        ## 'open($in_fh, "< $infile")' mis-handled filenames containing
        ## leading/trailing whitespace or mode characters).
        $myData{_INFILE} = $infile;
        open($in_fh, '<', $infile) or
            croak "Can't open $infile for reading: $!\n";
        $close_input = 1;
    }
    ## NOTE: we need to be *very* careful when "defaulting" the output
    ## file. We only want to use a default if this is the beginning of
    ## the entire document (but *not* if this is an included file). We
    ## determine this by seeing if the input stream stack has been set-up
    ## already
    ## Is $outfile a filename, a (possibly implied) filehandle, maybe a ref?
    if (ref $outfile) {
        ## we need to check for ref() first, as other checks involve reading
        if (ref($outfile) =~ /^(ARRAY|HASH|CODE)$/) {
            croak "Output to $1 reference not supported!\n";
        }
        elsif (ref($outfile) eq 'SCALAR') {
            # # NOTE: IO::String isn't a part of the perl distribution,
            # # so probably we shouldn't support this case...
            # require IO::String;
            # $myData{_OUTFILE} = "$outfile";
            # $out_fh = IO::String->new($outfile);
            croak "Output to SCALAR reference not supported!\n";
        }
        else {
            ## Must be a filehandle-ref (or else assume its a ref to an
            ## object that supports the common IO write operations).
            $myData{_OUTFILE} = ${$outfile};
            $out_fh = $outfile;
        }
    }
    elsif (!defined($outfile) || !length($outfile) || ($outfile eq '-')
        || ($outfile =~ /^>&?(?:STDOUT|1)$/i))
    {
        if (defined $myData{_TOP_STREAM}) {
            ## Mid-document (included file): reuse the current output
            $out_fh = $myData{_OUTPUT};
        }
        else {
            ## Not a filename, just a string implying STDOUT
            $outfile ||= '-';
            $myData{_OUTFILE} = '<standard output>';
            $out_fh = \*STDOUT;
        }
    }
    elsif ($outfile =~ /^>&(STDERR|2)$/i) {
        ## Not a filename, just a string implying STDERR
        $myData{_OUTFILE} = '<standard error>';
        $out_fh = \*STDERR;
    }
    else {
        ## We have a filename, open it for writing.
        ## NOTE(review): the message below says "not POD input" although
        ## $outfile is the output file -- kept verbatim in case callers
        ## match on it; confirm before rewording.
        $myData{_OUTFILE} = $outfile;
        (-d $outfile) and croak "$outfile is a directory, not POD input!\n";
        ## FIX: three-arg open, same rationale as for the input file above.
        open($out_fh, '>', $outfile) or
            croak "Can't open $outfile for writing: $!\n";
        $close_output = 1;
    }
    ## Whew! That was a lot of work to set up reasonably/robust behavior
    ## in the case of a non-filename for reading and writing. Now we just
    ## have to parse the input and close the handles when we're finished.
    $self->parse_from_filehandle(\%opts, $in_fh, $out_fh);
    ## Only close what we opened; check close() on the write handle so
    ## buffered write errors are not silently dropped.
    $close_input and
        close($in_fh) || croak "Can't close $infile after reading: $!\n";
    $close_output and
        close($out_fh) || croak "Can't close $outfile after writing: $!\n";
}
#############################################################################
=head1 ACCESSOR METHODS
Clients of B<Pod::Parser> should use the following methods to access
instance data fields:
=cut
##---------------------------------------------------------------------------
=head1 B<errorsub()>
$parser->errorsub("method_name");
$parser->errorsub(\&warn_user);
$parser->errorsub(sub { print STDERR, @_ });
Specifies the method or subroutine to use when printing error messages
about POD syntax. The supplied method/subroutine I<must> return TRUE upon
successful printing of the message. If C<undef> is given, then the B<carp>
builtin is used to issue error messages (this is the default behavior).
my $errorsub = $parser->errorsub()
my $errmsg = "This is an error message!\n"
(ref $errorsub) and &{$errorsub}($errmsg)
or (defined $errorsub) and $parser->$errorsub($errmsg)
or carp($errmsg);
Returns a method name, or else a reference to the user-supplied subroutine
used to print error messages. Returns C<undef> if the B<carp> builtin
is used to issue error messages (this is the default behavior).
=cut
## Accessor/mutator for the error-reporting callback: a method name, a
## code-ref, or undef (meaning "fall back on carp").  With an argument,
## stores the new value; always returns the current value.
sub errorsub {
    my $self = shift;
    $self->{_ERRORSUB} = shift if @_;
    return $self->{_ERRORSUB};
}
##---------------------------------------------------------------------------
=head1 B<cutting()>
$boolean = $parser->cutting();
Returns the current C<cutting> state: a boolean-valued scalar which
evaluates to true if text from the input file is currently being "cut"
(meaning it is I<not> considered part of the POD document).
$parser->cutting($boolean);
Sets the current C<cutting> state to the given value and returns the
result.
=cut
## Accessor/mutator for the current "cutting" state (true while input is
## being skipped as non-POD).  With an argument, stores the new state;
## always returns the current state.
sub cutting {
    my $self = shift;
    $self->{_CUTTING} = shift if @_;
    return $self->{_CUTTING};
}
##---------------------------------------------------------------------------
##---------------------------------------------------------------------------
=head1 B<parseopts()>
When invoked with no additional arguments, B<parseopts> returns a hashtable
of all the current parsing options.
## See if we are parsing non-POD sections as well as POD ones
my %opts = $parser->parseopts();
$opts{'-want_nonPODs'} and print "-want_nonPODs\n";
When invoked using a single string, B<parseopts> treats the string as the
name of a parse-option and returns its corresponding value if it exists
(returns C<undef> if it doesn't).
## Did we ask to see '=cut' paragraphs?
my $want_cut = $parser->parseopts('-process_cut_cmd');
$want_cut and print "-process_cut_cmd\n";
When invoked with multiple arguments, B<parseopts> treats them as
key/value pairs and the specified parse-option names are set to the
given values. Any unspecified parse-options are unaffected.
## Set them back to the default
$parser->parseopts(-warnings => 0);
When passed a single hash-ref, B<parseopts> uses that hash to completely
reset the existing parse-options, all previous parse-option values
are lost.
## Reset all options to default
$parser->parseopts( { } );
See L<"PARSING OPTIONS"> for more information on the name and meaning of each
parse-option currently recognized.
=cut
## Get/set parse-options stored in $self->{_PARSEOPTS}:
##   parseopts()              -> hash of all current options
##   parseopts($name)         -> value of one option (undef if unset)
##   parseopts(\%hash)        -> replace the entire option set
##   parseopts(k1=>v1, ...)   -> merge the given pairs into the options
sub parseopts {
    my $self = shift;
    my $opts = ($self->{_PARSEOPTS} ||= {});
    return %$opts if @_ == 0;
    if (@_ == 1) {
        my $arg = shift;
        ## A hash-ref replaces the option set wholesale; a plain
        ## string is a single option-name lookup.
        return ref($arg) ? ($self->{_PARSEOPTS} = $arg) : $opts->{$arg};
    }
    ## Key/value pairs: merge into a fresh copy of the existing options
    return $self->{_PARSEOPTS} = { %$opts, @_ };
}
##---------------------------------------------------------------------------
=head1 B<output_file()>
$fname = $parser->output_file();
Returns the name of the output file being written.
=cut
## Read-only accessor: the name of the output file being written.
sub output_file {
    my ($self) = @_;
    return $self->{_OUTFILE};
}
##---------------------------------------------------------------------------
=head1 B<output_handle()>
$fhandle = $parser->output_handle();
Returns the output filehandle object.
=cut
## Read-only accessor: the current output filehandle object.
sub output_handle {
    my ($self) = @_;
    return $self->{_OUTPUT};
}
##---------------------------------------------------------------------------
=head1 B<input_file()>
$fname = $parser->input_file();
Returns the name of the input file being read.
=cut
## Read-only accessor: the name of the input file being read.
sub input_file {
    my ($self) = @_;
    return $self->{_INFILE};
}
##---------------------------------------------------------------------------
=head1 B<input_handle()>
$fhandle = $parser->input_handle();
Returns the current input filehandle object.
=cut
## Read-only accessor: the current input filehandle object.
sub input_handle {
    my ($self) = @_;
    return $self->{_INPUT};
}
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head1 B<input_streams()>
$listref = $parser->input_streams();
Returns a reference to an array which corresponds to the stack of all
the input streams that are currently in the middle of being parsed.
While parsing an input stream, it is possible to invoke
B<parse_from_file()> or B<parse_from_filehandle()> to parse a new input
stream and then return to parsing the previous input stream. Each input
stream to be parsed is pushed onto the end of this input stack
before any of its input is read. The input stream that is currently
being parsed is always at the end (or top) of the input stack. When an
input stream has been exhausted, it is popped off the end of the
input stack.
Each element on this input stack is a reference to C<Pod::InputSource>
object. Please see L<Pod::InputObjects> for more details.
This method might be invoked when printing diagnostic messages, for example,
to obtain the name and line number of the all input files that are currently
being processed.
=end __PRIVATE__
=cut
## Read-only accessor: array-ref of the stack of input streams currently
## being parsed (top/current stream is the last element).
sub input_streams {
    my ($self) = @_;
    return $self->{_INPUT_STREAMS};
}
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head1 B<top_stream()>
$hashref = $parser->top_stream();
Returns a reference to the hash-table that represents the element
that is currently at the top (end) of the input stream stack
(see L<"input_streams()">). The return value will be the C<undef>
if the input stack is empty.
This method might be used when printing diagnostic messages, for example,
to obtain the name and line number of the current input file.
=end __PRIVATE__
=cut
## Read-only accessor: the element at the top of the input stream stack,
## or undef if the stack is empty (any false value is normalized to undef).
sub top_stream {
    my ($self) = @_;
    my $top = $self->{_TOP_STREAM};
    return $top ? $top : undef;
}
#############################################################################
=head1 PRIVATE METHODS AND DATA
B<Pod::Parser> makes use of several internal methods and data fields
which clients should not need to see or use. For the sake of avoiding
name collisions for client data and methods, these methods and fields
are briefly discussed here. Determined hackers may obtain further
information about them by reading the B<Pod::Parser> source code.
Private data fields are stored in the hash-object whose reference is
returned by the B<new()> constructor for this class. The names of all
private methods and data-fields used by B<Pod::Parser> begin with a
prefix of "_" and match the regular expression C</^_\w+$/>.
=cut
##---------------------------------------------------------------------------
=begin _PRIVATE_
=head1 B<_push_input_stream()>
$hashref = $parser->_push_input_stream($in_fh,$out_fh);
This method will push the given input stream on the input stack and
perform any necessary beginning-of-document or beginning-of-file
processing. The argument C<$in_fh> is the input stream filehandle to
push, and C<$out_fh> is the corresponding output filehandle to use (if
it is not given or is undefined, then the current output stream is used,
which defaults to standard output if it doesn't exist yet).
The value returned will be reference to the hash-table that represents
the new top of the input stream stack. I<Please Note> that it is
possible for this method to use default values for the input and output
file handles. If this happens, you will need to look at the C<INPUT>
and C<OUTPUT> instance data members to determine their new values.
=end _PRIVATE_
=cut
sub _push_input_stream {
    my ($self, $in_fh, $out_fh) = @_;

    ## Initialize state for the entire document if this is *not* an
    ## included file.
    ##
    ## NOTE: we have to be *very* careful when "defaulting" the output
    ## filehandle: a default is only appropriate at the beginning of
    ## the whole document, *never* for an included file.
    unless (defined $self->{_TOP_STREAM}) {
        $out_fh = \*STDOUT unless (defined $out_fh);
        $self->{_CUTTING}       = 1;   ## current "cutting" state
        $self->{_INPUT_STREAMS} = [];  ## stack of all input streams
    }

    ## Initialize the input indicators
    $self->{_OUTFILE} = '(unknown)' unless (defined $self->{_OUTFILE});
    $self->{_OUTPUT}  = $out_fh if (defined $out_fh);
    $in_fh = \*STDIN unless (defined $in_fh);
    $self->{_INFILE}  = '(unknown)' unless (defined $self->{_INFILE});
    $self->{_INPUT}   = $in_fh;

    ## Wrap the new stream in a Pod::InputSource, remembering the
    ## cutting state so it can be restored when the stream is popped.
    my $input_top = $self->{_TOP_STREAM} =
        Pod::InputSource->new(
            -name        => $self->{_INFILE},
            -handle      => $in_fh,
            -was_cutting => $self->{_CUTTING},
        );
    push @{ $self->{_INPUT_STREAMS} }, $input_top;

    ## Perform beginning-of-document and/or beginning-of-input processing
    $self->begin_pod() if (@{ $self->{_INPUT_STREAMS} } == 1);
    $self->begin_input();

    return $input_top;
}
##---------------------------------------------------------------------------
=begin _PRIVATE_
=head1 B<_pop_input_stream()>
$hashref = $parser->_pop_input_stream();
This takes no arguments. It will perform any necessary end-of-file or
end-of-document processing and then pop the current input stream from
the top of the input stack.
The value returned will be reference to the hash-table that represents
the new top of the input stream stack.
=end _PRIVATE_
=cut
sub _pop_input_stream {
    my ($self) = @_;
    my $streams = $self->{_INPUT_STREAMS};

    ## Perform end-of-input and/or end-of-document processing
    $self->end_input() if (@$streams > 0);
    $self->end_pod()   if (@$streams == 1);

    ## Restore the cutting state to whatever it was before we started
    ## parsing this file.
    my $old_top = pop @$streams;
    $self->{_CUTTING} = $old_top->was_cutting();

    ## Don't forget to reset the input indicators
    my $input_top = undef;
    if (@$streams > 0) {
        $input_top = $self->{_TOP_STREAM} = $streams->[-1];
        $self->{_INFILE} = $input_top->name();
        $self->{_INPUT}  = $input_top->handle();
    }
    else {
        ## the stack is now empty: drop the bookkeeping entirely
        delete $self->{_TOP_STREAM};
        delete $self->{_INPUT_STREAMS};
    }

    return $input_top;
}
#############################################################################
=head1 TREE-BASED PARSING
If straightforward stream-based parsing won't meet your needs (as is
likely the case for tasks such as translating PODs into structured
markup languages like HTML and XML) then you may need to take the
tree-based approach. Rather than doing everything in one pass and
calling the B<interpolate()> method to expand sequences into text, it
may be desirable to instead create a parse-tree using the B<parse_text()>
method to return a tree-like structure which may contain an ordered
list of children (each of which may be a text-string, or a similar
tree-like structure).
Pay special attention to L<"METHODS FOR PARSING AND PROCESSING"> and
to the objects described in L<Pod::InputObjects>. The former describes
the gory details and parameters for how to customize and extend the
parsing behavior of B<Pod::Parser>. B<Pod::InputObjects> provides
several objects that may all be used interchangeably as parse-trees. The
most obvious one is the B<Pod::ParseTree> object. It defines the basic
interface and functionality that all things trying to be a POD parse-tree
should do. A B<Pod::ParseTree> is defined such that each "node" may be a
text-string, or a reference to another parse-tree. Each B<Pod::Paragraph>
object and each B<Pod::InteriorSequence> object also supports the basic
parse-tree interface.
The B<parse_text()> method takes a given paragraph of text, and
returns a parse-tree that contains one or more children, each of which
may be a text-string, or an InteriorSequence object. There are also
callback-options that may be passed to B<parse_text()> to customize
the way it expands or transforms interior-sequences, as well as the
returned result. These callbacks can be used to create a parse-tree
with custom-made objects (which may or may not support the parse-tree
interface, depending on how you choose to do it).
If you wish to turn an entire POD document into a parse-tree, that process
is fairly straightforward. The B<parse_text()> method is the key to doing
this successfully. Every paragraph-callback (i.e. the polymorphic methods
for B<command()>, B<verbatim()>, and B<textblock()> paragraphs) takes
a B<Pod::Paragraph> object as an argument. Each paragraph object has a
B<parse_tree()> method that can be used to get or set a corresponding
parse-tree. So for each of those paragraph-callback methods, simply call
B<parse_text()> with the options you desire, and then use the returned
parse-tree to assign to the given paragraph object.
That gives you a parse-tree for each paragraph - so now all you need is
an ordered list of paragraphs. You can maintain that yourself as a data
element in the object/hash. The most straightforward way would be simply
to use an array-ref, with the desired set of custom "options" for each
invocation of B<parse_text>. Let's assume the desired option-set is
given by the hash C<%options>. Then we might do something like the
following:
package MyPodParserTree;
@ISA = qw( Pod::Parser );
...
sub begin_pod {
my $self = shift;
$self->{'-paragraphs'} = []; ## initialize paragraph list
}
sub command {
my ($parser, $command, $paragraph, $line_num, $pod_para) = @_;
my $ptree = $parser->parse_text({%options}, $paragraph, ...);
$pod_para->parse_tree( $ptree );
push @{ $self->{'-paragraphs'} }, $pod_para;
}
sub verbatim {
my ($parser, $paragraph, $line_num, $pod_para) = @_;
push @{ $self->{'-paragraphs'} }, $pod_para;
}
sub textblock {
my ($parser, $paragraph, $line_num, $pod_para) = @_;
my $ptree = $parser->parse_text({%options}, $paragraph, ...);
$pod_para->parse_tree( $ptree );
push @{ $self->{'-paragraphs'} }, $pod_para;
}
...
package main;
...
my $parser = new MyPodParserTree(...);
$parser->parse_from_file(...);
my $paragraphs_ref = $parser->{'-paragraphs'};
Of course, in this module-author's humble opinion, I'd be more inclined to
use the existing B<Pod::ParseTree> object than a simple array. That way
everything in it, paragraphs and sequences, all respond to the same core
interface for all parse-tree nodes. The result would look something like:
package MyPodParserTree2;
...
sub begin_pod {
my $self = shift;
$self->{'-ptree'} = new Pod::ParseTree; ## initialize parse-tree
}
sub parse_tree {
## convenience method to get/set the parse-tree for the entire POD
(@_ > 1) and $_[0]->{'-ptree'} = $_[1];
return $_[0]->{'-ptree'};
}
sub command {
my ($parser, $command, $paragraph, $line_num, $pod_para) = @_;
my $ptree = $parser->parse_text({<<options>>}, $paragraph, ...);
$pod_para->parse_tree( $ptree );
$parser->parse_tree()->append( $pod_para );
}
sub verbatim {
my ($parser, $paragraph, $line_num, $pod_para) = @_;
$parser->parse_tree()->append( $pod_para );
}
sub textblock {
my ($parser, $paragraph, $line_num, $pod_para) = @_;
my $ptree = $parser->parse_text({<<options>>}, $paragraph, ...);
$pod_para->parse_tree( $ptree );
$parser->parse_tree()->append( $pod_para );
}
...
package main;
...
my $parser = new MyPodParserTree2(...);
$parser->parse_from_file(...);
my $ptree = $parser->parse_tree;
...
Now you have the entire POD document as one great big parse-tree. You
can even use the B<-expand_seq> option to B<parse_text> to insert
whole different kinds of objects. Just don't expect B<Pod::Parser>
to know what to do with them after that. That will need to be in your
code. Or, alternatively, you can insert any object you like so long as
it conforms to the B<Pod::ParseTree> interface.
One could use this to create subclasses of B<Pod::Paragraphs> and
B<Pod::InteriorSequences> for specific commands (or to create your own
custom node-types in the parse-tree) and add some kind of B<emit()>
method to each custom node/subclass object in the tree. Then all you'd
need to do is recursively walk the tree in the desired order, processing
the children (most likely from left to right) by formatting them if
they are text-strings, or by calling their B<emit()> method if they
are objects/references.
=head1 CAVEATS
Please note that POD has the notion of "paragraphs": this is something
starting I<after> a blank (read: empty) line, with the single exception
of the file start, which is also starting a paragraph. That means that
especially a command (e.g. C<=head1>) I<must> be preceded with a blank
line; C<__END__> is I<not> a blank line.
=head1 SEE ALSO
L<Pod::InputObjects>, L<Pod::Select>
B<Pod::InputObjects> defines POD input objects corresponding to
command paragraphs, parse-trees, and interior-sequences.
B<Pod::Select> is a subclass of B<Pod::Parser> which provides the ability
to selectively include and/or exclude sections of a POD document from being
translated based upon the current heading, subheading, subsubheading, etc.
=for __PRIVATE__
B<Pod::Callbacks> is a subclass of B<Pod::Parser> which gives its users
the ability the employ I<callback functions> instead of, or in addition
to, overriding methods of the base class.
=for __PRIVATE__
B<Pod::Select> and B<Pod::Callbacks> do not override any
methods nor do they define any new methods with the same name. Because
of this, they may I<both> be used (in combination) as a base class of
the same subclass in order to combine their functionality without
causing any namespace clashes due to multiple inheritance.
=head1 AUTHOR
Please report bugs using L<http://rt.cpan.org>.
Brad Appleton E<lt>bradapp@enteract.comE<gt>
Based on code for B<Pod::Text> written by
Tom Christiansen E<lt>tchrist@mox.perl.comE<gt>
=head1 LICENSE
Pod-Parser is free software; you can redistribute it and/or modify it
under the terms of the Artistic License distributed with Perl version
5.000 or (at your option) any later version. Please refer to the
Artistic License that came with your Perl distribution for more
details. If your version of Perl was not distributed under the
terms of the Artistic License, then you may distribute PodParser
under the same terms as Perl itself.
=cut
1;
# vim: ts=4 sw=4 et
| operepo/ope | bin/usr/share/perl5/core_perl/Pod/Parser.pm | Perl | mit | 65,757 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use strict;
use warnings;
use Bio::EnsEMBL::Registry;
use Bio::AlignIO;
#
# This script gets all the GenomicAlignBlocks covering the orthologues
# between human and mouse (via DnaDnaAlignFeature)
#
# Connect anonymously to the public Ensembl database server via the registry
my $reg = 'Bio::EnsEMBL::Registry';
$reg->load_registry_from_db(
-host=>'ensembldb.ensembl.org',
-user=>'anonymous',
);
# get compara DBAdaptor
my $comparaDBA = Bio::EnsEMBL::Registry->get_DBAdaptor('Multi', 'compara');
# get GenomeDB for human and mouse
my $humanGDB = $comparaDBA->get_GenomeDBAdaptor->fetch_by_registry_name("human");
my $human_gdb_id = $humanGDB->dbID;
my $mouseGDB = $comparaDBA->get_GenomeDBAdaptor->fetch_by_registry_name("mouse");
my $mouse_gdb_id = $mouseGDB->dbID;
# method-link-species-set identifying the human/mouse orthologue predictions
my $homology_mlss = $comparaDBA->get_MethodLinkSpeciesSetAdaptor->
fetch_by_method_link_type_genome_db_ids('ENSEMBL_ORTHOLOGUES',[$human_gdb_id,$mouse_gdb_id]);
my $homology_list = $comparaDBA->get_HomologyAdaptor->fetch_all_by_MethodLinkSpeciesSet($homology_mlss);
printf("fetched %d homologies\n", scalar(@$homology_list));
# set up an AlignIO to format SimpleAlign output
my $alignIO = Bio::AlignIO->newFh(-interleaved=>1, -fh=>\*STDOUT, -format=>'psi', -idlength=>20);
my $count=0;
foreach my $homology (@{$homology_list}) {
$count++;
print $homology->toString;
# pick out the mouse member and the human member of this homology
my $human_gene = undef;
my $mouse_gene = undef;
foreach my $member (@{$homology->get_all_Members}) {
if($member->genome_db_id == $mouse_gdb_id) { $mouse_gene = $member; }
if($member->genome_db_id == $human_gdb_id) { $human_gene = $member; }
}
# skip homologies that do not have one member from each species
next unless($mouse_gene and $human_gene);
$mouse_gene->print_member;
$human_gene->print_member;
# get the alignments on a piece of the DnaFrag
printf("fetch_all_by_species_region(%s,%s,%s,%s,%d,%d,%s)\n",
$mouse_gene->genome_db->name, $mouse_gene->genome_db->assembly,
$human_gene->genome_db->name, $human_gene->genome_db->assembly,
$mouse_gene->dnafrag->name, $mouse_gene->dnafrag_start, $mouse_gene->dnafrag_end,
'LASTZ_NET');
my $dnafeatures = $comparaDBA->get_DnaAlignFeatureAdaptor->fetch_all_by_species_region(
$mouse_gene->genome_db->name, $mouse_gene->genome_db->assembly,
$human_gene->genome_db->name, $human_gene->genome_db->assembly,
$mouse_gene->dnafrag->name, $mouse_gene->dnafrag_start, $mouse_gene->dnafrag_end,
'LASTZ_NET');
foreach my $ddaf (@{$dnafeatures}) {
# keep only blocks lying on the expected mouse and human regions
next unless (($mouse_gene->dnafrag->name eq $ddaf->seqname) and ($human_gene->dnafrag->name eq $ddaf->hseqname));
print "=====================================================\n";
print " length: ", $ddaf->alignment_length, "; score: ", $ddaf->score, "\n";
print " - ", join(" : ", $ddaf->species, $ddaf->coord_system_name,
$ddaf->seqname, $ddaf->start, $ddaf->end, $ddaf->strand), "\n";
print " - ", join(" : ", $ddaf->hspecies, $ddaf->coord_system_name,
$ddaf->hseqname, $ddaf->hstart, $ddaf->hend, $ddaf->hstrand), "\n";
print $alignIO $ddaf->get_SimpleAlign;
}
# example script: stop after the first few homologies
last if($count > 10);
}
| ckongEbi/ensembl-compara | scripts/examples/mixed_getBlastAlignmentsOverlappingHomologuesWithDnaDnaAlignFeature.pl | Perl | apache-2.0 | 3,815 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::protocols::smtp::lib::smtp;
use strict;
use warnings;
use Email::Send::SMTP::Gmail;
my $smtp_handle;
my $connected = 0;
sub quit {
    # Close the SMTP session, but only if connect() previously succeeded.
    $smtp_handle->bye if ($connected == 1);
}
sub message {
    my ($self, %options) = @_;

    # Translate each "key=value" entry of --smtp-send-options into the
    # dash-prefixed option expected by Email::Send::SMTP::Gmail::send().
    my %send_options;
    foreach my $entry (@{$self->{option_results}->{smtp_send_options}}) {
        next if ($entry !~ /^(.+?)=(.+)$/);
        $send_options{-$1} = $2;
    }

    # Send the message, bounded by an ALRM-based timeout.
    my $result;
    eval {
        local $SIG{ALRM} = sub { die 'timeout' };
        alarm($self->{option_results}->{timeout});
        $result = $smtp_handle->send(
            -to   => $self->{option_results}->{smtp_to},
            -from => $self->{option_results}->{smtp_from},
            %send_options
        );
        alarm(0);
    };
    if ($@) {
        $self->{output}->output_add(severity => 'unknown',
                                    short_msg => 'Unable to send message: ' . $@);
        $self->{output}->display();
        $self->{output}->exit();
    }
    # send() signals delivery failure by returning -1.
    if ($result == -1) {
        $self->{output}->output_add(severity => 'critical',
                                    short_msg => 'Unable to send message.');
        $self->{output}->display();
        $self->{output}->exit();
    }
    $self->{output}->output_add(severity => 'ok',
                                short_msg => 'Message sent');
}
sub connect {
my ($self, %options) = @_;
my %smtp_options = ();
# A username without a password is a configuration error
if (defined($self->{option_results}->{username}) && $self->{option_results}->{username} ne '' &&
!defined($self->{option_results}->{password})) {
$self->{output}->add_option_msg(short_msg => "Please set --password option.");
$self->{output}->option_exit();
}
# Default to unauthenticated SMTP; switch to login credentials when a
# username is supplied
$smtp_options{-auth} = 'none';
if (defined($self->{option_results}->{username}) && $self->{option_results}->{username} ne '') {
$smtp_options{-login} = $self->{option_results}->{username};
delete $smtp_options{-auth};
}
if (defined($self->{option_results}->{username}) && defined($self->{option_results}->{password})) {
$smtp_options{-pass} = $self->{option_results}->{password};
}
# Severity to report on connection failure; callers may override it
my $connection_exit = defined($options{connection_exit}) ? $options{connection_exit} : 'unknown';
$smtp_options{-port} = $self->{option_results}->{port} if (defined($self->{option_results}->{port}));
# Pass through extra "key=value" pairs from --smtp-options verbatim
foreach my $option (@{$self->{option_results}->{smtp_options}}) {
next if ($option !~ /^(.+?)=(.+)$/);
$smtp_options{-$1} = $2;
}
my ($stdout);
{
# STDOUT is temporarily redirected into $stdout for the duration of the
# constructor call (presumably the library prints diagnostics there --
# see the $stdout check below); the attempt is bounded by an ALRM timeout
eval {
local $SIG{ALRM} = sub { die 'timeout' };
local *STDOUT;
open STDOUT, '>', \$stdout;
alarm($self->{option_results}->{timeout});
$smtp_handle = Email::Send::SMTP::Gmail->new(-smtp=> $self->{option_results}->{hostname},
%smtp_options);
alarm(0);
};
}
if ($@) {
$self->{output}->output_add(severity => $connection_exit,
short_msg => 'Unable to connect to SMTP: ' . $@);
$self->{output}->display();
$self->{output}->exit();
}
# The constructor returns -1 (instead of an object) on connection failure
if (defined($stdout) && $smtp_handle == -1) {
chomp $stdout;
$self->{output}->output_add(severity => $connection_exit,
short_msg => 'Unable to connect to SMTP: ' . $stdout);
$self->{output}->display();
$self->{output}->exit();
}
$connected = 1;
}
1;
| nichols-356/centreon-plugins | apps/protocols/smtp/lib/smtp.pm | Perl | apache-2.0 | 4,376 |
## OpenXPKI::Crypto::Backend::OpenSSL::Command::get_pkcs8_keytype
## Written 2006 by Alexander Klink for the OpenXPKI project
## (C) Copyright 2006 by The OpenXPKI Project
use strict;
use warnings;
package OpenXPKI::Crypto::Backend::OpenSSL::Command::get_pkcs8_keytype;
use base qw(OpenXPKI::Crypto::Backend::OpenSSL::Command);
use OpenXPKI::Debug;
sub get_command
{
    my $self = shift;

    ## Validate the input *before* touching the filesystem: the previous
    ## version wrote $self->{DATA} (possibly undefined) to a temp file
    ## first and only then noticed that PASSWD or DATA was missing.
    if (not exists $self->{PASSWD})
    {
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_CRYPTO_OPENSSL_COMMAND_GET_PKCS8_KEYTYPE_MISSING_PASSWD");
    }
    if (not exists $self->{DATA})
    {
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_CRYPTO_OPENSSL_COMMAND_GET_PKCS8_KEYTYPE_MISSING_DATA");
    }

    ## compensate missing parameters: only pass an engine to openssl when
    ## the configured engine usage covers private key operations
    my $engine = "";
    my $engine_usage = $self->{ENGINE}->get_engine_usage();
    if ($self->{ENGINE}->get_engine() and
        (($engine_usage =~ m{ NEW_ALG }xms) or
         ($engine_usage =~ m{ ALWAYS }xms) or
         ($engine_usage =~ m{ PRIV_KEY_OPS }xms))
    ) {
        $engine = $self->{ENGINE}->get_engine();
    }

    ## write the PKCS#8 blob to a temp file; OUT receives the converted key
    $self->get_tmpfile ('KEY', 'OUT');
    $self->write_file (FILENAME => $self->{KEYFILE},
                       CONTENT  => $self->{DATA},
                       FORCE    => 1);

    ## build the "openssl pkcs8" command line
    my $command = "pkcs8 ";
    $command .= " -inform PEM";
    $command .= " -engine $engine" if ($engine);
    $command .= " -in ".$self->{KEYFILE};
    $command .= " -out ".$self->{OUTFILE};

    ## never put the passphrase on the command line where it would be
    ## visible in the process list; hand it over via the environment
    if ($self->{PASSWD})
    {
        $command .= " -passin env:pwd";
        $self->set_env ("pwd" => $self->{PASSWD});
    }

    return [ $command ];
}
sub hide_output
{
    ## returning 0 means the command output does not need to be hidden
    return 0;
}
sub key_usage
{
    ## returning 1: a private key is involved (it must be decoded first)
    return 1;
}
sub get_result
{
    my $self = shift;

    ## The converted key's PEM header names the algorithm,
    ## e.g. "-----BEGIN RSA PRIVATE KEY-----" yields "RSA".
    my $pem = $self->read_file($self->{OUTFILE});
    my ($type) = $pem =~ m{ \A -----BEGIN\ ([A-Z]+)\ PRIVATE\ KEY----- }xms;
    ##! 16: 'type: ' . $type
    return $type;
}
1;
__END__
=head1 Name
OpenXPKI::Crypto::Backend::OpenSSL::Command::get_pkcs8_keytype
=head1 Description
This command returns the type of a key contained in a PKCS#8 PEM
data block. This is necessary if you want to convert a PKCS#8 to
an OpenSSL/SSLeay format, as you need to know the key type for that
so that the conversion can be done in one step.
=head1 Functions
=head2 get_command
=over
=item * DATA - the PKCS#8 PEM data
=item * PASSWD - the password for the PKCS#8
=back
=head2 hide_output
returns 0 (the extracted key type is not sensitive output)
=head2 key_usage
returns 1 (private key must be decoded first)
=head2 get_result
simply returns the type, e.g. RSA, EC, DSA.
| durko/openxpki | core/server/OpenXPKI/Crypto/Backend/OpenSSL/Command/get_pkcs8_keytype.pm | Perl | apache-2.0 | 2,626 |
#! /usr/bin/perl
# Merge driver - make commands for merging BWTs together
use strict;
use Getopt::Long;
# Command-line defaults; overridable via --threads and --bin
my $numThreads = 1;
my $sgaBin = "sga";
my $bHelp = 0;
GetOptions("threads=i" => \$numThreads,
"bin=s" => \$sgaBin,
"help" => \$bHelp);
if($bHelp)
{
print "sga-mergeDriver.pl - generate sga merge commands from a list of files\n";
print "usage: sga-mergeDriver.pl [options] <files>\n";
print "options: \n";
print " -t,--threads=N use N threads for the merge processes\n";
print " --bin=PROG use PROG as the sga executable [default: sga]\n";
exit(1);
}
# Remaining arguments are the BWT files to merge
my @files = @ARGV;
my $n = scalar(@files);
my $finalName = "final";
# In optimize memory mode, we load the smaller of the two bwts into memory
# In optimize time mode, we load the larger of the two into memory
my $MODE_OPT_MEMORY = 0;
my $MODE_OPT_TIME = 1;
my $mode = $MODE_OPT_TIME;
my $finalParam = "";
# With exactly two inputs this is the last round, so name the output "final"
if($n == 2)
{
$finalParam = "-p $finalName";
}
# Sort the input files by size, smallest to largest
my @sorted = sort { getFilesize($a) <=> getFilesize($b) } @files;
#print join(" ", @sorted) . "\n";
# Merge the largest file with the smallest, etc
# NOTE(review): $half is computed but never used
my $half = $n / 2;
my $i = 0;
my $j = $n - 1;
# Pair up files from opposite ends of the size-sorted list
while($i < $j)
{
print makeMergeLine($sorted[$i], $sorted[$j]);
++$i;
--$j;
}
sub makeMergeLine
{
    # Build one "sga merge" command line for a pair of BWT files.
    my ($f1, $f2) = @_;

    # Order the pair by on-disk size; the operand order passed to sga
    # differs between the two optimization modes below.
    my $larger;
    my $smaller;
    if (getFilesize($f1) > getFilesize($f2))
    {
        $larger  = $f1;
        $smaller = $f2;
    }
    else
    {
        $larger  = $f2;
        $smaller = $f1;
    }

    # $finalParam ("-p final" on the last round) is already part of the
    # preamble. BUG FIX: the previous version appended $finalParam a
    # second time in the optimize-time branch, emitting "-p final" twice.
    my $preamble = "$sgaBin merge -r -t $numThreads $finalParam";
    if ($mode == $MODE_OPT_MEMORY)
    {
        return "$preamble $larger $smaller\n";
    }
    else
    {
        # NOTE(review): operand order determines which BWT sga loads into
        # memory; the order here preserves the original behavior.
        return "$preamble $smaller $larger\n";
    }
}
sub getFilesize
{
    # Size of the given file in bytes (undef if the file does not exist).
    my ($path) = @_;
    return -s $path;
}
| gnewton/MBB-Bio-Roll | mbb-bio/sga/SOURCES/sga-0.9.20/src/bin/sga-mergeDriver.pl | Perl | mit | 1,873 |
=pod
=head1 NAME
provider-storemgmt - The OSSL_STORE library E<lt>-E<gt> provider functions
=head1 SYNOPSIS
#include <openssl/core_dispatch.h>
/*
* None of these are actual functions, but are displayed like this for
* the function signatures for functions that are offered as function
* pointers in OSSL_DISPATCH arrays.
*/
void *OSSL_FUNC_store_open(void *provctx, const char *uri);
void *OSSL_FUNC_store_attach(void *provctx, OSSL_CORE_BIO *bio);
const OSSL_PARAM *store_settable_ctx_params(void *provctx);
int OSSL_FUNC_store_set_ctx_params(void *loaderctx, const OSSL_PARAM[]);
int OSSL_FUNC_store_load(void *loaderctx,
OSSL_CALLBACK *object_cb, void *object_cbarg,
OSSL_PASSPHRASE_CALLBACK *pw_cb, void *pw_cbarg);
int OSSL_FUNC_store_eof(void *loaderctx);
int OSSL_FUNC_store_close(void *loaderctx);
int OSSL_FUNC_store_export_object
(void *loaderctx, const void *objref, size_t objref_sz,
OSSL_CALLBACK *export_cb, void *export_cbarg);
=head1 DESCRIPTION
The STORE operation is the provider side of the L<ossl_store(7)> API.
The primary responsibility of the STORE operation is to load all sorts
of objects from a container indicated by URI. These objects are given
to the OpenSSL library in provider-native object abstraction form (see
L<provider-object(7)>). The OpenSSL library is then responsible for
passing on that abstraction to suitable provided functions.
Examples of functions that the OpenSSL library can pass the abstraction to
include OSSL_FUNC_keymgmt_load() (L<provider-keymgmt(7)>),
OSSL_FUNC_store_export_object() (which exports the object in parameterized
form).
All "functions" mentioned here are passed as function pointers between
F<libcrypto> and the provider in B<OSSL_DISPATCH> arrays via
B<OSSL_ALGORITHM> arrays that are returned by the provider's
provider_query_operation() function
(see L<provider-base(7)/Provider Functions>).
All these "functions" have a corresponding function type definition named
B<OSSL_FUNC_{name}_fn>, and a helper function to retrieve the function pointer
from a B<OSSL_DISPATCH> element named B<OSSL_get_{name}>.
For example, the "function" OSSL_FUNC_store_load() has these:
typedef void *(OSSL_OSSL_FUNC_store_load_fn)(void *provctx,
const OSSL_PARAM params[]);
static ossl_inline OSSL_OSSL_FUNC_store_load_fn
OSSL_OSSL_FUNC_store_load(const OSSL_DISPATCH *opf);
B<OSSL_DISPATCH> arrays are indexed by numbers that are provided as macros
in L<openssl-core_dispatch.h(7)>, as follows:
OSSL_FUNC_store_open OSSL_FUNC_STORE_OPEN
OSSL_FUNC_store_attach OSSL_FUNC_STORE_ATTACH
OSSL_FUNC_store_settable_ctx_params OSSL_FUNC_STORE_SETTABLE_CTX_PARAMS
OSSL_FUNC_store_set_ctx_params OSSL_FUNC_STORE_SET_CTX_PARAMS
OSSL_FUNC_store_load OSSL_FUNC_STORE_LOAD
OSSL_FUNC_store_eof OSSL_FUNC_STORE_EOF
OSSL_FUNC_store_close OSSL_FUNC_STORE_CLOSE
OSSL_FUNC_store_export_object OSSL_FUNC_STORE_EXPORT_OBJECT
=head2 Functions
OSSL_FUNC_store_open() should create a provider side context with data based
on the input I<uri>. The implementation is entirely responsible for the
interpretation of the URI.
OSSL_FUNC_store_attach() should create a provider side context with the core
B<BIO> I<bio> attached. This is an alternative to using a URI to find storage,
supporting L<OSSL_STORE_attach(3)>.
OSSL_FUNC_store_settable_ctx_params() should return a constant array of
descriptor B<OSSL_PARAM>, for parameters that OSSL_FUNC_store_set_ctx_params()
can handle.
OSSL_FUNC_store_set_ctx_params() should set additional parameters, such as what
kind of data to expect, search criteria, and so on. More on those below, in
L</Load Parameters>. Whether unrecognised parameters are an error or simply
ignored is at the implementation's discretion.
Passing NULL for I<params> should return true.
OSSL_FUNC_store_load() loads the next object from the URI opened by
OSSL_FUNC_store_open(), creates an object abstraction for it (see
L<provider-object(7)>), and calls I<object_cb> with it as well as
I<object_cbarg>. I<object_cb> will then interpret the object abstraction
and do what it can to wrap it or decode it into an OpenSSL structure. In
case a passphrase needs to be prompted to unlock an object, I<pw_cb> should
be called.
OSSL_FUNC_store_eof() indicates if the end of the set of objects from the
URI has been reached. When that happens, there's no point trying to do any
further loading.
OSSL_FUNC_store_close() frees the provider side context I<ctx>.
=head2 Load Parameters
=over 4
=item "expect" (B<OSSL_STORE_PARAM_EXPECT>) <integer>
Is a hint of what type of data the OpenSSL library expects to get.
This is only useful for optimization, as the library will check that the
object types match the expectation too.
The number that can be given through this parameter is found in
F<< <openssl/store.h> >>, with the macros having names starting with
C<OSSL_STORE_INFO_>. These are further described in
L<OSSL_STORE_INFO(3)/SUPPORTED OBJECTS>.
=item "subject" (B<OSSL_STORE_PARAM_SUBJECT>) <octet string>
Indicates that the caller wants to search for an object with the given
subject associated. This can be used to select specific certificates
by subject.
The contents of the octet string is expected to be in DER form.
=item "issuer" (B<OSSL_STORE_PARAM_ISSUER>) <octet string>
Indicates that the caller wants to search for an object with the given
issuer associated. This can be used to select specific certificates
by issuer.
The contents of the octet string is expected to be in DER form.
=item "serial" (B<OSSL_STORE_PARAM_SERIAL>) <integer>
Indicates that the caller wants to search for an object with the given
serial number associated.
=item "digest" (B<OSSL_STORE_PARAM_DIGEST>) <UTF8 string>
=item "fingerprint" (B<OSSL_STORE_PARAM_FINGERPRINT>) <octet string>
Indicates that the caller wants to search for an object with the given
fingerprint, computed with the given digest.
=item "alias" (B<OSSL_STORE_PARAM_ALIAS>) <UTF8 string>
Indicates that the caller wants to search for an object with the given
alias (some call it a "friendly name").
=item "properties" (B<OSSL_STORE_PARAM_PROPERTIES>) <utf8 string>
Property string to use when querying for algorithms such as the B<OSSL_DECODER>
decoder implementations.
=item "input-type" (B<OSSL_STORE_PARAM_INPUT_TYPE>) <utf8 string>
Type of the input format as a hint to use when decoding the objects in the
store.
=back
Several of these search criteria may be combined. For example, to
search for a certificate by issuer+serial, both the "issuer" and the
"serial" parameters will be given.
=head1 SEE ALSO
L<provider(7)>
=head1 HISTORY
The STORE interface was introduced in OpenSSL 3.0.
=head1 COPYRIGHT
Copyright 2020-2021 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man7/provider-storemgmt.pod | Perl | bsd-3-clause | 7,223 |
########################################################################
# Bio::KBase::ObjectAPI::KBaseGenomes::GenomeComparisonGenome - This is the moose object corresponding to the KBaseGenomes.GenomeComparisonGenome object
# Authors: Christopher Henry, Scott Devoid, Paul Frybarger
# Contact email: chenry@mcs.anl.gov
# Development location: Mathematics and Computer Science Division, Argonne National Lab
# Date of module creation: 2014-07-23T06:10:57
########################################################################
use strict;
use Bio::KBase::ObjectAPI::KBaseGenomes::DB::GenomeComparisonGenome;
package Bio::KBase::ObjectAPI::KBaseGenomes::GenomeComparisonGenome;
use Moose;
use namespace::autoclean;
# Thin Moose subclass of the auto-generated DB object; every attribute
# and accessor is inherited unchanged, the sections below are extension
# points for hand-written code
extends 'Bio::KBase::ObjectAPI::KBaseGenomes::DB::GenomeComparisonGenome';
#***********************************************************************************************************
# ADDITIONAL ATTRIBUTES:
#***********************************************************************************************************
#***********************************************************************************************************
# BUILDERS:
#***********************************************************************************************************
#***********************************************************************************************************
# CONSTANTS:
#***********************************************************************************************************
#***********************************************************************************************************
# FUNCTIONS:
#***********************************************************************************************************
__PACKAGE__->meta->make_immutable;
1;
| samseaver/KBaseFBAModeling | lib/Bio/KBase/ObjectAPI/KBaseGenomes/GenomeComparisonGenome.pm | Perl | mit | 1,772 |
eval 'exec perl -wS $0 ${1+"$@"}'
    if 0;
#
# $Id$
#
# Created 2010
# Author: Mike Ovsiannikov
#
# Copyright 2010 Quantcast Corp.
#
# This file is part of Kosmos File System (KFS).
#
# Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
# Micro-benchmark: issue a sequence of chunk ALLOCATE RPCs for one file
# over a single TCP connection to a KFS server, then report the elapsed
# time and the achieved operations/second rate.
#
# Usage: allocate.pl [num-ops] [host] [port] [file-id]
#
use Socket;
use IO::Handle;
# Command line arguments; all optional.
my $numcmd = shift || 10;          # number of ALLOCATE requests to send
my $remote = shift || '127.0.0.1'; # server host
my $port = shift || 24000; # random port
my $fid = shift || 8; # 68030      # KFS file id to allocate chunks for
# Open a TCP connection to the server and make the handle unbuffered so
# each request goes out immediately.
socket(SOCK, PF_INET, SOCK_STREAM, getprotobyname('tcp')) or die "socket: $!";
my $iaddr = inet_aton($remote) || die "no host: $remote";
my $paddr = sockaddr_in($port, $iaddr);
connect(SOCK, $paddr) or die "connect: $!";
binmode(SOCK);
SOCK->autoflush(1);
my $start = time();
my $resp = "";
# Sequence numbers match responses to requests; the base is chosen high
# so these test requests are easy to spot in server logs.
my $seqf=999000000;
my $seq=$seqf;
my $end=$seq + $numcmd;
for (; $seq < $end; $seq++) {
    # A KFS RPC is a block of CRLF-terminated header lines ended by an
    # empty line.  The leading newline before ALLOCATE is part of the
    # wire format used by this test; do not reformat this literal.
    print SOCK "
ALLOCATE\r
Cseq: $seq\r
Version: KFS/1.0\r
Client-Protocol-Version: 100\r
Client-host: somehostname\r
Pathname: /sort/job/1/fanout/27/file.27\r
File-handle: $fid\r
Chunk-offset: 0\r
Chunk-append: 1\r
Space-reserve: 0\r
Max-appenders: 640000000\r
\r
";
    # Read the response headers, capturing the echoed sequence number
    # (Cseq) and the Status; a bare CRLF line terminates the response.
    my $cs = -5;
    my $st = -1;
    $resp = "";
    while (defined($line = <SOCK>)) {
        $resp = $resp . $line;
        if ($cs < 0 && $line =~ /Cseq: (\d+)/) {
            $cs = $1;
        }
        if ($st != 0 && $line =~ /Status: (\d+)/) {
            $st = $1;
        }
        last if ($line eq "\r\n");
    }
    # Stop at the first mismatched sequence number or non-zero status.
    last if ($cs != $seq || $st != 0);
}
$stop = time();
close (SOCK) || die "close: $!";
# Show the last response received (useful when the loop aborted early).
print $resp;
printf("elapsed: %d %d op/sec\n",
    $stop - $start,
    $stop > $start ? ($seq - $seqf) / ($stop - $start) : 0
);
| qnu/qfs | src/test-scripts/allocate.pl | Perl | apache-2.0 | 2,097 |
#!/usr/bin/env perl
# fnhtml.pl - CGI helper that emits an HTML page showing the AST of one
# function, extracted from the per-pass HTML logs written by the compiler.
#
# Usage: perl fnhtml.pl sys_dir log_dir fnid
#   sys_dir - installation root (for mktree.js / mktree.css)
#   log_dir - directory holding pass<N>_*.html log files
#   fnid    - numeric id of the function whose AST should be shown
use strict;
use warnings;

if ($#ARGV != 2) {
    print"Content-type: text/html\n\n";
    die "usage: perl fnhtml.pl sys_dir log_dir fnid\n";
}
my ($sys_dir, $log_dir, $fnid) = @ARGV;

print "Content-type: text/html\n\n";
print "<HTML>\n";
print "<HEAD>\n";
print "<TITLE> AST for Function $fnid </TITLE>\n";
print "<SCRIPT SRC=\"$sys_dir/etc/www/mktree.js\" LANGUAGE=\"JavaScript\"></SCRIPT>";
print "<LINK REL=\"stylesheet\" HREF=\"$sys_dir/etc/www/mktree.css\">";
print "</HEAD>\n";
print "<div style=\"text-align: center;\"><big><big><span style=\"font-weight: bold;\">";
print "AST for Function $fnid <br><br></span></big></big>\n";
print "<div style=\"text-align: left;\">\n\n";

# Up to 99 compiler passes may have left log files named pass<N>_*.html.
for my $i (1 .. 99) {
    # Use glob() and direct file reads instead of shelling out to
    # `ls`/`cat`: same file order (both sort lexically), but no shell
    # interpolation of the user-supplied log_dir path.
    my @files = sort glob("$log_dir/pass${i}_*.html");
    next unless @files;

    my @lines;
    for my $file (@files) {
        open my $fh, '<', $file or next;
        push @lines, <$fh>;
        close $fh;
    }
    next unless @lines;

    # The pass name is the first double-quoted token on the first line.
    my ($passname) = (split /"/, $lines[0])[1];
    $passname = '' unless defined $passname;
    print "<B>$passname<BR>\n";

    # CHPLTAG lines carrying "FN<fnid>" bracket the section for the
    # requested function; echo only the lines between those markers.
    my $on = 0;
    for my $line (@lines) {
        if ($line =~ m/CHPLTAG/) {
            if ($line =~ m/"FN$fnid"/) {
                $on = 1 - $on;
            }
        }
        elsif ($on) {
            print $line;
        }
    }
}
print "</HTML>\n";
| CoryMcCartan/chapel | compiler/etc/www/fnhtml.pl | Perl | apache-2.0 | 1,271 |
package DDG::Goodie::Reverse;
# ABSTRACT: Reverse the order of chars in the remainder

use strict;
use DDG::Goodie;

triggers startend => "reverse text";

zci answer_type => "reverse";
zci is_cached   => 1;

# Reverse the characters of the query remainder, returning both a plain
# text answer and a structured answer for display.
handle remainder => sub {
    my $query = $_;

    # Nothing to reverse for an empty query.
    return unless $query;

    # DNA/RNA reverse-complement requests are handled by the
    # ReverseComplement goodie; leave those alone.
    return if $query =~ /^complement\s(of )?[ATCGURYKMSWBVDHN\s-]+$/i;

    my $reversed = join '', reverse split //, $query;

    return qq|Reversed "$query": | . $reversed,
        structured_answer => {
            input     => [html_enc($query)],
            operation => 'Reverse string',
            result    => html_enc($reversed),
        };
};

1;
| aleksandar-todorovic/zeroclickinfo-goodies | lib/DDG/Goodie/Reverse.pm | Perl | apache-2.0 | 682 |
use utf8;
package Schema::Result::Cachegroup;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
Schema::Result::Cachegroup
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 TABLE: C<cachegroup>
=cut
__PACKAGE__->table("cachegroup");
=head1 ACCESSORS
=head2 id
data_type: 'bigint'
is_auto_increment: 1
is_nullable: 0
sequence: 'cachegroup_id_seq'
=head2 name
data_type: 'text'
is_nullable: 0
=head2 short_name
data_type: 'text'
is_nullable: 0
=head2 latitude
data_type: 'numeric'
is_nullable: 1
=head2 longitude
data_type: 'numeric'
is_nullable: 1
=head2 parent_cachegroup_id
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 1
=head2 secondary_parent_cachegroup_id
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 1
=head2 type
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 0
=head2 last_updated
data_type: 'timestamp with time zone'
default_value: current_timestamp
is_nullable: 1
original: {default_value => \"now()"}
=cut
__PACKAGE__->add_columns(
    "id",
    {
        data_type => "bigint",
        is_auto_increment => 1,
        is_nullable => 0,
        sequence => "cachegroup_id_seq",
    },
    "name",
    { data_type => "text", is_nullable => 0 },
    "short_name",
    { data_type => "text", is_nullable => 0 },
    "latitude",
    { data_type => "numeric", is_nullable => 1 },
    "longitude",
    { data_type => "numeric", is_nullable => 1 },
    "parent_cachegroup_id",
    { data_type => "bigint", is_foreign_key => 1, is_nullable => 1 },
    "secondary_parent_cachegroup_id",
    { data_type => "bigint", is_foreign_key => 1, is_nullable => 1 },
    "type",
    { data_type => "bigint", is_foreign_key => 1, is_nullable => 0 },
    "last_updated",
    {
        data_type => "timestamp with time zone",
        default_value => \"current_timestamp",
        is_nullable => 1,
        original => { default_value => \"now()" },
    },
);
=head1 PRIMARY KEY
=over 4
=item * L</id>
=item * L</type>
=back
=cut
__PACKAGE__->set_primary_key("id", "type");
=head1 UNIQUE CONSTRAINTS
=head2 C<idx_54252_cg_name_unique>
=over 4
=item * L</name>
=back
=cut
__PACKAGE__->add_unique_constraint("idx_54252_cg_name_unique", ["name"]);
=head2 C<idx_54252_cg_short_unique>
=over 4
=item * L</short_name>
=back
=cut
__PACKAGE__->add_unique_constraint("idx_54252_cg_short_unique", ["short_name"]);
=head2 C<idx_54252_lo_id_unique>
=over 4
=item * L</id>
=back
=cut
__PACKAGE__->add_unique_constraint("idx_54252_lo_id_unique", ["id"]);
=head1 RELATIONS
=head2 asns
Type: has_many
Related object: L<Schema::Result::Asn>
=cut
__PACKAGE__->has_many(
    "asns",
    "Schema::Result::Asn",
    { "foreign.cachegroup" => "self.id" },
    { cascade_copy => 0, cascade_delete => 0 },
);
=head2 cachegroup_parameters
Type: has_many
Related object: L<Schema::Result::CachegroupParameter>
=cut
__PACKAGE__->has_many(
    "cachegroup_parameters",
    "Schema::Result::CachegroupParameter",
    { "foreign.cachegroup" => "self.id" },
    { cascade_copy => 0, cascade_delete => 0 },
);
=head2 cachegroup_secondary_parent_cachegroups
Type: has_many
Related object: L<Schema::Result::Cachegroup>
=cut
__PACKAGE__->has_many(
    "cachegroup_secondary_parent_cachegroups",
    "Schema::Result::Cachegroup",
    { "foreign.secondary_parent_cachegroup_id" => "self.id" },
    { cascade_copy => 0, cascade_delete => 0 },
);
=head2 cachegroups
Type: has_many
Related object: L<Schema::Result::Cachegroup>
=cut
__PACKAGE__->has_many(
    "cachegroups",
    "Schema::Result::Cachegroup",
    { "foreign.parent_cachegroup_id" => "self.id" },
    { cascade_copy => 0, cascade_delete => 0 },
);
=head2 parent_cachegroup
Type: belongs_to
Related object: L<Schema::Result::Cachegroup>
=cut
__PACKAGE__->belongs_to(
    "parent_cachegroup",
    "Schema::Result::Cachegroup",
    { id => "parent_cachegroup_id" },
    {
        is_deferrable => 0,
        join_type => "LEFT",
        on_delete => "NO ACTION",
        on_update => "NO ACTION",
    },
);
=head2 secondary_parent_cachegroup
Type: belongs_to
Related object: L<Schema::Result::Cachegroup>
=cut
__PACKAGE__->belongs_to(
    "secondary_parent_cachegroup",
    "Schema::Result::Cachegroup",
    { id => "secondary_parent_cachegroup_id" },
    {
        is_deferrable => 0,
        join_type => "LEFT",
        on_delete => "NO ACTION",
        on_update => "NO ACTION",
    },
);
=head2 servers
Type: has_many
Related object: L<Schema::Result::Server>
=cut
__PACKAGE__->has_many(
    "servers",
    "Schema::Result::Server",
    { "foreign.cachegroup" => "self.id" },
    { cascade_copy => 0, cascade_delete => 0 },
);
=head2 staticdnsentries
Type: has_many
Related object: L<Schema::Result::Staticdnsentry>
=cut
__PACKAGE__->has_many(
    "staticdnsentries",
    "Schema::Result::Staticdnsentry",
    { "foreign.cachegroup" => "self.id" },
    { cascade_copy => 0, cascade_delete => 0 },
);
=head2 type
Type: belongs_to
Related object: L<Schema::Result::Type>
=cut
__PACKAGE__->belongs_to(
    "type",
    "Schema::Result::Type",
    { id => "type" },
    { is_deferrable => 0, on_delete => "NO ACTION", on_update => "NO ACTION" },
);
# Created by DBIx::Class::Schema::Loader v0.07046 @ 2016-11-18 22:45:19
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:lU7dUVFuoTyhpC7x7BGaDg
# You can replace this text with custom code or comments, and it will be preserved on regeneration
#
# NOTE(review): everything above the md5sum marker is generated by
# DBIx::Class::Schema::Loader from the `cachegroup` table; change the
# database schema and regenerate rather than editing it by hand,
# otherwise the checksum guard will prevent clean regeneration.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
1;
| rscrimojr/incubator-trafficcontrol | traffic_ops/app/lib/Schema/Result/Cachegroup.pm | Perl | apache-2.0 | 5,972 |
=head1 NAME
Imager::Filters - Entire Image Filtering Operations
=head1 SYNOPSIS
use Imager;
$img = ...;
$img->filter(type=>'autolevels');
$img->filter(type=>'autolevels', lsat=>0.2);
$img->filter(type=>'turbnoise')
# and lots of others
load_plugin("dynfilt/dyntest.so")
or die "unable to load plugin\n";
$img->filter(type=>'lin_stretch', a=>35, b=>200);
unload_plugin("dynfilt/dyntest.so")
or die "unable to load plugin\n";
$out = $img->difference(other=>$other_img);
=head1 DESCRIPTION
Filters are operations that have similar calling interface.
=over
=item filter()
Parameters:
=over
=item *
type - the type of filter, see L</Types of Filters>.
=item *
many other possible parameters, see L</Types of Filters> below.
=back
Returns the invocant (C<$self>) on success, returns a false value on
failure. You can call C<< $self->errstr >> to determine the cause of
the failure.
$self->filter(type => $type, ...)
or die $self->errstr;
=back
=head2 Types of Filters
Here is a list of the filters that are always available in Imager.
This list can be obtained by running the C<filterlist.perl> script
that comes with the module source.
Filter Arguments Default value
autolevels lsat 0.1
usat 0.1
skew 0
bumpmap bump lightx lighty
elevation 0
st 2
bumpmap_complex bump
channel 0
tx 0
ty 0
Lx 0.2
Ly 0.4
Lz -1
cd 1.0
cs 40.0
n 1.3
Ia (0 0 0)
Il (255 255 255)
Is (255 255 255)
contrast intensity
conv coef
fountain xa ya xb yb
ftype linear
repeat none
combine none
super_sample none
ssample_param 4
segments(see below)
gaussian stddev
gradgen xo yo colors
dist 0
hardinvert
hardinvertall
mosaic size 20
noise amount 3
subtype 0
postlevels levels 10
radnoise xo 100
yo 100
ascale 17.0
rscale 0.02
turbnoise xo 0.0
yo 0.0
scale 10.0
unsharpmask stddev 2.0
scale 1.0
watermark wmark
pixdiff 10
tx 0
ty 0
All parameters must have some value but if a parameter has a default
value it may be omitted when calling the filter function.
Every one of these filters modifies the image in place.
If none of the filters here do what you need, the
L<Imager::Engines/transform()> or L<Imager::Engines/transform2()>
function may be useful.
=for stopwords
autolevels bumpmap bumpmap_complex conv gaussian hardinvert hardinvertall
radnoise turbnoise unsharpmask gradgen postlevels
A reference of the filters follows:
=over
=item autolevels
scales the value of each channel so that the values in the image will
cover the whole possible range for the channel. C<lsat> and C<usat>
truncate the range by the specified fraction at the top and bottom of
the range respectively.
# increase contrast per channel, losing little detail
$img->filter(type=>"autolevels")
or die $img->errstr;
# increase contrast, losing 20% of highlight at top and bottom range
$img->filter(type=>"autolevels", lsat=>0.2, usat=>0.2)
or die $img->errstr;
=item bumpmap
uses the channel C<elevation> image C<bump> as a bump map on your
image, with the light at (C<lightx>, C<lightty>), with a shadow length
of C<st>.
$img->filter(type=>"bumpmap", bump=>$bumpmap_img,
lightx=>10, lighty=>10, st=>5)
or die $img->errstr;
=item bumpmap_complex
uses the channel C<channel> image C<bump> as a bump map on your image.
If C<< Lz < 0 >> the three L parameters are considered to be the
direction of the light. If C<< Lz > 0 >> the L parameters are
considered to be the light position. C<Ia> is the ambient color,
C<Il> is the light color, C<Is> is the color of specular highlights.
C<cd> is the diffuse coefficient and C<cs> is the specular
coefficient. C<n> is the shininess of the surface.
$img->filter(type=>"bumpmap_complex", bump=>$bumpmap_img)
or die $img->errstr;
=item contrast
scales each channel by C<intensity>. Values of C<intensity> < 1.0
will reduce the contrast.
# higher contrast
$img->filter(type=>"contrast", intensity=>1.3)
or die $img->errstr;
# lower contrast
$img->filter(type=>"contrast", intensity=>0.8)
or die $img->errstr;
=item conv
performs 2 1-dimensional convolutions on the image using the values
from C<coef>. C<coef> should be have an odd length and the sum of the
coefficients must be non-zero.
# sharper
$img->filter(type=>"conv", coef=>[-0.5, 2, -0.5 ])
or die $img->errstr;
# blur
$img->filter(type=>"conv", coef=>[ 1, 2, 1 ])
or die $img->errstr;
# error
$img->filter(type=>"conv", coef=>[ -0.5, 1, -0.5 ])
or die $img->errstr;
=item fountain
renders a fountain fill, similar to the gradient tool in most paint
software. The default fill is a linear fill from opaque black to
opaque white. The points C<A(xa, ya)> and C<B(xb, yb)> control the
way the fill is performed, depending on the C<ftype> parameter:
=for stopwords ramping
=over
=item C<linear>
the fill ramps from A through to B.
=item C<bilinear>
the fill ramps in both directions from A, where AB defines the length
of the gradient.
=item C<radial>
A is the center of a circle, and B is a point on its circumference.
The fill ramps from the center out to the circumference.
=item C<radial_square>
A is the center of a square and B is the center of one of its sides.
This can be used to rotate the square. The fill ramps out to the
edges of the square.
=item C<revolution>
A is the center of a circle and B is a point on its circumference. B
marks the 0 and 360 point on the circle, with the fill ramping
clockwise.
=item C<conical>
A is the center of a circle and B is a point on its circumference. B
marks the 0 point on the circle, with the fill ramping in both
directions to meet at the opposite side of the circle.
=back
The C<repeat> option controls how the fill is repeated for some
C<ftype>s after it leaves the AB range:
=over
=item C<none>
no repeats, points outside of each range are treated as if they were
on the extreme end of that range.
=item C<sawtooth>
the fill simply repeats in the positive direction
=item C<triangle>
the fill repeats in reverse and then forward and so on, in the
positive direction
=item C<saw_both>
the fill repeats in both the positive and negative directions (only
meaningful for a linear fill).
=item C<tri_both>
as for triangle, but in the negative direction too (only meaningful
for a linear fill).
=back
By default the fill simply overwrites the whole image (unless you have
parts of the range 0 through 1 that aren't covered by a segment), if
any segments of your fill have any transparency, you can set the
I<combine> option to 'normal' to have the fill combined with the
existing pixels. See the description of I<combine> in L<Imager::Fill>.
If your fill has sharp edges, for example between steps if you use
repeat set to 'triangle', you may see some aliased or ragged edges.
You can enable super-sampling which will take extra samples within the
pixel in an attempt to anti-alias the fill.
The possible values for the super_sample option are:
=over
=item none
no super-sampling is done
=item grid
a square grid of points are sampled. The number of points sampled is
the square of ceil(0.5 + sqrt(ssample_param)).
=item random
a random set of points within the pixel are sampled. This looks
pretty bad for low ssample_param values.
=item circle
the points on the radius of a circle within the pixel are sampled.
This seems to produce the best results, but is fairly slow (for now).
=back
You can control the level of sampling by setting the ssample_param
option. This is roughly the number of points sampled, but depends on
the type of sampling.
The segments option is an arrayref of segments. You really should use
the L<Imager::Fountain> class to build your fountain fill. Each
segment is an array ref containing:
=over
=item start
a floating point number between 0 and 1, the start of the range of
fill parameters covered by this segment.
=item middle
a floating point number between start and end which can be used to
push the color range towards one end of the segment.
=item end
a floating point number between 0 and 1, the end of the range of fill
parameters covered by this segment. This should be greater than
start.
=item c0
=item c1
The colors at each end of the segment. These can be either
Imager::Color or Imager::Color::Float objects.
=item segment type
The type of segment, this controls the way the fill parameter varies
over the segment. 0 for linear, 1 for curved (unimplemented), 2 for
sine, 3 for sphere increasing, 4 for sphere decreasing.
=item color type
The way the color varies within the segment, 0 for simple RGB, 1 for
hue increasing and 2 for hue decreasing.
=back
Don't forget to use Imager::Fountain instead of building your own.
Really. It even loads GIMP gradient files.
# build the gradient the hard way - linear from black to white,
# then back again
my @simple =
(
[ 0, 0.25, 0.5, 'black', 'white', 0, 0 ],
[ 0.5, 0.75, 1.0, 'white', 'black', 0, 0 ],
);
# across
my $linear = $img->copy;
$linear->filter(type => "fountain",
ftype => 'linear',
repeat => 'sawtooth',
segments => \@simple,
xa => 0,
ya => $linear->getheight / 2,
xb => $linear->getwidth - 1,
yb => $linear->getheight / 2)
or die $linear->errstr;
# around
my $revolution = $img->copy;
$revolution->filter(type => "fountain",
ftype => 'revolution',
segments => \@simple,
xa => $revolution->getwidth / 2,
ya => $revolution->getheight / 2,
xb => $revolution->getwidth / 2,
yb => 0)
or die $revolution->errstr;
# out from the middle
my $radial = $img->copy;
$radial->filter(type => "fountain",
ftype => 'radial',
segments => \@simple,
xa => $im->getwidth / 2,
ya => $im->getheight / 2,
xb => $im->getwidth / 2,
yb => 0)
or die $radial->errstr;
=for stopwords Gaussian
=item gaussian
performs a Gaussian blur of the image, using C<stddev> as the standard
deviation of the curve used to combine pixels, larger values give
bigger blurs. For a definition of Gaussian Blur, see:
http://www.maths.abdn.ac.uk/~igc/tch/mx4002/notes/node99.html
Values of C<stddev> around 0.5 provide a barely noticeable blur,
values around 5 provide a very strong blur.
# only slightly blurred
$img->filter(type=>"gaussian", stddev=>0.5)
or die $img->errstr;
# more strongly blurred
$img->filter(type=>"gaussian", stddev=>5)
or die $img->errstr;
=item gradgen
renders a gradient, with the given I<colors> at the corresponding
points (x,y) in C<xo> and C<yo>. You can specify the way distance is
measured for color blending by setting C<dist> to 0 for Euclidean, 1
for Euclidean squared, and 2 for Manhattan distance.
$img->filter(type="gradgen",
xo=>[ 10, 50, 10 ],
yo=>[ 10, 50, 50 ],
colors=>[ qw(red blue green) ]);
=item hardinvert
X<filters, hardinvert>X<hardinvert>
inverts the image, black to white, white to black. All color channels
are inverted, excluding the alpha channel if any.
$img->filter(type=>"hardinvert")
or die $img->errstr;
=item hardinvertall
X<filters, hardinvertall>X<hardinvertall>
inverts the image, black to white, white to black. All channels are
inverted, including the alpha channel if any.
$img->filter(type=>"hardinvertall")
or die $img->errstr;
=item mosaic
produces averaged tiles of the given C<size>.
$img->filter(type=>"mosaic", size=>5)
or die $img->errstr;
=item noise
adds noise of the given C<amount> to the image. If C<subtype> is
zero, the noise is even to each channel, otherwise noise is added to
each channel independently.
# monochrome noise
$img->filter(type=>"noise", amount=>20, subtype=>0)
or die $img->errstr;
# color noise
$img->filter(type=>"noise", amount=>20, subtype=>1)
or die $img->errstr;
=for stopwords Perlin
=item radnoise
renders radiant Perlin turbulent noise. The center of the noise is at
(C<xo>, C<yo>), C<ascale> controls the angular scale of the noise ,
and C<rscale> the radial scale, higher numbers give more detail.
$img->filter(type=>"radnoise", xo=>50, yo=>50,
ascale=>1, rscale=>0.02)
or die $img->errstr;
=item postlevels
alters the image to have only C<levels> distinct level in each
channel.
$img->filter(type=>"postlevels", levels=>10)
or die $img->errstr;
=item turbnoise
renders Perlin turbulent noise. (C<xo>, C<yo>) controls the origin of
the noise, and C<scale> the scale of the noise, with lower numbers
giving more detail.
$img->filter(type=>"turbnoise", xo=>10, yo=>10, scale=>10)
or die $img->errstr;
=for stopwords unsharp
=item unsharpmask
performs an unsharp mask on the image. This increases the contrast of
edges in the image.
This is the result of subtracting a Gaussian blurred version of the
image from the original. C<stddev> controls the C<stddev> parameter
of the Gaussian blur. Each output pixel is:
in + scale * (in - blurred)
eg.
$img->filter(type=>"unsharpmask", stddev=>1, scale=>0.5)
or die $img->errstr;
C<unsharpmask> has the following parameters:
=for stopwords GIMP GIMP's
=over
=item *
C<stddev> - this is equivalent to the C<Radius> value in the GIMP's
unsharp mask filter. This controls the size of the contrast increase
around edges, larger values will remove fine detail. You should
probably experiment on the types of images you plan to work with.
Default: 2.0.
=item *
C<scale> - controls the strength of the edge enhancement, equivalent
to I<Amount> in the GIMP's unsharp mask filter. Default: 1.0.
=back
=item watermark
applies C<wmark> as a watermark on the image with strength C<pixdiff>,
with an origin at (C<tx>, C<ty>)
$img->filter(type=>"watermark", tx=>10, ty=>50,
wmark=>$wmark_image, pixdiff=>50)
or die $img->errstr;
=back
A demonstration of most of the filters can be found at:
http://www.develop-help.com/imager/filters.html
=head2 External Filters
As of Imager 0.48 you can create perl or XS based filters and hook
them into Imager's filter() method:
=over
=item register_filter()
Registers a filter so it is visible via Imager's filter() method.
Imager->register_filter(type => 'your_filter',
defaults => { parm1 => 'default1' },
callseq => [ qw/image parm1/ ],
callsub => \&your_filter);
$img->filter(type=>'your_filter', parm1 => 'something');
The following parameters are needed:
=over
=item *
C<type> - the type value that will be supplied to filter() to use your
filter.
=item *
C<defaults> - a hash of defaults for the filter's parameters
=item *
C<callseq> - a reference to an array of required parameter names.
=item *
C<callsub> - a code reference called to execute your filter. The
parameters passed to filter() are supplied as a list of parameter
name, value ... which can be assigned to a hash.
The special parameters C<image> and C<imager> are supplied as the low
level image object from $self and $self itself respectively.
The function you supply must modify the image in place.
To indicate an error, die with an error message followed by a
newline. C<filter()> will store the error message as the C<errstr()>
for the invocant and return false to indicate failure.
sub my_filter {
my %opts = @_;
_is_valid($opts{myparam})
or die "myparam invalid!\n";
# actually do the filtering...
}
=back
See L<Imager::Filter::Mandelbrot> for an example.
=back
=for stopwords DSOs
=head2 Plug-ins
The plug in interface is deprecated. Please use the Imager API, see
L<Imager::API> and L</External Filters> for details
It is possible to add filters to the module without recompiling Imager
itself. This is done by using DSOs (Dynamic shared object) available
on most systems. This way you can maintain your own filters and not
have to have it added to Imager, or worse patch every new version of
Imager. Modules can be loaded AND UNLOADED at run time. This means
that you can have a server/daemon thingy that can do something like:
load_plugin("dynfilt/dyntest.so")
or die "unable to load plugin\n";
$img->filter(type=>'lin_stretch', a=>35, b=>200);
unload_plugin("dynfilt/dyntest.so")
or die "unable to load plugin\n";
Someone decides that the filter is not working as it should -
F<dyntest.c> can be modified and recompiled, and then reloaded:
load_plugin("dynfilt/dyntest.so")
or die "unable to load plugin\n";
$img->filter(%hsh);
=for stopwords Linux Solaris HPUX OpenBSD FreeBSD TRU64 OSF1 AIX Win32 OS X
Note: This has been tested successfully on the following systems:
Linux, Solaris, HPUX, OpenBSD, FreeBSD, TRU64/OSF1, AIX, Win32, OS X.
=over
=item load_plugin()
This is a function, not a method, exported by default. You should
import this function explicitly for future compatibility if you need
it.
Accepts a single parameter, the name of a shared library file to load.
Returns true on success. Check Imager->errstr on failure.
=item unload_plugin()
This is a function, not a method, which is exported by default. You
should import this function explicitly for future compatibility if you
need it.
Accepts a single parameter, the name of a shared library to unload.
This library must have been previously loaded by load_plugin().
Returns true on success. Check Imager->errstr on failure.
=back
A few example plug-ins are included and built (but not installed):
=over
=item *
F<plugins/dyntest.c> - provides the C<null> (no action) filter, and
C<lin_stretch> filters. C<lin_stretch> stretches sample values
between C<a> and C<b> out to the full sample range.
=item *
F<plugins/dt2.c> - provides the C<html_art> filter that writes the
image to the HTML fragment file supplied in C<fname> as a HTML table.
=item *
F<plugins/flines.c> - provides the C<flines> filter that dims
alternate lines to emulate an old CRT display.
L<Imager::Filter::Flines> provides the same functionality.
=item *
F<plugins/mandelbrot.c> - provides the C<mandelbrot> filter that
renders the Mandelbrot set within the given range of x [-2, 0.5) and y
[-1.25, 1,25). L<Imager::Filter::Mandelbrot> provides a more flexible
Mandelbrot set renderer.
=back
=head2 Image Difference
=over
=item difference()
You can create a new image that is the difference between 2 other images.
my $diff = $img->difference(other=>$other_img);
For each pixel in $img that is different to the pixel in $other_img,
the pixel from $other_img is given, otherwise the pixel is transparent
black.
This can be used for debugging image differences ("Where are they
different?"), and for optimizing animated GIFs.
Note that $img and $other_img must have the same number of channels.
The width and height of $diff will be the minimum of each of the width
and height of $img and $other_img.
Parameters:
=over
=item *
C<other> - the other image object to compare against
=item *
C<mindist> - the difference between corresponding samples must be
greater than C<mindist> for the pixel to be considered different. So
a value of zero returns all different pixels, not all pixels. Range:
0 to 255 inclusive. Default: 0.
For large sample images this is scaled down to the range 0 .. 1.
=back
=back
=head1 AUTHOR
Arnar M. Hrafnkelsson, Tony Cook <tonyc@cpan.org>.
=head1 SEE ALSO
Imager, Imager::Filter::Flines, Imager::Filter::Mandelbrot
=head1 REVISION
$Revision$
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/Imager/Filters.pod | Perl | mit | 20,829 |
package DDG::Spice::RedditSearch;
# ABSTRACT: Search for Reddit

use strict;
use DDG::Spice;

# Instant-answer metadata.
name "Reddit Search";
description "Search Reddit posts";
source "Reddit";
primary_example_queries "reddit baking";
category "forums";
topics "geek", "social";
code_url "https://github.com/duckduckgo/zeroclickinfo-spice/blob/master/lib/DDG/Spice/RedditSearch.pm";
icon_url "/i/www.reddit.com.ico";
attribution web => ['http://dylansserver.com','Dylan Lloyd'],
            email => ['dylan@dylansserver.com','Dylan Lloyd'];

# Fire whenever the query contains the word "reddit".
triggers any => "reddit";

# The remainder is substituted for $1 in reddit's JSON search endpoint.
spice to => 'http://www.reddit.com/search.json?q=$1&restrict_sr=true&sort=relevance&jsonp=ddg_spice_reddit';

# Forward the remaining search terms; bail out when the query was
# nothing but the trigger word itself.
handle remainder => sub {
    my $terms = $_;
    return unless $terms;
    return $terms;
};

1;
| ppant/zeroclickinfo-spice | lib/DDG/Spice/RedditSearch.pm | Perl | apache-2.0 | 719 |
#!/usr/bin/env perl
# Generate the bash completion helper for git-hub from the command
# reference documentation supplied on stdin (or as a file argument).
use strict;
use warnings;

# Extract every command name from the "= Commands" section of the input
# document (up to and including "== Configuration Commands") and print a
# bash snippet that feeds the sorted list to git's __gitcomp helper.
sub main {
    # Slurp the whole document.
    my $input = do { local $/; <> };
    defined $input
        or die "generate-completion.pl: no input\n";

    # Drop everything before the "= Commands" heading, then keep only
    # the text up to the end of "== Configuration Commands".
    $input =~ s/.*?\n= Commands\n//s;
    $input =~ s/(.*?\n== Configuration Commands\n.*?\n)==? .*/$1/s;

    my @list;
    # Each command is a "- " bullet whose first line is a backtick-quoted
    # usage string; the command name is the usage string's first word.
    while ($input =~ s/.*?^- (.*?)(?=\n- |\n== |\z)//ms) {
        my $text = $1;
        $text =~ /\A(.*)\n/
            or die "Bad text '$text'";
        my $usage = $1;
        $usage =~ s/\A`(.*)`\z/$1/
            or die "Bad usage: '$text'";
        (my $name = $usage) =~ s/ .*//;
        push @list, $name;
    }
    @list = sort @list;

    print <<"...";
#!bash
# DO NOT EDIT. This file generated by tool/generate-completion.pl.
_git_hub() {
    __gitcomp "@list"
}
...
}

main;
| dnmfarrell/git-hub | tool/generate-completion.pl | Perl | mit | 692 |
package IO::Compress::Deflate ;
require 5.006 ;
use strict ;
use warnings;
use bytes;
require Exporter ;
use IO::Compress::RawDeflate 2.069 ();
use IO::Compress::Adapter::Deflate 2.069 ;
use IO::Compress::Zlib::Constants 2.069 ;
use IO::Compress::Base::Common 2.069 qw();
our ($VERSION, @ISA, @EXPORT_OK, %EXPORT_TAGS, %DEFLATE_CONSTANTS, $DeflateError);
$VERSION = '2.069';
$DeflateError = '';
@ISA = qw(Exporter IO::Compress::RawDeflate);
@EXPORT_OK = qw( $DeflateError deflate ) ;
%EXPORT_TAGS = %IO::Compress::RawDeflate::DEFLATE_CONSTANTS ;
push @{ $EXPORT_TAGS{all} }, @EXPORT_OK ;
Exporter::export_ok_tags('all');
# Constructor for the OO interface: build a self-tied compression
# object bound to the package error variable, then initialise it with
# the caller's output destination and options.
sub new
{
    my ($class, @args) = @_;

    my $obj = IO::Compress::Base::Common::createSelfTiedObject($class, \$DeflateError);

    return $obj->_create(undef, @args);
}
# One-shot functional interface: compress $input to $output in a single
# call, storing any error message in $DeflateError.  All arguments are
# forwarded unchanged to the base-class one-shot implementation.
sub deflate
{
    my $obj = IO::Compress::Base::Common::createSelfTiedObject(undef, \$DeflateError);
    return $obj->_def(@_);
}
# Set a bit field: mask $value down to the bits allowed by $mask, shift
# it left by $offset bits, and OR the result into $into.  Returns the
# combined value; $into itself is not modified.
sub bitmask($$$$)
{
    my ($into, $value, $offset, $mask) = @_;

    return $into | (($value & $mask) << $offset);
}
# Build an RFC 1950 (zlib) stream header from the compression method,
# the window-size info (CINFO), the advertised level flag and an
# optional preset-dictionary Adler-32 checksum.  Returns the packed
# CMF/FLG byte pair, followed by the 4-byte dictionary checksum when
# one was supplied.
sub mkDeflateHdr($$$;$)
{
    my ($method, $cinfo, $level, $fdict_adler) = @_;

    my $have_dict = defined $fdict_adler ? 1 : 0;

    # CMF byte: compression method plus window size information.
    my $cmf = 0;
    $cmf = bitmask($cmf, $method, ZLIB_CMF_CM_OFFSET,    ZLIB_CMF_CM_BITS);
    $cmf = bitmask($cmf, $cinfo,  ZLIB_CMF_CINFO_OFFSET, ZLIB_CMF_CINFO_BITS);

    # FLG byte: dictionary-present flag and compression-level hint.
    my $flg = 0;
    $flg = bitmask($flg, $have_dict, ZLIB_FLG_FDICT_OFFSET, ZLIB_FLG_FDICT_BITS);
    $flg = bitmask($flg, $level,     ZLIB_FLG_LEVEL_OFFSET, ZLIB_FLG_LEVEL_BITS);

    # FCHECK bits make (CMF * 256 + FLG) a multiple of 31 (RFC 1950).
    my $fcheck = 31 - ($cmf * 256 + $flg) % 31;
    $flg = bitmask($flg, $fcheck, ZLIB_FLG_FCHECK_OFFSET, ZLIB_FLG_FCHECK_BITS);

    my $hdr = pack("CC", $cmf, $flg);
    $hdr .= pack("N", $fdict_adler) if $have_dict;

    return $hdr;
}
# Build the two-byte RFC 1950 header (CMF/FLG) for this stream.  The
# level flag advertised in FLG is derived from the requested
# compression level and strategy, mirroring the classification zlib
# itself performs.
sub mkHeader
{
    my $self = shift ;
    my $param = shift ;

    my $level = $param->getValue('level');
    my $strategy = $param->getValue('strategy');

    my $lflag ;
    # Z_DEFAULT_COMPRESSION is a sentinel; zlib maps it to level 6.
    $level = 6
        if $level == Z_DEFAULT_COMPRESSION ;

    if (ZLIB_VERNUM >= 0x1210)
    {
        # zlib 1.2.1+ classification of the advertised level flag.
        if ($strategy >= Z_HUFFMAN_ONLY || $level < 2)
        { $lflag = ZLIB_FLG_LEVEL_FASTEST }
        elsif ($level < 6)
        { $lflag = ZLIB_FLG_LEVEL_FAST }
        elsif ($level == 6)
        { $lflag = ZLIB_FLG_LEVEL_DEFAULT }
        else
        { $lflag = ZLIB_FLG_LEVEL_SLOWEST }
    }
    else
    {
        # Older zlib: derive the 2-bit flag directly from the level.
        $lflag = ($level - 1) >> 1 ;
        $lflag = 3 if $lflag > 3 ;
    }

    #my $wbits = (MAX_WBITS - 8) << 4 ;
    # CINFO of 7 advertises the maximum 32 KB window.
    my $wbits = 7;
    mkDeflateHdr(ZLIB_CMF_CM_DEFLATED, $wbits, $lflag);
}
# Validate/adjust constructor parameters.  RFC 1950 streams always end
# with an Adler-32 checksum, so force it on regardless of caller input.
sub ckParams
{
    my ($self, $got) = @_;

    $got->setValue('adler32' => 1);

    return 1;
}
# RFC 1950 trailer: the Adler-32 checksum of the uncompressed data as a
# big-endian 32-bit value.  Note *$self: the object is a tied glob, so
# instance data lives in the glob's hash.
sub mkTrailer
{
    my $self = shift ;
    return pack("N", *$self->{Compress}->adler32()) ;
}
# RFC 1950 has no container-level trailer beyond the per-stream
# Adler-32 written by mkTrailer, so there is nothing to append.
sub mkFinalTrailer
{
    return q{};
}
#sub newHeader
#{
# my $self = shift ;
# return *$self->{Header};
#}
# Extra constructor parameters accepted by this class: the standard
# zlib tuning options (level, strategy, ...) from the base class.
sub getExtraParams
{
    my ($self) = @_;
    return $self->getZlibParams();
}
# Name the class able to uncompress streams produced here, together
# with a reference to that class's error variable.
sub getInverseClass
{
    my @inverse = ('IO::Uncompress::Inflate',
                   \$IO::Uncompress::Inflate::InflateError);
    return @inverse;
}
# Deliberate no-op: unlike gzip, an RFC 1950 stream carries no file
# metadata (name, mtime, ...), so there is nothing to collect from the
# input file.  The arguments are unpacked only to document the hook's
# expected signature.
sub getFileInfo
{
    my $self = shift ;
    my $params = shift;
    my $file = shift ;
}
1;
__END__
=head1 NAME
IO::Compress::Deflate - Write RFC 1950 files/buffers
=head1 SYNOPSIS
use IO::Compress::Deflate qw(deflate $DeflateError) ;
my $status = deflate $input => $output [,OPTS]
or die "deflate failed: $DeflateError\n";
my $z = new IO::Compress::Deflate $output [,OPTS]
or die "deflate failed: $DeflateError\n";
$z->print($string);
$z->printf($format, $string);
$z->write($string);
$z->syswrite($string [, $length, $offset]);
$z->flush();
$z->tell();
$z->eof();
$z->seek($position, $whence);
$z->binmode();
$z->fileno();
$z->opened();
$z->autoflush();
$z->input_line_number();
$z->newStream( [OPTS] );
$z->deflateParams();
$z->close() ;
$DeflateError ;
# IO::File mode
print $z $string;
printf $z $format, $string;
tell $z
eof $z
seek $z, $position, $whence
binmode $z
fileno $z
close $z ;
=head1 DESCRIPTION
This module provides a Perl interface that allows writing compressed
data to files or buffer as defined in RFC 1950.
For reading RFC 1950 files/buffers, see the companion module
L<IO::Uncompress::Inflate|IO::Uncompress::Inflate>.
=head1 Functional Interface
A top-level function, C<deflate>, is provided to carry out
"one-shot" compression between buffers and/or files. For finer
control over the compression process, see the L</"OO Interface">
section.
use IO::Compress::Deflate qw(deflate $DeflateError) ;
deflate $input_filename_or_reference => $output_filename_or_reference [,OPTS]
or die "deflate failed: $DeflateError\n";
The functional interface needs Perl5.005 or better.
=head2 deflate $input_filename_or_reference => $output_filename_or_reference [, OPTS]
C<deflate> expects at least two parameters,
C<$input_filename_or_reference> and C<$output_filename_or_reference>.
=head3 The C<$input_filename_or_reference> parameter
The parameter, C<$input_filename_or_reference>, is used to define the
source of the uncompressed data.
It can take one of the following forms:
=over 5
=item A filename
If the C<$input_filename_or_reference> parameter is a simple scalar, it is
assumed to be a filename. This file will be opened for reading and the
input data will be read from it.
=item A filehandle
If the C<$input_filename_or_reference> parameter is a filehandle, the input
data will be read from it. The string '-' can be used as an alias for
standard input.
=item A scalar reference
If C<$input_filename_or_reference> is a scalar reference, the input data
will be read from C<$$input_filename_or_reference>.
=item An array reference
If C<$input_filename_or_reference> is an array reference, each element in
the array must be a filename.
The input data will be read from each file in turn.
The complete array will be walked to ensure that it only
contains valid filenames before any data is compressed.
=item An Input FileGlob string
If C<$input_filename_or_reference> is a string that is delimited by the
characters "<" and ">" C<deflate> will assume that it is an
I<input fileglob string>. The input is the list of files that match the
fileglob.
See L<File::GlobMapper|File::GlobMapper> for more details.
=back
If the C<$input_filename_or_reference> parameter is any other type,
C<undef> will be returned.
=head3 The C<$output_filename_or_reference> parameter
The parameter C<$output_filename_or_reference> is used to control the
destination of the compressed data. This parameter can take one of
these forms.
=over 5
=item A filename
If the C<$output_filename_or_reference> parameter is a simple scalar, it is
assumed to be a filename. This file will be opened for writing and the
compressed data will be written to it.
=item A filehandle
If the C<$output_filename_or_reference> parameter is a filehandle, the
compressed data will be written to it. The string '-' can be used as
an alias for standard output.
=item A scalar reference
If C<$output_filename_or_reference> is a scalar reference, the
compressed data will be stored in C<$$output_filename_or_reference>.
=item An Array Reference
If C<$output_filename_or_reference> is an array reference,
the compressed data will be pushed onto the array.
=item An Output FileGlob
If C<$output_filename_or_reference> is a string that is delimited by the
characters "<" and ">" C<deflate> will assume that it is an
I<output fileglob string>. The output is the list of files that match the
fileglob.
When C<$output_filename_or_reference> is a fileglob string,
C<$input_filename_or_reference> must also be a fileglob string. Anything
else is an error.
See L<File::GlobMapper|File::GlobMapper> for more details.
=back
If the C<$output_filename_or_reference> parameter is any other type,
C<undef> will be returned.
=head2 Notes
When C<$input_filename_or_reference> maps to multiple files/buffers and
C<$output_filename_or_reference> is a single
file/buffer the input files/buffers will be stored
in C<$output_filename_or_reference> as a concatenated series of compressed data streams.
=head2 Optional Parameters
Unless specified below, the optional parameters for C<deflate>,
C<OPTS>, are the same as those used with the OO interface defined in the
L</"Constructor Options"> section below.
=over 5
=item C<< AutoClose => 0|1 >>
This option applies to any input or output data streams to
C<deflate> that are filehandles.
If C<AutoClose> is specified, and the value is true, it will result in all
input and/or output filehandles being closed once C<deflate> has
completed.
This parameter defaults to 0.
=item C<< BinModeIn => 0|1 >>
When reading from a file or filehandle, set C<binmode> before reading.
Defaults to 0.
=item C<< Append => 0|1 >>
The behaviour of this option is dependent on the type of output data
stream.
=over 5
=item * A Buffer
If C<Append> is enabled, all compressed data will be append to the end of
the output buffer. Otherwise the output buffer will be cleared before any
compressed data is written to it.
=item * A Filename
If C<Append> is enabled, the file will be opened in append mode. Otherwise
the contents of the file, if any, will be truncated before any compressed
data is written to it.
=item * A Filehandle
If C<Append> is enabled, the filehandle will be positioned to the end of
the file via a call to C<seek> before any compressed data is
written to it. Otherwise the file pointer will not be moved.
=back
When C<Append> is specified, and set to true, it will I<append> all compressed
data to the output data stream.
So when the output is a filehandle it will carry out a seek to the eof
before writing any compressed data. If the output is a filename, it will be opened for
appending. If the output is a buffer, all compressed data will be
appended to the existing buffer.
Conversely when C<Append> is not specified, or it is present and is set to
false, it will operate as follows.
When the output is a filename, it will truncate the contents of the file
before writing any compressed data. If the output is a filehandle
its position will not be changed. If the output is a buffer, it will be
wiped before any compressed data is output.
Defaults to 0.
=back
=head2 Examples
To read the contents of the file C<file1.txt> and write the compressed
data to the file C<file1.txt.1950>.
use strict ;
use warnings ;
use IO::Compress::Deflate qw(deflate $DeflateError) ;
my $input = "file1.txt";
deflate $input => "$input.1950"
or die "deflate failed: $DeflateError\n";
To read from an existing Perl filehandle, C<$input>, and write the
compressed data to a buffer, C<$buffer>.
use strict ;
use warnings ;
use IO::Compress::Deflate qw(deflate $DeflateError) ;
use IO::File ;
my $input = new IO::File "<file1.txt"
or die "Cannot open 'file1.txt': $!\n" ;
my $buffer ;
deflate $input => \$buffer
or die "deflate failed: $DeflateError\n";
To compress all files in the directory "/my/home" that match "*.txt"
and store the compressed data in the same directory
use strict ;
use warnings ;
use IO::Compress::Deflate qw(deflate $DeflateError) ;
deflate '</my/home/*.txt>' => '<*.1950>'
or die "deflate failed: $DeflateError\n";
and if you want to compress each file one at a time, this will do the trick
use strict ;
use warnings ;
use IO::Compress::Deflate qw(deflate $DeflateError) ;
for my $input ( glob "/my/home/*.txt" )
{
my $output = "$input.1950" ;
deflate $input => $output
or die "Error compressing '$input': $DeflateError\n";
}
=head1 OO Interface
=head2 Constructor
The format of the constructor for C<IO::Compress::Deflate> is shown below
my $z = new IO::Compress::Deflate $output [,OPTS]
or die "IO::Compress::Deflate failed: $DeflateError\n";
It returns an C<IO::Compress::Deflate> object on success and undef on failure.
The variable C<$DeflateError> will contain an error message on failure.
If you are running Perl 5.005 or better the object, C<$z>, returned from
IO::Compress::Deflate can be used exactly like an L<IO::File|IO::File> filehandle.
This means that all normal output file operations can be carried out
with C<$z>.
For example, to write to a compressed file/buffer you can use either of
these forms
$z->print("hello world\n");
print $z "hello world\n";
The mandatory parameter C<$output> is used to control the destination
of the compressed data. This parameter can take one of these forms.
=over 5
=item A filename
If the C<$output> parameter is a simple scalar, it is assumed to be a
filename. This file will be opened for writing and the compressed data
will be written to it.
=item A filehandle
If the C<$output> parameter is a filehandle, the compressed data will be
written to it.
The string '-' can be used as an alias for standard output.
=item A scalar reference
If C<$output> is a scalar reference, the compressed data will be stored
in C<$$output>.
=back
If the C<$output> parameter is any other type, C<IO::Compress::Deflate>::new will
return undef.
=head2 Constructor Options
C<OPTS> is any combination of the following options:
=over 5
=item C<< AutoClose => 0|1 >>
This option is only valid when the C<$output> parameter is a filehandle. If
specified, and the value is true, it will result in the C<$output> being
closed once either the C<close> method is called or the C<IO::Compress::Deflate>
object is destroyed.
This parameter defaults to 0.
=item C<< Append => 0|1 >>
Opens C<$output> in append mode.
The behaviour of this option is dependent on the type of C<$output>.
=over 5
=item * A Buffer
If C<$output> is a buffer and C<Append> is enabled, all compressed data
will be append to the end of C<$output>. Otherwise C<$output> will be
cleared before any data is written to it.
=item * A Filename
If C<$output> is a filename and C<Append> is enabled, the file will be
opened in append mode. Otherwise the contents of the file, if any, will be
truncated before any compressed data is written to it.
=item * A Filehandle
If C<$output> is a filehandle, the file pointer will be positioned to the
end of the file via a call to C<seek> before any compressed data is written
to it. Otherwise the file pointer will not be moved.
=back
This parameter defaults to 0.
=item C<< Merge => 0|1 >>
This option is used to compress input data and append it to an existing
compressed data stream in C<$output>. The end result is a single compressed
data stream stored in C<$output>.
It is a fatal error to attempt to use this option when C<$output> is not an
RFC 1950 data stream.
There are a number of other limitations with the C<Merge> option:
=over 5
=item 1
This module needs to have been built with zlib 1.2.1 or better to work. A
fatal error will be thrown if C<Merge> is used with an older version of
zlib.
=item 2
If C<$output> is a file or a filehandle, it must be seekable.
=back
This parameter defaults to 0.
=item -Level
Defines the compression level used by zlib. The value should either be
a number between 0 and 9 (0 means no compression and 9 is maximum
compression), or one of the symbolic constants defined below.
Z_NO_COMPRESSION
Z_BEST_SPEED
Z_BEST_COMPRESSION
Z_DEFAULT_COMPRESSION
The default is Z_DEFAULT_COMPRESSION.
Note, these constants are not imported by C<IO::Compress::Deflate> by default.
use IO::Compress::Deflate qw(:strategy);
use IO::Compress::Deflate qw(:constants);
use IO::Compress::Deflate qw(:all);
=item -Strategy
Defines the strategy used to tune the compression. Use one of the symbolic
constants defined below.
Z_FILTERED
Z_HUFFMAN_ONLY
Z_RLE
Z_FIXED
Z_DEFAULT_STRATEGY
The default is Z_DEFAULT_STRATEGY.
=item C<< Strict => 0|1 >>
This is a placeholder option.
=back
=head2 Examples
TODO
=head1 Methods
=head2 print
Usage is
$z->print($data)
print $z $data
Compresses and outputs the contents of the C<$data> parameter. This
has the same behaviour as the C<print> built-in.
Returns true if successful.
=head2 printf
Usage is
$z->printf($format, $data)
printf $z $format, $data
Compresses and outputs the contents of the C<$data> parameter.
Returns true if successful.
=head2 syswrite
Usage is
$z->syswrite $data
$z->syswrite $data, $length
$z->syswrite $data, $length, $offset
Compresses and outputs the contents of the C<$data> parameter.
Returns the number of uncompressed bytes written, or C<undef> if
unsuccessful.
=head2 write
Usage is
$z->write $data
$z->write $data, $length
$z->write $data, $length, $offset
Compresses and outputs the contents of the C<$data> parameter.
Returns the number of uncompressed bytes written, or C<undef> if
unsuccessful.
=head2 flush
Usage is
$z->flush;
$z->flush($flush_type);
Flushes any pending compressed data to the output file/buffer.
This method takes an optional parameter, C<$flush_type>, that controls
how the flushing will be carried out. By default the C<$flush_type>
used is C<Z_FINISH>. Other valid values for C<$flush_type> are
C<Z_NO_FLUSH>, C<Z_SYNC_FLUSH>, C<Z_FULL_FLUSH> and C<Z_BLOCK>. It is
strongly recommended that you only set the C<flush_type> parameter if
you fully understand the implications of what it does - overuse of C<flush>
can seriously degrade the level of compression achieved. See the C<zlib>
documentation for details.
Returns true on success.
=head2 tell
Usage is
$z->tell()
tell $z
Returns the uncompressed file offset.
=head2 eof
Usage is
$z->eof();
eof($z);
Returns true if the C<close> method has been called.
=head2 seek
$z->seek($position, $whence);
seek($z, $position, $whence);
Provides a sub-set of the C<seek> functionality, with the restriction
that it is only legal to seek forward in the output file/buffer.
It is a fatal error to attempt to seek backward.
Empty parts of the file/buffer will have NULL (0x00) bytes written to them.
The C<$whence> parameter takes one of the usual values, namely SEEK_SET,
SEEK_CUR or SEEK_END.
Returns 1 on success, 0 on failure.
=head2 binmode
Usage is
$z->binmode
binmode $z ;
This is a noop provided for completeness.
=head2 opened
$z->opened()
Returns true if the object currently refers to a opened file/buffer.
=head2 autoflush
my $prev = $z->autoflush()
my $prev = $z->autoflush(EXPR)
If the C<$z> object is associated with a file or a filehandle, this method
returns the current autoflush setting for the underlying filehandle. If
C<EXPR> is present, and is non-zero, it will enable flushing after every
write/print operation.
If C<$z> is associated with a buffer, this method has no effect and always
returns C<undef>.
B<Note> that the special variable C<$|> B<cannot> be used to set or
retrieve the autoflush setting.
=head2 input_line_number
$z->input_line_number()
$z->input_line_number(EXPR)
This method always returns C<undef> when compressing.
=head2 fileno
$z->fileno()
fileno($z)
If the C<$z> object is associated with a file or a filehandle, C<fileno>
will return the underlying file descriptor. Once the C<close> method is
called C<fileno> will return C<undef>.
If the C<$z> object is associated with a buffer, this method will return
C<undef>.
=head2 close
$z->close() ;
close $z ;
Flushes any pending compressed data and then closes the output file/buffer.
For most versions of Perl this method will be automatically invoked if
the IO::Compress::Deflate object is destroyed (either explicitly or by the
variable with the reference to the object going out of scope). The
exceptions are Perl versions 5.005 through 5.00504 and 5.8.0. In
these cases, the C<close> method will be called automatically, but
not until global destruction of all live objects when the program is
terminating.
Therefore, if you want your scripts to be able to run on all versions
of Perl, you should call C<close> explicitly and not rely on automatic
closing.
Returns true on success, otherwise 0.
If the C<AutoClose> option has been enabled when the IO::Compress::Deflate
object was created, and the object is associated with a file, the
underlying file will also be closed.
=head2 newStream([OPTS])
Usage is
$z->newStream( [OPTS] )
Closes the current compressed data stream and starts a new one.
OPTS consists of any of the options that are available when creating
the C<$z> object.
See the L</"Constructor Options"> section for more details.
=head2 deflateParams
Usage is
$z->deflateParams
TODO
=head1 Importing
A number of symbolic constants are required by some methods in
C<IO::Compress::Deflate>. None are imported by default.
=over 5
=item :all
Imports C<deflate>, C<$DeflateError> and all symbolic
constants that can be used by C<IO::Compress::Deflate>. Same as doing this
use IO::Compress::Deflate qw(deflate $DeflateError :constants) ;
=item :constants
Import all symbolic constants. Same as doing this
use IO::Compress::Deflate qw(:flush :level :strategy) ;
=item :flush
These symbolic constants are used by the C<flush> method.
Z_NO_FLUSH
Z_PARTIAL_FLUSH
Z_SYNC_FLUSH
Z_FULL_FLUSH
Z_FINISH
Z_BLOCK
=item :level
These symbolic constants are used by the C<Level> option in the constructor.
Z_NO_COMPRESSION
Z_BEST_SPEED
Z_BEST_COMPRESSION
Z_DEFAULT_COMPRESSION
=item :strategy
These symbolic constants are used by the C<Strategy> option in the constructor.
Z_FILTERED
Z_HUFFMAN_ONLY
Z_RLE
Z_FIXED
Z_DEFAULT_STRATEGY
=back
=head1 EXAMPLES
=head2 Apache::GZip Revisited
See L<IO::Compress::FAQ|IO::Compress::FAQ/"Apache::GZip Revisited">
=head2 Working with Net::FTP
See L<IO::Compress::FAQ|IO::Compress::FAQ/"Compressed files and Net::FTP">
=head1 SEE ALSO
L<Compress::Zlib>, L<IO::Compress::Gzip>, L<IO::Uncompress::Gunzip>, L<IO::Uncompress::Inflate>, L<IO::Compress::RawDeflate>, L<IO::Uncompress::RawInflate>, L<IO::Compress::Bzip2>, L<IO::Uncompress::Bunzip2>, L<IO::Compress::Lzma>, L<IO::Uncompress::UnLzma>, L<IO::Compress::Xz>, L<IO::Uncompress::UnXz>, L<IO::Compress::Lzop>, L<IO::Uncompress::UnLzop>, L<IO::Compress::Lzf>, L<IO::Uncompress::UnLzf>, L<IO::Uncompress::AnyInflate>, L<IO::Uncompress::AnyUncompress>
L<IO::Compress::FAQ|IO::Compress::FAQ>
L<File::GlobMapper|File::GlobMapper>, L<Archive::Zip|Archive::Zip>,
L<Archive::Tar|Archive::Tar>,
L<IO::Zlib|IO::Zlib>
For RFC 1950, 1951 and 1952 see
F<http://www.faqs.org/rfcs/rfc1950.html>,
F<http://www.faqs.org/rfcs/rfc1951.html> and
F<http://www.faqs.org/rfcs/rfc1952.html>
The I<zlib> compression library was written by Jean-loup Gailly
F<gzip@prep.ai.mit.edu> and Mark Adler F<madler@alumni.caltech.edu>.
The primary site for the I<zlib> compression library is
F<http://www.zlib.org>.
The primary site for gzip is F<http://www.gzip.org>.
=head1 AUTHOR
This module was written by Paul Marquess, F<pmqs@cpan.org>.
=head1 MODIFICATION HISTORY
See the Changes file.
=head1 COPYRIGHT AND LICENSE
Copyright (c) 2005-2015 Paul Marquess. All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
| rosiro/wasarabi | local/lib/perl5/x86_64-linux-thread-multi/IO/Compress/Deflate.pm | Perl | mit | 23,640 |
#!/usr/bin/env perl
########################################################################
# Authors: Christopher Henry, Scott Devoid, Paul Frybarger
# Contact email: chenry@mcs.anl.gov
# Development location: Mathematics and Computer Science Division, Argonne National Lab
########################################################################
use strict;
use warnings;
use Bio::KBase::workspace::ScriptHelpers qw(get_ws_client workspace workspaceURL parseObjectMeta parseWorkspaceMeta printObjectMeta printObjectInfo);
use Bio::KBase::fbaModelServices::ScriptHelpers qw(load_table fbaws printJobData get_fba_client runFBACommand universalFBAScriptCode );
#Defining globals describing behavior
# Mandatory positional command-line argument(s).
my $primaryArgs = ["Model"];
# Name of the job queued on the fbaModelServices server.
my $servercommand = "queue_gapfill_model";
my $script = "fba-gapfill";
# Maps local option names to the parameter names expected by the
# gapfilling service call (built by universalFBAScriptCode below).
my $translation = {
Model => "model",
modelws => "model_workspace",
modelout => "out_model",
outputid => "out_model",
workspace => "workspace",
intsol => "integrate_solution",
timepersol => "timePerSolution",
timelimit => "totalTimeLimit",
iterativegf => "completeGapfill",
solver => "solver",
sourcemdl => "source_model",
sourcemdlws => "source_model_ws",
booleanexp => "booleanexp",
exp_raw_data => "exp_raw_data",
expseries => "expseries",
expseriesws => "expseriesws",
expsample => "expsample",
expthreshold => "expression_threshold_percentile",
discretevar => "use_discrete_variables",
alpha => "alpha",
omega => "omega",
kappa => "kappa",
rxnsensitivity => "sensitivity_analysis",
numsol => "num_solutions",
nomediahyp => "nomediahyp",
nobiomasshyp => "nobiomasshyp",
nogprhyp => "nogprhyp",
nopathwayhyp => "nopathwayhyp",
allowunbalanced => "allowunbalanced",
drainpen => "drainpen",
directionpen => "directionpen",
nostructpen => "nostructpen",
unfavorablepen => "unfavorablepen",
nodeltagpen => "nodeltagpen",
biomasstranspen => "biomasstranspen",
singletranspen => "singletranspen",
transpen=> "transpen",
probrxn => "probabilisticReactions",
probanno => "probabilisticAnnotation",
probannows => "probabilisticAnnotation_workspace",
notes => "notes",
};
# Maps local option names to keys of the nested FBA "formulation" hash;
# consumed by the copy loop further down (options not given are skipped).
my $fbaTranslation = {
media => "media",
mediaws => "media_workspace",
objfraction => "objfraction",
allrev => "allreversible",
maximize => "maximizeObjective",
defaultmaxflux => "defaultmaxflux",
defaultminuptake => "defaultminuptake",
defaultmaxuptake => "defaultmaxuptake",
simplethermo => "simplethermoconst",
thermoconst => "thermoconst",
nothermoerror => "nothermoerror",
minthermoerror => "minthermoerror",
addlcpd => "additionalcpds"
};
#Defining usage and options
# Option specs in [ 'spec', 'help text', { defaults } ] form. Option names
# are mapped onto service parameters via $translation / $fbaTranslation.
# (Fixed user-visible help-text typos: "Donot" -> "Do not", "toal" -> "total".)
my $specs = [
[ 'modelout|outputid:s', 'ID for output model in workspace' ],
[ 'modelws=s', 'Workspace of model to gapfill', { "default" => fbaws() } ],
[ 'sourcemdl=s', 'Source model to gapfill from' ],
[ 'sourcemdlws=s', 'Workspace of source model to gapfill from', { "default" => fbaws() } ],
[ 'intsol', 'Automatically integrate solution', { "default" => 0 } ],
[ 'booleanexp:s', 'Constrain modeling with on/off expression data of specified type. Either "absolute" or "probability"'],
[ 'expseries:s', 'Expression matrix object to use in transcriptomic gapfilling'],
[ 'expseriesws:s', 'Workspace with expression matrix', { "default" => fbaws() } ],
[ 'expsample:s', 'ID of expression condition to fit gapfilling or file with expression data' ],
[ 'alpha:s', 'Constant denoting fraction of objective to use for activation', { "default" => 0 } ],
[ 'omega:s', 'Constant denoting fraction of objective to use for max objective', { "default" => 0 } ],
[ 'kappa:s', 'Tolerance to classify genes as unknown, not on or off. In [0,0.5]', {"default" => 0.1}],
[ 'expthreshold:s', 'Set threshold percentile for considering genes on or off from expression' ],
[ 'discretevar', 'Set this flag to use discrete variables in gapfilling' ],
[ 'iterativegf|t', 'Gapfill all inactive reactions', { "default" => 0 } ],
[ 'targrxn|x:s@', 'Gapfill to activate these reactions only (; delimiter)'],
[ 'rxnsensitivity|y', 'Flag indicates if sensitivity analysis of gapfill solutions should run'],
[ 'timepersol:s', 'Maximum time spent per solution' ],
[ 'timelimit:s', 'Maximum total time' ],
[ 'media|m:s', 'Media formulation for FBA (default is complete media)' ],
[ 'mediaws:s', 'Workspace with media formulation' ],
[ 'addlcpd|c:s@', 'Additional compounds (; delimiter)' ],
[ 'numsol:i', 'Number of solutions desired', {"default" => 1} ],
[ 'nomediahyp', 'Do not search for media hypotheses', {"default" => 0} ],
[ 'nobiomasshyp', 'Do not search for biomass hypotheses', {"default" => 1} ],
[ 'nogprhyp', 'Do not search for gpr hypotheses', {"default" => 0} ],
[ 'nopathwayhyp', 'Do not search for pathway hypotheses', {"default" => 0} ],
[ 'allowunbalanced', 'Allow unbalanced reactions in solutions', {"default" => 0} ],
[ 'activitybonus:s', 'Bonus for activating reactions'],
[ 'drainpen:s', 'Penalty for drain reactions', {"default" => 1} ],
[ 'directionpen:s', 'Penalty for reactions operating in wrong direction', {"default" => 1} ],
[ 'nostructpen:s', 'Penalty for reactions with missing structures', {"default" => 1} ],
[ 'unfavorablepen:s', 'Penalty for unfavorable reactions', {"default" => 1} ],
[ 'nodeltagpen:s', 'Penalty for reactions with no delta G', {"default" => 1} ],
[ 'biomasstranspen:s', 'Penalty for biomass transport reactions', {"default" => 1} ],
[ 'singletranspen:s', 'Penalty for single transport reactions', {"default" => 1} ],
[ 'transpen:s', 'Penalty for transport reactions', {"default" => 1} ],
[ 'probrxn=s', 'ID of probabilistic reaction object' ],
[ 'probanno=s', 'ID of probabilistic annotation object' ],
[ 'probannows:s', 'Workspace with probabilistic annotation or probabilistic reaction', { "default" => fbaws() } ],
[ 'blacklist:s@', 'List of blacklisted reactions (; delimiter)' ],
[ 'guaranteed:s@', 'List of guaranteed reactions (; delimiter)' ],
[ 'allowedcmp:s@', 'List of allowed compartments (; delimiter)' ],
[ 'maximize:s', 'Maximize objective', { "default" => 1 } ],
[ 'objterms:s@', 'Objective terms (; delimiter)' ],
[ 'geneko:s@', 'List of gene KO (; delimiter)' ],
[ 'rxnko:s@', 'List of reaction KO (; delimiter)' ],
[ 'bounds:s@', 'Custom bounds' ],
[ 'constraints:s@', 'Custom constraints' ],
[ 'defaultmaxflux:s', 'Default maximum reaction flux' ],
[ 'defaultminuptake:s', 'Default minimum nutrient uptake' ],
[ 'defaultmaxuptake:s', 'Default maximum nutrient uptake' ],
[ 'uptakelim:s@', 'Atom uptake limits' ],
[ 'simplethermo', 'Use simple thermodynamic constraints' ],
[ 'thermoconst', 'Use full thermodynamic constraints' ],
[ 'nothermoerror', 'No uncertainty in thermodynamic constraints' ],
[ 'minthermoerror', 'Minimize uncertainty in thermodynamic constraints' ],
[ 'objfraction:s', 'Fraction of objective for follow on analysis', { "default" => 0.001 }],
[ 'notes:s', 'Notes for flux balance analysis' ],
[ 'solver:s', 'Solver to use for gapfilling' ],
[ 'workspace|w:s', 'Workspace to save FBA results', { "default" => fbaws() } ],
];
# Parse options and build the base parameter hash for the service call.
my ($opt,$params) = universalFBAScriptCode($specs,$script,$primaryArgs,$translation);
# If a media object was named without a workspace, default to the
# output workspace.
if (!defined($opt->{mediaws}) && defined($opt->{media})) {
	$opt->{mediaws} = $opt->{workspace};
}
# probrxn is derived from probanno server-side, so supplying both is
# ambiguous. (Fixed error-message typo: "calcualted" -> "calculated".)
if (defined($opt->{probanno}) && defined($opt->{probrxn})) {
	die "Attempt to pass probanno and probrxns objects in the same call. This is not allowed because probrxn is calculated from probanno and could cause collisions";
}
# Target reactions arrive as ";"-delimited lists; flatten them.
if (defined($opt->{targrxn})) {
	foreach my $terms (@{$opt->{targrxn}}) {
		my $array = [split(/;/,$terms)];
		push(@{$params->{target_reactions}},@{$array});
	}
}
# When --expsample names an existing file, treat it as a two-column TSV of
# raw expression values (id <TAB> value) instead of a workspace object ID.
if (defined($params->{expsample}) && -e $params->{expsample}) {
    my $table = load_table($params->{expsample}, "\t", 0);
    for my $row (@{$table->{"data"}}) {
        $params->{exp_raw_data}->{$row->[0]} = $row->[1];
    }
    delete $params->{expsample};
}

# Semicolon-delimited reaction lists for the gapfiller.
# (NB: the "gauranteedrxns" spelling matches the service API.)
$params->{blacklistedrxns} = [];
$params->{gauranteedrxns} = [];
if (defined($opt->{blacklist})) {
    for my $chunk (@{$opt->{blacklist}}) {
        push @{$params->{blacklistedrxns}}, split(/;/, $chunk);
    }
}
if (defined($opt->{guaranteed})) {
    for my $chunk (@{$opt->{guaranteed}}) {
        push @{$params->{gauranteedrxns}}, split(/;/, $chunk);
    }
}
# Base FBA formulation: list/hash slots start empty and are filled from
# the command-line options below.
$params->{formulation} = {
    geneko => [],
    rxnko => [],
    bounds => [],
    constraints => [],
    uptakelim => {},
    additionalcpds => [],
};
# Copy any supplied FBA options into the formulation under their
# service-side names.
while (my ($local, $remote) = each %{$fbaTranslation}) {
    next unless defined $opt->{$local};
    $params->{formulation}->{$remote} = $opt->{$local};
}
# Objective terms arrive as ";"-separated lists of "coef:type:id" triples;
# only fully-specified triples are kept.
if (defined($opt->{objterms})) {
    for my $chunk (@{$opt->{objterms}}) {
        for my $term (split /;/, $chunk) {
            my @parts = split /:/, $term;
            push @{$params->{formulation}->{objectiveTerms}}, \@parts
                if defined $parts[2];
        }
    }
}
# Gene and reaction knockouts: flatten ";"-separated lists.
if (defined($opt->{geneko})) {
    for my $entry (@{$opt->{geneko}}) {
        push @{$params->{formulation}->{geneko}}, split(/;/, $entry);
    }
}
if (defined($opt->{rxnko})) {
    for my $entry (@{$opt->{rxnko}}) {
        push @{$params->{formulation}->{rxnko}}, split(/;/, $entry);
    }
}
# NOTE(review): the command-line option is named "addlcpd" (see $specs and
# $fbaTranslation), so $opt->{additionalcpds} is never defined here and this
# branch appears to be dead code; additional compounds reach the formulation
# unsplit via the $fbaTranslation copy loop above. A naive rename to
# $opt->{addlcpd} would push onto the same arrayref being iterated, so the
# intended fix needs care — TODO confirm with maintainers.
if (defined($opt->{additionalcpds})) {
	foreach my $cpd (@{$opt->{additionalcpds}}) {
		push(@{$params->{formulation}->{additionalcpds}},split(/;/,$cpd));
	}
}
# Custom bounds: ";"-separated entries of four ":"-separated fields;
# only fully-specified (4-field) entries are kept.
if (defined($opt->{bounds})) {
    for my $chunk (@{$opt->{bounds}}) {
        for my $bound (split /;/, $chunk) {
            my @fields = split /:/, $bound;
            push @{$params->{formulation}->{bounds}}, \@fields
                if defined $fields[3];
        }
    }
}
# Custom constraints: "rhs;sign;term;term;..." where each term is
# "coef:type:id". Each constraint is stored as [rhs, sign, terms, name]
# with an auto-generated name.
if (defined($opt->{constraints})) {
    my $index = 0;
    for my $spec (@{$opt->{constraints}}) {
        my @fields = split /;/, $spec;
        my $rhs = shift @fields;
        my $sign = shift @fields;
        my @terms;
        for my $term (@fields) {
            my @parts = split /:/, $term;
            push @terms, \@parts if defined $parts[2];
        }
        push @{$params->{formulation}->{constraints}},
            [$rhs, $sign, \@terms, "Constraint " . $index];
        $index++;
    }
}
# Atom uptake limits: ";"-separated "atom:limit" pairs; pairs without a
# limit are ignored.
if (defined($opt->{uptakelim})) {
    for my $chunk (@{$opt->{uptakelim}}) {
        for my $entry (split /;/, $chunk) {
            my ($atom, $limit) = split /:/, $entry;
            $params->{formulation}->{uptakelim}->{$atom} = $limit
                if defined $limit;
        }
    }
}
# Biomass hypotheses are force-disabled for gapfilling runs (overrides any
# --nobiomasshyp value copied above).
$params->{formulation}->{nobiomasshyp} = 1;
# Submit the gapfilling job and report the outcome to the user.
my $output = runFBACommand($params,"gapfill_model",$opt);
if (!defined($output)) {
	print "Gapfilling failed!\n";
} else {
	print "Gapfilling successful!\n";
	printObjectInfo($output);
	print "Run fba-getgapfills or fba-integratesolution to print solution!\n";
}
| kbase/KBaseFBAModeling | scripts/fba-gapfill.pl | Perl | mit | 10,804 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% A Learning Engine for Proposing Hypotheses %
% %
% A L E P H %
% Version 5 (last modified: Sun Mar 11 03:25:37 UTC 2007) %
% %
% This is the source for Aleph written and maintained %
% by Ashwin Srinivasan (ashwin@comlab.ox.ac.uk) %
% %
% %
% It was originally written to run with the Yap Prolog Compiler %
% Yap can be found at: http://sourceforge.net/projects/yap/ %
% Yap must be compiled with -DDEPTH_LIMIT=1 %
% %
% It should also run with SWI Prolog, although performance may be %
% sub-optimal. %
% %
% If you obtain this version of Aleph and have not already done so %
% please subscribe to the Aleph mailing list. You can do this by %
% mailing majordomo@comlab.ox.ac.uk with the following command in the %
% body of the mail message: subscribe aleph %
% %
% Aleph is freely available for academic purposes. %
% If you intend to use it for commercial purposes then %
% please contact Ashwin Srinivasan first. %
% %
% A simple on-line manual is available on the Web at %
% www.comlab.ox.ac.uk/oucl/research/areas/machlearn/Aleph/index.html %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Public interface of the Aleph ILP engine: the induce/1 family of
% learning entry points, saturation/reduction predicates, settings
% management, and the mode/determination declarations users write in
% their input files. The #, * and because operators are used in mode
% declarations and explanations.
:- module(aleph,
	[
	induce/1,
	induce_tree/1,
	induce_max/1,
	induce_cover/1,
	induce_incremental/1,
	induce_clauses/1,
	induce_theory/1,
	induce_modes/1,
	induce_features/1,
	induce_constraints/1,
	sat/1,
	aleph_set/2,
	aleph_setting/2,
	goals_to_list/2,
	clause_to_list/2,
	aleph_subsumes/2,
	aleph_delete/3,
	hypothesis/3,
	hypothesis/4,
	var_types/3,
	show/1,
	rdhyp/1,
	addhyp_i/1,
	sphyp_i/1,
	covers/1,
	coversn/1,
	reduce/1,
	abducible/1,
	bottom/1,
	commutative/1,
	man/1,
	symmetric/1,
	lazy_evaluate/1,
	model/1,
	positive_only/1,
	example_saturated/1,
	addgcws_i/1,
	rmhyp_i/1,
	random/2,
	aleph_random/1,
	(mode)/2,
	modeh/2,
	modeb/2,
	good_clauses/1,
	op(500,fy,#),
	op(500,fy,*),
	op(900,xfy,because)
	]).
/** <module> aleph
# A Learning Engine for Proposing Hypotheses - ALEPH
## Version 5
Aleph is an Inductive Logic Programming system developed by
[Ashwin Srinivasan](https://www.bits-pilani.ac.in/goa/ashwin/profile):
http://www.cs.ox.ac.uk/activities/machlearn/Aleph/
Aleph v.5 was ported to SWI-Prolog by [Fabrizio Riguzzi](http://ml.unife.it/fabrizio-riguzzi/)
and Paolo Niccolò Giubelli.
Aleph is freely available for academic purposes.
If you intend to use it for commercial purposes then
please contact Ashwin Srinivasan first.
@author Ashwin Srinivasan, Fabrizio Riguzzi and Paolo Niccolò Giubelli.
@copyright Ashwin Srinivasan
*/
:- use_module(library(arithmetic)).
:-use_module(library(broadcast)).
:-use_module(library(time)).
% Register inf as an evaluable 0-ary arithmetic function (SWI arithmetic_function/1);
% inf/1 below supplies its value, a large float standing in for infinity.
:- arithmetic_function(inf/0).
inf(1e10).
% Unknown predicates only warn (do not raise), matching classic Aleph behaviour.
:-set_prolog_flag(unknown,warning).
:- dynamic aleph_input_mod/1.
:- meta_predicate induce(:).
:- meta_predicate induce_tree(:).
:- meta_predicate induce_max(:).
:- meta_predicate induce_cover(:).
:- meta_predicate induce_incremental(:).
:- meta_predicate induce_clauses(:).
:- meta_predicate induce_theory(:).
:- meta_predicate induce_modes(:).
:- meta_predicate induce_features(:).
:- meta_predicate induce_constraints(:).
:- meta_predicate sat(:).
:- meta_predicate aleph_set(:,+).
:- meta_predicate aleph_setting(:,+).
:- meta_predicate noset(:).
:- meta_predicate model(:).
:- meta_predicate mode(:,+).
:- meta_predicate modeh(:,+).
:- meta_predicate modeb(:,+).
:- meta_predicate show(:).
:- meta_predicate hypothesis(:,+,-).
:- meta_predicate rdhyp(:).
:- meta_predicate addhyp_i(:).
:- meta_predicate sphyp_i(:).
:- meta_predicate covers(:).
:- meta_predicate coversn(:).
:- meta_predicate reduce(:).
:- meta_predicate abducible(:).
:- meta_predicate bottom(:).
:- meta_predicate commutative(:).
:- meta_predicate symmetric(:).
:- meta_predicate lazy_evaluate(:).
:- meta_predicate positive_only(:).
:- meta_predicate example_saturated(:).
:- meta_predicate addgcws_i(:).
:- meta_predicate rmhyp_i(:).
:- meta_predicate good_clauses(:).
/* INIT ALEPH */
% Load-time hook: a `:- aleph` directive in a user file marks that file's
% module as an Aleph input module and initialises all Aleph state for it.
system:term_expansion((:- aleph), []) :-
prolog_load_context(module, M),
assert(aleph_input_mod(M)),!,
initialize(M).
% initialize(+M)
% Set up module M as an Aleph workspace: declare the dynamic state
% predicates, install the zero-argument convenience wrappers
% (induce, reduce, ...) and reset all Aleph bookkeeping.
initialize(M):-
% nl, nl,
% write('A L E P H'), nl,
% aleph:aleph_version(Version), write('Version '), write(Version), nl,
% aleph:aleph_version_date(Date), write('Last modified: '), write(Date), nl, nl,
% aleph:aleph_manual(Man),
% write('Manual: '),
% write(Man), nl, nl,
aleph:aleph_version(V), aleph:set(version,V,M), aleph:reset(M),
%findall(local_setting(P,V),default_setting_sc(P,V),L),
%assert_all(L,M,_),
% flags used by the term_expansion hooks while reading example sections
M:dynamic((pos_on/0,neg_on/0,bg_on/0,incneg/1,incpos/1,in/1,bgc/1,bg/1)),
% the '$aleph_*' predicates hold all of Aleph's internal search/saturation state
M:dynamic(('$aleph_feature'/2,
'$aleph_global'/2,
'$aleph_good'/3,
'$aleph_local'/2,
'$aleph_sat'/2,
'$aleph_sat_atom'/2,
'$aleph_sat_ovars'/2,
'$aleph_sat_ivars'/2,
'$aleph_sat_varsequiv'/2,
'$aleph_sat_varscopy'/3,
'$aleph_sat_terms'/4,
'$aleph_sat_vars'/4,
'$aleph_sat_litinfo'/6,
'$aleph_search_cache'/1,
'$aleph_search_prunecache'/1,
'$aleph_search'/2,
'$aleph_search_seen'/2,
'$aleph_search_expansion'/4,
'$aleph_search_gain'/4,
'$aleph_search_node'/8,
'$aleph_link_vars'/2,
'$aleph_has_vars'/3,
'$aleph_has_ovar'/4,
'$aleph_has_ivar'/4,
'$aleph_determination'/2,
'$aleph_search_seen'/2)),
% user-definable hooks and the example store
M:dynamic((prune/1,cost/3,example/3,aleph_portray/1)),
style_check(-discontiguous),
aleph:init(swi,M),
% install 0-ary wrappers in M so classic Aleph goals work unchanged
assert(M:(reduce:-reduce(_))),
assert(M:(induce_constraints:-induce_constraints(_))),
assert(M:(induce_modes:-induce_modes(_))),
assert(M:(induce_incremental:-induce_incremental(_))),
assert(M:(induce_clauses:-induce_clauses(_))),
assert(M:(induce:-induce(_))),
assert(M:(induce_tree:-induce_tree(_))),
assert(M:(induce_max:-induce_max(_))),
assert(M:(induce_cover:-induce_cover(_))),
assert(M:(induce_theory:-induce_theory(_))),
assert(M:(induce_features:-induce_features(_))),
assert(M:(rdhyp:-rdhyp(_))),
assert(M:(sphyp:-sphyp_i(_))),
assert(M:(addgcws:-addgcws_i(_))),
assert(M:(rmhyp:-rmhyp_i(_))),
assert(M:(addhyp:-addhyp_i(_))),
assert(M:(covers:-covers(_))),
assert(M:(coversn:-coversn(_))),
aleph:clean_up(M),
retractall(M:example(_,_,_)),
aleph:reset(M).
% Background-knowledge section: between `:- begin_bg` and `:- end_bg`
% every clause is kept verbatim (expanded to itself) while bg_on is set.
system:term_expansion((:- begin_bg), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
assert(M:bg_on).
system:term_expansion(C, C) :-
C\= (:- end_bg),
prolog_load_context(module, M),
aleph_input_mod(M),
M:bg_on,!.
system:term_expansion((:- end_bg), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
retractall(M:bg_on).
%findall(C,M:bgc(C),L),
%retractall(M:bgc(_)),
% (M:bg(BG0)->
% retract(M:bg(BG0)),
% append(BG0,L,BG),
% assert(M:bg(BG))
% ;
% assert_all(L,M,_)
% ).
% Positive-example section: clauses between `:- begin_in_pos` and
% `:- end_in_pos` are consumed (expanded to []) and recorded as pos examples.
system:term_expansion((:- begin_in_pos), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
assert(M:pos_on),
clean_up_examples(pos,M),
asserta(M:'$aleph_global'(size,size(pos,0))).
system:term_expansion(C, []) :-
C\= (:- end_in_pos),
prolog_load_context(module, M),
aleph_input_mod(M),
M:pos_on,!,aleph:record_example(nocheck,pos,C,_,M).
system:term_expansion((:- end_in_pos), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
retractall(M:pos_on).
%findall(C,M:incpos(C),L),
%retractall(M:incpos(_)),
% (M:in(IN0)->
% retract(M:in(IN0)),%
%
% append(IN0,L,IN),
% assert(M:in(IN))
% ;
% assert(M:in(L))
% ).
%%%%%%
% Negative-example section: clauses between `:- begin_in_neg` and
% `:- end_in_neg` are consumed and recorded as neg examples.
system:term_expansion((:- begin_in_neg), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
assert(M:neg_on),
aleph:clean_up_examples(neg,M),
asserta(M:'$aleph_global'(size,size(neg,0))).
system:term_expansion(C, []) :-
C\= (:- end_in_neg),
prolog_load_context(module, M),
aleph_input_mod(M),
M:neg_on,!,aleph:record_example(nocheck,neg,C,_,M).
% Directive hooks: mode/modeh/modeb/aleph_set/determination directives in the
% input file are intercepted and dispatched to the module-aware Aleph API.
system:term_expansion(:- mode(A,B), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
aleph:mode(A,B,M).
system:term_expansion(:- modeh(A,B), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
aleph:modeh(A,B,M).
system:term_expansion(:- modeb(A,B), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
aleph:modeb(A,B,M).
system:term_expansion(:- aleph_set(A,B), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
aleph:set(A,B,M).
system:term_expansion(:- determination(A,B), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
aleph:determination(A,B,M).
% Close the negative-example section (counterpart of begin_in_neg above).
system:term_expansion((:- end_in_neg), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
retractall(M:neg_on).
%findall(C,M:incneg(C),L),
%retractall(M:incneg(_)),
%
% (M:in(IN0)->
% retract(M:in(IN0)),
% append(IN0,L,IN),
% assert(M:in(IN))
% ;
% assert(M:in(L))
% ).
% `:- aleph_read_all` is accepted for compatibility and is a no-op here:
% all reading is already done incrementally by the hooks above.
system:term_expansion((:- aleph_read_all), []) :-
prolog_load_context(module, M),
aleph_input_mod(M),!.
% End-of-file hook: once the whole input file is read, run the consistency
% checks, initialise example bookkeeping ('$aleph_global' atoms/atoms_left/
% last_example for pos and neg) and, if unset, the prior distribution.
system:term_expansion(end_of_file, end_of_file) :-
prolog_load_context(module, M),
aleph_input_mod(M),!,
% BUG FIX: this used to be retractall(pita_input_mod(M)) — a leftover from
% the PITA code base this port derives from, so the flag was never cleared.
% The flag asserted at `:- aleph` time is aleph_input_mod/1 (see above).
retractall(aleph_input_mod(M)),
aleph:record_targetpred(M),
aleph:check_recursive_calls(M),
aleph:check_prune_defs(M),
aleph:check_user_search(M),
aleph:check_posonly(M),
aleph:check_auto_refine(M),
aleph:check_abducibles(M),
% added at the end
aleph:reset_counts(M),
asserta(M:'$aleph_global'(last_clause,last_clause(0))),
broadcast(examples(loaded)),
(M:'$aleph_global'(size,size(pos,NP))-> true;NP=0),
(NP > 0 -> ExP = [1-NP]; ExP = []),
asserta(M:'$aleph_global'(atoms,atoms(pos,ExP))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(pos,ExP))),
asserta(M:'$aleph_global'(last_example,last_example(pos,NP))),
(M:'$aleph_global'(size,size(neg,NN))->true;NN=0),
(NN > 0 -> ExN = [1-NN]; ExN = []),
asserta(M:'$aleph_global'(atoms,atoms(neg,ExN))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(neg,ExN))),
asserta(M:'$aleph_global'(last_example,last_example(neg,NN))),
set_lazy_recalls(M),
(setting(prior,_,M) -> true;
normalise_distribution([NP-pos,NN-neg],Prior),
set(prior,Prior,M)
).
% assert_all(+Clauses,+Module,-Refs)
% assertz every clause of Clauses into Module; Refs collects the
% clause references in the same order.
assert_all([],_M,[]).
assert_all([Clause|Clauses],M,[Ref|Refs]):-
assertz(M:Clause,Ref),
assert_all(Clauses,M,Refs).
% assert_all(+Clauses,-Refs)
% As assert_all/3, but asserting into the current context module.
assert_all([],[]).
assert_all([Clause|Clauses],[Ref|Refs]):-
assertz(Clause,Ref),
assert_all(Clauses,Refs).
% print_arr(+List)
% write each element of List; note the nl comes AFTER the recursive call,
% so all elements are written first and the newlines are emitted on the
% way back out (one per element) — preserved as-is.
print_arr([]).
print_arr([H|T]):-
write(H),print_arr(T),nl.
/*
theory_induce(Theory):-
aleph_input_mod(M),
induce,
show(Theory).
*/
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% C O M P I L E R S P E C I F I C
% prolog_type(-Type)
% detect the host Prolog: yap if yap_flag/2 is built in, swi otherwise.
% NOTE(review): with a bound argument prolog_type(swi) succeeds even on
% YAP (clause 1's head does not unify) — callers appear to use it unbound.
prolog_type(yap):-
predicate_property(yap_flag(_,_),built_in), !.
prolog_type(swi).
% init(yap)
% YAP-specific initialisation: define portability shims (aleph_random,
% alarm handling, consult wrappers, aleph_background_predicate) as static
% clauses so the rest of Aleph is host-independent.
init(yap):-
source,
system_predicate(false,false), hide(false),
style_check(single_var),
% yap_flag(profiling,on),
assert_static((aleph_random(X):- X is random)),
(predicate_property(alarm(_,_,_),built_in) ->
% NOTE(review): the head variable X is unused in the body (alarm(0,_,_))
% — looks like legacy Aleph code; confirm against upstream before changing
assert_static((remove_alarm(X):- alarm(0,_,_)));
assert_static(alarm(_,_,_)),
assert_static(remove_alarm(_))),
assert_static((aleph_consult(F):- consult(F))),
assert_static((aleph_reconsult(F):- reconsult(F))),
(predicate_property(thread_local(_),built_in) -> true;
assert_static(thread_local(_))),
assert_static(broadcast(_)),
assert_static((aleph_background_predicate(Lit):-
predicate_property(Lit,P),
((P = static); (P = dynamic); (P = built_in)), !)),
(predicate_property(delete_file(_),built_in) -> true;
assert_static(delete_file(_))).
% init(swi,+M)
% SWI-specific initialisation for workspace module M: depth-limited call
% via call_with_depth_limit/3, plus portability shims (system/1, exists/1,
% aleph_reconsult/1, thread_local/1, delete_file/1) asserted only if missing.
init(swi,M):-
%redefine_system_predicate(false),
style_check(+singleton),
style_check(-discontiguous),
M:dynamic(aleph_false/0),
M:dynamic(example/3),
% fails (rather than succeeds) when the depth limit is exceeded
assert((depth_bound_call(G,L,M):-
call_with_depth_limit(M:G,L,R),
R \= depth_limit_exceeded)),
(predicate_property(numbervars(_,_,_),built_in) -> true;
assert((numbervars(A,B,C):- numbervars(A,'$VAR',B,C)))),
assert((system(X):- shell(X))),
assert((exists(X):- exists_file(X))),
assert((aleph_reconsult(F):- consult(F))),
%assert((aleph_random(X):- I = 1000000, X is float(random(I-1))/float(I))),
(predicate_property(thread_local(_),built_in) -> true;
assert(thread_local(_))),
(predicate_property(delete_file(_),built_in) -> true;
assert(delete_file(_))).
% aleph_background_predicate(+Lit,+M)
% true if Lit is callable background knowledge in M (user-defined or built-in).
aleph_background_predicate(Lit,M):-
predicate_property(M:Lit,P),
((P=interpreted);(P=built_in)), !.
% aleph_consult(+File,+M)
% read all terms from File and assertz them into module M
% (plain fact loading — no term_expansion, no directives executed).
aleph_consult(X,M):- aleph_open(X,read,S), repeat,
read(S,F), (F = end_of_file -> close(S), !;
assertz(M:F),fail).
/**
 * aleph_random(-X:float) is det
 *
 * Returns a random number in [0,1)
 */
aleph_random(X):-
Denom = 1000000,
Numer is random(Denom - 1),
X is float(Numer)/float(Denom).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% A L E P H
% Version/metadata facts queried by initialize/1 and the banner code.
aleph_version(5).
aleph_version_date('Sun Mar 11 03:25:37 UTC 2007').
aleph_manual('http://www.comlab.ox.ac.uk/oucl/groups/machlearn/Aleph/index.html').
:- thread_local aleph_input_mod/1.
:- multifile prune/1.
:- multifile refine/2.
:- multifile cost/3.
:- multifile prove/2.
:- multifile redundant/2.
:- multifile text/2.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% C O N S T R U C T B O T T O M
% get_atoms(+Preds,+Depth,+MaxDepth,+Last,-LastLit)
% layered generation of ground atoms to add to bottom clause
% Preds is list of PName/Arity entries obtained from the determinations
% Depth is current variable-chain depth
% MaxDepth is maximum allowed variable chain depth (i setting)
% Last is last atom number so far
% Lastlit is atom number after all atoms to MaxDepth have been generated
% get_atoms(L,1,Ival,Last1,Last) che diventa
% get_atoms([short/1,...],1,2,1,-LastLit).
% get_atoms(+Preds,+Depth,+MaxDepth,+Last,-LastLit,+M)
% layered generation of ground atoms for the bottom clause (see comments
% above); stops early if the previous layer generated no new terms.
get_atoms([],_,_,Last,Last,_M):- !.
get_atoms(Preds,Depth,MaxDepth,Last,LastLit,M):-
Depth =< MaxDepth,
Depth0 is Depth - 1,
M:'$aleph_sat_terms'(_,Depth0,_,_), % new terms generated ?
!,
get_atoms1(Preds,Depth,MaxDepth,Last,Last1,M),
Depth1 is Depth + 1,
get_atoms(Preds,Depth1,MaxDepth,Last1,LastLit,M).
get_atoms(_,_,_,Last,Last,_M).
% auxiliary predicate used by get_atoms/5
% get_atoms1(+Preds,+Depth,+MaxDepth,+Last,-LastLit,+M)
% for each determination predicate: generate its layer of ground atoms,
% then flatten them into the literal database.
get_atoms1([],_,_,Last,Last,_M).
% get_atoms1([short/1|...],1,2,1,-LastLit,M).
get_atoms1([Pred|Preds],Depth,MaxDepth,Last,LastLit,M):-
gen_layer(Pred,Depth,M),
flatten(Depth,MaxDepth,Last,Last1,M),
get_atoms1(Preds,Depth,MaxDepth,Last1,LastLit,M).
% flatten(+Depth,+MaxDepth,+Last,-LastLit)
% flatten a set of ground atoms by replacing all in/out terms with variables
% constants are wrapped in a special term called aleph_const(...)
% eg suppose p/3 had modes p(+char,+char,#int)
% then p(a,a,3) becomes p(X,X,aleph_const(3))
% ground atoms to be flattened are assumed to be in the i.d.b atoms
% vars and terms are actually integers which are stored in vars/terms databases
% so eg above actually becomes p(1,1,aleph_const(3)).
% where variable 1 stands for term 2 (say) which in turn stands for a
% Depth is current variable-chain depth
% MaxDepth is maximum allowed variable chain depth (i setting)
% Last is last atom number so far
% Lastlit is atom number after ground atoms here have been flattened
% If permute_bottom is set to true, then the order of ground atoms is
% shuffled. The empirical utility of doing this has been investigated by
% P. Schorn in "Random Local Bottom Clause Permutations for Better Search Space
% Exploration in Progol-like ILP Systems.", 16th International Conference on
% ILP (ILP 2006).
% flatten(+Depth,+MaxDepth,+Last,-Last1,+M)
% entry point for flattening (see block comment above): tracks the running
% literal count in '$aleph_local'(flatten_num,_); if no atoms are pending
% the second clause just returns the stored count.
flatten(Depth,MaxDepth,Last,Last1,M):-
retractall(M:'$aleph_local'(flatten_num,_)),
asserta(M:'$aleph_local'(flatten_num,Last)),
M:'$aleph_sat_atom'(_,_),!,
(setting(permute_bottom,Permute,M) -> true; Permute = false),
flatten_atoms(Permute,Depth,MaxDepth,Last1,M).
flatten(_,_,_,Last,M):-
retract(M:'$aleph_local'(flatten_num,Last)), !.
% flatten_atoms(+Permute,+Depth,+MaxDepth,-Last1,+M)
% consume all pending '$aleph_sat_atom'/2 facts and flatten each one.
% First clause (Permute=true): drain atoms up front and process a random
% permutation via a failure-driven loop; the final count is picked up by
% the last clause. Second clause (false): repeat/retract until exhausted.
flatten_atoms(true,Depth,MaxDepth,Last1,M):-
findall(L-Mod,retract(M:'$aleph_sat_atom'(L,Mod)),LitModes),
aleph_rpermute(LitModes,PLitModes),
aleph_member(Lit1-Mode,PLitModes),
retract(M:'$aleph_local'(flatten_num,LastSoFar)),
(Lit1 = not(Lit) -> Negated = true; Lit = Lit1, Negated = false),
flatten_atom(Depth,MaxDepth,Lit,Negated,Mode,LastSoFar,Last1,M),
asserta(M:'$aleph_local'(flatten_num,Last1)),
fail.
flatten_atoms(false,Depth,MaxDepth,Last1,M):-
repeat,
retract(M:'$aleph_sat_atom'(Lit1,Mode)),
retract(M:'$aleph_local'(flatten_num,LastSoFar)),
(Lit1 = not(Lit) -> Negated = true; Lit = Lit1, Negated = false),
flatten_atom(Depth,MaxDepth,Lit,Negated,Mode,LastSoFar,Last1,M),
asserta(M:'$aleph_local'(flatten_num,Last1)),
(M:'$aleph_sat_atom'(_,_) ->
fail;
retract(M:'$aleph_local'(flatten_num,Last1))), !.
flatten_atoms(_,_,_,Last,M):-
retract(M:'$aleph_local'(flatten_num,Last)), !.
% flatten_atom(+Depth,+Depth1,+Lit,+Negated,+Mode,+Last,-Last1)
% update lits database by adding ``flattened atoms''. This involves:
% replacing ground terms at +/- positions in Lit with variables
% and wrapping # positions in Lit within a special term stucture
% Mode contains actual mode and term-place numbers and types for +/-/#
% Last is the last literal number in the lits database at present
% Last1 is the last literal number after the update
flatten_atom(Depth,Depth1,Lit,Negated,Mode,Last,Last1,M):-
arg(3,Mode,O), arg(4,Mode,C),
% register output and constant terms of Lit in the terms/vars databases
integrate_args(Depth,Lit,O,M),
integrate_args(Depth,Lit,C,M),
% only check output-var reachability at the final i-layer
(Depth = Depth1 -> CheckOArgs = true; CheckOArgs = false),
flatten_lits(Lit,CheckOArgs,Depth,Negated,Mode,Last,Last1,M).
% variabilise literals by replacing terms with variables
% if var splitting is on then new equalities are introduced into bottom clause
% if at final i-layer, then literals with o/p args that do not contain at least
% one output var from head are discarded
% flatten_lits(+Lit,+CheckOArgs,+Depth,+Negated,+Mode,+Last,-Last1,+M)
% failure-driven loop over all variabilisations of Lit; the running literal
% number is threaded through '$aleph_local'(flatten_lits,_).
flatten_lits(Lit,CheckOArgs,Depth,Negated,Mode,Last,_,M):-
functor(Lit,Name,Arity),
asserta(M:'$aleph_local'(flatten_lits,Last)),
Depth1 is Depth - 1,
functor(OldFAtom,Name,Arity),
flatten_lit(Lit,Mode,OldFAtom,_,_,M),
functor(FAtom,Name,Arity),
apply_equivs(Depth1,Arity,OldFAtom,FAtom,M),
retract(M:'$aleph_local'(flatten_lits,OldLast)),
(CheckOArgs = true ->
arg(3,Mode,Out),
get_vars(FAtom,Out,OVars),
(in_path(OVars,M) ->
add_new_lit(Depth,FAtom,Mode,OldLast,Negated,NewLast,M);
NewLast = OldLast) ;
add_new_lit(Depth,FAtom,Mode,OldLast,Negated,NewLast,M)),
asserta(M:'$aleph_local'(flatten_lits,NewLast)),
fail.
flatten_lits(_,_,_,_,_,_,Last1,M):-
retract(M:'$aleph_local'(flatten_lits,Last1)).
% flatten_lit(+Lit,+Mode,+FAtom,-IVars,-OVars)
% variabilise Lit as FAtom
% Mode contains actual mode and
% In, Out, Const positions as term-place numbers with types
% replace ground terms with integers denoting variables
% or special terms denoting constants
% variable numbers arising from variable splits are disallowed
% returns Input and Output variable numbers
flatten_lit(Lit,mode(Mode,In,Out,Const),FAtom,IVars,OVars,M):-
functor(Mode,_,Arity),
once(copy_modeterms(Mode,FAtom,Arity)),
flatten_vars(In,Lit,FAtom,IVars,M),
flatten_vars(Out,Lit,FAtom,OVars,M),
flatten_consts(Const,Lit,FAtom).
% flatten_vars(+TPList,+Lit,+FAtom,-Vars):-
% FAtom is Lit with terms-places in TPList replaced by variables
% (variable numbers, looked up via the terms/vars databases; copies
% produced by variable splitting are excluded)
flatten_vars([],_,_,[],_M).
flatten_vars([Pos/Type|Rest],Lit,FAtom,[Var|Vars],M):-
tparg(Pos,Lit,Term),
M:'$aleph_sat_terms'(TNo,_,Term,Type),
M:'$aleph_sat_vars'(Var,TNo,_,_),
\+(M:'$aleph_sat_varscopy'(Var,_,_)),
tparg(Pos,FAtom,Var),
flatten_vars(Rest,Lit,FAtom,Vars,M).
% flatten_consts(+TPList,+Lit,+FAtom)
% For every term-place marked # in the modes, take the ground term found
% at that place in Lit and put it, wrapped in an aleph_const/1 marker,
% at the same place in FAtom.
flatten_consts([],_,_).
flatten_consts([Place/_Type|Places],Lit,FAtom):-
tparg(Place,Lit,Ground),
Wrapped = aleph_const(Ground),
tparg(Place,FAtom,Wrapped),
flatten_consts(Places,Lit,FAtom).
% in_path(+ListOfOutputVars,+M)
% check to avoid generating useless literals in the last i layer:
% succeed unless the head has output vars and none of them intersect OVars.
in_path(OVars,M):-
M:'$aleph_sat'(hovars,Vars), !,
(Vars=[];OVars=[];intersects(Vars,OVars)).
in_path(_,_M).
% update_equivs(+VariableEquivalences,+IDepth,+M)
% update variable equivalences created at a particular i-depth
% is non-empty only if variable splitting is allowed
update_equivs([],_,_M):- !.
update_equivs(Equivs,Depth,M):-
retract(M:'$aleph_sat_varsequiv'(Depth,Eq1)), !,
update_equiv_lists(Equivs,Eq1,Eq2),
asserta(M:'$aleph_sat_varsequiv'(Depth,Eq2)).
update_equivs(Equivs,Depth,M):-
Depth1 is Depth - 1,
get_equivs(Depth1,Eq1,M),
% BUG FIX: update_equiv_lists is defined with arity 3 (see below); the
% original call here passed an extra module argument (arity 4), which
% would raise an existence error whenever this clause was reached.
update_equiv_lists(Equivs,Eq1,Eq2),
asserta(M:'$aleph_sat_varsequiv'(Depth,Eq2)).
% update_equiv_lists(+NewEquivs,+EquivsSoFar,-Equivs)
% merge Var/EquivList pairs into the accumulated equivalence list,
% unioning the equivalence class when Var is already present.
update_equiv_lists([],E,E):- !.
update_equiv_lists([Var/E1|Equivs],ESoFar,E):-
aleph_delete(Var/E2,ESoFar,ELeft), !,
update_list(E1,E2,E3),
update_equiv_lists(Equivs,[Var/E3|ELeft],E).
update_equiv_lists([Equiv|Equivs],ESoFar,E):-
update_equiv_lists(Equivs,[Equiv|ESoFar],E).
% get variable equivalences at a particular depth
% recursively descend to greatest depth below this for which equivs exist
% also returns the database reference of entry
% get_equivs(+Depth,-Equivs,+M): [] once Depth goes negative.
get_equivs(Depth,[],_M):-
Depth < 0, !.
get_equivs(Depth,Equivs,M):-
M:'$aleph_sat_varsequiv'(Depth,Equivs), !.
get_equivs(Depth,E,M):-
Depth1 is Depth - 1,
get_equivs(Depth1,E,M).
% apply equivalences inherited from Depth to a flattened literal
% if no variable splitting, then succeeds only once
% apply_equivs(+Depth,+Arity,+Old,-New,+M)
apply_equivs(Depth,Arity,Old,New,M):-
get_equivs(Depth,Equivs,M),
rename(Arity,Equivs,[],Old,New).
% rename args using list of Var/Equivalences
% rename(+Pos,+Equivs,+Subst0,+Old,-New)
% walk argument positions Pos..1 of Old; an arg with an equivalence class
% is (nondeterministically) replaced by a member of that class, and the
% choice is remembered in the substitution so repeated occurrences agree.
rename(_,[],_,L,L):- !.
rename(0,_,_,_,_):- !.
rename(Pos,Equivs,Subst0,Old,New):-
arg(Pos,Old,OldVar),
aleph_member(OldVar/Equiv,Equivs), !,
aleph_member(NewVar,Equiv),
arg(Pos,New,NewVar),
Pos1 is Pos - 1,
rename(Pos1,Equivs,[OldVar/NewVar|Subst0],Old,New).
rename(Pos,Equivs,Subst0,Old,New):-
arg(Pos,Old,OldVar),
(aleph_member(OldVar/NewVar,Subst0) ->
arg(Pos,New,NewVar);
arg(Pos,New,OldVar)),
Pos1 is Pos - 1,
rename(Pos1,Equivs,Subst0,Old,New).
% add a new literal to lits database
% performs variable splitting if splitvars is set to true
% add_new_lit(+Depth,+FAtom,+Mode,+OldLast,+Negated,-NewLast,+M)
add_new_lit(Depth,FAtom,Mode,OldLast,Negated,NewLast,M):-
arg(1,Mode,M1),
functor(FAtom,Name,Arity),
functor(SplitAtom,Name,Arity),
once(copy_modeterms(M1,SplitAtom,Arity)),
arg(2,Mode,In), arg(3,Mode,Out), arg(4,Mode,Const),
split_vars(Depth,FAtom,In,Out,Const,SplitAtom,IVars,OVars,Equivs,M),
update_equivs(Equivs,Depth,M),
add_lit(OldLast,Negated,SplitAtom,In,Out,IVars,OVars,LitNum,M),
insert_eqs(Equivs,Depth,LitNum,NewLast,M), !.
% modify the literal database: check if performing lazy evaluation
% of bottom clause, and update input and output terms in literal
% add_lit(+Last,+Negated,+FAtom,+In,+Out,+IVars,+OVars,-LitNum,+M)
% first clause: duplicate literal under lazy bottom construction — keep count.
add_lit(Last,Negated,FAtom,I,O,_,_,Last,M):-
setting(construct_bottom,CBot,M),
(CBot = false ; CBot = reduction),
(Negated = true -> Lit = not(FAtom); Lit = FAtom),
M:'$aleph_sat_litinfo'(_,0,Lit,I,O,_), !.
add_lit(Last,Negated,FAtom,In,Out,IVars,OVars,LitNum,M):-
LitNum is Last + 1,
update_iterms(LitNum,IVars,M),
update_oterms(LitNum,OVars,[],Dependents,M),
add_litinfo(LitNum,Negated,FAtom,In,Out,Dependents,M),
assertz(M:'$aleph_sat_ivars'(LitNum,IVars)),
assertz(M:'$aleph_sat_ovars'(LitNum,OVars)), !.
% update lits database after checking that the atom does not exist
% used during updates of lit database by lazy evaluation
% update_lit(?LitNum,+Negated,+FAtom,+I,+O,+D,+M)
update_lit(LitNum,true,FAtom,I,O,D,M):-
M:'$aleph_sat_litinfo'(LitNum,0,not(FAtom),I,O,D), !.
update_lit(LitNum,false,FAtom,I,O,D,M):-
M:'$aleph_sat_litinfo'(LitNum,0,FAtom,I,O,D), !.
update_lit(LitNum,Negated,FAtom,I,O,D,M):-
gen_nlitnum(LitNum,M),
add_litinfo(LitNum,Negated,FAtom,I,O,D,M),
get_vars(FAtom,I,IVars),
get_vars(FAtom,O,OVars),
% BUG FIX: the original asserted '$aleph_sat_ivars'(LitNum,K,IVars) and
% '$aleph_sat_ovars'(LitNum,K,OVars) with an unbound singleton K, creating
% arity-3 facts; these predicates are declared dynamic and read everywhere
% else (e.g. add_lit/9 above) with arity 2, so the entries were invisible.
assertz(M:'$aleph_sat_ivars'(LitNum,IVars)),
assertz(M:'$aleph_sat_ovars'(LitNum,OVars)), !.
% add a literal to lits database without checking
% add_litinfo(+LitNum,+Negated,+FAtom,+I,+O,+D,+M)
add_litinfo(LitNum,true,FAtom,I,O,D,M):-
!,
assertz(M:'$aleph_sat_litinfo'(LitNum,0,not(FAtom),I,O,D)).
add_litinfo(LitNum,_,FAtom,I,O,D,M):-
assertz(M:'$aleph_sat_litinfo'(LitNum,0,FAtom,I,O,D)).
% update database with input terms of literal
% update_iterms(+LitNum,+IVars,+M): record LitNum as a consumer of each
% input var and notify the literals that produced those vars.
update_iterms(_,[],_M).
update_iterms(LitNum,[VarNum|Vars],M):-
retract(M:'$aleph_sat_vars'(VarNum,TNo,I,O)),
update(I,LitNum,NewI),
asserta(M:'$aleph_sat_vars'(VarNum,TNo,NewI,O)),
update_dependents(LitNum,O,M),
update_iterms(LitNum,Vars,M).
% update database with output terms of literal
% return list of dependent literals
% update_oterms(+LitNum,+OVars,+DSoFar,-Dependents,+M)
update_oterms(_,[],Dependents,Dependents,_M).
update_oterms(LitNum,[VarNum|Vars],DSoFar,Dependents,M):-
retract(M:'$aleph_sat_vars'(VarNum,TNo,I,O)),
update(O,LitNum,NewO),
asserta(M:'$aleph_sat_vars'(VarNum,TNo,I,NewO)),
update_list(I,DSoFar,D1),
update_oterms(LitNum,Vars,D1,Dependents,M).
% update Dependent list of literals with LitNum
% update_dependents(+LitNum,+Lits,+M)
update_dependents(_,[],_M).
update_dependents(LitNum,[Lit|Lits],M):-
retract(M:'$aleph_sat_litinfo'(Lit,Depth,Atom,ITerms,OTerms,Dependents)),
update(Dependents,LitNum,NewD),
asserta(M:'$aleph_sat_litinfo'(Lit,Depth,Atom,ITerms,OTerms,NewD)),
update_dependents(LitNum,Lits,M).
% update dependents of head with literals that are simply generators
% that is, literals that require no input args
% update_generators(+M): appends generator literal numbers to the head's
% (literal 1's) dependents list.
update_generators(M):-
findall(L,(M:'$aleph_sat_litinfo'(L,_,_,[],_,_),L>1),GList),
GList \= [], !,
retract(M:'$aleph_sat_litinfo'(1,Depth,Lit,I,O,D)),
aleph_append(D,GList,D1),
asserta(M:'$aleph_sat_litinfo'(1,Depth,Lit,I,O,D1)).
update_generators(_M).
% mark literals
% mark_lits(+Lits,+M): failure-driven loop marking each literal at depth 0.
mark_lits(Lits,M):-
aleph_member(Lit,Lits),
asserta(M:'$aleph_local'(marked,Lit/0)),
fail.
mark_lits(_,_M).
% recursively mark literals with minimum depth to bind output vars in head
% mark_lits(+Lits,+OldVars,+Depth,+M): breadth-first over predecessor layers.
mark_lits([],_,_,_M).
mark_lits(Lits,OldVars,Depth,M):-
mark_lits(Lits,Depth,true,[],Predecessors,OldVars,NewVars,M),
aleph_delete_list(Lits,Predecessors,P1),
Depth1 is Depth + 1,
mark_lits(P1,NewVars,Depth1,M).
% mark_lits(+Lits,+Depth,+GetPreds,+PSoFar,-P,+VSoFar,-V,+M)
% mark each literal with Depth, collecting predecessors (P) and the vars
% bound so far (V); a literal already marked at a shallower depth keeps
% its old mark (second clause, else-branch).
mark_lits([],_,_,P,P,V,V,_M).
mark_lits([Lit|Lits],Depth,GetPreds,PSoFar,P,VSoFar,V,M):-
retract(M:'$aleph_local'(marked,Lit/Depth0)), !,
(Depth < Depth0 ->
mark_lit(Lit,Depth,GetPreds,VSoFar,P1,V2,M),
update_list(P1,PSoFar,P2),
mark_lits(Lits,Depth,GetPreds,P2,P,V2,V,M);
asserta(M:'$aleph_local'(marked,Lit/Depth0)),
mark_lits(Lits,Depth,GetPreds,PSoFar,P,VSoFar,V,M)).
mark_lits([Lit|Lits],Depth,GetPreds,PSoFar,P,VSoFar,V,M):-
mark_lit(Lit,Depth,GetPreds,VSoFar,P1,V2,M), !,
update_list(P1,PSoFar,P2),
mark_lits(Lits,Depth,GetPreds,P2,P,V2,V,M).
mark_lits([_|Lits],Depth,GetPreds,PSoFar,P,VSoFar,V,M):-
mark_lits(Lits,Depth,GetPreds,PSoFar,P,VSoFar,V,M).
% mark_lit(+Lit,+Depth,+GetPreds,+VSoFar,-P1,-V1,+M)
% stamp Lit with Depth in both the marked table and the litinfo record;
% when GetPreds is true, also mark dependent generator literals and
% return the predecessors needed to bind Lit's input vars.
mark_lit(Lit,Depth,GetPreds,VSoFar,P1,V1,M):-
retract(M:'$aleph_sat_litinfo'(Lit,_,Atom,I,O,D)),
asserta(M:'$aleph_local'(marked,Lit/Depth)),
asserta(M:'$aleph_sat_litinfo'(Lit,Depth,Atom,I,O,D)),
(GetPreds = false ->
P1 = [],
V1 = VSoFar;
get_vars(Atom,O,OVars),
update_list(OVars,VSoFar,V1),
get_predicates(D,V1,D1,M),
mark_lits(D1,Depth,false,[],_,VSoFar,_,M),
get_vars(Atom,I,IVars),
get_predecessors(IVars,[],P1,M)).
% mark lits that produce outputs that are not used by any other literal
% mark_floating_lits(+Lit,+Last,+M): scan literal numbers Lit..Last.
mark_floating_lits(Lit,Last,_M):-
Lit > Last, !.
mark_floating_lits(Lit,Last,M):-
M:'$aleph_sat_litinfo'(Lit,_,_,_,O,D),
O \= [],
(D = []; D = [Lit]), !,
asserta(M:'$aleph_local'(marked,Lit/0)),
Lit1 is Lit + 1,
mark_floating_lits(Lit1,Last,M).
mark_floating_lits(Lit,Last,M):-
Lit1 is Lit + 1,
mark_floating_lits(Lit1,Last,M).
% mark lits in bottom clause that are specified redundant by user
% requires definition of redundant/2 that have distinguished first arg ``bottom''
% mark_redundant_lits(+Lit,+Last,+M)
mark_redundant_lits(Lit,Last,_M):-
Lit > Last, !.
mark_redundant_lits(Lit,Last,M):-
get_pclause([Lit],[],Atom,_,_,_,M),
redundant(bottom,Atom,M), !,
asserta(M:'$aleph_local'(marked,Lit/0)),
Lit1 is Lit + 1,
mark_redundant_lits(Lit1,Last,M).
mark_redundant_lits(Lit,Last,M):-
Lit1 is Lit + 1,
mark_redundant_lits(Lit1,Last,M).
% get literals that are linked and do not link to any others (ie predicates)
% get_predicates(+Lits,+Vars,-T,+M): keep literals with no dependents whose
% input vars are all already bound in Vars.
get_predicates([],_,[],_M).
get_predicates([Lit|Lits],Vars,[Lit|T],M):-
M:'$aleph_sat_litinfo'(Lit,_,Atom,I,_,[]),
get_vars(Atom,I,IVars),
aleph_subset1(IVars,Vars), !,
get_predicates(Lits,Vars,T,M).
get_predicates([_|Lits],Vars,T,M):-
get_predicates(Lits,Vars,T,M).
% get all predecessors in the bottom clause of a set of literals
% get_predecessors(+Lits,-P,+M): transitive closure via input variables;
% literal 1 (the head) has no predecessors.
get_predecessors([],[],_M).
get_predecessors([Lit|Lits],P,M):-
(Lit = 1 -> Pred = [];
get_ivars1(false,Lit,IVars,M),
get_predecessors(IVars,[],Pred,M)),
get_predecessors(Pred,PPred,M),
update_list(Pred,PPred,P1),
get_predecessors(Lits,P2,M),
update_list(P2,P1,P).
% get list of literals in the bottom clause that produce a set of vars
% get_predecessors(+Vars,+PSoFar,-P,+M)
get_predecessors([],P,P,_M).
get_predecessors([Var|Vars],PSoFar,P,M):-
M:'$aleph_sat_vars'(Var,_,_,O),
update_list(O,PSoFar,P1),
get_predecessors(Vars,P1,P,M).
% removal of literals in bottom clause by negative-based reduction.
% A greedy strategy is employed, as implemented within the ILP system
% Golem (see Muggleton and Feng, "Efficient induction
% of logic programs", Inductive Logic Programming, S. Muggleton (ed.),
% AFP Press). In this, given a clause H:- B1, B2,...Bn, let Bi be the
% first literal s.t. H:-B1,...,Bi covers no more than the allowable number
% of negatives. The clause H:- Bi,B1,...,Bi-1 is then reduced. The
% process continues until there is no change in the length of a clause
% within an iteration. The algorithm is O(n^2).
% rm_nreduce(+Last,-N,+M): only active when nreduce_bottom is set.
rm_nreduce(Last,N,M):-
setting(nreduce_bottom,true,M), !,
get_litnums(1,Last,BottomLits,M),
M:'$aleph_global'(atoms,atoms(neg,Neg)),
setting(depth,Depth,M),
setting(prooftime,Time,M),
setting(proof_strategy,Proof,M),
setting(noise,Noise,M),
neg_reduce(BottomLits,Neg,Last,Depth/Time/Proof,Noise,M),
get_marked(1,Last,Lits,M),
length(Lits,N),
p1_message('negative-based removal'), p_message(N/Last).
rm_nreduce(_,0,_M).
% neg_reduce(+Lits,+Neg,+Last,+DepthTime,+Noise,+M)
% one iteration of greedy negative-based reduction: find the pivot literal,
% mark everything after it, move it after its predecessors and repeat until
% the clause length stops shrinking.
neg_reduce([Head|Body],Neg,Last,DepthTime,Noise,M):-
get_pclause([Head],[],Clause,TV,_,_,M),
neg_reduce(Body,Clause,TV,2,Neg,DepthTime,Noise,NewLast,M),
NewLast \= Last, !,
NewLast1 is NewLast - 1,
aleph_remove_n(NewLast1,[Head|Body],Prefix,[LastLit|Rest]),
mark_lits(Rest,M),
% BUG FIX: insert_lastlit is defined with arity 4 in this module-aware
% port (see below); the original call omitted the module argument M.
insert_lastlit(LastLit,Prefix,Lits1,M),
neg_reduce(Lits1,Neg,NewLast,DepthTime,Noise,M).
neg_reduce(_,_,_,_,_,_M).
% neg_reduce(+Lits,+Clause,+TV,+N,+Neg,+ProofFlags,+Noise,-LastLit,+M)
% extend Clause literal by literal while it still covers more than Noise
% negatives; LastLit is the position of the first literal at which the
% negative cover drops to an acceptable level.
% BUG FIX: the first (base) clause was missing the trailing module argument,
% making it arity 8 and thus unreachable from the arity-9 calls above.
neg_reduce([],_,_,N,_,_,_,N,_M).
neg_reduce([L1|Lits],C,TV,N,Neg,ProofFlags,Noise,LastLit,M):-
get_pclause([L1],TV,Lit1,TV1,_,_,M),
extend_clause(C,Lit1,Clause,M),
prove(ProofFlags,neg,Clause,Neg,NegCover,Count,M),
Count > Noise, !,
N1 is N + 1,
neg_reduce(Lits,Clause,TV1,N1,NegCover,ProofFlags,Noise,LastLit,M).
neg_reduce(_,_,_,N,_,_,_,N,_M).
% insert_lastlit(LastLit,[1|Lits],Lits1):-
% find_last_ancestor(Lits,LastLit,1,2,Last),
% aleph_remove_n(Last,[1|Lits],Prefix,Suffix),
% aleph_append([LastLit|Suffix],Prefix,Lits1).
% insert_lastlit(+LastLit,+Lits,-Lits1,+M)
% reinsert LastLit immediately after all of its predecessor literals.
insert_lastlit(LastLit,Lits,Lits1,M):-
get_predecessors([LastLit],Prefix,M),
aleph_delete_list(Prefix,Lits,Suffix),
aleph_append([LastLit|Suffix],Prefix,Lits1).
% find_last_ancestor(+Lits,+L,+Last0,+LitNum,-Last,+M)
% position of the last literal in Lits that has L among its dependents
% (currently only referenced by the commented-out insert_lastlit/3 above).
find_last_ancestor([],_,Last,_,Last,_M):- !.
find_last_ancestor([Lit|Lits],L,_,LitNum,Last,M):-
M:'$aleph_sat_litinfo'(Lit,_,_,_,_,D),
aleph_member1(L,D), !,
NextLit is LitNum + 1,
find_last_ancestor(Lits,L,LitNum,NextLit,Last,M).
find_last_ancestor([_|Lits],L,Last0,LitNum,Last,M):-
NextLit is LitNum + 1,
find_last_ancestor(Lits,L,Last0,NextLit,Last,M).
% removal of literals that are repeated because of mode differences
% rm_moderepeats(+Last,-N,+M): first clause is a failure-driven loop marking
% and retracting every later duplicate of an identical flattened atom;
% second clause counts and removes the marked literals.
rm_moderepeats(_,_,M):-
M:'$aleph_sat_litinfo'(Lit1,_,Pred1,_,_,_),
M:'$aleph_sat_litinfo'(Lit2,_,Pred1,_,_,_),
Lit1 >= 1, Lit2 > Lit1,
retract(M:'$aleph_sat_litinfo'(Lit2,_,Pred1,_,_,_)),
asserta(M:'$aleph_local'(marked,Lit2/0)),
fail.
rm_moderepeats(Last,N,M):-
M:'$aleph_local'(marked,_), !,
get_marked(1,Last,Lits,M),
length(Lits,N),
p1_message('repeated literals'), p_message(N/Last),
remove_lits(Lits,M).
rm_moderepeats(_,0,_M).
% removal of symmetric literals
% rm_symmetric(+Last,-N,+M): for predicates declared symmetric, mark and
% retract any later literal whose input-variable set equals that of an
% earlier literal of the same (possibly negated) predicate.
rm_symmetric(_,_,M):-
M:'$aleph_global'(symmetric,_),
M:'$aleph_sat_litinfo'(Lit1,_,Pred1,[I1|T1],_,_),
is_symmetric(Pred1,Name,Arity,M),
get_vars(Pred1,[I1|T1],S1),
M:'$aleph_sat_litinfo'(Lit2,_,Pred2,[I2|T2],_,_),
Lit1 \= Lit2,
is_symmetric(Pred2,Name,Arity,M),
Pred1 =.. [_|Args1],
Pred2 =.. [_|Args2],
symmetric_match(Args1,Args2),
get_vars(Pred2,[I2|T2],S2),
equal_set(S1,S2),
asserta(M:'$aleph_local'(marked,Lit2/0)),
retract(M:'$aleph_sat_litinfo'(Lit2,_,Pred2,[I2|T2],_,_)),
fail.
rm_symmetric(Last,N,M):-
M:'$aleph_local'(marked,_), !,
get_marked(1,Last,Lits,M),
length(Lits,N),
p1_message('symmetric literals'), p_message(N/Last),
remove_lits(Lits,M).
rm_symmetric(_,0,_M).
% is_symmetric(+Pred,-Name,-Arity,+M)
% Pred (possibly wrapped in not/1) is declared symmetric via symmetric/1.
is_symmetric(not(Pred),not(Name),Arity,M):-
!,
functor(Pred,Name,Arity),
M:'$aleph_global'(symmetric,symmetric(Name/Arity)).
is_symmetric(Pred,Name,Arity,M):-
functor(Pred,Name,Arity),
M:'$aleph_global'(symmetric,symmetric(Name/Arity)).
% symmetric_match(+Args1,+Args2)
% argument lists of two flattened literals match under symmetry:
% aleph_const-wrapped constants must be identical, while any pair of
% integer-coded variables is considered interchangeable.
symmetric_match([],[]).
symmetric_match([A|As],[B|Bs]):-
( A = aleph_const(C) ->
B = aleph_const(C)
; integer(A),
integer(B)
),
symmetric_match(As,Bs).
% removal of literals that are repeated because of commutativity
% rm_commutative(+Last,-N,+M): for predicates declared commutative, mark and
% retract later literals with the same input-variable set and outputs.
rm_commutative(_,_,M):-
M:'$aleph_global'(commutative,commutative(Name/Arity)),
p1_message('checking commutative literals'), p_message(Name/Arity),
functor(Pred,Name,Arity), functor(Pred1,Name,Arity),
M:'$aleph_sat_litinfo'(Lit1,_,Pred,[I1|T1],O1,_),
% check for marked literals
% (SWI-Prolog specific: suggested by Vasili Vrubleuski)
\+(M:'$aleph_local'(marked,Lit1/0)),
get_vars(Pred,[I1|T1],S1),
M:'$aleph_sat_litinfo'(Lit2,_,Pred1,[I2|T2],O2,_),
Lit1 \= Lit2 ,
O1 = O2,
get_vars(Pred1,[I2|T2],S2),
equal_set(S1,S2),
asserta(M:'$aleph_local'(marked,Lit2/0)),
retract(M:'$aleph_sat_litinfo'(Lit2,_,Pred1,[I2|T2],_,_)),
fail.
rm_commutative(Last,N,M):-
M:'$aleph_local'(marked,_), !,
% BUG FIX: the original called get_marked/3 and remove_lits/1, omitting
% the module argument that this port threads everywhere (compare the
% identical clause of rm_symmetric/3 above); both calls would have
% raised existence errors.
get_marked(1,Last,Lits,M),
length(Lits,N),
p1_message('commutative literals'), p_message(N/Last),
remove_lits(Lits,M).
rm_commutative(_,0,_M).
% recursive marking of literals that do not contribute to establishing
% variable chains to output vars in the head
% or produce outputs that are not used by any literal
% controlled by setting flag check_useless
% rm_uselesslits(+Last,-N,+M)
rm_uselesslits(_,0,M):-
setting(check_useless,false,M), !.
rm_uselesslits(Last,N,M):-
M:'$aleph_sat'(hovars,OVars),
OVars \= [], !,
get_predecessors(OVars,[],P,M),
M:'$aleph_sat'(hivars,IVars),
mark_lits(P,IVars,0,M),
get_unmarked(1,Last,Lits,M),
length(Lits,N),
p1_message('useless literals'), p_message(N/Last),
remove_lits(Lits,M).
rm_uselesslits(_,0,_M).
% call user-defined predicate redundant/2 to remove redundant
% literals from bottom clause. Redundancy checking only done on request
% rm_redundant(+Last,-N,+M)
rm_redundant(_,0,M):-
setting(check_redundant,false,M), !.
rm_redundant(Last,N,M):-
mark_redundant_lits(1,Last,M),
get_marked(1,Last,Lits,M),
length(Lits,N),
p1_message('redundant literals'), p_message(N/Last),
remove_lits(Lits,M).
% get a list of unmarked literals
% get_unmarked(+Lit,+Last,-Lits,+M): collects (and retracts) the litinfo of
% literals with no 'marked' entry; marked entries are consumed and skipped.
get_unmarked(Lit,Last,[],_M):-
Lit > Last, !.
get_unmarked(Lit,Last,Lits,M):-
retract(M:'$aleph_local'(marked,Lit/_)), !,
Next is Lit + 1,
get_unmarked(Next,Last,Lits,M).
get_unmarked(Lit,Last,[Lit|Lits],M):-
retract(M:'$aleph_sat_litinfo'(Lit,_,_,_,_,_)), !,
Next is Lit + 1,
get_unmarked(Next,Last,Lits,M).
get_unmarked(Lit,Last,Lits,M):-
Next is Lit + 1,
get_unmarked(Next,Last,Lits,M).
% get a list of marked literals
% get_marked(+Lit,+Last,-Lits,+M): consumes 'marked' entries, also removing
% the corresponding litinfo record when present.
get_marked(Lit,Last,[],_M):-
Lit > Last, !.
get_marked(Lit,Last,[Lit|Lits],M):-
retract(M:'$aleph_local'(marked,Lit/_)), !,
(retract(M:'$aleph_sat_litinfo'(Lit,_,_,_,_,_)) ->
true;
true),
Next is Lit + 1,
get_marked(Next,Last,Lits,M).
get_marked(Lit,Last,Lits,M):-
Next is Lit + 1,
get_marked(Next,Last,Lits,M).
% update descendent lists of literals by removing useless literals
% remove_lits(+L,+M): failure-driven loop deleting the members of L from
% every remaining literal's dependents list.
remove_lits(L,M):-
retract(M:'$aleph_sat_litinfo'(Lit,Depth,A,I,O,D)),
aleph_delete_list(L,D,D1),
asserta(M:'$aleph_sat_litinfo'(Lit,Depth,A,I,O,D1)),
fail.
remove_lits(_,_M).
% generate a new literal at depth Depth: forced backtracking will give all lits
% gen_layer(+Name/Arity,+Depth,+Module)
% Generate all literals for predicate Name/Arity at depth Depth of the
% bottom clause, driven by the modeb declarations. Forced backtracking
% (the trailing fail) enumerates every legal input instantiation;
% successful background calls are recorded via get_successes/4.
gen_layer(Name/Arity,Depth,M):-
    (Name/Arity = (not)/1 ->
        % negated literal: use the mode declared inside the not/1 wrapper
        M:'$aleph_global'(modeb,modeb(NSucc,not(Mode))),
        functor(Mode,Name1,Arity1),
        functor(Lit1,Name1,Arity1),
        once(copy_modeterms(Mode,Lit1,Arity1)),
        Lit = not(Lit1);
        functor(Mode,Name,Arity),
        functor(Lit,Name,Arity),
        M:'$aleph_global'(modeb,modeb(NSucc,Mode)),
        once(copy_modeterms(Mode,Lit,Arity))),
    split_args(Mode,Mode,Input,Output,Constants,M),
    % bind the input args to terms already available at shallower depths:
    % one input at exactly Depth-1, the rest at any lower depth
    (Input = [] -> Call1 = true, Call2 = true;
        aleph_delete(Arg/Type,Input,OtherInputs),
        Depth1 is Depth - 1,
        construct_incall(Lit,Depth1,[Arg/Type],Call1,M),
        construct_call(Lit,Depth,OtherInputs,Call2,M)),
    Call1,
    Call2,
    aleph_background_predicate(Lit,M),
    get_successes(Lit,NSucc,mode(Mode,Input,Output,Constants),M),
    fail.
gen_layer(_,_,_M).
% get_successes(+Literal,+N,+ModeInfo,+Module)
% Prove Literal against the background knowledge and record successful
% instantiations: N = 1 keeps only the first success, N = * keeps all
% (on backtracking), and an integer N > 1 keeps at most N via
% get_nsuccesses/4.
get_successes(Literal,1,Mo,M):-
    depth_bound_call(Literal,M),
    update_atoms(Literal,Mo,M), !.
get_successes(Literal,*,Mo,M):-
    depth_bound_call(Literal,M),
    update_atoms(Literal,Mo,M).
get_successes(Literal,N,Mo,M):-
    integer(N),
    N > 1,
    reset_succ,
    get_nsuccesses(Literal,N,Mo,M).
% get at most N matches for a literal
% get_nsuccesses(+Literal,+N,+ModeInfo,+Module)
% Record at most N successful instantiations of Literal. The running
% count lives in '$aleph_local'(last_success,_) (reset by reset_succ);
% the conditional cut stops backtracking once the quota is reached.
get_nsuccesses(Literal,N,Mo,M):-
    depth_bound_call(Literal,M),
    retract(M:'$aleph_local'(last_success,Succ0)),
    Succ0 < N,
    Succ1 is Succ0 + 1,
    update_atoms(Literal,Mo,M),
    asserta(M:'$aleph_local'(last_success,Succ1)),
    (Succ1 >= N -> !; true).
% update_atoms(+Atom,+ModeInfo,+Module)
% Record a ground atom together with its mode information for later
% bottom-clause construction; a duplicate atom/mode pair is ignored.
update_atoms(Atom,Mo,M):-
    (   M:'$aleph_sat_atom'(Atom,Mo)
    ->  true
    ;   assertz(M:'$aleph_sat_atom'(Atom,Mo))
    ).
% call with input term that is an output of a previous literal
% construct_incall(+Lit,+Depth,+Args,-Call,+Module)
% Build a goal (Call) that, when executed, binds each input argument of
% Lit to a term recorded at exactly depth Depth (legal_term 'exact').
construct_incall(_,_,[],true,_M):- !.
construct_incall(not(Lit),Depth,Args,Call,M):-
    !,
    construct_incall(Lit,Depth,Args,Call,M).
construct_incall(Lit,Depth,[Pos/Type],Call,M):-
    !,
    Call = legal_term(exact,Depth,Type,Term,M),
    tparg(Pos,Lit,Term).
construct_incall(Lit,Depth,[Pos/Type|Args],(Call,Calls),M):-
    tparg(Pos,Lit,Term),
    Call = legal_term(exact,Depth,Type,Term,M),
    % an unbound Depth must stay independent across the remaining args
    (var(Depth)-> construct_incall(Lit,_,Args,Calls,M);
        construct_incall(Lit,Depth,Args,Calls,M)).
% construct_call(+Lit,+Depth,+Args,-Call,+Module)
% Like construct_incall/5, but binds input arguments to terms recorded
% at any depth strictly below Depth (legal_term 'upper').
construct_call(_,_,[],true,_M):- !.
construct_call(not(Lit),Depth,Args,Call,M):-
    !,
    construct_call(Lit,Depth,Args,Call,M).
construct_call(Lit,Depth,[Pos/Type],Call,M):-
    !,
    Call = legal_term(upper,Depth,Type,Term,M),
    tparg(Pos,Lit,Term).
construct_call(Lit,Depth,[Pos/Type|Args],(Call,Calls),M):-
    tparg(Pos,Lit,Term),
    Call = legal_term(upper,Depth,Type,Term,M),
    construct_call(Lit,Depth,Args,Calls,M).
% generator of legal terms seen so far
% legal_term(+Mode,+Depth,+Type,-Term,+Module)
% Enumerate terms of the given Type seen so far in the bottom clause:
% 'exact' requires the term to have been recorded at exactly Depth,
% 'upper' at any known depth strictly below Depth. Only terms with at
% least one variable occurrence (non-empty occurrence list in
% '$aleph_sat_vars') are produced.
legal_term(exact,Depth,Type,Term,M):-
    M:'$aleph_sat_terms'(TNo,Depth,Term,Type),
    once(M:'$aleph_sat_vars'(_,TNo,_,[_|_])).
% legal_term(exact,Depth,Type,Term):-
%     M:'$aleph_sat_varscopy'(NewVar,OldVar,Depth),
%     once(M:'$aleph_sat_vars'(NewVar,TNo,_,_)),
%     M:'$aleph_sat_terms'(TNo,_,Term,Type),_).
legal_term(upper,Depth,Type,Term,M):-
    M:'$aleph_sat_terms'(TNo,Depth1,Term,Type),
    Depth1 \= unknown,
    Depth1 < Depth,
    once(M:'$aleph_sat_vars'(_,TNo,_,[_|_])).
% legal_term(upper,Depth,Type,Term):-
%     M:'$aleph_sat_varscopy'(NewVar,OldVar,Depth),
%     once(M:'$aleph_sat_vars'(NewVar,TNo,_,_)),
%     M:'$aleph_sat_terms'(TNo,Depth1,Term,Type),
%     Depth1 \= unknown.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% V A R I A B L E -- S P L I T T I N G
% split_vars(+Depth,+FAtom,+I,+O,+C,-SplitAtom,-IVars,-OVars,-Equivs,+Module)
% Variable splitting: when the splitvars setting is true, give each
% co-referencing output variable of FAtom a fresh name, returning the
% renamed atom and the equivalence classes (Equivs) of the split
% variables. Otherwise (second clause) the atom is returned unchanged
% with empty equivalences.
split_vars(Depth,FAtom,I,O,C,SplitAtom,IVars,OVars,Equivs,M):-
    setting(splitvars,true,M), !,
    get_args(FAtom,I,[],IVarList),
    get_args(FAtom,O,[],OVarList),
    get_var_equivs(Depth,IVarList,OVarList,IVars,OVars0,Equivs0),
    (Equivs0 = [] ->
        OVars = OVars0, SplitAtom = FAtom, Equivs = Equivs0;
        functor(FAtom,Name,Arity),
        functor(SplitAtom,Name,Arity),
        copy_args(FAtom,SplitAtom,I),
        copy_args(FAtom,SplitAtom,C),
        rename_ovars(O,Depth,FAtom,SplitAtom,Equivs0,Equivs),
        get_argterms(SplitAtom,O,[],OVars)).
    % write('splitting: '), write(FAtom), write(' to: '), write(SplitAtom), nl.
split_vars(_,FAtom,I,O,_,FAtom,IVars,OVars,[],_M):-
    get_vars(FAtom,I,IVars),
    get_vars(FAtom,O,OVars).
% get equivalent classes of variables from co-references
% get_var_equivs(+Depth,+IVarList,+OVarList,-IVars,-OVars,-Equivs)
% Find equivalence classes of co-referencing variables: variables
% repeated within the output args, or shared between input and output
% args. At depth 0 repeated input variables also count.
get_var_equivs(Depth,IVarList,OVarList,IVars,OVars,Equivs):-
    sort(IVarList,IVars),
    sort(OVarList,OVars),
    (Depth = 0 ->
        intersect1(IVars,OVarList,IOCoRefs,_),
        get_repeats(IVarList,IOCoRefs,ICoRefs);
        intersect1(IVars,OVarList,ICoRefs,_)),
    get_repeats(OVarList,ICoRefs,CoRefs),
    add_equivalences(CoRefs,Depth,Equivs).
% add_equivalences(+Vars,+Depth,-Equivs)
% Seed each co-referencing variable with a singleton equivalence class
% Var/[Var]. (An earlier, disabled variant used an empty class at
% depth 0; the Depth argument is currently unused.)
add_equivalences([],_,[]).
add_equivalences([V|Vs],Depth,[V/[V]|Es]):-
    add_equivalences(Vs,Depth,Es).
% get_repeats(+Vars,+Acc,-Repeated)
% Add to the accumulator every variable that occurs more than once in
% Vars (membership of the tail detects a later repetition).
get_repeats([],L,L).
get_repeats([Var|Vars],Ref1,L):-
    aleph_member1(Var,Vars), !,
    update(Ref1,Var,Ref2),
    get_repeats(Vars,Ref2,L).
get_repeats([_|Vars],Ref,L):-
    get_repeats(Vars,Ref,L).
% rename all output vars that are co-references
% updates vars database and return equivalent class of variables
% rename_ovars(+Args,+Depth,+Old,+New,+CoRefs,-Equivalences)
% Rename the output arguments of Old that appear in the co-reference
% list, writing fresh variables into the corresponding positions of
% New and extending each variable's equivalence class; outputs that do
% not co-reference are copied through unchanged (second clause).
rename_ovars([],_,_,_,L,L).
rename_ovars([ArgNo|Args],Depth,Old,New,CoRefs,Equivalences):-
    % an arg spec may be Pos/Type or a bare position
    (ArgNo = Pos/_ -> true; Pos = ArgNo),
    tparg(Pos,Old,OldVar),
    aleph_delete(OldVar/Equiv,CoRefs,Rest), !,
    copy_var(OldVar,NewVar,Depth),
    tparg(Pos,New,NewVar),
    rename_ovars(Args,Depth,Old,New,[OldVar/[NewVar|Equiv]|Rest],Equivalences).
rename_ovars([ArgNo|Args],Depth,Old,New,CoRefs,Equivalences):-
    (ArgNo = Pos/_ -> true; Pos = ArgNo),
    tparg(Pos,Old,OldVar),
    tparg(Pos,New,OldVar),
    rename_ovars(Args,Depth,Old,New,CoRefs,Equivalences).
% create new equalities to allow co-references to re-appear in search
% insert_eqs(+Equivs,+Depth,+Last,-NewLast,+Module)
% For each equivalence class of split variables, add equality literals
% to the bottom clause so that co-references can re-appear during the
% search; Last/NewLast thread the literal count.
insert_eqs([],_,L,L,_M).
insert_eqs([OldVar/Equivs|Rest],Depth,Last,NewLast,M):-
    M:'$aleph_sat_vars'(OldVar,TNo,_,_),
    M:'$aleph_sat_terms'(TNo,_,_,Type),
    add_eqs(Equivs,Depth,Type,Last,Last1,M),
    insert_eqs(Rest,Depth,Last1,NewLast,M).
% add_eqs(+Vars,+Depth,+Type,+Last,-NewLast,+Module)
% Add an equality literal Var1=Var2 to the bottom clause for every
% unordered pair of variables drawn from Vars; Last/NewLast thread the
% running literal count.
add_eqs([],_,_,L,L,_M).
add_eqs([V1|Rest],Depth,Type,Last,NewLast,M):-
    add_eqs(Rest,Depth,V1,Type,Last,Last1,M),
    add_eqs(Rest,Depth,Type,Last1,NewLast,M).
% add_eqs(+Vars,+Depth,+Var1,+Type,+Last,-NewLast,+Module)
% Pair Var1 with each variable in Vars. At depth 0 the equality is
% recorded with Var1 as input and Var2 as output; at greater depths
% both variables are inputs.
add_eqs([],_,_,_,L,L,_M).
add_eqs([Var2|Rest],Depth,Var1,Type,Last,NewLast,M):-
    (Depth = 0 ->
        add_lit(Last,false,(Var1=Var2),[1/Type],[2/Type],[Var1],[Var2],Last1,M);
        % BUG FIX: the module argument M was previously outside the
        % call - "add_lit(...,Last1),M)" - so add_lit was invoked with
        % arity 8 and the module name itself was then called as a goal.
        add_lit(Last,false,(Var1=Var2),[1/Type,2/Type],[],[Var1,Var2],[],Last1,M)),
    add_eqs(Rest,Depth,Var1,Type,Last1,NewLast,M).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% utilities for updating mappings between terms and variables
% integrate terms specified by a list of arguments
% integrating a term means:
% updating 2 databases: terms and vars
% terms contains the term along with a term-id
% vars contains a var-id <-> term-id mapping
% var and term-ids are integers
% integrate_args(+Depth,+Literal,+Args,+Module)
% Integrate the terms at the given argument positions of Literal into
% the terms/vars databases; a term previously recorded with type
% 'unknown' at this depth is upgraded to its now-known type.
integrate_args(_,_,[],_M):-!.
integrate_args(Depth,Literal,[Pos/Type|T],M):-
    tparg(Pos,Literal,Term),
    integrate_term(Depth,Term/Type,M),
    (retract(M:'$aleph_sat_terms'(TNo,Depth,Term,unknown)) ->
        asserta(M:'$aleph_sat_terms'(TNo,Depth,Term,Type));
        true),
    integrate_args(Depth,Literal,T,M).
% integrate a term
% integrate_term(+Depth,+Term/Type,+Module)
% Record Term with its Type in the '$aleph_sat_terms' and
% '$aleph_sat_vars' databases, allocating fresh term/variable ids for
% a genuinely new term. Clause order matters:
%  1. already present at this depth and linked to a used variable: done;
%  2. present but untyped or unused: promote an 'unknown' depth to Depth;
%  3. present with a proper type (any depth): keep as is;
%  4. new term: allocate the next term-id and var-id and record both.
integrate_term(Depth,Term/Type,M):-
    M:'$aleph_sat_terms'(TNo,Depth,Term,Type),
    M:'$aleph_sat_vars'(_,TNo,_,[_|_]), !.
integrate_term(Depth,Term/Type,M):-
    M:'$aleph_sat_terms'(TNo,Depth1,Term,Type),
    (Type = unknown ; M:'$aleph_sat_vars'(_,TNo,_,[])), !,
    (Depth1 = unknown ->
        retract(M:'$aleph_sat_terms'(TNo,Depth1,Term,Type)),
        asserta(M:'$aleph_sat_terms'(TNo,Depth,Term,Type));
        true).
integrate_term(_,Term/Type,M):-
    M:'$aleph_sat_terms'(_,_,Term,Type),
    Type \= unknown,
    !.
integrate_term(Depth,Term/Type,M):-
    % fresh term: bump the lastterm/lastvar counters and record it
    retract(M:'$aleph_sat'(lastterm,Num)),
    retract(M:'$aleph_sat'(lastvar,Var0)),
    TNo is Num + 1,
    Var is Var0 + 1,
    asserta(M:'$aleph_sat'(lastterm,TNo)),
    asserta(M:'$aleph_sat'(lastvar,Var)),
    asserta(M:'$aleph_sat_vars'(Var,TNo,[],[])),
    asserta(M:'$aleph_sat_terms'(TNo,Depth,Term,Type)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% split_args(+Lit,?Mode,-Input,-Output,-Constants)
% return term-places and types of +,-, and # args in Lit
% by finding a matching mode declaration if Mode is given
% otherwise first mode that matches is used
% split_args(+Lit,?Mode,-Input,-Output,-Constants,+Module)
% Return the term-places and types of the +, - and # arguments of Lit,
% using a matching mode declaration (the given Mode, or the first one
% that matches when Mode is unbound).
split_args(Lit,Mode,Input,Output,Constants,M):-
    functor(Lit,Psym,Arity),
    find_mode(mode,Psym/Arity,Mode,M),
    functor(Template,Psym,Arity),
    copy_modeterms(Mode,Template,Arity),
    Template = Lit,
    tp(Mode,TPList),
    split_tp(TPList,Input,Output,Constants).
% split_tp(+TPList,-Input,-Output,-Constants)
% split term-place/type list into +,-,#
% split_tp(+TPList,-Input,-Output,-Constants)
% Partition a term-place/type list by mode symbol: +Type goes to
% Input, -Type to Output, #Type to Constants; anything else is dropped.
split_tp([],[],[],[]).
split_tp([Spec/Place|Rest],In,Out,Con):-
    !,
    (   Spec = +Type -> In = [Place/Type|In1], Out = Out1, Con = Con1
    ;   Spec = -Type -> In = In1, Out = [Place/Type|Out1], Con = Con1
    ;   Spec = #Type -> In = In1, Out = Out1, Con = [Place/Type|Con1]
    ;   In = In1, Out = Out1, Con = Con1
    ),
    split_tp(Rest,In1,Out1,Con1).
split_tp([_|Rest],In,Out,Con):-
    split_tp(Rest,In,Out,Con).
% tp(+Literal,-TPList)
% return terms and places in Literal
% tp(+Literal,-TPList)
% Return all (sub)terms of Literal paired with their term-places; a
% term-place is the list of argument positions leading to the term.
tp(Literal,TPList):-
    functor(Literal,_,Arity),
    tp_list(Literal,Arity,[],[],TPList).
% tp_list(+Term,+Pos,+PlaceList,+Acc,-TPList)
% Walk the arguments of Term from position Pos down to 1, accumulating
% Arg/Places pairs (nested subterms handled by unwrap_term/4).
tp_list(_,0,_,L,L):- !.
tp_list(Term,Pos,PlaceList,TpSoFar,TpList):-
    arg(Pos,Term,Arg),
    aleph_append([Pos],PlaceList,Places),
    unwrap_term(Arg,Places,[Arg/Places|TpSoFar],L1),
    Pos1 is Pos - 1,
    tp_list(Term,Pos1,PlaceList,L1,TpList).
% unwrap_term(+Term,+Place,+Acc,-TPList)
% Recurse into a compound argument; a variable contributes nothing.
unwrap_term(Term,_,L,L):-
    var(Term), !.
unwrap_term(Term,Place,TpSoFar,TpList):-
    functor(Term,_,Arity),
    tp_list(Term,Arity,Place,TpSoFar,TpList).
% get_determs(+PSym/Arity,-Preds,+Module)
% Collect every predicate declared (via determination/2) as usable in
% the body when learning clauses for PSym/Arity.
get_determs(PSym/Arity,Preds,M):-
    findall(P,
            M:'$aleph_global'(determination,determination(PSym/Arity,P)),
            Preds).
% get_modes(+PSym/Arity,-Modes,+Module)
% Collect all mode declarations recorded for PSym/Arity.
get_modes(PSym/Arity,Modes,M):-
    functor(Template,PSym,Arity),
    findall(Template,M:'$aleph_global'(mode,mode(_,Template)),Modes).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% S E A R C H
% basic search engine for single clause search
% search(+S,-Nodes,+Module)
% Top-level single-clause search. If a finite searchtime is set
% (arg 36 of the settings vector S), graphsearch/3 runs under a time
% bound and a message is printed when the limit is hit; otherwise the
% search is unbounded. Nodes returns the number of nodes explored.
search(S,Nodes,M):-
    arg(36,S,Time),
    Inf is inf,
    Time =\= Inf,
    SearchTime is integer(Time),
    SearchTime > 0, !,
    catch(time_bound_call(SearchTime,searchlimit,graphsearch(S,_),M),
        searchlimit,p_message('Time limit reached')),
    M:'$aleph_search'(current,current(_,Nodes,_)).
search(S,Nodes,M):-
    graphsearch(S,Nodes,M).
% basic search engine for theory-based search
% tsearch(+S,-Nodes,+Module)
% Top-level theory-level search; like search/3 but enforces the time
% bound with an alarm around theorysearch/3.
tsearch(S,Nodes,M):-
    arg(36,S,Time),
    Inf is inf,
    Time =\= Inf,
    SearchTime is integer(Time),
    SearchTime > 0, !,
    alarm(SearchTime,throw(searchlimit),Id),
    catch(theorysearch(S,Nodes,M),searchlimit,p_message('Time limit reached')),
    remove_alarm(Id).
tsearch(S,Nodes,M):-
    theorysearch(S,Nodes,M).
% graphsearch(+S,-Nodes,+Module)
% Main branch-and-bound loop over the open list. Repeatedly expands
% the next node, scores its successors (plus its siblings when no
% refinement operator is in use), updates the best hypothesis, and
% stops when discontinue_search/4 fires or the open list is exhausted.
graphsearch(S,Nodes,M):-
    next_node(_,M), !,
    arg(3,S,RefineOp),
    arg(23,S,LazyPreds),
    repeat,
    next_node(NodeRef,M),
    once(retract(M:'$aleph_search'(current,current(LastE,Last,BestSoFar)))),
    expand(RefineOp,S,NodeRef,Node,Path,MinLength,Succ,PosCover,NegCover,OVars,
        PrefixClause,PrefixTV,PrefixLength,M),
    % successors of lazily-evaluated predicates need their covers computed
    ((LazyPreds = []; RefineOp \= false) -> Succ1 = Succ;
        lazy_evaluate(Succ,LazyPreds,Path,PosCover,NegCover,Succ1,M)),
    NextE is LastE + 1,
    get_gains(S,Last,BestSoFar,Path,PrefixClause,PrefixTV,PrefixLength,
        MinLength,Succ1,PosCover,NegCover,OVars,NextE,Last0,NextBest0,M),
    (RefineOp = false ->
        get_sibgains(S,Node,Last0,NextBest0,Path,PrefixClause,
            PrefixTV,PrefixLength,MinLength,PosCover,NegCover,
            OVars,NextE,Last1,NextBest,M);
        Last1 = Last0, NextBest = NextBest0),
    asserta(M:'$aleph_search'(current,current(NextE,Last1,NextBest))),
    NextL is Last + 1,
    asserta(M:'$aleph_search_expansion'(NextE,Node,NextL,Last1)),
    % either stop, or prune the open list and loop via repeat until
    % there is no next best node
    (discontinue_search(S,NextBest,Last1,M) ->
        M:'$aleph_search'(current,current(_,Nodes,_));
        prune_open(S,BestSoFar,NextBest,M),
        get_nextbest(S,Next,M),
        Next = none,
        M:'$aleph_search'(current,current(_,Nodes,_))),
    !.
graphsearch(_,Nodes,M):-
    M:'$aleph_search'(current,current(_,Nodes,_)).
% theorysearch(+S,-Nodes,+Module)
% Search loop over whole theories rather than single clauses; each
% node holds a theory whose gain is evaluated against all positive
% and negative examples.
theorysearch(S,Nodes,M):-
    next_node(_,M), !,
    M:'$aleph_global'(atoms,atoms(pos,Pos)),
    M:'$aleph_global'(atoms,atoms(neg,Neg)),
    interval_count(Pos,P,M),
    interval_count(Neg,N,M),
    repeat,
    next_node(NodeRef,M),
    M:'$aleph_search_node'(NodeRef,Theory,_,_,_,_,_,_),
    once(retract(M:'$aleph_search'(current,current(_,Last,BestSoFar)))),
    get_theory_gain(S,Last,BestSoFar,Theory,Pos,Neg,P,N,NextBest,Last1,M),
    asserta(M:'$aleph_search'(current,current(0,Last1,NextBest))),
    (discontinue_search(S,NextBest,Last1,M) ->
        M:'$aleph_search'(current,current(_,Nodes,_));
        prune_open(S,BestSoFar,NextBest,M),
        get_nextbest(S,Next,M),
        Next = none,
        M:'$aleph_search'(current,current(_,Nodes,_))),
    !.
theorysearch(_,Nodes,M):-
    M:'$aleph_search'(current,current(_,Nodes,_)).
% next_node(-NodeRef,+Module)
% Fetch the next open-list node to expand; deterministic (first
% recorded '$aleph_search'(nextnode,_) fact, then cut).
next_node(NodeRef,M):-
    M:'$aleph_search'(nextnode,NodeRef), !.
% get_search_settings(-S,+Module)
% Build the 47-argument settings vector S consulted throughout the
% search via arg/3. Each argument caches one setting or one piece of
% global state so the inner search loop avoids repeated setting/3
% lookups.
get_search_settings(S,M):-
    functor(S,set,47),
    setting(nodes,MaxNodes,M), arg(1,S,MaxNodes),
    setting(explore,Explore,M), arg(2,S,Explore),
    setting(refineop,RefineOp,M), arg(3,S,RefineOp),
    setting(searchstrat,SearchStrat,M), setting(evalfn,EvalFn,M),
    arg(4,S,SearchStrat/EvalFn),
    (setting(greedy,Greedy,M)-> arg(5,S,Greedy); arg(5,S,false)),
    setting(verbosity,Verbose,M), arg(6,S,Verbose),
    setting(clauselength,CLength,M), arg(7,S,CLength),
    setting(caching,Cache,M), arg(8,S,Cache),
    (setting(prune_defs,Prune,M)-> arg(9,S,Prune); arg(9,S,false)),
    setting(lazy_on_cost,LCost,M), arg(10,S,LCost),
    setting(lazy_on_contradiction,LContra,M), arg(11,S,LContra),
    setting(lazy_negs,LNegs,M), arg(12,S,LNegs),
    setting(minpos,MinPos,M), arg(13,S,MinPos),
    setting(depth,Depth,M), arg(14,S,Depth),
    setting(cache_clauselength,CCLim,M), arg(15,S,CCLim),
    (M:'$aleph_global'(size,size(pos,PSize))-> arg(16,S,PSize); arg(16,S,0)),
    setting(noise,Noise,M), arg(17,S,Noise),
    setting(minacc,MinAcc,M), arg(18,S,MinAcc),
    setting(minscore,MinScore,M), arg(19,S,MinScore),
    (M:'$aleph_global'(size,size(rand,RSize))-> arg(20,S,RSize); arg(20,S,0)),
    setting(mingain,MinGain,M), arg(21,S,MinGain),
    setting(search,Search,M), arg(22,S,Search),
    findall(PN/PA,M:'$aleph_global'(lazy_evaluate,lazy_evaluate(PN/PA)),LazyPreds),
    arg(23,S,LazyPreds),
    (M:'$aleph_global'(size,size(neg,NSize))-> arg(24,S,NSize); arg(24,S,0)),
    setting(openlist,OSize,M), arg(25,S,OSize),
    setting(check_redundant,RCheck,M), arg(26,S,RCheck),
    (M:'$aleph_sat'(eq,Eq) -> arg(27,S,Eq); arg(27,S,false)),
    (M:'$aleph_sat'(hovars,HOVars) -> arg(28,S,HOVars); arg(28,S,_HOVars)),
    setting(prooftime,PTime,M), arg(29,S,PTime),
    setting(construct_bottom,CBott,M), arg(30,S,CBott),
    % arg 31: variables of the head literal (literal 1), [] if absent
    (get_ovars1(false,1,HIVars,M) -> arg(31,S,HIVars); arg(31,S,[])),
    setting(language,Lang,M), arg(32,S,Lang),
    setting(splitvars,Split,M), arg(33,S,Split),
    setting(proof_strategy,Proof,M), arg(34,S,Proof),
    setting(portray_search,VSearch,M), arg(35,S,VSearch),
    setting(searchtime,Time,M), arg(36,S,Time),
    setting(optimise_clauses,Optim,M), arg(37,S,Optim),
    setting(newvars,NewV,M), arg(38,S,NewV),
    % BUG FIX: the else-branch was "arg(39,S,false,M)" - arg/4 does not
    % exist, so building the settings vector raised an existence error
    % whenever rls_type was unset.
    (setting(rls_type,RlsType,M) -> arg(39,S,RlsType);arg(39,S,false)),
    setting(minposfrac,MinPosFrac,M), arg(40,S,MinPosFrac),
    (setting(recursion,_Recursion,M) -> true; _Recursion = false),
    prolog_type(Prolog), arg(41,S,Prolog),
    setting(interactive,Interactive,M), arg(42,S,Interactive),
    setting(lookahead,LookAhead,M), arg(43,S,LookAhead),
    (setting(construct_features,Features,M)-> arg(44,S,Features); arg(44,S,false)),
    setting(max_features,FMax,M), arg(45,S,FMax),
    setting(subsample,SS,M), arg(46,S,SS),
    setting(subsamplesize,SSize,M), arg(47,S,SSize).
% stop search from proceeding if certain
% conditions are reached. These are:
% . minacc and minpos values reached in rrr search
% . best hypothesis has accuracy 1.0 if evalfn=accuracy
% . best hypothesis covers all positive examples
% discontinue_search(+S,+Best,+Nodes,+Module)
% Succeeds when the search should stop now:
%  - rrr search reached its minpos and minscore targets;
%  - the node limit was reached;
%  - the feature limit was reached (feature construction);
%  - the best hypothesis has accuracy 1.0 (evalfn = accuracy);
%  - the best hypothesis covers all positive examples.
discontinue_search(S,[P,_,_,F|_]/_,_,_M):-
    arg(39,S,RlsType),
    RlsType = rrr,
    arg(13,S,MinPos),
    P >= MinPos,
    arg(19,S,MinScore),
    F >= MinScore, !.
discontinue_search(S,_,Nodes,_M):-
    arg(1,S,MaxNodes),
    Nodes >= MaxNodes, !,
    p_message('node limit reached').
discontinue_search(S,_,_,M):-
    arg(44,S,Features),
    Features = true,
    arg(45,S,FMax),
    M:'$aleph_search'(last_good,LastGood),
    LastGood >= FMax, !,
    p_message('feature limit reached').
discontinue_search(S,[_,_,_,F|_]/_,_,_M):-
    arg(4,S,_/Evalfn),
    Evalfn = accuracy,
    F = 1.0, !.
discontinue_search(S,Best,_,_M):-
    arg(2,S,Explore),
    Explore = false,
    arg(4,S,_/Evalfn),
    Evalfn \= user,
    Evalfn \= posonly,
    arg(22,S,Search),
    Search \= ic,
    Best = [P|_]/_,
    % arg 16 is the total number of positives: full coverage reached
    arg(16,S,P).
% update_max_head_count(+Count,+Last,+Module)
% Scan search nodes Last..1 for the head literal with the largest
% positive cover; the running maximum ends up in
% '$aleph_local'(max_head_count,_) and each candidate head literal is
% recorded as '$aleph_local'(head_lit,_).
update_max_head_count(N,0,M):-
    retractall(M:'$aleph_local'(max_head_count,_)),
    asserta(M:'$aleph_local'(max_head_count,N)), !.
update_max_head_count(Count,Last,M):-
    M:'$aleph_search_node'(Last,LitNum,_,_,PosCover,_,_,_), !,
    asserta(M:'$aleph_local'(head_lit,LitNum)),
    % BUG FIX: interval_count was called without the module argument;
    % every other call site in this file uses interval_count/3 with M
    % (e.g. theorysearch/3).
    interval_count(PosCover,N,M),
    Next is Last - 1,
    (N > Count -> update_max_head_count(N,Next,M);
        update_max_head_count(Count,Next,M)).
update_max_head_count(Count,Last,M):-
    Next is Last - 1,
    update_max_head_count(Count,Next,M).
% expand(+RefineOp,+S,+NodeRef,-Node,-Path,-Length,-Descendents,
%        -PosCover,-NegCover,-OVars,-PrefixClause,-PrefixTV,-PrefixLen,+Module)
% Expand a search node. Without a refinement operator (first clause)
% the node's literal is appended to its path, the partial clause is
% rebuilt, and those dependants whose inputs are available become the
% successors. With a refinement operator (second clause) the node is
% consumed and the covers are (re)sampled.
expand(false,S,NodeRef,NodeRef,Path1,Length,Descendents,PosCover,NegCover,OVars,C,TV,CL,M):-
    !,
    M:'$aleph_search_node'(NodeRef,LitNum,Path,Length/_,PCover,NCover,OVars,_),
    arg(46,S,SSample),
    (SSample = false -> PosCover = PCover, NegCover = NCover;
        get_sample_cover(S,PosCover,NegCover,M)),
    aleph_append([LitNum],Path,Path1),
    get_pclause(Path1,[],C,TV,CL,_,M),
    M:'$aleph_sat_litinfo'(LitNum,_,_,_,_,Dependents),
    % successors = dependants not already on the path, filtered by
    % availability of their input variables
    intersect1(Dependents,Path1,_,Succ),
    check_parents(Succ,OVars,Descendents,_,M).
expand(_,S,NodeRef,NodeRef,Path1,Length,[_],PosCover,NegCover,OVars,_,_,_,M):-
    retract(M:'$aleph_search_node'(NodeRef,_,Path1,Length/_,_,_,OVars,_)),
    get_sample_cover(S,PosCover,NegCover,M).
% get_sample_cover(+S,-PosCover,-NegCover,+Module)
% Determine the example intervals a clause is evaluated against:
% greedy search restricts to the positives still uncovered; posonly
% evaluation uses random examples in place of negatives; subsampling
% (arg 46) draws a fixed-size sample from each set.
get_sample_cover(S,PosCover,NegCover,M):-
    arg(5,S,Greedy),
    (Greedy = true ->
        M:'$aleph_global'(atoms_left,atoms_left(pos,PCover));
        arg(16,S,PSize),
        PCover = [1-PSize]),
    arg(4,S,_/Evalfn),
    (Evalfn = posonly ->
        M:'$aleph_global'(atoms_left,atoms_left(rand,NCover));
        arg(24,S,NSize),
        NCover = [1-NSize]),
    arg(46,S,SSample),
    (SSample = false -> PosCover = PCover, NegCover = NCover;
        arg(47,S,SampleSize),
        interval_sample(SampleSize,PCover,PosCover,M),
        interval_sample(SampleSize,NCover,NegCover,M)).
% get_ovars(+Lits,+K,+Acc,-Vars,+Module)
% Accumulate the output variables of each literal in Lits (K selects
% the bottom-clause variant; false means the current bottom clause).
get_ovars([],_,V,V,_M).
get_ovars([LitNum|Lits],K,VarsSoFar,Vars,M):-
    get_ovars1(K,LitNum,OVars,M),
    aleph_append(VarsSoFar,OVars,Vars1),
    get_ovars(Lits,K,Vars1,Vars,M).
% get_ovars1(+K,+LitNum,-OVars,+Module)
% Output variables of one literal: use the '$aleph_sat_ovars' cache
% when available, otherwise recompute from the literal's atom and its
% output argument positions.
get_ovars1(false,LitNum,OVars,M):-
    M:'$aleph_sat_ovars'(LitNum,OVars), !.
get_ovars1(false,LitNum,OVars,M):-
    !,
    M:'$aleph_sat_litinfo'(LitNum,_,Atom,_,O,_),
    get_vars(Atom,O,OVars).
get_ovars1(K,LitNum,OVars,M):-
    M:'$aleph_sat_ovars'(LitNum,K,OVars), !.
get_ovars1(K,LitNum,OVars,M):-
    M:'$aleph_sat_litinfo'(LitNum,K,_,Atom,_,O,_),
    get_vars(Atom,O,OVars).
% get set of vars at term-places specified
% get_vars(+Literal,+Args,-Vars)
% Set of variables (Aleph integer variable ids) at the specified
% term-places of Literal; transparent to negation.
get_vars(not(Literal),Args,Vars):-
    !,
    get_vars(Literal,Args,Vars).
get_vars(_,[],[]).
get_vars(Literal,[ArgNo|Args],Vars):-
    % an arg spec may be Pos/Type or a bare position
    (ArgNo = Pos/_ -> true; Pos = ArgNo),
    tparg(Pos,Literal,Term),
    get_vars_in_term([Term],TV1),
    get_vars(Literal,Args,TV2),
    update_list(TV2,TV1,Vars).
% get_vars_in_term(+Terms,-Vars)
% Collect the Aleph variable ids (plain integers) occurring in a list
% of terms, recursing through compound terms.
get_vars_in_term([],[]).
get_vars_in_term([Var|Terms],[Var|TVars]):-
    integer(Var), !,
    get_vars_in_term(Terms,TVars).
get_vars_in_term([Term|Terms],TVars):-
    Term =.. [_|Terms1],
    get_vars_in_term(Terms1,TV1),
    get_vars_in_term(Terms,TV2),
    update_list(TV2,TV1,TVars).
% get terms at term-places specified
% need not be variables
% get_argterms(+Literal,+Args,+Acc,-Terms)
% Terms (not necessarily variables) at the specified argument
% positions of Literal; duplicates are not re-added (update/3).
get_argterms(not(Literal),Args,TermsSoFar,Terms):-
    !,
    get_argterms(Literal,Args,TermsSoFar,Terms).
get_argterms(_,[],Terms,Terms).
get_argterms(Literal,[ArgNo|Args],TermsSoFar,Terms):-
    (ArgNo = Pos/_ -> true; Pos = ArgNo),
    tparg(Pos,Literal,Term),
    update(TermsSoFar,Term,T1),
    get_argterms(Literal,Args,T1,Terms).
% get list of terms at arg positions specified
% get_args(+Literal,+Args,+Acc,-Terms)
% Like get_argterms/4 but keeps every occurrence (no duplicate
% elimination) - needed to detect co-referencing variables.
get_args(not(Literal),Args,TermsSoFar,Terms):-
    !,
    get_args(Literal,Args,TermsSoFar,Terms).
get_args(_,[],Terms,Terms).
get_args(Literal,[ArgNo|Args],TermsSoFar,Terms):-
    (ArgNo = Pos/_ -> true; Pos = ArgNo),
    tparg(Pos,Literal,Term),
    get_args(Literal,Args,[Term|TermsSoFar],Terms).
% get_ivars(+Lits,+K,+Acc,-Vars,+Module)
% Accumulate the input variables of each literal in Lits; mirror of
% get_ovars/5.
get_ivars([],_,V,V,_M).
get_ivars([LitNum|Lits],K,VarsSoFar,Vars,M):-
    get_ivars1(K,LitNum,IVars,M),
    aleph_append(VarsSoFar,IVars,Vars1),
    get_ivars(Lits,K,Vars1,Vars,M).
% get_ivars1(+K,+LitNum,-IVars,+Module)
% Input variables of one literal, from the '$aleph_sat_ivars' cache
% when available, else recomputed from the literal's input positions.
get_ivars1(false,LitNum,IVars,M):-
    M:'$aleph_sat_ivars'(LitNum,IVars), !.
get_ivars1(false,LitNum,IVars,M):-
    !,
    M:'$aleph_sat_litinfo'(LitNum,_,Atom,I,_,_),
    get_vars(Atom,I,IVars).
get_ivars1(K,LitNum,IVars,M):-
    M:'$aleph_sat_ivars'(LitNum,K,IVars), !.
get_ivars1(K,LitNum,IVars,M):-
    M:'$aleph_sat_litinfo'(LitNum,K,_,Atom,I,_,_),
    get_vars(Atom,I,IVars).
% check_parents(+Lits,+OutputVars,-DLits,-Rest,+Module)
% Split Lits into those whose input variables are all available in
% OutputVars (DLits - legal successors) and the remainder (Rest).
check_parents([],_,[],[],_M).
check_parents([LitNum|Lits],OutputVars,[LitNum|DLits],Rest,M):-
    get_ivars1(false,LitNum,IVars,M),
    aleph_subset1(IVars,OutputVars), !,
    check_parents(Lits,OutputVars,DLits,Rest,M).
check_parents([LitNum|Lits],OutputVars,DLits,[LitNum|Rest],M):-
    check_parents(Lits,OutputVars,DLits,Rest,M), !.
% get_gains(+S,+Last,+Best,+Path,+C,+TV,+L,+Min,+Succs,+Pos,+Neg,
%           +OVars,+E,-Last1,-NextBest,+Module)
% Score each successor literal in turn, threading the best label and
% node count; stops early when the search is discontinued, and skips
% a successor whose evaluation fails (last clause).
get_gains(S,Last,Best,_,_,_,_,_,_,_,_,_,_,Last,Best,M):-
    discontinue_search(S,Best,Last,M), !.
get_gains(_,Last,Best,_,_,_,_,_,[],_,_,_,_,Last,Best,_M):- !.
get_gains(S,Last,Best,Path,C,TV,L,Min,[L1|Succ],Pos,Neg,OVars,E,Last1,NextBest,M):-
    get_gain(S,upper,Last,Best,Path,C,TV,L,Min,L1,Pos,Neg,OVars,E,Best1,Node1,M), !,
    get_gains(S,Node1,Best1,Path,C,TV,L,Min,Succ,Pos,Neg,OVars,E,Last1,NextBest,M).
get_gains(S,Last,BestSoFar,Path,C,TV,L,Min,[_|Succ],Pos,Neg,OVars,E,Last1,NextBest,M):-
    get_gains(S,Last,BestSoFar,Path,C,TV,L,Min,Succ,Pos,Neg,OVars,E,Last1,NextBest,M),
    !.
% get_sibgains(+S,+Node,+Last,+Best,+Path,+C,+TV,+L,+Min,+Pos,+Neg,
%              +OVars,+E,-Last1,-NextBest,+Module)
% After expanding Node, also evaluate the gains of its sibling nodes
% from the same expansion; LVars are the head-literal variables not
% yet covered by the current clause's output variables.
get_sibgains(S,Node,Last,Best,Path,C,TV,L,Min,Pos,Neg,OVars,E,Last1,NextBest,M):-
    M:'$aleph_search_node'(Node,LitNum,_,_,_,_,_,OldE),
    M:'$aleph_search_expansion'(OldE,_,_,LastSib),
    M:'$aleph_sat_litinfo'(LitNum,_,_,_,_,Desc),
    Node1 is Node + 1,
    arg(31,S,HIVars),
    aleph_delete_list(HIVars,OVars,LVars),
    get_sibgain(S,LVars,LitNum,Desc,Node1,LastSib,Last,
        Best,Path,C,TV,L,Min,Pos,Neg,OVars,E,NextBest,Last1,M), !.
% get_sibgain(+S,+LVars,+LitNum,+Desc,+Node,+LastSib,+Last,+Best,
%             +Path,+C,+TV,+L,+Min,+Pos,+Neg,+OVars,+E,-LBest,-LNode,+Module)
% Walk sibling nodes Node..LastSib, scoring each with the appropriate
% bound: 'upper' when its variables interact with the still-uncovered
% head variables (LVars), 'exact' otherwise.
get_sibgain(S,_,_,_,Node,Node1,Last,Best,_,_,_,_,_,_,_,_,_,Best,Last,M):-
    (Node > Node1;
        discontinue_search(S,Best,Last,M)), !.
get_sibgain(S,LVars,LitNum,Desc,Node,LastSib,Last,Best,Path,C,TV,L,Min,Pos,Neg,OVars,E,LBest,LNode,M):-
    arg(23,S,Lazy),
    get_sibpncover(Lazy,Node,Desc,Pos,Neg,Sib1,PC,NC,M),
    lazy_evaluate([Sib1],Lazy,Path,PC,NC,[Sib],M),
    get_ivars1(false,Sib,SibIVars,M),
    (intersects(SibIVars,LVars) -> Flag = upper;
        get_ovars1(false,Sib,SibOVars,M),
        (intersects(SibOVars,LVars) -> Flag = upper; Flag = exact)),
    get_gain(S,Flag,Last,Best,Path,C,TV,L,Min,Sib,PC,NC,OVars,E,Best1,Node1,M), !,
    NextNode is Node + 1,
    get_sibgain(S,LVars,LitNum,Desc,NextNode,LastSib,Node1,Best1,Path,C,TV,L,
        Min,Pos,Neg,OVars,E,LBest,LNode,M), !.
get_sibgain(S,LVars,LitNum,Desc,Node,LastSib,Last,Best,Path,C,TV,L,Min,Pos,Neg,OVars,E,Best1,Node1,M):-
    NextNode is Node + 1,
    get_sibgain(S,LVars,LitNum,Desc,NextNode,LastSib,Last,Best,Path,C,TV,L,
        Min,Pos,Neg,OVars,E,Best1,Node1,M), !.
% NOTE(review): the clause below has arity 19 (it lacks the Desc
% argument) while every caller uses arity 20, so it can never be
% selected - it looks like dead code left over from adding Desc.
get_sibgain(S,LVars,LitNum,Node,LastSib,Last,Best,Path,C,TV,L,Min,Pos,Neg,OVars,E,Best1,Node1,M):-
    NextNode is Node + 1,
    get_sibgain(S,LVars,LitNum,NextNode,LastSib,Last,Best,Path,C,TV,L,Min,Pos,Neg,
        OVars,E,Best1,Node1,M), !.
% get_sibpncover(+Lazy,+NodeNum,+Desc,+Pos,+Neg,-Sib,-PC,-NC,+Module)
% Retrieve a sibling literal (not a descendant of the expanded one)
% together with the covers to evaluate it against; lazily-evaluated
% predicates keep the full covers instead of the cached intersection.
get_sibpncover(Lazy,NodeNum,Desc,Pos,Neg,Sib,PC,NC,M):-
    M:'$aleph_search_node'(NodeNum,Sib,_,_,Pos1,Neg1,_,_),
    M:'$aleph_sat_litinfo'(Sib,_,Atom,_,_,_),
    \+(aleph_member1(Sib,Desc)),
    functor(Atom,Name,Arity),
    (aleph_member1(Name/Arity,Lazy) ->
        PC = Pos, NC = Neg;
        calc_intersection(Pos,Pos1,PC),
        calc_intersection(Neg,Neg1,NC)).
% in some cases, it is possible to simply use the intersection of
% covers cached. The conditions under which this is possible was developed
% in discussions with James Cussens
% calc_intersection(+A1,+A2,-A)
% Intersection of two example-cover descriptions. A cover may carry a
% trailing "uncertain" interval (written Cover/[B-L]); in that case
% the certain parts are intersected and a conservative uncertain
% interval is computed (see the note above on when this is sound).
calc_intersection(A1/[B1-L1],A2/[B2-L2],A/[B-L]):-
    !,
    intervals_intersection(A1,A2,A),
    B3 is max(B1,B2),
    (intervals_intersects(A1,[B2-L2],X3-_) -> true; X3 = B3),
    (intervals_intersects(A2,[B1-L1],X4-_) -> true; X4 = B3),
    B4 is min(X3,B3),
    B is min(X4,B4),
    L is max(L1,L2).
calc_intersection(A1/_,A2,A):-
    !,
    intervals_intersection(A1,A2,A).
calc_intersection(A1,A2/_,A):-
    !,
    intervals_intersection(A1,A2,A).
calc_intersection(A1,A2,A):-
    intervals_intersection(A1,A2,A).
% get_gain(+S,+Flag,+Last,+Best,+Path,+C,+TV,+Len,+MinLen,+L1,+Pos,+Neg,
%          +OVars,+E,-Best1,-Last1,+Module)
% Evaluate one candidate literal L1 added to the current partial
% clause: build the extended clause, check split/redundancy/length
% constraints, record it temporarily as '$aleph_search'(pclause,...)
% for the prover, then score it via get_gain1/15. With a refinement
% operator set, delegates to get_refine_gain/12. The final clause is
% a catch-all so evaluation failure never fails the search loop.
get_gain(S,_,Last,Best,Path,_,_,_,MinLength,_,Pos,Neg,OVars,E,Best1,NewLast,M):-
    arg(3,S,RefineOp),
    RefineOp \= false , !,
    get_refine_gain(S,Last,Best,Path,MinLength,Pos,Neg,OVars,E,Best1,NewLast,M).
get_gain(S,Flag,Last,Best/Node,Path,C,TV,Len1,MinLen,L1,Pos,Neg,OVars,E,Best1,Last1,M):-
    arg(26,S,RCheck),
    arg(33,S,SplitVars),
    retractall(M:'$aleph_search'(covers,_)),
    retractall(M:'$aleph_search'(coversn,_)),
    get_pclause([L1],TV,Lit1,_,Len2,LastD,M),
    split_ok(SplitVars,C,Lit1), !,
    extend_clause(C,Lit1,Clause),
    % user-supplied redundancy check (redundant/2), when enabled
    (RCheck = true ->
        (redundant(Clause,Lit1,M) -> fail; true);
        true),
    CLen is Len1 + Len2,
    length_ok(S,MinLen,CLen,LastD,EMin,ELength),
    % arg(41,S,Prolog),
    split_clause(Clause,Head,Body),
    % (Prolog = yap ->
    %     assertz(M:'$aleph_search'(pclause,pclause(Head,Body)),DbRef);
    %     assertz(M:'$aleph_search'(pclause,pclause(Head,Body)))),
    assertz(M:'$aleph_search'(pclause,pclause(Head,Body))),
    arg(6,S,Verbosity),
    (Verbosity >= 1 ->
        pp_dclause(Clause,M);
        true),
    get_gain1(S,Flag,Clause,CLen,EMin/ELength,Last,Best/Node,
        Path,L1,Pos,Neg,OVars,E,Best1,M),
    % (Prolog = yap ->
    %     erase(DbRef);
    %     retractall(M:'$aleph_search'(pclause,_))),
    retractall(M:'$aleph_search'(pclause,_)),
    Last1 is Last + 1.
get_gain(_,_,Last,Best,_,_,_,_,_,_,_,_,_,_,Best,Last,_M).
% get_refine_gain(+S,+Last,+Best/Node,+Path,+MinLength,+Pos,+Neg,
%                 +OVars,+E,-Best1,-NewLast,+Module)
% Gain computation when a refinement operator is in use: rls search
% refines via rls_refine/4; otherwise the user's refinement operator
% is applied and matched against the bottom clause. The final clause
% returns the recorded best/last refinement when refinement fails.
get_refine_gain(S,Last,Best/Node,Path,MinLength,Pos,Neg,OVars,E,Best1,NewLast,M):-
    arg(3,S,RefineOp),
    RefineOp = rls,
    refine_prelims(Best/Node,Last,M),
    rls_refine(clauses,Path,Path1,M),
    get_refine_gain1(S,Path1,MinLength,Pos,Neg,OVars,E,Best1,NewLast,M),
    !.
get_refine_gain(S,Last,Best/Node,Path,MinLength,Pos,Neg,OVars,E,Best1,NewLast,M):-
    arg(3,S,RefineOp),
    RefineOp \= rls,
    refine_prelims(Best/Node,Last,M),
    Path = CL-[Example,Type,_,Clause],
    arg(30,S,ConstructBottom),
    arg(43,S,LookAhead),
    get_user_refinement(RefineOp,LookAhead,Clause,R,_,M),
    match_bot(ConstructBottom,R,R1,LitNums,M),
    Path1 = CL-[Example,Type,LitNums,R1],
    get_refine_gain1(S,Path1,MinLength,Pos,Neg,OVars,E,Best1,NewLast,M),
    !.
get_refine_gain(_,_,_,_,_,_,_,_,_,Best,Last,M):-
    retract(M:'$aleph_search'(best_refinement,best_refinement(Best))),
    retract(M:'$aleph_search'(last_refinement,last_refinement(Last))).
% get_theory_gain(+S,+Last,+BestSoFar,+T0,+Pos,+Neg,+P,+N,-Best1,-NewLast,+Module)
% Evaluate one randomized refinement of a whole theory: refine via
% rls_refine/4, lazily evaluate if needed, record its clauses for the
% prover, score with get_theory_gain1/10 and update the best/last
% refinement bookkeeping.
get_theory_gain(S,Last,BestSoFar,T0,Pos,Neg,P,N,Best1,NewLast,M):-
    refine_prelims(BestSoFar,Last,M),
    arg(3,S,RefineOp),
    (RefineOp = rls -> rls_refine(theories,T0,T1,M); fail),
    arg(23,S,LazyPreds),
    (LazyPreds = [] -> Theory = T1;
        lazy_evaluate_theory(T1,LazyPreds,Pos,Neg,Theory,M)),
    retract(M:'$aleph_search'(best_refinement,best_refinement(OldBest))),
    retract(M:'$aleph_search'(last_refinement,last_refinement(OldLast))),
    arg(6,S,Verbosity),
    (Verbosity >= 1 ->
        p_message('new refinement'),
        pp_dclauses(Theory,M);
        true),
    record_pclauses(Theory,M),
    get_theory_gain1(S,Theory,OldLast,OldBest,Pos,Neg,P,N,Best1,M),
    retractall(M:'$aleph_search'(pclause,_)),
    NewLast is OldLast + 1,
    asserta(M:'$aleph_search'(last_refinement,last_refinement(NewLast))),
    asserta(M:'$aleph_search'(best_refinement,best_refinement(Best1))),
    % on discontinuation consume the bookkeeping and succeed;
    % otherwise fail back into rls_refine/4 for the next refinement
    (discontinue_search(S,Best1,NewLast,M) ->
        retract(M:'$aleph_search'(last_refinement,last_refinement(_))),
        retract(M:'$aleph_search'(best_refinement,best_refinement(_)));
        fail),
    !.
get_theory_gain(_,_,_,_,_,_,_,_,Best,Last,M):-
    M:'$aleph_search'(best_refinement,best_refinement(Best)),
    M:'$aleph_search'(last_refinement,last_refinement(Last)).
% refine_prelims(+Best,+Last,+Module)
% Reset the bookkeeping used by refinement-based search: record the
% best refinement so far and the index of the last refinement tried.
refine_prelims(Best,Last,M):-
    retractall(M:'$aleph_search'(best_refinement,_)),
    asserta(M:'$aleph_search'(best_refinement,best_refinement(Best))),
    retractall(M:'$aleph_search'(last_refinement,_)),
    asserta(M:'$aleph_search'(last_refinement,last_refinement(Last))).
% get_refine_gain1(+S,+Path,+MinLength,+Pos,+Neg,+OVars,+E,-Best1,-NewLast,+Module)
% Score a single refined clause: lazily evaluate it if required,
% record it for the prover (Yap keeps a db reference for fast
% erasure), then delegate to get_gain1/15 with the 'upper' bound and
% update the best/last refinement bookkeeping.
get_refine_gain1(S,Path,MinLength,Pos,Neg,OVars,E,Best1,NewLast,M):-
    arg(23,S,LazyPreds),
    Path = CL-[Example,Type,Ids,Refine],
    (LazyPreds = [] -> Ids1 = Ids, Clause = Refine;
        lazy_evaluate_refinement(Ids,Refine,LazyPreds,Pos,Neg,Ids1,Clause,M)),
    retractall(M:'$aleph_search'(covers,_)),
    retractall(M:'$aleph_search'(coversn,_)),
    Path1 = CL-[Example,Type,Ids1,Clause],
    split_clause(Clause,Head,Body),
    nlits(Body,CLength0),
    CLength is CLength0 + 1,
    length_ok(S,MinLength,CLength,0,EMin,ELength),
    arg(41,S,Prolog),
    split_clause(Clause,Head,Body),
    (Prolog = yap ->
        assertz(M:'$aleph_search'(pclause,pclause(Head,Body)),DbRef);
        assertz(M:'$aleph_search'(pclause,pclause(Head,Body)))),
    retract(M:'$aleph_search'(best_refinement,best_refinement(OldBest))),
    retract(M:'$aleph_search'(last_refinement,last_refinement(OldLast))),
    arg(6,S,Verbosity),
    (Verbosity >= 1 ->
        p_message('new refinement'),
        pp_dclause(Clause,M);
        true),
    once(get_gain1(S,upper,Clause,CLength,EMin/ELength,OldLast,OldBest,
        Path1,[],Pos,Neg,OVars,E,Best1,M)),
    (Prolog = yap ->
        erase(DbRef);
        retractall(M:'$aleph_search'(pclause,_))),
    NewLast is OldLast + 1,
    asserta(M:'$aleph_search'(last_refinement,last_refinement(NewLast))),
    asserta(M:'$aleph_search'(best_refinement,best_refinement(Best1))),
    % on discontinuation consume the bookkeeping and succeed;
    % otherwise fail back to the caller for the next refinement
    (discontinue_search(S,Best1,NewLast,M) ->
        retract(M:'$aleph_search'(last_refinement,last_refinement(_))),
        retract(M:'$aleph_search'(best_refinement,best_refinement(_)));
        fail),
    !.
% get_theory_gain1(+S,+Theory,+Last,+Best,+Pos,+Neg,+P,+N,-Best1,+Module)
% Compute the label of a candidate theory: check constraints and the
% language/newvars bounds, prove the positive and negative examples,
% build [Correct,Incorrect,Length], then update the open list and the
% best theory. The second clause keeps the old best on any failure.
% NOTE(review): theory_lang_ok is called here with arity 3 but defined
% below with arity 2 - confirm a /3 variant exists elsewhere in the file.
get_theory_gain1(S,Theory,Last,Best,Pos,Neg,P,N,Best1,M):-
    (M:aleph_false -> p_message('constraint violated'),
        Contradiction = true;
        Contradiction = false),
    Contradiction = false,
    Node1 is Last + 1,
    arg(32,S,Lang),
    theory_lang_ok(Theory,Lang,M),
    arg(38,S,NewVars),
    theory_newvars_ok(Theory,NewVars),
    arg(14,S,Depth),
    arg(29,S,Time),
    arg(34,S,Proof),
    prove(Depth/Time/Proof,pos,(X:-X),Pos,PCvr,TP,M),
    prove(Depth/Time/Proof,neg,(X:-X),Neg,NCvr,FP,M),
    arg(4,S,_/Evalfn),
    % TP = true positives, FP = false positives over P pos / N neg examples
    Correct is TP + (N - FP),
    Incorrect is FP + (P - TP),
    length(Theory,L),
    Label = [Correct,Incorrect,L],
    complete_label(Evalfn,Theory,Label,Label1,M),
    get_search_keys(heuristic,Label1,SearchKeys),
    arg(6,S,Verbosity),
    (Verbosity >= 1 -> p_message(Correct/Incorrect); true),
    asserta(M:'$aleph_search_node'(Node1,Theory,[],0,PCvr,NCvr,[],0)),
    update_open_list(SearchKeys,Node1,Label1,M),
    update_best_theory(S,Theory,PCvr,NCvr,Best,Label1/Node1,Best1,M), !.
get_theory_gain1(_,_,_,Best,_,_,_,_,Best,_M).
% get_gain1(+S,+Flag,+C,+CL,+EMin/EL,+Last,+Best,+Path,+L1,+Pos,+Neg,
%           +OVars,+E,-Best1,+Module)
% Core evaluation of a single candidate clause.
%  Clause 1: user pruning abandons the branch (a dead node may still be
%            recorded so the branch is never reconsidered).
%  Clause 2: the clause is in the prune cache - keep the old best.
%  Clause 3: full evaluation - constraint check, caching, language and
%            newvars bounds, proving the examples, labelling, open-list
%            and best-clause updates.
%  Clause 4: catch-all, keeps the previous best.
get_gain1(S,_,C,CL,_,Last,Best,Path,_,Pos,Neg,_,E,Best,M):-
    abandon_branch(S,C,M), !,
    Node1 is Last + 1,
    arg(3,S,RefineOp),
    arg(7,S,ClauseLength),
    arg(35,S,VSearch),
    (ClauseLength = CL -> true;
        (RefineOp = false ->
            asserta(M:'$aleph_search_node'(Node1,0,Path,0,Pos,Neg,[],E));
            true)),
    (VSearch = true ->
        asserta(M:'$aleph_search'(bad,Node1)),
        asserta(M:'$aleph_search_node'(Node1,C));
        true).
get_gain1(S,_,Clause,_,_,_,Best,_,_,_,_,_,_,Best,M):-
    arg(8,S,Caching),
    Caching = true,
    skolemize(Clause,SHead,SBody,0,_),
    M:'$aleph_search_prunecache'([SHead|SBody]), !,
    arg(6,S,Verbosity),
    (Verbosity >= 1 -> p_message('in prune cache'); true).
get_gain1(S,Flag,C,CL,EMin/EL,Last,Best/Node,Path,L1,Pos,Neg,OVars,E,Best1,M):-
    split_clause(C,Head,Body),
    arg(22,S,Search),
    % constraint check (skipped for ic search, which uses aleph_false itself)
    ((Search \== ic, M:aleph_false) -> p_message('constraint violated'),
        Contradiction = true;
        Contradiction = false),
    Node1 is Last + 1,
    arg(8,S,Caching),
    (Caching = true -> arg(15,S,CCLim),
        get_cache_entry(CCLim,C,Entry);
        Entry = false),
    arg(35,S,VSearch),
    (VSearch = true ->
        asserta(M:'$aleph_search_node'(Node1,C));
        true),
    arg(3,S,RefineOp),
    refinement_ok(RefineOp,Entry,M),
    arg(32,S,Lang),
    lang_ok((Head:-Body),Lang),
    arg(38,S,NewVars),
    newvars_ok((Head:-Body),NewVars),
    arg(34,S,Proof),
    arg(37,S,Optim),
    rewrite_clause(Proof,Optim,(Head:-Body),(Head1:-Body1)),
    % ic search only checks constraints; otherwise prove the examples
    (Search = ic ->
        PCvr = [],
        Label = [_,_,CL],
        ccheck(S,(Head1:-Body1),NCvr,Label,M);
        prove_examples(S,Flag,Contradiction,Entry,Best,CL,EL,
            (Head1:-Body1),Pos,Neg,PCvr,NCvr,Label,M)
    ),
    arg(4,S,SearchStrat/Evalfn),
    arg(40,S,MinPosFrac),
    ((MinPosFrac > 0.0 ; Evalfn = wracc) ->
        reset_clause_prior(S,Head1,M);
        true
    ),
    arg(46,S,SSample),
    (SSample = true ->
        arg(47,S,SampleSize),
        estimate_label(SampleSize,Label,Label0,M);
        Label0 = Label),
    complete_label(Evalfn,C,Label0,Label1,M),
    compression_ok(Evalfn,Label1),
    get_search_keys(SearchStrat,Label1,SearchKeys),
    arg(6,S,Verbosity),
    arg(10,S,LCost),
    arg(11,S,LContra),
    ((Verbosity >= 1, LContra = false, LCost = false) ->
        Label = [A,B|_],
        p_message(A/B);
        true),
    arg(7,S,ClauseLength),
    (RefineOp = false ->
        get_ovars1(false,L1,OVars1,M),
        aleph_append(OVars1,OVars,OVars2);
        true),
    % record the node unless it sits at the clause-length bound
    ((ClauseLength=CL, RefineOp = false) -> true;
        (RefineOp = false ->
            asserta(M:'$aleph_search_node'(Node1,L1,Path,EMin/EL,PCvr,
                NCvr,OVars2,E));
            asserta(M:'$aleph_search_node'(Node1,0,Path,EMin/EL,PCvr,
                NCvr,[],E))),
        update_open_list(SearchKeys,Node1,Label1,M)),
    (VSearch = true ->
        asserta(M:'$aleph_search'(label,label(Node1,Label)));
        true),
    % update the best clause only when acceptable (clause_ok1/3)
    (((RefineOp \= false,Contradiction=false);
        (arg(28,S,HOVars),clause_ok1(Contradiction,HOVars,OVars2))) ->
        update_best(S,C,PCvr,NCvr,Best/Node,Label1/Node1,Best1,M);
        Best1=Best/Node),
    !.
get_gain1(_,_,_,_,_,_,Best,_,_,_,_,_,_,Best,_M).
% abandon_branch(+S,+Clause,+Module)
% Succeeds iff user-defined pruning is enabled (arg 9 of the settings
% vector) and the user's prune/1 hook accepts Clause; reports when
% the verbosity setting is at least 1.
abandon_branch(S,Clause,M):-
    arg(9,S,true),
    M:prune(Clause), !,
    arg(6,S,Verbose),
    (Verbose >= 1 -> p_message(pruned); true).
% clause_ok1(+Contradiction,+HeadOVars,+ClauseOVars)
% A clause may become the new best only if it caused no constraint
% violation and every head output variable appears among the clause's
% output variables.
clause_ok1(false,HeadVars,ClauseVars):-
    aleph_subset1(HeadVars,ClauseVars).
% check to see if a clause is acceptable
% unacceptable if it fails noise, minacc, or minpos settings
% unacceptable if it fails search or language constraints
% clause_ok(+Clause,+Label,+M)
% Succeeds when the clause passes noise/minacc/minpos settings, the
% user prune/1 predicate, and the language/newvars constraints.
% Clauses below are ordered rejection tests: each fails the predicate
% via cut-fail when its test trips.
% first clause is a disabled hook (guarded by false)
clause_ok(_,_,_M):-
false, !, fail.
% reject on noise / minimum accuracy / minimum positives
clause_ok(_,Label,M):-
extract_pos(Label,P),
extract_neg(Label,N),
Acc is P/(P+N),
setting(noise,Noise,M),
setting(minacc,MinAcc,M),
setting(minpos,MinPos,M),
(N > Noise; Acc < MinAcc; P < MinPos), !, fail.
% reject if the user-defined prune/1 accepts the clause
clause_ok(Clause,_,M):-
M:prune(Clause), !, fail.
% reject if outside the language bound
clause_ok(Clause,_,M):-
setting(language,Lang,M),
\+ lang_ok(Clause,Lang), !, fail.
% reject if it introduces too many new body variables
clause_ok(Clause,_,M):-
setting(newvars,NewVars,M),
\+ newvars_ok(Clause,NewVars), !, fail.
% otherwise acceptable
clause_ok(_,_,_M).
% check to see if refinement has been produced before
% refinement_ok(+RefineOp,+Entry,+M)
% Fails (with a message) when the refinement identified by the cache
% Entry has already been evaluated; trivially true when no refinement
% operator is in use, under randomised search, or without a cache entry.
refinement_ok(false,_,_M):- !.
refinement_ok(rls,_,_M):- !.
refinement_ok(_,false,_M):- !.
% a pos or neg cover cached for Entry means we have seen it before
refinement_ok(_,Entry,M):-
(check_cache(Entry,pos,_,M); check_cache(Entry,neg,_,M)), !,
p_message('redundant refinement'),
fail.
refinement_ok(_,_,_M).
% specialised redundancy check with equality theory
% used only to check if equalities introduced by splitting vars make
% literal to be added redundant
% split_ok(+SplitVars,+Clause,+Lit)
% Fails (with a message) when adding Lit to Clause would be redundant
% under the equality theory introduced by variable splitting.
split_ok(false,_,_):- !.
split_ok(_,Clause,Lit):-
functor(Lit,Name,_),
Name \= '=',
% work on a copy so the redundancy check does not bind Clause/Lit
copy_term(Clause/Lit,Clause1/Lit1),
lit_redun(Lit1,Clause1), !,
p_message('redundant literal'), nl,
fail.
split_ok(_,_,_).
% lit_redun(+Lit,+Clause)
% Lit is redundant if it is identical (==) to some literal of Clause
% after executing any '='/2 goals encountered on the way.
lit_redun(Lit,(Head:-Body)):-
!,
lit_redun(Lit,(Head,Body)).
lit_redun(Lit,(L1,_)):-
Lit == L1, !.
lit_redun(Lit,(L1,L2)):-
!,
execute_equality(L1),
lit_redun(Lit,L2).
lit_redun(Lit,L):-
Lit == L.
% execute_equality(+Lit)
% call Lit if it is an '='/2 goal (binding variables); otherwise no-op
execute_equality(Lit):-
functor(Lit,'=',2), !,
Lit.
execute_equality(_).
% theory_lang_ok(+Theory,+Lang)
% Check that every clause of a theory (a list of _-[_,_,_,Clause]
% entries) satisfies the language bound Lang.
% Fixed: lang_ok/2 takes the clause first and the bound second (see
% its definition), and the recursive call must walk the theory tail —
% the original passed (Lang,Clause) and recursed on (Lang,T), so any
% non-empty theory failed unconditionally.
theory_lang_ok([],_).
theory_lang_ok([_-[_,_,_,Clause]|T],Lang):-
lang_ok(Clause,Lang),
theory_lang_ok(T,Lang).
% theory_newvars_ok(+Theory,+NewV)
% Check that every clause of a theory respects the newvars bound NewV.
% Fixed: newvars_ok/2 takes the clause first and the bound second (see
% its definition); the original called newvars_ok(NewV,Clause) with the
% arguments swapped, which cannot match newvars_ok/2's single clause.
theory_newvars_ok([],_).
theory_newvars_ok([_-[_,_,_,Clause]|T],NewV):-
newvars_ok(Clause,NewV),
theory_newvars_ok(T,NewV).
% lang_ok(+Clause,+N)
% Succeeds when no predicate symbol occurs more than N times in Clause;
% prints a message and fails otherwise.
lang_ok((Head:-Body),N):-
!,
(lang_ok(N,Head,Body) -> true;
p_message('outside language bound'),
fail).
% infinite bound: anything is acceptable
% (note: `N is inf` both evaluates and checks N against inf)
lang_ok(N,_,_):- N is inf, !.
lang_ok(N,Head,Body):-
get_psyms((Head,Body),PSymList),
lang_ok1(PSymList,N).
% newvars_ok(+Clause,+N)
% Succeeds when the clause introduces at most N body variables that do
% not appear in the head; prints a message and fails otherwise.
newvars_ok((Head:-Body),N):-
!,
(newvars_ok(N,Head,Body) -> true;
p_message('outside newvars bound'),
fail).
newvars_ok(N,_,_):- N is inf, !.
newvars_ok(N,Head,Body):-
vars_in_term([Head],[],HVars),
goals_to_list(Body,BodyL),
vars_in_term(BodyL,[],BVars),
% new variables = body variables not occurring in the head
aleph_ord_subtract(BVars,HVars,NewVars),
length(NewVars,N1),
N1 =< N.
% get_psyms(+Goals,-PredSyms)
% Collect the Name/Arity predicate symbols occurring in a conjunction
% of goals; the atom true contributes no symbol.
get_psyms((Goal,Rest),[Name/Arity|Syms]):-
!,
functor(Goal,Name,Arity),
get_psyms(Rest,Syms).
get_psyms(true,[]):- !.
get_psyms(Goal,[Name/Arity]):-
functor(Goal,Name,Arity).
% lang_ok1(+PredSyms,+N)
% Succeeds when no predicate symbol occurs more than N times in the
% list of predicate symbols of a clause.
lang_ok1([],_).
lang_ok1([Sym|Syms],N):-
length(Syms,Before),
aleph_delete_all(Sym,Syms,Remaining),
length(Remaining,After),
% occurrences of Sym = duplicates removed from the tail + this one
Occurrences is Before - After + 1,
Occurrences =< N,
lang_ok1(Remaining,N).
% rewrite_clause(+Proof,+Optimise,+Clause,-Clause1)
% Rewrite a clause before cover testing: under plain sld proof the
% clause is replaced by a trivial (X:-X) (the example itself is proved),
% under restricted_sld with optimisation on it is optimised, otherwise
% it is left unchanged.
rewrite_clause(sld,_,_,(X:-X)):- !.
rewrite_clause(restricted_sld,true,(Head:-Body),(Head1:-Body1)):-
!,
optimise((Head:-Body),(Head1:-Body1)).
rewrite_clause(_,_,Clause,Clause).
% record_pclauses(+Theory,+M)
% Assert each clause of a theory as a '$aleph_search'(pclause,...) fact.
record_pclauses([],_M).
record_pclauses([_-[_,_,_,Clause]|T],M):-
split_clause(Clause,Head,Body),
assertz(M:'$aleph_search'(pclause,pclause(Head,Body))),
record_pclauses(T,M).
% get pos/neg distribution of clause head
% reset_clause_prior(+S,+Head,+M)
% Ensure '$aleph_search'(clauseprior,...) holds the pos/neg prior for
% clauses with this head. Without a refinement operator the prior is
% computed once; otherwise it is memoised per (numbervar-ed) head in
% '$aleph_local' and re-installed in '$aleph_search'.
reset_clause_prior(S,Head,M):-
arg(3,S,Refine),
Refine = false, !,
(M:'$aleph_search'(clauseprior,_) -> true;
get_clause_prior(S,Head,Prior,M),
assertz(M:'$aleph_search'(clauseprior,Prior))
).
reset_clause_prior(S,Head,M):-
% ground a copy of the head so it can serve as a memo key
copy_term(Head,Head1),
numbervars(Head1,0,_),
(M:'$aleph_local'(clauseprior,prior(Head1,Prior)) ->
true;
get_clause_prior(S,Head,Prior,M),
assertz(M:'$aleph_local'(clauseprior,prior(Head1,Prior)))
),
retractall(M:'$aleph_search'(clauseprior,_)),
assertz(M:'$aleph_search'(clauseprior,Prior)).
% get_clause_prior(+S,+Head,-Total-[P-pos,N-neg],+M)
% Count how many pos and neg examples are covered by the bare clause
% (Head:-true); under greedy search only the remaining pos atoms count.
get_clause_prior(S,Head,Total-[P-pos,N-neg],M):-
arg(5,S,Greedy),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
(Greedy = true ->
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos));
M:'$aleph_global'(atoms,atoms(pos,Pos))
),
M:'$aleph_global'(atoms_left,atoms_left(neg,Neg)),
prove(Depth/Time/Proof,pos,(Head:-true),Pos,_,P,M),
prove(Depth/Time/Proof,neg,(Head:-true),Neg,_,N,M),
Total is P + N.
% get_user_refinement(+RefineOp,+L,+Clause,-Template,-0,+M)
% Obtain the next refinement of Clause: automatically generated
% (auto_refine) or from the user-supplied refine/2 predicate.
get_user_refinement(auto,L,Clause,Template,0,M):-
auto_refine(L,Clause,Template,M).
get_user_refinement(user,_,Clause,Template,0,M):-
M:refine(Clause,Template).
% match_bot(+ConstructBottom,+Clause,-Clause1,-Lits,+M)
% Map a clause onto literals of the (possibly lazily built) bottom
% clause, returning the matched clause and its literal numbers.
match_bot(false,Clause,Clause,[],_M).
match_bot(reduction,Clause,Clause1,Lits,M):-
match_lazy_bottom(Clause,Lits,M),
get_pclause(Lits,[],Clause1,_,_,_,M).
match_bot(saturation,Clause,Clause1,Lits,M):-
once(get_aleph_clause(Clause,AlephClause)),
match_bot_lits(AlephClause,[],Lits,M),
get_pclause(Lits,[],Clause1,_,_,_,M).
% match_bot_lits(+Lits,+SoFar,-LitNums,+M)
% Map each literal of a conjunction onto a distinct bottom-clause
% literal number (backtrackable; duplicates are rejected).
match_bot_lits((Lit,Lits),SoFar,[LitNum|LitNums],M):-
!,
match_bot_lit(Lit,LitNum,M),
\+(aleph_member(LitNum,SoFar)),
match_bot_lits(Lits,[LitNum|SoFar],LitNums,M).
match_bot_lits(Lit,SoFar,[LitNum],M):-
match_bot_lit(Lit,LitNum,M),
\+(aleph_member(LitNum,SoFar)).
% match_bot_lit(+Lit,-LitNum,+M)
% find a bottom-clause literal number whose stored literal unifies
match_bot_lit(Lit,LitNum,M):-
M:'$aleph_sat'(botsize,Last),
M:'$aleph_sat_litinfo'(LitNum,_,Lit,_,_,_),
LitNum >= 0,
LitNum =< Last.
% match_lazy_bottom(+Clause,-Lits,+M)
% Lazily extend the bottom clause with atoms matched by executing the
% body of Clause against the saturated example, then map Clause onto it.
match_lazy_bottom(Clause,Lits,M):-
once(get_aleph_clause(Clause,AlephClause)),
copy_term(Clause,CClause),
split_clause(CClause,CHead,CBody),
example_saturated(CHead,M),
store(stage,M),
set(stage,saturation,M),
match_lazy_bottom1(CBody,M),
reinstate(stage,M),
match_bot_lits(AlephClause,[],Lits,M).
% enumerate all solutions of Body via failure-driven loop, recording
% matched atoms, then flatten them into the bottom clause
match_lazy_bottom1(Body,M):-
M:Body,
match_body_modes(Body,M),
fail.
match_lazy_bottom1(_,M):-
flatten_matched_atoms(body,M).
% match_body_modes(+BodyGoals,+M)
% Walk a conjunction of body goals, matching each one against the
% declared body modes.
match_body_modes((Goal,Goals),M):-
!,
match_mode(body,Goal,M),
match_body_modes(Goals,M).
match_body_modes(Goal,M):-
match_mode(body,Goal,M).
% match_mode(+Loc,+CLit,+M)
% Record (via update_atoms) every head/body mode declaration matching
% the functor of CLit; input/output roles are swapped for head modes.
% Failure-driven: always succeeds after exhausting matches.
match_mode(_,true,_M):- !.
match_mode(Loc,CLit,M):-
functor(CLit,Name,Arity),
functor(Mode,Name,Arity),
(Loc=head ->
M:'$aleph_global'(modeh,modeh(_,Mode));
M:'$aleph_global'(modeb,modeb(_,Mode))),
split_args(Mode,Mode,I,O,C,M),
(Loc = head ->
update_atoms(CLit,mode(Mode,O,I,C));
update_atoms(CLit,mode(Mode,I,O,C))),
fail.
match_mode(_,_,_M).
% flatten_matched_atoms(+Loc,+M)
% Flatten the atoms recorded by match_mode into the bottom clause,
% updating the '$aleph_sat' botsize/lastlit counters.
flatten_matched_atoms(Loc,M):-
setting(i,IVal,M),
(retract(M:'$aleph_sat'(botsize,BSize))-> true; BSize = 0),
(retract(M:'$aleph_sat'(lastlit,Last))-> true ; Last = 0),
(Loc = head ->
flatten(0,IVal,BSize,BSize1);
flatten(0,IVal,Last,BSize1)),
asserta(M:'$aleph_sat'(botsize,BSize1)),
(Last < BSize1 ->
asserta(M:'$aleph_sat'(lastlit,BSize1));
asserta(M:'$aleph_sat'(lastlit,Last))), !.
flatten_matched_atoms(_,_M).
% integrate head literal into lits database
% used during lazy evaluation of bottom clause
% integrate_head_lit(-HeadOVars,+M)
% Integrate the saturated example's head literal into the literal
% database (used during lazy bottom-clause construction) and return
% its input variables; no-op if there is no saturated example.
integrate_head_lit(HeadOVars,M):-
example_saturated(Example,M),
split_args(Example,_,_,Output,_,M),
integrate_args(unknown,Example,Output),
match_mode(head,Example,M),
flatten_matched_atoms(head,M),
get_ivars1(false,1,HeadOVars,M), !.
integrate_head_lit([],_M).
% get_aleph_clause(+Clause,-AlephClause)
% Convert a clause to Aleph's internal form in which every non-variable
% argument Term is wrapped as aleph_const(Term).
get_aleph_clause((Lit:-true),PLit):-
!,
get_aleph_lit(Lit,PLit).
get_aleph_clause((Lit:-Lits),(PLit,PLits)):-
!,
get_aleph_lit(Lit,PLit),
get_aleph_lits(Lits,PLits).
get_aleph_clause(Lit,PLit):-
get_aleph_lit(Lit,PLit).
% conjunction version of get_aleph_lit/2
get_aleph_lits((Lit,Lits),(PLit,PLits)):-
!,
get_aleph_lit(Lit,PLit),
get_aleph_lits(Lits,PLits).
get_aleph_lits(Lit,PLit):-
get_aleph_lit(Lit,PLit).
get_aleph_lit(Lit,PLit):-
functor(Lit,Name,Arity),
functor(PLit,Name,Arity),
get_aleph_lit(Lit,PLit,Arity).
% walk arguments right-to-left, wrapping ground arguments
get_aleph_lit(_,_,0):- !.
get_aleph_lit(Lit,PLit,Arg):-
arg(Arg,Lit,Term),
(var(Term) -> arg(Arg,PLit,Term);arg(Arg,PLit,aleph_const(Term))),
NextArg is Arg - 1,
get_aleph_lit(Lit,PLit,NextArg), !.
% Claudien-style consistency checking as described by De Raedt and Dehaspe, 1996
% currently does not retain actual substitutions that result in inconsistencies
% also, only checks for constraints of the form false:- ...
% this simplifies the check of Body,not(Head) to just Body
% ccheck(+S,+Constraint,-PCvr,-Label,+M)
% Claudien-style consistency check for constraints of the form
% aleph_false :- Body; N counts the substitutions that violate the
% constraint (inf for the vacuous constraint with empty body).
ccheck(S,(aleph_false:-Body),[],[0,N|_],M):-
(Body = true ->
N is inf;
arg(11,S,LContra),
(LContra = false ->
arg(14,S,Depth),
arg(29,S,Time),
% count all solutions of Body within the resource bounds
findall(X,(resource_bound_call(Time,Depth,Body,M),X=1),XL),
length(XL,N);
lazy_ccheck(S,Body,N,M)
)
).
% lazy_ccheck(+S,+Body,-N,+M)
% Lazy version: stop counting substitutions as soon as the noise
% threshold is exceeded; otherwise return the final count.
lazy_ccheck(S,Body,N,M):-
arg(14,S,Depth),
arg(17,S,Noise),
arg(29,S,Time),
retractall(M:'$aleph_local'(subst_count,_)),
asserta(M:'$aleph_local'(subst_count,0)),
resource_bound_call(Time,Depth,Body,M),
retract(M:'$aleph_local'(subst_count,N0)),
N is N0 + 1,
N > Noise, !.
lazy_ccheck(_,_,N,M):-
retract(M:'$aleph_local'(subst_count,N)).
% posonly formula as described by Muggleton, ILP-96
% prove_examples(+S,+Flag,+Contradiction,+Entry,+Best,+CL,+L2,+Clause,
%                +Pos,+Neg,-PCover,-NCover,-Label,+M)
% Compute the cover (and label) of Clause on the pos and neg (or random,
% for posonly) example sets, with early termination against Best.
% posonly evaluation (Muggleton, ILP-96): Neg/NCover play the role of
% the random sample; the label carries the posonly score components.
prove_examples(S,Flag,Contradiction,Entry,Best,CL,L2,Clause,Pos,Rand,PCover,RCover,[P,B,CL,I,G],M):-
arg(4,S,_/Evalfn),
Evalfn = posonly, !,
arg(11,S,LazyOnContra),
((LazyOnContra = true, Contradiction = true) ->
prove_lazy_cached(S,Entry,Pos,Rand,PCover,RCover,M),
interval_count(PCover,_PC),
interval_count(RCover,RC);
prove_pos(S,Flag,Entry,Best,[PC,L2],Clause,Pos,PCover,PC,M),
prove_rand(S,Flag,Entry,Clause,Rand,RCover,RC,M)),
find_posgain(PCover,P,M),
arg(16,S,MM), arg(20,S,N),
GC is (RC+1.0)/(N+2.0), % Laplace correction for small numbers
A is log(P),
B is log(GC),
G is GC*MM/P,
C is CL/P,
% Sz is CL*M/P,
% D is M*G,
% I is M - D - Sz,
I is A - B - C.
% lazy on cost: just use the cached covers
prove_examples(S,_,_,Entry,_,CL,_,_,Pos,Neg,Pos,Neg,[PC,NC,CL],M):-
arg(10,S,LazyOnCost),
LazyOnCost = true, !,
prove_lazy_cached(S,Entry,Pos,Neg,Pos1,Neg1,M),
interval_count(Pos1,PC),
interval_count(Neg1,NC).
% lazy on contradiction: likewise
prove_examples(S,_,true,Entry,_,CL,_,_,Pos,Neg,Pos,Neg,[PC,NC,CL],M):-
arg(11,S,LazyOnContra),
LazyOnContra = true, !,
prove_lazy_cached(S,Entry,Pos,Neg,Pos1,Neg1,M),
interval_count(Pos1,PC),
interval_count(Neg1,NC).
% at maximum clause length: prove negs first so that a hopeless clause
% can be rejected before the (usually larger) pos cover is computed
prove_examples(S,Flag,_,Ent,Best,CL,L2,Clause,Pos,Neg,PCover,NCover,[PC,NC,CL],M):-
arg(3,S,RefineOp),
(RefineOp = false; RefineOp = auto),
arg(7,S,ClauseLength),
ClauseLength = CL, !,
interval_count(Pos,MaxPCount),
prove_neg(S,Flag,Ent,Best,[MaxPCount,CL],Clause,Neg,NCover,NC,M),
arg(17,S,Noise), arg(18,S,MinAcc),
maxlength_neg_ok(Noise/MinAcc,Ent,MaxPCount,NC,M),
prove_pos(S,Flag,Ent,Best,[PC,L2],Clause,Pos,PCover,PC,M),
maxlength_neg_ok(Noise/MinAcc,Ent,PC,NC,M),
!.
% general case: pos cover first, then neg cover
prove_examples(S,Flag,_,Ent,Best,CL,L2,Clause,Pos,Neg,PCover,NCover,[PC,NC,CL],M):-
prove_pos(S,Flag,Ent,Best,[PC,L2],Clause,Pos,PCover,PC,M),
prove_neg(S,Flag,Ent,Best,[PC,CL],Clause,Neg,NCover,NC,M),
!.
% prove_lazy_cached(+S,+Entry,+Pos,+Neg,-Pos1,-Neg1,+M)
% Fetch covers from the cache when caching is on (priming the cache
% with the given intervals on a miss); otherwise pass them through.
prove_lazy_cached(S,Entry,Pos,Neg,Pos1,Neg1,M):-
arg(8,S,Caching),
Caching = true, !,
(check_cache(Entry,pos,Pos1,M)->
true;
add_cache(Entry,pos,Pos,M),
Pos1 = Pos),
(check_cache(Entry,neg,Neg1,M)->
true;
add_cache(Entry,neg,Neg,M),
Neg1 = Neg).
prove_lazy_cached(_,_,Pos,Neg,Pos,Neg,_M).
% complete_label(+Evalfn,+Clause,+Label,-Label1,+M)
% Extend a [P,N,L] label with the value of the evaluation function
% (negated for cost-like functions: user cost, entropy, gini).
complete_label(posonly,_,L,L,_M):- !.
complete_label(user,Clause,[P,N,L],[P,N,L,Val],M):-
M:cost(Clause,[P,N,L],Cost), !,
Val is -Cost.
complete_label(entropy,_,[P,N,L],[P,N,L,Val],M):-
evalfn(entropy,[P,N,L],Entropy,M),
Val is -Entropy, !.
complete_label(gini,_,[P,N,L],[P,N,L,Val],M):-
evalfn(gini,[P,N,L],Gini,M),
Val is -Gini, !.
complete_label(EvalFn,_,[P,N,L],[P,N,L,Val],M):-
evalfn(EvalFn,[P,N,L],Val,M), !.
complete_label(_,_,_,_,_M):-
p_message1('error'), p_message('incorrect evaluation/cost function'),
fail.
% estimate label based on subsampling
% estimate_label(+Sample,+Label,-Label1,+M)
% Scale the P,N counts obtained on a subsample of size Sample up to
% estimates over all remaining pos/neg atoms.
estimate_label(Sample,[P,N|Rest],[P1,N1|Rest],M):-
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos)),
M:'$aleph_global'(atoms_left,atoms_left(neg,Neg)),
interval_count(Pos,PC), interval_count(Neg,NC),
PFrac is P/Sample,
NFrac is N/Sample,
P1 is integer(PFrac*PC),
N1 is integer(NFrac*NC).
% get primary and secondary search keys for search
% use [Primary|Secondary] notation as it is the most compact
% get_search_keys(+SearchStrat,+Label,-SearchKeys)
% Build the [Primary|Secondary] open-list key from a label's length L
% and evaluation value F: breadth-first prefers short clauses (-L),
% depth-first prefers long ones, anything else is best-first on F.
get_search_keys(bf,[_,_,L,F|_],[L1|F]):-
!,
L1 is -1*L.
get_search_keys(df,[_,_,L,F|_],[L|F]):- !.
get_search_keys(_,[_,_,L,F|_],[F|L1]):-
L1 is -1*L.
% prove_pos(+S,+Flag,+Entry,+Best,+PosSoFar,+Clause,+Pos,-PCover,-PCount,+M)
% Compute the positive cover of Clause, reusing a stored cover when one
% exists, and reject early (pos_ok) against the best clause so far.
prove_pos(_,_,_,_,_,_,[],[],0,_M):- !.
prove_pos(S,_,Entry,BestSoFar,PosSoFar,Clause,_,PCover,PCount,M):-
M:'$aleph_search'(covers,covers(PCover,PCount)), !,
pos_ok(S,Entry,BestSoFar,PosSoFar,Clause,PCover,M).
prove_pos(S,Flag,Entry,BestSoFar,PosSoFar,Clause,Pos,PCover,PCount,M):-
prove_cache(Flag,S,pos,Entry,Clause,Pos,PCover,PCount,M),
pos_ok(S,Entry,BestSoFar,PosSoFar,Clause,PCover,M), !.
% prove_neg(+S,+Flag,+Entry,+Best,+PosSoFar,+Clause,+Neg,-NCover,-NCount,+M)
% Compute the negative cover of Clause. The clauses below implement,
% in order: empty negs, randomised search, stored cover, lazy negs,
% bounded counting under bf/coverage, bounded counting at maximum
% clause length, and the unbounded general case.
prove_neg(S,_,Entry,_,_,_,[],[],0,M):-
arg(8,S,Caching),
(Caching = true -> add_cache(Entry,neg,[],M); true), !.
prove_neg(S,Flag,Entry,_,_,Clause,Neg,NCover,NCount,M):-
arg(3,S,RefineOp),
RefineOp = rls, !,
prove_cache(Flag,S,neg,Entry,Clause,Neg,NCover,NCount,M).
prove_neg(_,_,_,_,_,_,_,NCover,NCount,M):-
M:'$aleph_search'(coversn,coversn(NCover,NCount)), !.
prove_neg(S,Flag,Entry,BestSoFar,PosSoFar,Clause,Neg,NCover,NCount,M):-
arg(12,S,LazyNegs),
LazyNegs = true, !,
lazy_prove_neg(S,Flag,Entry,BestSoFar,PosSoFar,Clause,Neg,NCover,NCount,M).
% best so far covers no negs: only shorter clauses can improve, so
% demand zero neg cover
prove_neg(S,Flag,Entry,[P,0,L1|_],[P,L2],Clause,Neg,[],0,M):-
arg(4,S,bf/coverage),
L2 is L1 - 1,
!,
prove_cache(Flag,S,neg,Entry,Clause,Neg,0,[],0,M), !.
prove_neg(S,Flag,Entry,[P,N|_],[P,L1],Clause,Neg,NCover,NCount,M):-
arg(4,S,bf/coverage),
!,
arg(7,S,ClauseLength),
(ClauseLength = L1 ->
arg(2,S,Explore),
% at max length, must do at least as well as the best (strictly
% better unless exploring)
(Explore = true -> MaxNegs is N; MaxNegs is N - 1),
MaxNegs >= 0,
prove_cache(Flag,S,neg,Entry,Clause,Neg,MaxNegs,NCover,NCount,M),
NCount =< MaxNegs;
prove_cache(Flag,S,neg,Entry,Clause,Neg,NCover,NCount,M)),
!.
prove_neg(S,Flag,Entry,_,[P1,L1],Clause,Neg,NCover,NCount,M):-
arg(7,S,ClauseLength),
ClauseLength = L1, !,
arg(17,S,Noise), arg(18,S,MinAcc),
get_max_negs(Noise/MinAcc,P1,N1),
prove_cache(Flag,S,neg,Entry,Clause,Neg,N1,NCover,NCount,M),
NCount =< N1,
!.
prove_neg(S,Flag,Entry,_,_,Clause,Neg,NCover,NCount,M):-
prove_cache(Flag,S,neg,Entry,Clause,Neg,NCover,NCount,M),
!.
% prove_rand: cover on the random sample (posonly evaluation)
prove_rand(S,Flag,Entry,Clause,Rand,RCover,RCount,M):-
prove_cache(Flag,S,rand,Entry,Clause,Rand,RCover,RCount,M),
!.
% lazy_prove_neg: prove at most one neg more than the current bound
lazy_prove_neg(S,Flag,Entry,[P,N|_],[P,_],Clause,Neg,NCover,NCount,M):-
arg(4,S,bf/coverage),
!,
MaxNegs is N + 1,
prove_cache(Flag,S,neg,Entry,Clause,Neg,MaxNegs,NCover,NCount,M),
!.
lazy_prove_neg(S,Flag,Entry,_,[P1,_],Clause,Neg,NCover,NCount,M):-
arg(17,S,Noise), arg(18,S,MinAcc),
get_max_negs(Noise/MinAcc,P1,N1),
MaxNegs is N1 + 1,
prove_cache(Flag,S,neg,Entry,Clause,Neg,MaxNegs,NCover,NCount,M),
!.
% Bug reported by Daniel Fredouille
% For MiAcc =:= 0, Negs was being set to P1 + 1. Unclear why.
% This definition is as it was up to Aleph 2.
% get_max_negs(+Noise/+MinAcc,+PosCount,-N)
% Upper bound on the negatives a clause may cover while still meeting
% the noise and minimum-accuracy settings; falls back to Noise when the
% positive count is not a number.
get_max_negs(Noise/MinAcc,PosCount,N):-
number(PosCount),
(MinAcc =:= 0.0 -> N is Noise;
(AccBound is integer((1-MinAcc)*PosCount/MinAcc),
(Noise < AccBound -> N is Noise; N is AccBound))
), !.
get_max_negs(Noise/_,_,Noise).
% update_open_list(+SearchKeys,+NodeRef,+Label)
% insert SearchKeys into openlist
% update_open_list(+SearchKeys,+NodeRef,+Label,+M)
% Record the node's gain under its search keys and insert the keys
% into the (descending, duplicate-free) open list.
update_open_list([K1|K2],NodeRef,Label,M):-
assertz(M:'$aleph_search_gain'(K1,K2,NodeRef,Label)),
retract(M:'$aleph_search'(openlist,OpenList)),
uniq_insert(descending,[K1|K2],OpenList,List1),
asserta(M:'$aleph_search'(openlist,List1)).
% pos_ok(+S,+Entry,+Best,+[P,L],+Clause,+PCover,+M)
% Succeeds when a clause's positive cover is worth pursuing; fails
% (caching the pruned entry) when it covers too few positives, too
% small a fraction of the prior, or cannot beat the best value so far.
pos_ok(S,_,_,_,_,_,_M):-
arg(3,S,RefineOp),
(RefineOp = rls; RefineOp = user), !.
% below the minpos setting
pos_ok(S,Entry,_,[P,_],_,_,M):-
arg(13,S,MinPos),
P < MinPos, !,
arg(8,S,Caching),
(Caching = true ->
add_prune_cache(Entry,M);
true),
fail.
% below the minposfrac fraction of the clause prior
pos_ok(S,Entry,_,[P,_],_,_,M):-
arg(40,S,MinPosFrac),
MinPosFrac > 0.0,
M:'$aleph_search'(clauseprior,_-[P1-pos,_]),
P/P1 < MinPosFrac, !,
arg(8,S,Caching),
(Caching = true ->
add_prune_cache(Entry,M);
true),
fail.
% optimistic bound: the best value reachable from here must exceed the
% current best C1 (skipped for user evalfns and in explore mode)
pos_ok(S,_,[_,_,_,C1|_],[P,L],_,_,M):-
arg(4,S,_/Evalfn),
arg(2,S,Explore),
((Evalfn = user; Explore = true) -> true;
evalfn(Evalfn,[P,0,L],C2,M),
best_value(Evalfn,S,[P,0,L,C2],Max,M),
Max > C1), !.
% maxlength_neg_ok(+Noise/+MinAcc,+Entry,+P,+N,+M)
% At maximum clause length, fail (caching the pruned entry) when the
% neg count breaks the noise or minimum-accuracy settings.
maxlength_neg_ok(Noise/MinAcc,Entry,P,N,M):-
((N > Noise); (P/(P+N) < MinAcc)), !,
add_prune_cache(Entry,M),
fail.
maxlength_neg_ok(_,_,_,_,_M).
% compression_ok(+Evalfn,+Label)
% Under the compression evaluation function require strictly positive
% compression (P - L + 1 > 0); any other evalfn always passes.
compression_ok(compression,[PosCount,_,Len|_]):-
!,
Compression is PosCount - Len + 1,
Compression > 0.
compression_ok(_,_).
% length_ok(+S,+MinLen,+ClauseLen,+LastD,-ExpectedMin,-ExpectedCLen)
% Succeeds when the clause, extended by the smallest number of literals
% it must still acquire, can remain within the clauselength bound.
length_ok(S,MinLen,ClauseLen,LastD,ExpectedMin,ExpectedCLen):-
arg(3,S,RefineOp),
% with a refinement operator the remaining-depth estimate is unused
(RefineOp = false -> L1 = LastD; L1 = 0),
(L1 < MinLen->ExpectedMin = L1;ExpectedMin = MinLen),
ExpectedCLen is ClauseLen + ExpectedMin,
arg(7,S,CLength),
ExpectedCLen =< CLength, !.
% update_best(+S,+Clause,+PCover,+NCover,+Best/Node,+Label1/Node1,
%             -Best1,+M)
% Replace the best clause so far by Clause when its label passes the
% minpos/minscore/minposfrac/noise/minacc gates and its gain exceeds
% the current best; otherwise record it (at most) as a good clause.
% gate: too few positives, -inf score, or below minscore
update_best(S,_,_,_,Best,[P,_,_,F|_]/_,Best,_M):-
arg(13,S,MinPos),
arg(19,S,MinScore),
(P < MinPos; F is -inf; F < MinScore), !.
% gate: below the minposfrac fraction of the clause prior
update_best(S,_,_,_,Best,[P|_]/_,Best,M):-
arg(40,S,MinPosFrac),
MinPosFrac > 0.0,
M:'$aleph_search'(clauseprior,_-[P1-pos,_]),
P/P1 < MinPosFrac, !.
% gate: breaks noise or minimum-accuracy settings
update_best(S,_,_,_,Best,[P,N,_,_|_]/_,Best,_M):-
arg(4,S,_/Evalfn),
Evalfn \= posonly,
% Evalfn \= user,
arg(17,S,Noise),
arg(18,S,MinAcc),
arg(22,S,Search),
Total is P + N,
((N > Noise);(Search \= ic, Total > 0, P/Total < MinAcc)), !.
% strictly better gain: becomes the new selected clause
update_best(S,Clause,PCover,NCover,Label/_,Label1/Node1,Label1/Node1,M):-
Label = [_,_,_,GainE|_],
Label1 = [_,_,_,Gain1E|_],
arithmetic_expression_value(GainE,Gain),
arithmetic_expression_value(Gain1E,Gain1),
% (Gain1 = inf; Gain = -inf; Gain1 > Gain), !,
Gain1 > Gain, !,
retractall(M:'$aleph_search'(selected,_)),
asserta(M:'$aleph_search'(selected,selected(Label1,Clause,PCover,NCover))),
arg(35,S,VSearch),
(VSearch = true ->
retractall(M:'$aleph_search'(best,_)),
asserta(M:'$aleph_search'(best,Node1)),
asserta(M:'$aleph_search'(good,Node1));
true),
update_good(Label1,Clause,M),
show_clause(newbest,Label1,Clause,Node1,M),
record_clause(newbest,Label1,Clause,Node1,M),
record_clause(good,Label1,Clause,Node1,M).
% not better: keep the old best, but record this one as good
update_best(S,Clause,_,_,Label/Node,Label1/Node1,Label/Node,M):-
arg(35,S,VSearch),
(VSearch = true ->
asserta(M:'$aleph_search'(good,Node1));
true),
update_good(Label1,Clause,M),
show_clause(good,Label1,Clause,Node1,M),
record_clause(good,Label1,Clause,Node1,M).
% update_good(+Label,+Clause,+M)
% When the good setting is on, store the clause in the in-memory
% good-clause database (keyed by clause length).
update_good(Label,Clause,M):-
setting(good,true,M), !,
Label = [_,_,L|_],
setting(check_good,Flag,M),
update_good(Flag,L,Label,Clause,M).
update_good(_,_,_M).
% with a goodfile set, clauses are written to file elsewhere — skip
update_good(_,_,_,_,M):-
setting(goodfile,_,M), !.
% check_good = true: avoid storing duplicates
update_good(true,L,Label,Clause,M):-
M:'$aleph_good'(L,Label,Clause), !.
update_good(_,L,Label,Clause,M):-
assertz(M:'$aleph_good'(L,Label,Clause)),
(retract(M:'$aleph_search'(last_good,Good)) ->
Good1 is Good + 1;
Good1 is 1),
assertz(M:'$aleph_search'(last_good,Good1)).
% update_best_theory(+S,+Theory,+PCover,+NCover,+Best/Node,
%                    +Label1/Node1,-Best1,+M)
% Theory-level analogue of update_best/8: gate on noise/minacc/minscore,
% then keep the higher-gain theory as the selected one.
update_best_theory(S,_,_,_,Best,[P,N,_,F|_]/_,Best,_M):-
arg(17,S,Noise),
arg(18,S,MinAcc),
arg(19,S,MinScore),
(N > Noise; P/(P+N) < MinAcc; F < MinScore), !.
update_best_theory(_,Theory,PCover,NCover,Label/_,Label1/Node1,Label1/Node1,M):-
Label = [_,_,_,GainE|_],
Label1 = [_,_,_,Gain1E|_],
arithmetic_expression_value(GainE,Gain),
arithmetic_expression_value(Gain1E,Gain1),
Gain1 > Gain, !,
retractall(M:'$aleph_search'(selected,_)),
asserta(M:'$aleph_search'(selected,selected(Label1,Theory,PCover,NCover))),
show_theory(newbest,Label1,Theory,Node1,M),
record_theory(newbest,Label1,Theory,Node1,M),
record_theory(good,Label1,Theory,Node1,M).
% NOTE(review): Node1 below is unbound (a singleton) — show/record are
% presumably tolerant of an unbound node id; confirm against upstream.
update_best_theory(_,Theory,_,_,Best,Label1/_,Best,M):-
show_theory(good,Label1,Theory,Node1,M),
record_theory(good,Label1,Theory,Node1,M).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% P R U N I N G C L A U S E S
% get_node(+Gains,-Gain,-Node,+M)
% Enumerate, on backtracking, the nodes stored under the listed gain
% keys, best key first.
get_node([[K1|K2]|_],[K1|K2],Node,M):-
M:'$aleph_search_gain'(K1,K2,Node,_).
get_node([_|Gains],Gain,Node,M):-
get_node(Gains,Gain,Node,M).
% prune_open(+S,+OldBest,+NewBest,+M)
% Prune the open list: (1) enforce the beam width (arg 25 of S) by
% discarding nodes beyond it, (2) skip pruning in explore/rls/user
% modes, (3) when the best value improved, remove nodes whose
% optimistic best_value cannot beat it. Clauses 1 and 4 are
% failure-driven loops over the stored gains.
prune_open(S,_,_,M):-
arg(25,S,OSize),
Inf is inf,
OSize =\= Inf,
retractall(M:'$aleph_local'(in_beam,_)),
asserta(M:'$aleph_local'(in_beam,0)),
M:'$aleph_search'(openlist,Gains),
get_node(Gains,[K1|K2],NodeNum,M),
M:'$aleph_local'(in_beam,N),
(N < OSize->
retract(M:'$aleph_local'(in_beam,N)),
N1 is N + 1,
asserta(M:'$aleph_local'(in_beam,N1));
retract(M:'$aleph_search_gain'(K1,K2,NodeNum,_)),
arg(6,S,Verbose),
(Verbose < 1 ->
true;
p1_message('non-admissible removal'),
p_message(NodeNum))),
fail.
prune_open(S,_,_,_M):-
arg(2,S,Explore),
arg(3,S,RefineOp),
(Explore = true; RefineOp = rls; RefineOp = user), !.
% best value unchanged: nothing more to prune
prune_open(_,_/N,_/N,_M):- !.
prune_open(S,_,[_,_,_,Best|_]/_,M):-
arg(4,S,_/Evalfn),
built_in_prune(Evalfn),
M:'$aleph_search_gain'(_,_,_,Label),
best_value(Evalfn,S,Label,Best1,M),
Best1 =< Best,
retract(M:'$aleph_search_gain'(_,_,_,Label)),
fail.
prune_open(_,_,_,_M).
% evaluation functions with an admissible optimistic bound
built_in_prune(coverage).
built_in_prune(compression).
built_in_prune(posonly).
built_in_prune(laplace).
built_in_prune(wracc).
built_in_prune(mestimate).
built_in_prune(auto_m).
% pruning for posonly, laplace and m-estimates devised in
% discussion with James Cussens
% pruning for weighted relative accuracy devised in
% discussion with Steve Moyle
% corrections to best_value/4 after discussion with
% Mark Reid and James Cussens
% best_value(+Evalfn,+S,+Label,-Best,+M)
% Optimistic bound on the value reachable by refining a clause with
% label Label (used for admissible pruning of the open list).
best_value(gini,_,_,0.0,_M):- !.
best_value(entropy,_,_,0.0,_M):- !.
best_value(posonly,S,[P,_,L|_],Best,_M):-
arg(20,S,RSize),
Best is log(P) + log(RSize+2.0) - (L+1)/P, !.
best_value(wracc,_,[P|_],Best,M):-
(M:'$aleph_search'(clauseprior,Total-[P1-pos,_]) ->
Best is P*(Total - P1)/(Total^2);
Best is 0.25), !.
best_value(Evalfn,_,[P,_,L|Rest],Best,M):-
L1 is L + 1, % need at least 1 extra literal to achieve best value
evalfn(Evalfn,[P,0,L1|Rest],Best,M).
% get_nextbest(+S,-NodeRef,+M)
% Select the next node to expand according to the search strategy.
get_nextbest(S,NodeRef,M):-
arg(22,S,Search),
select_nextbest(Search,NodeRef,M).
% select_nextbest(+Search,-NodeRef,+M)
% Dispatch on the search type; the rls clause implements Filip
% Zelezny's randomised-rapid-restart behaviour.
select_nextbest(rls,NodeRef,M):-
retractall(M:'$aleph_search'(nextnode,_)),
setting(rls_type,Type,M),
(retract(M:'$aleph_search'(rls_parentstats,stats(PStats,_,_))) -> true; true),
(rls_nextbest(Type,PStats,NodeRef,Label,M) ->
asserta(M:'$aleph_search'(rls_parentstats,stats(Label,[],[]))),
setting(rls_type,RlsType,M),
(RlsType = rrr ->
true;
assertz(M:'$aleph_search'(nextnode,NodeRef)));
NodeRef = none), !.
select_nextbest(_,NodeRef,M):-
retractall(M:'$aleph_search'(nextnode,_)),
get_nextbest(NodeRef,M), !.
select_nextbest(_,none,_M).
% get_nextbest(-NodeRef,+M)
% Pop the best node from the open list; drop exhausted keys and
% recurse; none when the list is empty.
get_nextbest(NodeRef,M):-
M:'$aleph_search'(openlist,[H|_]),
H = [K1|K2],
retract(M:'$aleph_search_gain'(K1,K2,NodeRef,_)),
assertz(M:'$aleph_search'(nextnode,NodeRef)).
get_nextbest(NodeRef,M):-
retract(M:'$aleph_search'(openlist,[_|T])),
asserta(M:'$aleph_search'(openlist,T)),
get_nextbest(NodeRef,M), !.
get_nextbest(none,_M).
% rls_nextbest(+Type,+PStats,-NodeRef,-Label,+M)
% Pick the next node under randomised local search: rrr takes the best,
% gsat picks randomly among the top-scoring nodes, wsat mixes gsat with
% random walks, anneal accepts worse moves with a temperature-dependent
% probability.
rls_nextbest(rrr,_,NodeRef,_,M):-
get_nextbest(NodeRef,M).
rls_nextbest(gsat,_,NodeRef,Label,M):-
retract(M:'$aleph_search'(openlist,[H|_])),
H = [K1|K2],
asserta(M:'$aleph_search'(openlist,[])),
% all nodes sharing the best key; pick one at random
findall(N-L,M:'$aleph_search_gain'(K1,K2,N,L),Choices),
length(Choices,Last),
get_random(Last,N),
aleph_remove_nth(N,Choices,NodeRef-Label,_),
retractall(M:'$aleph_search_gain'(_,_,_,_)).
rls_nextbest(wsat,PStats,NodeRef,Label,M):-
setting(walk,WProb,M),
aleph_random(P),
P >= WProb, !,
rls_nextbest(gsat,PStats,NodeRef,Label,M).
rls_nextbest(wsat,PStats,NodeRef,Label,M):-
p_message('random walk'),
retract(M:'$aleph_search'(openlist,_)),
asserta(M:'$aleph_search'(openlist,[])),
findall(N-L,M:'$aleph_search_gain'(_,_,N,L),AllNodes),
potentially_good(AllNodes,PStats,Choices),
length(Choices,Last),
get_random(Last,N),
aleph_remove_nth(N,Choices,NodeRef-Label,_),
retractall(M:'$aleph_search_gain'(_,_,_,_)).
% NOTE(review): N below is already bound by the head pattern [P,N|_],
% so the findall template N-L and Label = [P1,N1|_] reuse it — if
% PStats is a bound label this filters the gains by node number, which
% looks unintended; confirm against upstream Aleph before changing.
rls_nextbest(anneal,[P,N|_],NodeRef,Label,M):-
setting(temperature,Temp,M),
retract(M:'$aleph_search'(openlist,_)),
asserta(M:'$aleph_search'(openlist,[])),
findall(N-L,M:'$aleph_search_gain'(_,_,N,L),AllNodes),
length(AllNodes,Last),
get_random(Last,S),
aleph_remove_nth(S,AllNodes,NodeRef-Label,_),
Label = [P1,N1|_],
Gain is (P1 - N1) - (P - N),
((P = 1); (Gain >= 0);(aleph_random(R), R < exp(Gain/Temp))).
% potentially_good(+Nodes,+Label,-Good)
% Filter node-label pairs, keeping those whose label could improve on
% the parent Label.
potentially_good([],_,[]).
potentially_good([H|T],Label,[H|T1]):-
H = _-Label1,
potentially_good(Label,Label1), !,
potentially_good(T,Label,T1).
potentially_good([_|T],Label,T1):-
potentially_good(T,Label,T1).
% potentially_good(+ParentLabel,+Label1)
% Label1 improves when: a single positive can be exceeded, or more
% positives at no greater length, or fewer negatives.
potentially_good([1|_],[P1|_]):-
!,
P1 > 1.
potentially_good([P,_,L|_],[P1,_,L1|_]):-
L1 =< L, !,
P1 > P.
potentially_good([_,N|_],[_,N1|_]):-
N1 < N.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% P R O V E
% prove with caching
% if entry exists in cache, then return it
% otherwise find and cache cover
% if ``exact'' flag is set then only check proof for examples
% in the part left over due to lazy theorem-proving
% ideas in caching developed in discussions with James Cussens
% prove_cache(+Flag,+S,+Type,+Entry,+Clause,+Intervals,-IList,-Count,+M)
% Prove with caching: under the `exact' flag only the lazily left-over
% part of the cover is re-proved; under `upper' a cached cover is reused
% (or computed and cached) when caching is on.
% Fixed: the exact clause called add_cache/3; this module-parameterized
% port defines add_cache/4 with a trailing module argument (cf. the
% `upper' clause below and prove_lazy_cached/7).
prove_cache(exact,S,Type,Entry,Clause,Intervals,IList,Count,M):-
!,
(Intervals = Exact/Left ->
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
prove(Depth/Time/Proof,Type,Clause,Left,IList1,Count1,M),
aleph_append(IList1,Exact,IList),
interval_count(Exact,Count0),
Count is Count0 + Count1;
IList = Intervals,
interval_count(IList,Count)),
arg(8,S,Caching),
(Caching = true -> add_cache(Entry,Type,IList,M); true).
% upper, caching on: reuse or build the cached cover
prove_cache(upper,S,Type,Entry,Clause,Intervals,IList,Count,M):-
arg(8,S,Caching),
Caching = true, !,
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
(check_cache(Entry,Type,Cached,M)->
prove_cached(S,Type,Entry,Cached,Clause,Intervals,IList,Count,M);
prove_intervals(Depth/Time/Proof,Type,Clause,Intervals,IList,Count,M),
add_cache(Entry,Type,IList,M)).
% upper, caching off: prove directly (merging any lazy left-over part)
prove_cache(upper,S,Type,_,Clause,Intervals,IList,Count,M):-
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
(Intervals = Exact/Left ->
aleph_append(Left,Exact,IList1),
prove(Depth/Time/Proof,Type,Clause,IList1,IList,Count,M);
prove(Depth/Time/Proof,Type,Clause,Intervals,IList,Count,M)).
% prove_intervals(+DepthTime,+Type,+Clause,+Intervals,-IList,-Count,+M)
% Prove over the intervals, merging a lazy Exact/Left split first.
prove_intervals(DepthTime,Type,Clause,I1/Left,IList,Count,M):-
!,
aleph_append(Left,I1,Intervals),
prove(DepthTime,Type,Clause,Intervals,IList,Count,M).
prove_intervals(DepthTime,Type,Clause,Intervals,IList,Count,M):-
prove(DepthTime,Type,Clause,Intervals,IList,Count,M).
% prove_cached(+S,+Type,+Entry,+Cached,+Clause,+Intervals,-IList,
%              -Count,+M)
% Complete a cached cover: re-prove only the lazy Left part, intersect
% with the sampled intervals under greedy pos search, and refresh the
% cache entry.
prove_cached(S,Type,Entry,I1/Left,Clause,Intervals,IList,Count,M):-
!,
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
prove(Depth/Time/Proof,Type,Clause,Left,I2,_,M),
aleph_append(I2,I1,I),
(Type = pos ->
arg(5,S,Greedy),
(Greedy = true ->
intervals_intersection(I,Intervals,IList);
IList = I);
IList = I),
interval_count(IList,Count),
update_cache(Entry,Type,IList,M).
% fully cached cover: only intersect (for greedy pos) and recount
prove_cached(S,Type,Entry,I1,_,Intervals,IList,Count,M):-
(Type = pos -> arg(5,S,Greedy),
(Greedy = true ->
intervals_intersection(I1,Intervals,IList);
IList = I1);
IList = I1),
interval_count(IList,Count),
update_cache(Entry,Type,IList,M).
% prove at most Max atoms
% prove_cache(+Flag,+S,+Type,+Entry,+Clause,+Intervals,+Max,-IList,
%             -Count,+M)
% As prove_cache/9, but proves at most Max atoms; covers are stored as
% Exact/Left pairs so lazily skipped examples can be proved later.
% Fixed: the exact clause called add_cache/3; this module-parameterized
% port defines add_cache/4 with a trailing module argument (cf. the
% other clauses of this predicate).
prove_cache(exact,S,Type,Entry,Clause,Intervals,Max,IList,Count,M):-
!,
(Intervals = Exact/Left ->
interval_count(Exact,Count0),
Max1 is Max - Count0,
arg(12,S,LNegs),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
prove(LNegs/false,Depth/Time/Proof,Type,Clause,Left,Max1,IList1,Count1,M),
aleph_append(IList1,Exact,Exact1),
find_lazy_left(S,Type,Exact1,Left1),
IList = Exact1/Left1,
Count is Count0 + Count1;
IList = Intervals,
interval_count(Intervals,Count)),
arg(8,S,Caching),
(Caching = true -> add_cache(Entry,Type,IList,M); true).
% upper, caching on: reuse/build the cached cover; on failure the
% partially proved examples are still cached before failing
prove_cache(upper,S,Type,Entry,Clause,Intervals,Max,IList,Count,M):-
arg(8,S,Caching),
Caching = true, !,
(check_cache(Entry,Type,Cached,M)->
prove_cached(S,Type,Entry,Cached,Clause,Intervals,Max,IList,Count,M);
(prove_intervals(S,Type,Clause,Intervals,Max,IList1,Count,M)->
find_lazy_left(S,Type,IList1,Left1),
add_cache(Entry,Type,IList1/Left1,M),
IList = IList1/Left1,
retractall(M:'$aleph_local'(example_cache,_));
collect_example_cache(IList,M),
add_cache(Entry,Type,IList,M),
fail)).
% upper, caching off: bounded proof over the merged intervals
prove_cache(upper,S,Type,_,Clause,Intervals,Max,IList/Left1,Count,M):-
arg(8,S,Caching),
arg(12,S,LNegs),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
(Intervals = Exact/Left ->
aleph_append(Left,Exact,IList1),
prove(LNegs/Caching,Depth/Time/Proof,Type,Clause,IList1,Max,IList,Count,M);
prove(LNegs/Caching,Depth/Time/Proof,Type,Clause,Intervals,Max,IList,Count,M)),
find_lazy_left(S,Type,IList,Left1).
% prove_intervals(+S,+Type,+Clause,+Intervals,+Max,-IList,-Count,+M)
% Bounded (at most Max atoms) proof over the intervals, merging a lazy
% Exact/Left split first.
prove_intervals(S,Type,Clause,I1/Left,Max,IList,Count,M):-
!,
arg(8,S,Caching),
arg(12,S,LNegs),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
aleph_append(Left,I1,Intervals),
prove(LNegs/Caching,Depth/Time/Proof,Type,Clause,Intervals,Max,IList,Count,M).
prove_intervals(S,Type,Clause,Intervals,Max,IList,Count,M):-
arg(8,S,Caching),
arg(12,S,LNegs),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
prove(LNegs/Caching,Depth/Time/Proof,Type,Clause,Intervals,Max,IList,Count,M).
% prove_cached(+S,+Type,+Entry,+Cached,+Clause,+Intervals,+Max,
%              -IList/Left1,-Count,+M)
% Bounded completion of a cached cover; on failure of the bounded proof
% the partial cover is cached before failing.
prove_cached(S,Type,Entry, I1/Left,Clause,_,Max,IList/Left1,Count,M):-
!,
arg(8,S,Caching),
arg(12,S,LNegs),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
interval_count(I1,C1),
Max1 is Max - C1,
Max1 >= 0,
(prove(LNegs/Caching,Depth/Time/Proof,Type,Clause,Left,Max1,I2,C2,M)->
aleph_append(I2,I1,IList),
Count is C2 + C1,
find_lazy_left(S,Type,IList,Left1),
update_cache(Entry,Type,IList/Left1,M),
retractall(M:'$aleph_local'(example_cache,_));
collect_example_cache(I2/Left1,M),
aleph_append(I2,I1,IList),
update_cache(Entry,Type,IList/Left1,M),
fail).
% fully cached neg cover: reuse as-is
prove_cached(_,neg,_, I1/L1,_,_,_,I1/L1,C1,_M):-
!,
interval_count(I1,C1).
% plain cached cover: accept when within the bound (or negs are lazy)
prove_cached(S,_,_,I1,_,_,Max,I1,C1,_M):-
interval_count(I1,C1),
arg(12,S,LNegs),
(LNegs = true ->true; C1 =< Max).
% collect_example_cache(-Intervals/Left,+M)
% Turn the example numbers accumulated in '$aleph_local'(example_cache)
% into an interval list, with Left the still-unproved tail of the neg
% examples.
collect_example_cache(Intervals/Left,M):-
retract(M:'$aleph_local'(example_cache,[Last|Rest])),
aleph_reverse([Last|Rest],IList),
list_to_intervals1(IList,Intervals),
Next is Last + 1,
M:'$aleph_global'(size,size(neg,LastN)),
(Next > LastN -> Left = []; Left = [Next-LastN]).
% find_lazy_left(+S,+Type,+Cover,-Left)
% Compute the interval of examples left unproved by a lazily bounded
% cover; empty unless lazy negative proving is on.
find_lazy_left(S,_,_,[]):-
arg(12,S,LazyNegs),
LazyNegs = false, !.
find_lazy_left(_,_,[],[]).
% last interval reached: anything beyond its end up to the set size
find_lazy_left(S,Type,[_-F],Left):-
!,
F1 is F + 1,
(Type = pos -> arg(16,S,Last);
(Type = neg -> arg(24,S,Last);
(Type = rand -> arg(20,S,Last); Last = F))),
(F1 > Last -> Left = []; Left = [F1-Last]).
find_lazy_left(S,Type,[_|T1],Left):-
find_lazy_left(S,Type,T1,Left).
% prove atoms specified by Type and index set using Clause.
% dependent on data structure used for index set:
% currently index set is a list of intervals
% return atoms proved and their count
% if tail-recursive version is needed see below
% prove(+Flags,+Type,+Clause,+Intervals,-IList,-Count,+M)
% Prove the atoms in an interval list with Clause, returning the
% covered intervals and their count.
prove(_,_,_,[],[],0,_M).
prove(Flags,Type,Clause,[Interval|Intervals],IList,Count,M):-
index_prove(Flags,Type,Clause,Interval,I1,C1,M),
prove(Flags,Type,Clause,Intervals,I2,C2,M),
aleph_append(I2,I1,IList),
Count is C1 + C2.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% T A I L - R E C U R S I V E P R O V E/6
% use this rather than the prove/6 above for tail recursion
% written by James Cussens
% prove(DepthTime,Type,Clause,Intervals,IList,Count,M):-
% prove2(Intervals,DepthTime,Type,Clause,0,IList,Count,M).
% code for tail recursive cover testing
% starts here
% when we know that Sofar is a variable.
% prove2(+Intervals,+Depth/Time/Proof,+Type,+Clause,+InCount,
%        -Sofar,-OutCount,+M)
% Tail-recursive cover testing (James Cussens); used when Sofar is an
% unbound variable. Builds the list of covered intervals while counting.
% Fixed: the prove1 call had the module argument trapped inside the
% clause term by a misplaced parenthesis — (Head:-Body,M) — producing a
% call to an undefined prove1/4; cf. the correct prove1/5 call in
% prove3/8 below.
prove2([],_,_,_,Count,[],Count,_M).
prove2([Current-Finish|Intervals],Depth/Time/Proof,Type,(Head:-Body),InCount,Sofar,OutCount,M) :-
M:example(Current,Type,Example),
\+ prove1(Proof,Depth/Time,Example,(Head:-Body),M), %uncovered
!,
(Current>=Finish ->
prove2(Intervals,Depth/Time/Proof,Type,(Head:-Body),InCount,Sofar,OutCount,M);
Next is Current+1,!,
prove2([Next-Finish|Intervals],Depth/Time/Proof,Type,(Head:-Body),InCount,Sofar,OutCount,M)
).
prove2([Current-Finish|Intervals],ProofFlags,Type,Clause,InCount,Sofar,OutCount,M) :-
(Current>=Finish ->
Sofar=[Current-Current|Rest],
MidCount is InCount+1,!,
prove2(Intervals,ProofFlags,Type,Clause,MidCount,Rest,OutCount,M);
Next is Current+1,
Sofar=[Current-_Last|_Rest],!,
prove3([Next-Finish|Intervals],ProofFlags,Type,Clause,InCount,Sofar,OutCount,M)
).
%when Sofar is not a variable
% prove3(+Intervals,+Depth/Time/Proof,+Type,+Clause,+InCount,
%        +Sofar,-OutCount,+M)
% Companion of prove2/8, used once the current interval's start is
% known (Sofar partially bound): closes the open interval when an
% uncovered example is met.
prove3([Current-Finish|Intervals],Depth/Time/Proof,Type,(Head:-Body),InCount,Sofar,OutCount,M) :-
M:example(Current,Type,Example),
\+ prove1(Proof,Depth/Time,Example,(Head:-Body),M), %uncovered
!,
Last is Current-1, %found some previously
Sofar=[Start-Last|Rest], %complete found interval
MidCount is InCount+Current-Start,
(Current>=Finish ->
prove2(Intervals,Depth/Time/Proof,Type,(Head:-Body),MidCount,Rest,OutCount,M);
Next is Current+1,!,
prove2([Next-Finish|Intervals],Depth/Time/Proof,Type,(Head:-Body),MidCount,Rest,OutCount,M)
).
prove3([Current-Finish|Intervals],ProofFlags,Type,Clause,InCount,Sofar,OutCount,M) :-
(Current>=Finish ->
Sofar=[Start-Finish|Rest],
MidCount is InCount+Finish-Start+1,!,
prove2(Intervals,ProofFlags,Type,Clause,MidCount,Rest,OutCount,M);
Next is Current+1,!,
prove3([Next-Finish|Intervals],ProofFlags,Type,Clause,InCount,Sofar,OutCount,M)
).
% code for tail recursive cover testing
% ends here
% index_prove(+ProofFlags,+Type,+Clause,+Start-Finish,-IList,-Count,+M)
% Prove the consecutive run of covered examples starting at Start,
% then recurse past the first uncovered one.
index_prove(_,_,_,Start-Finish,[],0,_M):-
Start > Finish, !.
index_prove(ProofFlags,Type,Clause,Start-Finish,IList,Count,M):-
index_prove1(ProofFlags,Type,Clause,Start,Finish,Last,M),
% Last is the first example NOT covered in the run
Last0 is Last - 1 ,
Last1 is Last + 1,
(Last0 >= Start->
index_prove(ProofFlags,Type,Clause,Last1-Finish,Rest,Count1,M),
IList = [Start-Last0|Rest],
Count is Last - Start + Count1;
index_prove(ProofFlags,Type,Clause,Last1-Finish,IList,Count,M)).
% prove1(+G,+M)
% depth-bounded proof of a single goal
prove1(G,M):-
depth_bound_call(G,M), !.
% prove1(+Proof,+Depth/Time,+Example,+Clause,+M)
% Prove one example under the selected proof strategy: user-defined
% prove/2, restricted SLD (head unification + body proof), or full SLD
% (prove the example against the whole program).
prove1(user,_,Example,Clause,M):-
prove(Clause,Example,M), !.
% double negation: test provability without binding Example's variables
prove1(restricted_sld,Depth/Time,Example,(Head:-Body),M):-
\+((\+(((Example = Head),resource_bound_call(Time,Depth,Body,M))))), !.
prove1(sld,Depth/Time,Example,_,M):-
\+(\+(resource_bound_call(Time,Depth,Example,M))), !.
% index_prove1(+Depth/Time/Proof,+Type,+Clause,+Num,+Finish,-Last,+M)
% Walk examples from Num while they are covered by Clause; Last is the
% first example number that is not covered (or Finish+1).
index_prove1(_,_,_,Num,Last,Num,_M):-
Num > Last, !.
index_prove1(Depth/Time/Proof,Type,Clause,Num,Finish,Last,M):-
M:example(Num,Type,Example),
prove1(Proof,Depth/Time,Example,Clause,M), !,
Num1 is Num + 1,
index_prove1(Depth/Time/Proof,Type,Clause,Num1,Finish,Last,M).
index_prove1(_,_,_,Last,_,Last,_M).
% proves at most Max atoms using Clause.
prove(_,_,_,_,[],_,[],0,_M).
prove(Flags,ProofFlags,Type,Clause,[Interval|Intervals],Max,IList,Count,M):-
index_prove(Flags,ProofFlags,Type,Clause,Interval,Max,I1,C1,M), !,
Max1 is Max - C1,
prove(Flags,ProofFlags,Type,Clause,Intervals,Max1,I2,C2,M),
aleph_append(I2,I1,IList),
Count is C1 + C2.
index_prove(_,_,_,_,Start-Finish,_,[],0,_M):-
Start > Finish, !.
index_prove(Flags,ProofFlags,Type,Clause,Start-Finish,Max,IList,Count,M):-
index_prove1(Flags,ProofFlags,Type,Clause,Start,Finish,0,Max,Last,M),
Last0 is Last - 1 ,
Last1 is Last + 1,
(Last0 >= Start->
Max1 is Max - Last + Start,
((Max1 = 0, Flags = true/_) ->
Rest = [], Count1 = 0;
index_prove(Flags,ProofFlags,Type,Clause,Last1-Finish,
Max1,Rest,Count1,M)),
IList = [Start-Last0|Rest],
Count is Last - Start + Count1;
index_prove(Flags,ProofFlags,Type,Clause,Last1-Finish,Max,IList,Count,M)).
index_prove1(false/_,_,_,_,_,_,Proved,Allowed,_,_M):-
Proved > Allowed, !, fail.
index_prove1(_,_,_,_,Num,Last,_,_,Num,_M):-
Num > Last, !.
index_prove1(true/_,_,_,_,Num,_,Allowed,Allowed,Num,_M):- !.
index_prove1(LNegs/Caching,Depth/Time/Proof,Type,Clause,Num,Finish,Proved,Allowed,Last,M):-
M:example(Num,Type,Example),
prove1(Proof,Depth/Time,Example,Clause,M), !,
Num1 is Num + 1,
Proved1 is Proved + 1,
(Caching = true ->
(retract(M:'$aleph_local'(example_cache,L)) ->
asserta(M:'$aleph_local'(example_cache,[Num|L]));
asserta(M:'$aleph_local'(example_cache,[Num])));
true),
index_prove1(LNegs/Caching,Depth/Time/Proof,Type,Clause,Num1,Finish,Proved1,Allowed,Last,M).
index_prove1(_,_,_,_,Last,_,_,_,Last,_M).
% resource_bound_call(Time,Depth,Goals)
% attempt to prove Goals using depth bounded theorem-prover
% in at most Time secs
% Infinite time limit: just use the depth-bounded call.
resource_bound_call(T,Depth,Goals,M):-
Inf is inf,
T =:= Inf,
!,
depth_bound_call(Goals,Depth,M).
% Finite limit: the prooflimit exception thrown by the alarm turns a
% timeout into plain failure.
resource_bound_call(T,Depth,Goals,M):-
catch(time_bound_call(T,prooflimit,depth_bound_call(Goals,Depth,M),M),
prooflimit,fail).
% time_bound_call(+Secs,+Exception,+Goal,+Module)
% Run M:Goal with an alarm that throws Exception after Secs seconds;
% the alarm is removed on both success and failure of Goal.
time_bound_call(T,Exception,Goal,M):-
alarm(T,throw(Exception),X),
(M:Goal -> remove_alarm(X); remove_alarm(X), fail).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% C A C H I N G
% clear_cache(+Module): remove all search-cache and prune-cache facts.
clear_cache(M):-
retractall(M:'$aleph_search_cache'(_)),
retractall(M:'$aleph_search_prunecache'(_)).
% check_cache(+Entry,+Type,-I,+Module)
% Look up a cached count for Entry: the pos count is stored in the
% penultimate argument of the entry term, the neg count in the last.
check_cache(Entry,Type,I,M):-
Entry \= false,
M:'$aleph_search_cache'(Entry), !,
functor(Entry,_,Arity),
(Type = pos -> Arg is Arity - 1; Arg is Arity),
arg(Arg,Entry,I),
nonvar(I).
% add_cache(+Entry,+Type,+I,+Module)
% Store (or refresh) a cache entry; Entry = false (no cache key) is a no-op.
add_cache(false,_,_,_M):- !.
add_cache(Entry,Type,I,M):-
(retract(M:'$aleph_search_cache'(Entry))-> true ; true),
functor(Entry,_,Arity),
(Type = pos -> Arg is Arity - 1; Arg is Arity),
(arg(Arg,Entry,I)-> asserta(M:'$aleph_search_cache'(Entry));
true), !.
% update_cache(+Entry,+Type,+I,+Module)
% Replace an approximate cached count (stored as a Lo/Hi pair) by the exact
% value I, rebuilding the entry term with all other arguments copied over.
update_cache(Entry,Type,I,M):-
Entry \= false,
functor(Entry,Name,Arity),
(Type = pos -> Arg is Arity - 1; Arg is Arity),
arg(Arg,Entry,OldI),
OldI = _/_,
retract(M:'$aleph_search_cache'(Entry)),
functor(NewEntry,Name,Arity),
Arg0 is Arg - 1,
copy_args(Entry,NewEntry,1,Arg0),
arg(Arg,NewEntry,I),
Arg1 is Arg + 1,
copy_args(Entry,NewEntry,Arg1,Arity),
asserta(M:'$aleph_search_cache'(NewEntry)), !.
update_cache(_,_,_,_M).
% add_prune_cache(+Entry,+Module)
% When the caching setting is true, record the clause part of Entry
% (argument Arity-2) in the prune cache.
add_prune_cache(false,_M):- !.
add_prune_cache(Entry,M):-
(M:'$aleph_global'(caching,set(caching,true))->
functor(Entry,_,Arity),
A1 is Arity - 2,
arg(A1,Entry,Clause),
asserta(M:'$aleph_search_prunecache'(Clause));
true).
% get_cache_entry(+Max,+Clause,-Entry)
% Build a cache key for Clause (skolemised) when it has at most Max
% literals; otherwise Entry is false.
get_cache_entry(Max,Clause,Entry):-
skolemize(Clause,Head,Body,0,_),
length(Body,L1),
Max >= L1 + 1,
aleph_hash_term([Head|Body],Entry), !.
get_cache_entry(_,_,false).
% upto 3-argument indexing using predicate names in a clause
% aleph_hash_term(+Lits,-Entry)
% Build an indexable cache key: the entry's functor and leading arguments
% are predicate names of the first body literals, the next argument holds
% the full literal list, and the remaining (unbound) trailing arguments
% leave room for the pos/neg counts used by the cache predicates above.
aleph_hash_term([L0,L1,L2,L3,L4|T],Entry):-
!,
functor(L1,P1,_), functor(L2,P2,_),
functor(L3,P3,_), functor(L4,P4,_),
functor(Entry,P4,6),
arg(1,Entry,P2), arg(2,Entry,P3),
arg(3,Entry,P1), arg(4,Entry,[L0,L1,L2,L3,L4|T]).
aleph_hash_term([L0,L1,L2,L3],Entry):-
!,
functor(L1,P1,_), functor(L2,P2,_),
functor(L3,P3,_),
functor(Entry,P3,5),
arg(1,Entry,P2), arg(2,Entry,P1),
arg(3,Entry,[L0,L1,L2,L3]).
aleph_hash_term([L0,L1,L2],Entry):-
!,
functor(L1,P1,_), functor(L2,P2,_),
functor(Entry,P2,4),
arg(1,Entry,P1), arg(2,Entry,[L0,L1,L2]).
aleph_hash_term([L0,L1],Entry):-
!,
functor(L1,P1,_),
functor(Entry,P1,3),
arg(1,Entry,[L0,L1]).
% Single-literal clause: key on the head's own predicate name.
aleph_hash_term([L0],Entry):-
functor(L0,P0,_),
functor(Entry,P0,3),
arg(1,Entry,[L0]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% T R E E S
% construct_tree(+Type,+Module)
% Build a tree of the given type, bounded by the searchtime setting when it
% is finite; on timeout a message is printed and construction stops.
construct_tree(Type,M):-
setting(searchtime,Time,M),
Inf is inf,
Time =\= Inf,
SearchTime is integer(Time),
SearchTime > 0, !,
% NOTE(review): this calls find_tree/1 in module M via time_bound_call,
% while only find_tree/2 (with the module argument) is defined below --
% confirm a find_tree/1 wrapper exists elsewhere.
catch(time_bound_call(SearchTime,searchlimit,find_tree(Type),M),
searchlimit,p_message('Time limit reached')).
construct_tree(Type,M):-
find_tree(Type,M).
% find_tree(Type,M) where Type is one of
% classification, regression, class_probability
% Clears all tree bookkeeping, creates the root leaf covering all positive
% examples, grows the tree, prunes it, and asserts the resulting clauses.
find_tree(Type,M):-
retractall(M:'$aleph_search'(tree,_)),
retractall(M:'$aleph_search'(tree_besterror,_)),
retractall(M:'$aleph_search'(tree_gain,_)),
retractall(M:'$aleph_search'(tree_lastleaf,_)),
retractall(M:'$aleph_search'(tree_leaf,_)),
retractall(M:'$aleph_search'(tree_newleaf,_)),
retractall(M:'$aleph_search'(tree_startdistribution,_)),
get_start_distribution(Type,Distribution,M),
asserta(M:'$aleph_search'(tree_startdistribution,d(Type,Distribution))),
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos)),
setting(dependent,Argno,M),
p_message('constructing tree'),
stopwatch(StartClock),
get_search_settings(S,M),
auto_refine(aleph_false,Head,M),
gen_leaf(Leaf,M),
eval_treenode(S,Type,(Head:-true),[Argno],Pos,Examples,N,Cost,M),
asserta(M:'$aleph_search'(tree_leaf,l(Leaf,Leaf,[Head,Cost,N],Examples))),
find_tree1([Leaf],S,Type,[Argno],M),
prune_rules(S,Type,[Argno],M),
stopwatch(StopClock),
add_tree(S,Type,[Argno],M),
Time is StopClock - StartClock,
p1_message('construction time'), p_message(Time).
% get_start_distribution(+TreeType,-Total-Distribution,+Module)
% Initial (all-zero) distribution for the root node, by tree type / evalfn.
get_start_distribution(regression,0-[0,0],_M):- !.
get_start_distribution(model,0-[0,0],M):-
setting(evalfn,mse,M), !.
get_start_distribution(model,0-Distribution,M):-
setting(evalfn,accuracy,M), !,
(setting(classes,Classes,M) -> true;
!,
p_message('missing setting for classes'),
fail),
initialise_distribution(Classes,Distribution), !.
get_start_distribution(Tree,0-Distribution,M):-
(Tree = classification; Tree = class_probability),
(setting(classes,Classes,M) -> true;
!,
p_message('missing setting for classes'),
fail),
initialise_distribution(Classes,Distribution), !.
% Anything else is a configuration error.
get_start_distribution(_,_,_M):-
p_message('incorrect/missing setting for tree_type or evalfn'),
fail.
% initialise_distribution(+Classes,-Distribution)
% Pair every class with an initial count of zero.
initialise_distribution(Classes,Distribution):-
findall(0-Class,member(Class,Classes),Distribution).
% laplace_correct(+Counts,-Corrected)
% Laplace correction: add one to each Count-Class pair's count.
laplace_correct(Counts,Corrected):-
findall(N1-Class,(member(N-Class,Counts),N1 is N + 1),Corrected).
% find_tree1(+Leaves,+Settings,+Type,+PredictArg,+Module)
% Grow the tree: repeatedly try to split the next pending leaf; a leaf that
% cannot be split (can_split fails) is left as a final leaf.
find_tree1([],_,_,_,_M).
find_tree1([Leaf|Leaves],S,Type,Predict,M):-
can_split(S,Type,Predict,Leaf,Left,Right,M), !,
split_leaf(Leaf,Left,Right,NewLeaves,M),
aleph_append(NewLeaves,Leaves,LeavesLeft),
find_tree1(LeavesLeft,S,Type,Predict,M).
find_tree1([_|LeavesLeft],S,Type,Predict,M):-
find_tree1(LeavesLeft,S,Type,Predict,M).
% prune_rules(+Settings,+TreeType,+PredictArg,+Module)
% Prune leaf clauses only when the prune_tree setting is true.
prune_rules(S,Tree,Predict,M):-
setting(prune_tree,true,M),
prune_rules1(Tree,S,Predict,M), !.
prune_rules(_,_,_,_M).
% pessimistic pruning by employing corrections to observed errors
% First failure-driven loop prunes every leaf into tree_newleaf facts;
% the second loop moves them back as the new tree_leaf facts.
prune_rules1(class_probability,_,_,_M):-
p_message('no pruning for class probability trees'), !.
prune_rules1(model,_,_,_M):-
p_message('no pruning for model trees'), !.
prune_rules1(Tree,S,Predict,M):-
p_message('pruning clauses'),
M:'$aleph_search'(tree_leaf,l(Leaf,Parent,Clause,Examples)),
prune_rule(Tree,S,Predict,Clause,Examples,NewClause,NewExamples,M),
retract(M:'$aleph_search'(tree_leaf,l(Leaf,Parent,Clause,Examples))),
asserta(M:'$aleph_search'(tree_newleaf,l(Leaf,Parent,NewClause,NewExamples))),
fail.
prune_rules1(_,_,_,M):-
retract(M:'$aleph_search'(tree_newleaf,l(Leaf,Parent,NewClause,NewExamples))),
asserta(M:'$aleph_search'(tree_leaf,l(Leaf,Parent,NewClause,NewExamples))),
fail.
prune_rules1(_,_,_,_M).
% prune_rule(+TreeType,+Settings,+PredictArg,+LeafInfo,+Examples,-NewLeafInfo,-NewExamples,+Module)
% Greedily delete body literals while the pessimistic error estimate does
% not worsen; the cover is recomputed only if the estimate improved.
prune_rule(Tree,S,PredictArg,[Clause,_,N],Examples,[PrunedClause,E1,NCov],NewEx,M):-
node_stats(Tree,Examples,PredictArg,Total-Distribution,M),
leaf_prediction(Tree,Total-Distribution,_,Incorrect),
estimate_error(Tree,Incorrect,Total,Upper,M),
split_clause(Clause,Head,Body),
goals_to_list(Body,BodyL),
% args 14/29/34 of the settings term: depth, time, proof strategy
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
greedy_prune_rule(Tree,Depth/Time/Proof,PredictArg,[Head|BodyL],Upper,C1L,E1,M),
list_to_clause(C1L,PrunedClause),
% p1_message('pruned clause'), p_message(Clause),
% p_message('to'),
% p_message(PrunedClause),
(E1 < Upper ->
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos)),
prove(Depth/Time/Proof,pos,PrunedClause,Pos,NewEx,NCov,M);
NewEx = Examples,
NCov = N).
% estimate error using binomial distribution as done in C4.5
estimate_error(classification,Incorrect,Total,Error,M):-
setting(confidence,Conf,M),
estimate_error(1.0/0.0,0.0/1.0,Conf,Total,Incorrect,Error).
% estimate upper bound on sample std deviation by
% assuming the n values in a leaf are normally distributed.
% In this case, a (1-alpha)x100 confidence interval for the
% variance is (n-1)s^2/X^2(alpha/2) =< var =< (n-1)s^2/X^2(1-alpha/2)
estimate_error(regression,Sd,1,Sd,_M):- !.
estimate_error(regression,Sd,N,Upper,M):-
(setting(confidence,Conf,M) -> true; Conf = 0.95),
Alpha is 1.0 - Conf,
DF is N - 1,
Prob is 1 - Alpha/2,
chi_square(DF,Prob,ChiSq),
Upper is Sd*sqrt((N-1)/ChiSq).
% bound_error(classification,+Error,+Total,-Lower,-Upper,+Module)
% Normal-approximation confidence interval around an observed error rate;
% falls back to a 0.95 confidence level when the setting is absent.
bound_error(classification,Error,Total,Lower,Upper,M):-
(setting(confidence,Alpha,M) -> true; Alpha = 0.95),
approx_z(Alpha,Z),
Margin is Z*sqrt(Error*(1-Error)/Total),
Lower is Error - Margin,
Upper is Error + Margin.
% approx_z(+P,-Z)
% Piecewise-linear approximation of the standard normal quantile for
% confidence level P; levels below 0.50 map to 0.67.
approx_z(P,Z):-
( P >= 0.99 -> Z = 2.58
; P >= 0.98 -> Z is 2.33 + (P-0.98)*(2.58-2.33)/(0.99-0.98)
; P >= 0.95 -> Z is 1.96 + (P-0.95)*(2.33-1.96)/(0.98-0.95)
; P >= 0.90 -> Z is 1.64 + (P-0.90)*(1.96-1.64)/(0.95-0.90)
; P >= 0.80 -> Z is 1.28 + (P-0.80)*(1.64-1.28)/(0.90-0.80)
; P >= 0.68 -> Z is 1.00 + (P-0.68)*(1.28-1.00)/(0.80-0.68)
; P >= 0.50 -> Z is 0.67 + (P-0.50)*(1.00-0.67)/(0.68-0.50)
; Z = 0.67
).
% greedy_prune_rule(+TreeType,+Flags,+PredictArg,+Clause,+Err0,-NewClause,-BestErr,+Module)
% Iterate single-literal deletions until a pass makes no change.
greedy_prune_rule(Tree,Flags,PredictArg,Clause,Err0,NewClause,BestErr,M):-
greedy_prune_rule1(Tree,Flags,PredictArg,Clause,Err0,Clause1,Err1,M),
Clause \= Clause1, !,
greedy_prune_rule(Tree,Flags,PredictArg,Clause1,Err1,NewClause,BestErr,M).
greedy_prune_rule(_,_,_,C,E,C,E,_M).
% One pass: failure-driven loop over every single-literal deletion, keeping
% the best mode-linked candidate in tree_besterror; the second clause
% returns (and removes) the winner.
greedy_prune_rule1(Tree,Flags,PredictArg,[Head|Body],Err0,_,_,M):-
retractall(M:'$aleph_search'(tree_besterror,_)),
asserta(M:'$aleph_search'(tree_besterror,besterror([Head|Body],Err0))),
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos)),
aleph_delete(_,Body,Left),
strip_negs(Left,Body1),
aleph_mode_linked([Head|Body1],M),
list_to_clause([Head|Left],Clause),
prove(Flags,pos,Clause,Pos,Ex1,_,M),
node_stats(Tree,Ex1,PredictArg,Total-Distribution,M),
leaf_prediction(Tree,Total-Distribution,_,Incorrect),
estimate_error(Tree,Incorrect,Total,Upper,M),
M:'$aleph_search'(tree_besterror,besterror(_,BestError)),
Upper =< BestError,
retract(M:'$aleph_search'(tree_besterror,besterror(_,BestError))),
asserta(M:'$aleph_search'(tree_besterror,besterror([Head|Left],Upper))),
fail.
greedy_prune_rule1(_,_,_,_,_,Clause1,Err1,M):-
retract(M:'$aleph_search'(tree_besterror,besterror(Clause1,Err1))).
% strip_negs(+Lits,-Lits1)
% Remove the not/1 wrapper from every literal that carries one.
strip_negs([],[]).
strip_negs([Lit|Lits],[Bare|Bares]):-
(Lit = not(Inner) -> Bare = Inner ; Bare = Lit),
strip_negs(Lits,Bares).
% add_tree(+Settings,+TreeType,+PredictArg,+Module)
% Failure-driven loop: turn every remaining leaf into a clause with its
% prediction attached and register it as a hypothesis.
add_tree(_,Tree,Predict,M):-
retract(M:'$aleph_search'(tree_leaf,l(_,_,Leaf,Examples))),
Leaf = [Clause,Cost,P],
add_prediction(Tree,Clause,Predict,Examples,Clause1,M),
p_message('best clause'),
pp_dclause(Clause1,M),
nlits(Clause,L),
Gain is -Cost,
asserta(M:'$aleph_global'(hypothesis,hypothesis([P,0,L,Gain],Clause1,Examples,[]))),
addhyp(M),
fail.
add_tree(_,_,_,_M).
% add_prediction(+TreeType,+Clause,+PredictArg,+Examples,-Clause1,+Module)
% Extend Clause so its predicted argument is bound according to the leaf's
% distribution; model trees get a fitted model literal instead.
add_prediction(Tree,Clause,PredictArg,Examples,Clause1,M):-
split_clause(Clause,Head,_),
(Tree = model ->
setting(evalfn,Evalfn,M),
add_model(Evalfn,Clause,PredictArg,Examples,Clause1,_,_,M);
node_stats(Tree,Examples,PredictArg,Distribution,M),
leaf_prediction(Tree,Distribution,Prediction,Error),
tparg(PredictArg,Head,Var),
add_prediction(Tree,Clause,Var,Prediction,Error,Clause1,M)).
% add_prediction/7: attach the concrete prediction literal per tree type.
add_prediction(classification,Clause,Var,Prediction,_,Clause1,_M):-
extend_clause(Clause,(Var = Prediction),Clause1).
add_prediction(class_probability,Clause,Var,Prediction,_,Clause1,_M):-
extend_clause(Clause,(random(Var,Prediction)),Clause1).
add_prediction(regression,Clause,Var,Mean,Sd,Clause1,_M):-
extend_clause(Clause,(random(Var,normal(Mean,Sd))),Clause1).
% add_model(+Evalfn,+Clause,+PredictArg,+Examples,-Clause1,-Total,-Error,+Module)
% Failure-driven search over declared background models: fit each candidate
% model literal and keep the lowest-error one in tree_model; the second
% clause returns (and removes) the winner.
add_model(Evalfn,Clause,PredictArg,Examples,_,_,_,M):-
retractall(M:'$aleph_local'(tree_model,_,_,_)),
Best is inf,
split_clause(Clause,Head,_),
tparg(PredictArg,Head,Var),
asserta(M:'$aleph_local'(tree_model,aleph_false,0,Best)),
M:'$aleph_global'(model,model(Name/Arity)),
functor(Model,Name,Arity),
auto_extend(Clause,Model,C,M),
leaf_predicts(Arity,Model,Var),
lazy_evaluate_refinement([],C,[Name/Arity],Examples,[],[],C1,M),
find_model_error(Evalfn,Examples,C1,PredictArg,Total,Error,M),
% pp_dclause(C1,M),
% p1_message(error),
% p1_message(Error),
M:'$aleph_local'(tree_model,_,_,BestSoFar),
%p1_message(BestSoFar),
(Error < BestSoFar ->
retract(M:'$aleph_local'(tree_model,_,_,_)),
asserta(M:'$aleph_local'(tree_model,C1,Total,Error));
true),
fail.
add_model(_,_,_,_,Clause,Total,Error,M):-
retract(M:'$aleph_local'(tree_model,Clause,Total,Error)).
% find_model_error(+Evalfn,+Examples,+Clause,+PredictArg,-Total,-Error,+Module)
% Collect Actual-Predicted pairs for every covered example and fold them
% into a total count and an accumulated error.
find_model_error(Evalfn,Examples,(Head:-Body),[PredictArg],T,E,M):-
functor(Head,_,Arity),
findall(Actual-Pred,
(aleph_member(Interval,Examples),
aleph_member3(N,Interval),
M:example(N,pos,Example),
copy_iargs(Arity,Example,Head,PredictArg),
once(M:Body),
arg(PredictArg,Head,Pred),
arg(PredictArg,Example,Actual)
),
L),
sum_model_errors(L,Evalfn,0,0.0,T,E), !.
% sum_model_errors(+Pairs,+Evalfn,+N0,+E0,-N,-E): accumulate count and error.
sum_model_errors([],_,N,E,N,E).
sum_model_errors([Act-Pred|T],Evalfn,NSoFar,ESoFar,N,E):-
get_model_error(Evalfn,Act,Pred,E1),
E1SoFar is ESoFar + E1,
N1SoFar is NSoFar + 1,
sum_model_errors(T,Evalfn,N1SoFar,E1SoFar,N,E).
% get_model_error(+Evalfn,+Actual,+Predicted,-Error): per-example error.
get_model_error(mse,Act,Pred,E):-
E is (Act-Pred)^2.
get_model_error(accuracy,Act,Pred,E):-
(Act = Pred -> E is 0.0; E is 1.0).
% leaf_predicts(+Arg,+Model,+Var)
% Succeeds iff Model has an unbound argument that is exactly the predicted
% variable Var, scanning argument positions Arg down to 1.
leaf_predicts(0,_,_):- !, fail.
leaf_predicts(Arg,Model,Var):-
arg(Arg,Model,Var1),
var(Var1),
Var1 == Var, !.
leaf_predicts(Arg,Model,Var):-
Arg1 is Arg - 1,
leaf_predicts(Arg1,Model,Var).
% leaf_prediction(+TreeType,+Total-Distribution,-Prediction,-Error)
% classification: majority class plus the number misclassified;
% class_probability: Laplace-corrected, normalised distribution (error 0);
% regression: mean with standard deviation as the error.
leaf_prediction(classification,Total-Distribution,Class,Incorrect):-
find_maj_class(Distribution,N-Class),
Incorrect is Total - N.
leaf_prediction(class_probability,T1-D1,NDistr,0):-
length(D1,NClasses),
laplace_correct(D1,LaplaceD1),
LaplaceTotal is T1 + NClasses,
normalise_distribution(LaplaceD1,LaplaceTotal,NDistr).
leaf_prediction(regression,_-[Mean,Sd],Mean,Sd).
% find_maj_class(+Distribution,-N-Class)
% Majority Count-Class pair of a distribution; on a tie, the later
% element wins.
find_maj_class([Single],Single):- !.
find_maj_class([N-Class|Rest],MajClass):-
find_maj_class(Rest,NBest-CBest),
(NBest >= N -> MajClass = NBest-CBest ; MajClass = N-Class).
% can_split(+Settings,+Type,+PredictArg,+Leaf,-Left,-Right,+Module)
% A leaf may be split when both its own cost and the best subtree's
% expected cost reduction reach the minimum gain (arg 21 of the settings).
can_split(S,Type,Predict,Leaf,Left,Right,M):-
arg(21,S,MinGain),
M:'$aleph_search'(tree_leaf,l(Leaf,_,[Clause,Cost,N],Examples)),
Cost >= MinGain,
get_best_subtree(S,Type,Predict,[Clause,Cost,N],Examples,Gain,Left,Right,M),
Gain >= MinGain,
p_message('found clauses'),
Left = [ClF,CostF|_], Right = [ClS,CostS|_],
arg(4,S,_/Evalfn),
pp_dclause(ClS,M),
print_eval(Evalfn,CostS),
pp_dclause(ClF,M),
print_eval(Evalfn,CostF),
p1_message('expected cost reduction'), p_message(Gain).
% get_best_subtree(+S,+Type,+Predict,+LeafInfo,+Examples,-Gain,-Left,-Right,+Module)
% Failure-driven search over refinements of the leaf clause: each candidate
% split is evaluated on both branches and recorded in tree_gain (best-so-far
% only in batch mode; every candidate in interactive mode).  The search is
% cut short once a split with non-positive average leaf cost is found;
% otherwise the second clause falls through to pick the recorded best.
get_best_subtree(S,Type,Predict,[Clause,Cost,N],Examples,Gain,Left,Right,M):-
arg(42,S,Interactive),
arg(43,S,LookAhead),
retractall(M:'$aleph_search'(tree_gain,_)),
MInf is -inf,
(Interactive = false ->
asserta(M:'$aleph_search'(tree_gain,tree_gain(MInf,[],[])));
true),
split_clause(Clause,Head,Body),
arg(4,S,_/Evalfn),
arg(13,S,MinPos),
auto_refine(LookAhead,Clause,ClS,M),
tree_refine_ok(Type,ClS,M),
eval_treenode(S,Type,ClS,Predict,Examples,ExS,NS,CostS,M),
NS >= MinPos,
rm_intervals(ExS,Examples,ExF),
split_clause(ClS,Head,Body1),
get_goaldiffs(Body,Body1,Diff),
extend_clause(Clause,not(Diff),ClF),
eval_treenode(S,Type,ClF,Predict,ExF,NF,CostF,M),
NF >= MinPos,
AvLeafCost is (NS*CostS + NF*CostF)/N,
CostReduction is Cost - AvLeafCost,
(Interactive = false ->
pp_dclause(ClS,M), print_eval(Evalfn,CostS),
pp_dclause(ClF,M), print_eval(Evalfn,CostF),
p1_message('expected cost reduction'), p_message(CostReduction),
M:'$aleph_search'(tree_gain,tree_gain(BestSoFar,_,_)),
CostReduction > BestSoFar,
retract(M:'$aleph_search'(tree_gain,tree_gain(BestSoFar,_,_))),
asserta(M:'$aleph_search'(tree_gain,tree_gain(CostReduction,
[ClF,CostF,NF,ExF],
[ClS,CostS,NS,ExS])));
asserta(M:'$aleph_search'(tree_gain,tree_gain(CostReduction,
[ClF,CostF,NF,ExF],
[ClS,CostS,NS,ExS])))),
AvLeafCost =< 0.0,
!,
get_best_subtree(Interactive,Clause,Gain,Left,Right,M).
get_best_subtree(S,_,_,[Clause|_],_,Gain,Left,Right,M):-
arg(42,S,Interactive),
get_best_subtree(Interactive,Clause,Gain,Left,Right,M).
% get_best_subtree(+Interactive,+Clause,-Gain,-Left,-Right,+Module)
% Batch mode: take the single recorded best split; interactive mode: list
% all candidate splits (sorted by negated gain) and let the user choose.
get_best_subtree(false,_,Gain,Left,Right,M):-
retract(M:'$aleph_search'(tree_gain,tree_gain(Gain,Left,Right))), !.
get_best_subtree(true,Clause,Gain,Left,Right,M):-
nl, write('Extending path: '), nl,
write('---------------'), nl,
pp_dclause(Clause,M),
findall(MCR-[Left,Right],
(M:'$aleph_search'(tree_gain,tree_gain(CostReduction,Left,Right)),
MCR is -1*CostReduction),
SplitsList),
keysort(SplitsList,Sorted),
get_best_split(Clause,Sorted,Gain,Left,Right),
retractall(M:'$aleph_search'(tree_gain,_)).
% get_best_split(+Clause,+Splits,-Gain,-Left,-Right)
% Interactive selection: display the candidate splits, then ask the user.
get_best_split(Clause,Splits,Gain,Left,Right):-
show_split_list(Clause,Splits),
ask_best_split(Splits,Gain,Left,Right).
% Print the header and the numbered table of candidate splits.
show_split_list(Clause,Splits):-
tab(4), write('Split Information'), nl,
tab(4), write('-----------------'), nl, nl,
tab(4), write('No.'),
tab(4), write('Split'),
nl,
tab(4), write('---'),
tab(4), write('-----'),
nl,
show_split_list(Splits,1,Clause).
% show_split_list(+Splits,+SplitNum,+Clause): one row per candidate, showing
% the added literals (diff against Clause) and branch statistics.
show_split_list([],_,_).
show_split_list([MCR-[[_,_,NF,_],[CLS,_,NS,_]]|Rest],SplitNum,Clause):-
copy_term(Clause,ClauseCopy),
split_clause(ClauseCopy,Head,Body),
copy_term(CLS,CLSCopy),
numbervars(CLSCopy,0,_),
split_clause(CLSCopy,Head,Body1),
get_goaldiffs(Body,Body1,Diff),
% keys were stored negated for keysort; undo that for display
Gain is -1*MCR,
tab(4), write(SplitNum),
tab(4), write(Diff), nl,
tab(12), write('Succeeded (Right Branch): '), write(NS), nl,
tab(12), write('Failed (Left Branch) : '), write(NF), nl,
tab(12), write('Cost Reduction : '), write(Gain), nl, nl,
NextSplit is SplitNum + 1,
show_split_list(Rest,NextSplit,Clause).
% ask_best_split(+Splits,-Gain,-Left,-Right)
% Repeatedly prompt until a valid split number (or "none.") is read;
% "none" yields empty branches with -inf gain.
ask_best_split(Splits,Gain,Left,Right):-
repeat,
tab(4), write('-> '),
write('Select Split Number (or "none.")'), nl,
read(Answer),
(Answer = none ->
Gain is -inf,
Left = [],
Right = [];
SplitNum is integer(Answer),
aleph_remove_nth(SplitNum,Splits,MCR-[Left,Right],_),
Gain is -1*MCR
),
!.
% tree_refine_ok(+TreeType,+Clause,+Module)
% For model trees, reject refinements that already contain the model literal.
tree_refine_ok(model,Clause,M):-
M:'$aleph_global'(model,model(Name/Arity)),
functor(Model,Name,Arity),
in(Clause,Model,M), !,
fail.
tree_refine_ok(_,_,_M).
% eval_treenode/8: cost of a node over an already-computed cover PCov.
eval_treenode(S,Tree,Clause,PredictArg,PCov,N,Cost,M):-
arg(4,S,_/Evalfn),
treenode_cost(Tree,Evalfn,Clause,PCov,PredictArg,N,Cost,M).
% eval_treenode/9: first compute the cover of Clause over Pos, then its
% cost; fails when fewer than MinPos (arg 13 of S) examples are covered.
eval_treenode(S,Tree,Clause,PredictArg,Pos,PCov,N,Cost,M):-
arg(4,S,_/Evalfn),
arg(13,S,MinPos),
arg(14,S,Depth),
arg(29,S,Time),
arg(34,S,Proof),
prove(Depth/Time/Proof,pos,Clause,Pos,PCov,PCount,M),
PCount >= MinPos,
treenode_cost(Tree,Evalfn,Clause,PCov,PredictArg,N,Cost,M).
% treenode_cost(+TreeType,+Evalfn,+Clause,+Covered,+PredictArg,-Total,-Cost,+Module)
% Model trees fit a model; other trees use the impurity of the node's
% class/value distribution (requires a non-empty cover).
treenode_cost(model,Evalfn,Clause,Covered,PredictArg,Total,Cost,M):-
!,
add_model(Evalfn,Clause,PredictArg,Covered,_,Total,Cost,M).
treenode_cost(Tree,Evalfn,_,Covered,PredictArg,Total,Cost,M):-
node_stats(Tree,Covered,PredictArg,Total-Distribution,M),
Total > 0,
impurity(Tree,Evalfn,Total-Distribution,Cost).
% node_stats(+TreeType,+Covered,+PredictArg,-Distribution,+Module)
% Distribution of the predicted argument over the covered examples, seeded
% from the stored start distribution; continuous for regression trees.
node_stats(Tree,Covered,PredictArg,D,M):-
M:'$aleph_search'(tree_startdistribution,d(Tree,D0)),
(Tree = regression ->
cont_distribution(Covered,PredictArg,D0,D,M);
discr_distribution(Covered,PredictArg,D0,D,M)).
% discr_distribution(+Intervals,+PredictArg,+T0-D0,-D,+Module)
% Count the class values of the predicted argument over example intervals;
% the result is Total-Pairs with Count-Class pairs.
discr_distribution([],_,D,D,_M).
discr_distribution([S-F|Intervals],PredictArg,T0-D0,D,M):-
discr_distribution(S,F,PredictArg,T0-D0,T1-D1,M),
discr_distribution(Intervals,PredictArg,T1-D1,D,M).
% discr_distribution/6: walk one interval, bumping the counter of each
% observed class (adding a 1-Class pair for unseen classes).
discr_distribution(N,F,_,D,D,_M):- N > F, !.
discr_distribution(N,F,PredictArg,T0-D0,D,M):-
M:example(N,pos,Example),
tparg(PredictArg,Example,Actual),
N1 is N + 1,
T1 is T0 + 1,
(aleph_delete(C0-Actual,D0,D1) ->
C1 is C0 + 1,
discr_distribution(N1,F,PredictArg,T1-[C1-Actual|D1],D,M);
discr_distribution(N1,F,PredictArg,T1-[1-Actual|D0],D,M)).
% cont_distribution: continuous analogue for regression trees, carrying a
% running sum and sum of squares, finishing with Total-[Mean,Sd].
cont_distribution([],_,T-[S,SS],T-[Mean,Sd],_M):-
(T = 0 -> Mean = 0, Sd = 0;
Mean is S/T,
Sd is sqrt(SS/T - Mean*Mean)).
cont_distribution([S-F|Intervals],PredictArg,T0-D0,D,M):-
cont_distribution(S,F,PredictArg,T0-D0,T1-D1,M),
cont_distribution(Intervals,PredictArg,T1-D1,D,M).
cont_distribution(N,F,_,D,D,_M):- N > F, !.
cont_distribution(N,F,PredictArg,T0-[S0,SS0],D,M):-
M:example(N,pos,Example),
tparg(PredictArg,Example,Actual),
N1 is N + 1,
T1 is T0 + 1,
S1 is S0 + Actual,
SS1 is SS0 + Actual*Actual,
cont_distribution(N1,F,PredictArg,T1-[S1,SS1],D,M).
% impurity(+TreeType,+Evalfn,+Total-Distribution,-Cost)
% Node impurity: standard deviation for regression; entropy (in bits) or
% gini for classification / class_probability trees.
impurity(regression,sd,_-[_,Sd],Sd):- !.
impurity(classification,entropy,Total-Distribution,Cost):-
sum_entropy(Distribution,Total,S),
Cost is -S/(Total*log(2)), !.
impurity(classification,gini,Total-Distribution,Cost):-
sum_gini(Distribution,Total,Cost), !.
impurity(class_probability,entropy,Total-Distribution,Cost):-
sum_entropy(Distribution,Total,S),
Cost is -S/(Total*log(2)), !.
impurity(class_probability,gini,Total-Distribution,Cost):-
sum_gini(Distribution,Total,Cost), !.
% Any other tree_type/evalfn combination is a configuration error.
impurity(_,_,_,_):-
err_message('inappropriate settings for tree_type and/or evalfn'),
fail.
% sum_gini(+Distribution,+Total,-Sum)
% Sum of p*(1-p) over the Count-Class pairs with a positive count;
% elements without a positive count contribute nothing.
sum_gini([],_,0).
sum_gini([Head|Rest],Total,Sum):-
sum_gini(Rest,Total,SumRest),
(Head = N-_, N > 0 ->
P is N/Total,
Sum is P*(1-P) + SumRest
;
Sum = SumRest
).
% sum_entropy(+Distribution,+Total,-Sum)
% Sum of N*log(N/Total) over the Count-Class pairs with a positive count;
% elements without a positive count contribute nothing.
sum_entropy([],_,0).
sum_entropy([Head|Rest],Total,Sum):-
sum_entropy(Rest,Total,SumRest),
(Head = N-_, N > 0 ->
Sum is N*log(N/Total) + SumRest
;
Sum = SumRest
).
% only binary splits
% left = condition at node fails
% right = condition at node succeeds
% split_leaf(+Leaf,+LeftTree,+RightTree,-[Left,Right],+Module)
% Replace a leaf by an internal tree node with two freshly numbered leaves;
% returns the two new leaf numbers for further growing.
split_leaf(Leaf,LeftTree,RightTree,[Left,Right],M):-
retract(M:'$aleph_search'(tree_leaf,l(Leaf,Parent,
[Clause,Cost,N],Examples))),
gen_leaf(Left,M),
gen_leaf(Right,M),
LeftTree = [ClF,CostF,NF,ExF],
RightTree = [ClS,CostS,NS,ExS],
asserta(M:'$aleph_search'(tree,t(Leaf,Parent,[Clause,Cost,N],
Examples,Left,Right))),
asserta(M:'$aleph_search'(tree_leaf,l(Left,Leaf,[ClF,CostF,NF],ExF))),
asserta(M:'$aleph_search'(tree_leaf,l(Right,Leaf,[ClS,CostS,NS],ExS))).
% gen_leaf(-Leaf,+Module): next leaf number from the tree_lastleaf counter
% (0 when the counter has not been created yet).
gen_leaf(Leaf1,M):-
retract(M:'$aleph_search'(tree_lastleaf,Leaf0)), !,
Leaf1 is Leaf0 + 1,
asserta(M:'$aleph_search'(tree_lastleaf,Leaf1)).
gen_leaf(0,M):-
asserta(M:'$aleph_search'(tree_lastleaf,0)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% G C W S
% examine list of clauses to be specialised
% generate an exception theory for each clause that covers negative examples
% gcws(+Module): repeat over all stored sphyp hypotheses; each one still
% covering negatives is specialised via gcws/5 and re-stored as a gcwshyp
% hypothesis.  Covers stored as Lo/Hi approximations are recomputed first.
gcws(M):-
setting(evalfn,EvalFn,M),
repeat,
retract(M:'$aleph_search'(sphyp,hypothesis([P,N,L|T],Clause,PCover,NCover))),
(PCover = _/_ -> label_create(pos,Clause,Label1,M),
extract_pos(Label1,PCover1),
interval_count(PCover1,P1);
PCover1 = PCover,
P1 = P),
(NCover = _/_ -> label_create(neg,Clause,Label2,M),
extract_neg(Label2,NCover1),
interval_count(NCover1,N1);
NCover1 = NCover,
N1 = N),
(N1 = 0 -> NewClause = Clause, NewLabel = [P1,N1,L|T];
MinAcc is P1/(2*P1 - 1),
set(minacc,MinAcc,M),
set(noise,N1,M),
gcws(Clause,PCover1,NCover1,NewClause,M),
L1 is L + 1,
complete_label(EvalFn,NewClause,[P,0,L1],NewLabel,M)),
assertz(M:'$aleph_search'(gcwshyp,hypothesis(NewLabel,NewClause,PCover1,[]))),
\+(M:'$aleph_search'(sphyp,hypothesis(_,_,_,_))), !.
% gcws(+Clause,+PCvr,+NCvr,-Clause1)
% specialise Clause that covers pos examples PCvr and neg examples NCvr
% result is Clause extended with a single negated literal
% clauses in exception theory are added to list for specialisation
gcws(Clause,PCover,NCover,Clause1,M):-
gen_absym(AbName,M),
split_clause(Clause,Head,Body),
functor(Head,_,Arity),
add_determinations(AbName/Arity,true,M),
add_modes(AbName/Arity,M),
gen_ab_examples(AbName/Arity,PCover,NCover,M),
cwinduce(M),
Head =.. [_|Args],
AbLit =.. [AbName|Args],
(Body = true -> Body1 = not(AbLit) ; app_lit(not(AbLit),Body,Body1)),
Clause1 = (Head:-Body1).
% greedy set-cover based construction of abnormality theory
% starts with the first exceptional example
% each clause obtained is added to list of clauses to be specialised
% cwinduce(+Module): saturate/reduce seed examples under greedy cover
% removal until no positive atoms remain; the greedy flag is saved on
% entry and reinstated on exit.
cwinduce(M):-
store(greedy,M),
set(greedy,true,M),
M:'$aleph_global'(atoms_left,atoms_left(pos,PosSet)),
PosSet \= [],
repeat,
M:'$aleph_global'(atoms_left,atoms_left(pos,[Num-X|Y])),
sat(Num,M),
reduce(M:_),
retract(M:'$aleph_global'(hypothesis,hypothesis(Label,H,PCover,NCover))),
asserta(M:'$aleph_search'(sphyp,hypothesis(Label,H,PCover,NCover))),
rm_seeds1(PCover,[Num-X|Y],NewPosLeft),
retract(M:'$aleph_global'(atoms_left,atoms_left(pos,[Num-X|Y]))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(pos,NewPosLeft))),
% stop only when every positive atom has been covered
NewPosLeft = [],
retract(M:'$aleph_global'(atoms_left,atoms_left(pos,NewPosLeft))),
reinstate(greedy,M), !.
cwinduce(_M).
% gen_ab_examples(+Ab,+PCover,+NCover)
% obtain examples for abnormality predicate Ab by
% pos examples are copies of neg examples in NCover
% neg examples are copies of pos examples in PCover
% writes new examples to temporary ".f" and ".n" files
% to ensure example/3 remains a static predicate
% alters search parameters accordingly
gen_ab_examples(Ab/_,PCover,NCover,M):-
create_examples(PosFile,Ab,neg,NCover,pos,PCover1,M),
create_examples(NegFile,Ab,pos,PCover,neg,NCover1,M),
aleph_consult(PosFile,M),
aleph_consult(NegFile,M),
retractall(M:'$aleph_global'(atoms_left,_)),
retractall(M:'$aleph_global'(size,_)),
asserta(M:'$aleph_global'(atoms_left,atoms_left(pos,PCover1))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(neg,NCover1))),
interval_count(PCover1,PSize),
interval_count(NCover1,NSize),
asserta(M:'$aleph_global'(size,size(pos,PSize))),
asserta(M:'$aleph_global'(size,size(neg,NSize))),
delete_file(PosFile),
delete_file(NegFile).
% create_examples(+File,+OldType,+OldE,+NewType,-NewE)
% copy OldE examples of OldType to give NewE examples of NewType
% copy stored in File
% New examples are numbered after the current last example of the new type;
% the second clause covers the case where nothing was copied.
create_examples(File,Ab,OldT,OldE,NewT,[Next-Last],M):-
tmp_file_stream(utf8,File,Stream),
M:'$aleph_global'(last_example,last_example(NewT,OldLast)),
set_output(Stream),
create_copy(OldE,OldT,NewT,Ab,OldLast,Last,M),
close(Stream),
set_output(user_output),
Last > OldLast, !,
retract(M:'$aleph_global'(last_example,last_example(NewT,OldLast))),
Next is OldLast + 1,
asserta(M:'$aleph_global'(last_example,last_example(NewT,Last))).
create_examples(_,_,_,_,_,[],_M).
% create_copy(+Intervals,+OldT,+NewT,+Ab,+Num,-Last,+Module)
% Write example/3 facts (renamed to Ab) to the current output stream for
% every example in the intervals, numbering them from Num+1.
create_copy([],_,_,_,L,L,_M).
create_copy([X-Y|T],OldT,NewT,Ab,Num,Last,M):-
create_copy(X,Y,OldT,NewT,Ab,Num,Num1,M),
create_copy(T,OldT,NewT,Ab,Num1,Last,M).
create_copy(X,Y,_,_,_,L,L,_M):- X > Y, !.
create_copy(X,Y,OldT,NewT,Ab,Num,Last,M):-
M:example(X,OldT,Example),
Example =.. [_|Args],
NewExample =.. [Ab|Args],
Num1 is Num + 1,
aleph_writeq(example(Num1,NewT,NewExample)), write('.'), nl,
X1 is X + 1,
create_copy(X1,Y,OldT,NewT,Ab,Num1,Last,M).
% gen_absym(-Name)
% generate new abnormality predicate symbol
% Uses a persistent counter (last_ab) so names ab0, ab1, ... are unique.
gen_absym(Name,M):-
(retract(M:'$aleph_global'(last_ab,last_ab(N))) ->
N1 is N + 1;
N1 is 0),
asserta(M:'$aleph_global'(last_ab,last_ab(N1))),
concat([ab,N1],Name).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% C L A U S E O P T I M I S A T I O N S
% optimise(+Clause,-Clause1): drop subsumed literals, then reorder goals.
optimise(Clause,Clause1):-
remove_redundant(Clause,Clause0),
reorder_clause(Clause0,Clause1).
% remove_redundant(+Clause,-Clause1): delete body literals subsumed by
% the remainder of the clause.
remove_redundant((Head:-Body),(Head1:-Body1)):-
goals_to_list((Head,Body),ClauseL),
remove_subsumed(ClauseL,[Head1|Body1L]),
(Body1L = [] -> Body1 = true; list_to_goals(Body1L,Body1)).
% reorder_clause(+Clause,-Clause1)
% Group body goals into independent sets (sharing no non-head variables)
% and recompile the clause with cuts between the groups.
reorder_clause((Head:-Body), Clause) :-
% term_variables(Head,LHead),
vars_in_term([Head],[],LHead),
number_goals_and_get_vars(Body,LHead,1,_,[],Conj),
calculate_independent_sets(Conj,[],BSets),
compile_clause(BSets,Head,Clause).
% Number each goal in order and pair it with its non-head variables.
number_goals_and_get_vars((G,Body),LHead,I0,IF,L0,[g(I0,LVF,NG)|LGs]) :- !,
I is I0+1,
get_goal_vars(G,LHead,LVF,NG),
number_goals_and_get_vars(Body,LHead,I,IF,L0,LGs).
number_goals_and_get_vars(G,LHead,I,I,L0,[g(I,LVF,NG)|L0]) :-
get_goal_vars(G,LHead,LVF,NG).
% Variables of G that do not also occur in the head.
get_goal_vars(G,LHead,LVF,G) :-
% term_variables(G,LV0),
vars_in_term([G],[],LVI),
aleph_ord_subtract(LVI,LHead,LVF).
% Partition the numbered goals into variable-disjoint sets.
calculate_independent_sets([],BSets,BSets).
calculate_independent_sets([G|Ls],BSets0,BSetsF) :-
add_goal_to_set(G,BSets0,BSetsI),
calculate_independent_sets(Ls,BSetsI,BSetsF).
add_goal_to_set(g(I,LV,G),Sets0,SetsF) :-
add_to_sets(Sets0,LV,[g(I,LV,G)],SetsF).
% add_to_sets(+Sets,+Vars,+Goals,-SetsOut)
% Insert a goal group, merging it with every set that shares a variable.
add_to_sets([],LV,Gs,[[LV|Gs]]).
add_to_sets([[LV|Gs]|Sets0],LVC,GsC,[[LV|Gs]|SetsF]) :-
aleph_ord_disjoint(LV,LVC), !,
add_to_sets(Sets0,LVC,GsC,SetsF).
add_to_sets([[LV|Gs]|Sets0],LVC,GsC,SetsF) :-
aleph_ord_union(LV,LVC,LVN),
join_goals(Gs,GsC,GsN),
add_to_sets(Sets0,LVN,GsN,SetsF).
% Merge two numbered-goal lists, keeping goal numbers in ascending order.
join_goals([],L,L):- !.
join_goals(L,[],L):- !.
join_goals([g(I1,VL1,G1)|T],[g(I2,VL2,G2)|T2],Z) :-
I1 < I2, !,
Z = [g(I1,VL1,G1)|TN],
join_goals(T,[g(I2,VL2,G2)|T2],TN).
join_goals([H|T],[g(I2,VL2,G2)|T2],Z) :-
Z = [g(I2,VL2,G2)|TN],
join_goals(T,[H|T2],TN).
% Rebuild the clause body: independent groups separated by cuts.
compile_clause(Goals,Head,(Head:-Body)):-
compile_clause2(Goals,Body).
compile_clause2([[_|B]], B1):-
!,
glist_to_goals(B,B1).
compile_clause2([[_|B]|Bs],(B1,!,NB)):-
glist_to_goals(B,B1),
compile_clause2(Bs,NB).
% Turn a list of g(_,_,Goal) records back into a goal conjunction.
glist_to_goals([g(_,_,Goal)],Goal):- !.
glist_to_goals([g(_,_,Goal)|Goals],(Goal,Goals1)):-
glist_to_goals(Goals,Goals1).
% remove literals subsumed in the body of a clause
% NOTE(review): delete/3 is called here as delete(Elem,List,Rest) (select
% style); SWI's library delete/3 takes delete(List,Elem,Rest) -- confirm a
% local delete/3 with this argument order is defined elsewhere in the file.
remove_subsumed([Head|Lits],Lits1):-
delete(Lit,Lits,Left),
\+(\+(redundant(Lit,[Head|Lits],[Head|Left]))), !,
remove_subsumed([Head|Left],Lits1).
remove_subsumed(L,L).
% determine if Lit is subsumed by a body literal
% A fresh copy of the reduced clause is used so unification with Lit does
% not bind the clause being tested.
redundant(Lit,Lits,[Head|Body]):-
copy_term([Head|Body],Rest1),
member(Lit1,Body),
Lit = Lit1,
aleph_subsumes(Lits,Rest1).
/**
* aleph_subsumes(+Lits:list,+Lits1:list) is det
*
* determine if Lits subsumes Lits1
*/
aleph_subsumes(Lits,Lits1):-
\+(\+((numbervars(Lits,0,_),numbervars(Lits1,0,_),aleph_subset1(Lits,Lits1)))).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% S A T / R E D U C E
/**
* sat(:Num:int) is det
*
* Num is an integer.
* Builds the bottom clause for positive example number Num.
* Positive examples are numbered from 1, and the numbering corresponds to the order of
* appearance.
*/
sat(M:Num):-
sat(Num,M).
% sat/2: an integer saturates positive example Num; any other argument is
% first recorded as a uspec example and then saturated.
sat(Num,M):-
integer(Num),!,
M:example(Num,pos,_),
sat(pos,Num,M),!.
sat(Example,M):-
record_example(check,uspec,Example,Num,M),
sat(uspec,Num,M), !.
% sat(+Type,+Num,+Module)
% Saturate example Num of the given type, building the bottom clause.
% construct_bottom=false: record the example but build no bottom clause.
sat(Type,Num,M):-
setting(construct_bottom,false,M), !,
sat_prelims(M),
M:example(Num,Type,Example),
broadcast(start(sat(Num))),
p1_message('sat'), p_message(Num), p_message(Example),
record_sat_example(Num,M),
asserta(M:'$aleph_sat'(example,example(Num,Type))),
asserta(M:'$aleph_sat'(hovars,[])),
broadcast(end(sat(Num, 0, 0.0))).
% construct_bottom=reduction: only the head literal is integrated.
sat(Type,Num,M):-
setting(construct_bottom,reduction,M), !,
sat_prelims(M),
M:example(Num,Type,Example),
broadcast(start(sat(Num))),
p1_message('sat'), p_message(Num), p_message(Example),
record_sat_example(Num,M),
asserta(M:'$aleph_sat'(example,example(Num,Type))),
integrate_head_lit(HeadOVars,M),
asserta(M:'$aleph_sat'(hovars,HeadOVars)),
broadcast(end(sat(Num, 0, 0.0))).
% Full saturation: flatten the example, collect determinate literals up to
% depth i, then remove repeated, commutative, symmetric, redundant,
% unconnected and negatively-reducible literals before storing the bottom
% clause and reporting size/time.
sat(Type,Num,M):-
set(stage,saturation,M),
sat_prelims(M),
M:example(Num,Type,Example),
broadcast(start(sat(Num))),
p1_message('sat'), p_message(Num), p_message(Example),
record_sat_example(Num,M),
asserta(M:'$aleph_sat'(example,example(Num,Type))),
split_args(Example,Mode,Input,Output,Constants,M),
integrate_args(unknown,Example,Output,M),
stopwatch(StartClock),
assertz(M:'$aleph_sat_atom'(Example,mode(Mode,Output,Input,Constants))),
M:'$aleph_global'(i,set(i,Ival)),
flatten(0,Ival,0,Last1,M),
M:'$aleph_sat_litinfo'(1,_,Atom,_,_,_),
get_vars(Atom,Output,HeadOVars),
asserta(M:'$aleph_sat'(hovars,HeadOVars)),
get_vars(Atom,Input,HeadIVars),
asserta(M:'$aleph_sat'(hivars,HeadIVars)),
functor(Example,Name,Arity),
get_determs(Name/Arity,L,M),
(M:'$aleph_global'(determination,determination(Name/Arity,'='/2))->
asserta(M:'$aleph_sat'(eq,true));
asserta(M:'$aleph_sat'(eq,false))),
get_atoms(L,1,Ival,Last1,Last,M),
stopwatch(StopClock),
Time is StopClock - StartClock,
asserta(M:'$aleph_sat'(lastlit,Last)),
asserta(M:'$aleph_sat'(botsize,Last)),
update_generators(M),
rm_moderepeats(Last,Repeats,M),
rm_commutative(Last,Commutative,M),
rm_symmetric(Last,Symmetric,M),
rm_redundant(Last,Redundant,M),
rm_uselesslits(Last,NotConnected,M),
rm_nreduce(Last,NegReduced,M),
/* write("Last:"),nl,write(Last),nl,
write("Repeats:"),nl,write(Repeats),nl,
write("NotConnected:"),nl,write(NotConnected),nl,
write("Commutative:"),nl,write(Commutative),nl,
write("Symmetric:"),nl,write(Symmetric),nl,
write("Redundant:"),nl,write(Redundant),nl,
write("NegReduced:"),nl,write(NegReduced),nl, */
TotalLiterals is
Last-Repeats-NotConnected-Commutative-Symmetric-Redundant-NegReduced,
show(bottom,M),
p1_message('literals'), p_message(TotalLiterals),
p1_message('saturation time'), p_message(Time),
broadcast(end(sat(Num, TotalLiterals, Time))),
store(bottom,M),
noset(stage,M).
% Fallback: clear the stage flag if saturation failed part-way.
sat(_,_,M):-
noset(stage,M).
/**
 * reduce(:Clause:term) is det
 *
 * Run a search on the current bottom clause, which can be obtained with the sat/1 command.
 * The search strategy is taken from the `search` setting and dispatched
 * to the matching reduce/3 clause below.
 */
reduce(M:Cl):-
setting(search,Search,M),
% NOTE(review): the recovery goal here is reinstate_values/0, while all
% other call sites use reinstate_values(List,M) — confirm a 0-ary
% variant exists, otherwise an abort raises an existence error.
catch(reduce(Search,Cl,M),abort,reinstate_values), !.
% reduce(+Search,+Module): as above, discarding the clause found.
reduce(Search,M):-
reduce(Search,_Cl,M).
% no search: add bottom clause as hypothesis
reduce(false,B,M):-
!,
add_bottom(B,M).
% iterative beam search as described by Ross Quinlan+MikeCameron-Jones,IJCAI-95
% reduce(ibs,-RClause,+Module)
% Doubles the beam width (openlist) on each pass until the estimated
% error rate stops improving or the beam exceeds 512.
reduce(ibs,RClause,M):-
!,
retractall(M:'$aleph_search'(ibs_rval,_)),
retractall(M:'$aleph_search'(ibs_nodes,_)),
retractall(M:'$aleph_search'(ibs_selected,_)),
store_values([openlist,caching,explore],M),
set(openlist,1,M),
set(caching,true,M),
set(explore,true,M),
asserta(M:'$aleph_search'(ibs_rval,1.0)),
asserta(M:'$aleph_search'(ibs_nodes,0)),
setting(evalfn,Evalfn,M),
get_start_label(Evalfn,Label,M),
% Seed the selected clause with the saturated example (if any).
(M:'$aleph_sat'(example,example(Num,Type)) ->
M:example(Num,Type,Example),
asserta(M:'$aleph_search'(ibs_selected,selected(Label,(Example:-true),
[Num-Num],[])));
asserta(M:'$aleph_search'(ibs_selected,selected(Label,(false:-true),
[],[])))),
stopwatch(Start),
repeat,
setting(openlist,OldOpen,M),
p1_message('ibs beam width'), p_message(OldOpen),
find_clause(bf,M),
M:'$aleph_search'(current,current(_,Nodes0,[PC,NC|_]/_)),
N is NC + PC,
estimate_error_rate(Nodes0,0.5,N,NC,NewR),
p1_message('ibs estimated error'), p_message(NewR),
retract(M:'$aleph_search'(ibs_rval,OldR)),
retract(M:'$aleph_search'(ibs_nodes,Nodes1)),
M:'$aleph_search'(selected,selected(BL,RCl,PCov,NCov)),
NewOpen is 2*OldOpen,
Nodes2 is Nodes0 + Nodes1,
set(openlist,NewOpen,M),
asserta(M:'$aleph_search'(ibs_rval,NewR)),
asserta(M:'$aleph_search'(ibs_nodes,Nodes2)),
% Stop when error no longer improves or the beam limit is reached;
% otherwise keep the better clause and iterate.
((NewR >= OldR; NewOpen > 512) -> true;
retract(M:'$aleph_search'(ibs_selected,selected(_,_,_,_))),
asserta(M:'$aleph_search'(ibs_selected,selected(BL,RCl,PCov,NCov))),
fail),
!,
stopwatch(Stop),
Time is Stop - Start,
retractall(M:'$aleph_search'(ibs_rval,_)),
retract(M:'$aleph_search'(ibs_nodes,Nodes)),
retract(M:'$aleph_search'(ibs_selected,selected(BestLabel,RClause,PCover,NCover))),
add_hyp(BestLabel,RClause,PCover,NCover,M),
p1_message('ibs clauses constructed'), p_message(Nodes),
p1_message('ibs search time'), p_message(Time),
p_message('ibs best clause'),
pp_dclause(RClause,M),
show_stats(Evalfn,BestLabel),
% FIX: thread the module argument M, as in every sibling reduce/3 clause
% (previously record_search_stats/3 and reinstate_values/1 were called,
% which do not exist in this module-parameterised code).
record_search_stats(RClause,Nodes,Time,M),
reinstate_values([openlist,caching,explore],M).
% iterative deepening search
% reduce(id,-RClause,+Module)
% Depth-first search with clauselength incremented each pass, up to the
% stored clauselength setting; keeps the best clause seen overall.
reduce(id,RClause,M):-
!,
retractall(M:'$aleph_search'(id_nodes,_)),
retractall(M:'$aleph_search'(id_selected,_)),
store_values([caching,clauselength],M),
setting(clauselength,MaxCLen,M),
set(clauselength,1,M),
set(caching,true,M),
asserta(M:'$aleph_search'(id_nodes,0)),
setting(evalfn,Evalfn,M),
get_start_label(Evalfn,Label,M),
% Seed the selected clause with the saturated example (if any).
(M:'$aleph_sat'(example,example(Num,Type)) ->
M:example(Num,Type,Example),
asserta(M:'$aleph_search'(id_selected,selected(Label,(Example:-true),
[Num-Num],[])));
asserta(M:'$aleph_search'(id_selected,selected(Label,(false:-true),
[],[])))),
stopwatch(Start),
repeat,
setting(clauselength,OldCLen,M),
p1_message('id clauselength setting'), p_message(OldCLen),
find_clause(df,M),
M:'$aleph_search'(current,current(_,Nodes0,_)),
retract(M:'$aleph_search'(id_nodes,Nodes1)),
M:'$aleph_search'(selected,selected([P,N,L,F|T],RCl,PCov,NCov)),
M:'$aleph_search'(id_selected,selected([_,_,_,F1|_],_,_,_)),
NewCLen is OldCLen + 1,
Nodes2 is Nodes0 + Nodes1,
set(clauselength,NewCLen,M),
% FIX: re-assert the accumulated node count (it was retracted above).
% Previously this line merely queried '$aleph_search'(id_nodes,Nodes2),
% which fails after the retract and aborts the deepening loop.
asserta(M:'$aleph_search'(id_nodes,Nodes2)),
(F1 >= F -> true;
retract(M:'$aleph_search'(id_selected,selected([_,_,_,F1|_],_,_,_))),
asserta(M:'$aleph_search'(id_selected,selected([P,N,L,F|T],RCl,PCov,NCov))),
set(best,[P,N,L,F|T],M)),
% Terminate once the depth bound has been exhausted.
NewCLen > MaxCLen,
!,
stopwatch(Stop),
Time is Stop - Start,
retract(M:'$aleph_search'(id_nodes,Nodes)),
retract(M:'$aleph_search'(id_selected,selected(BestLabel,RClause,PCover,NCover))),
add_hyp(BestLabel,RClause,PCover,NCover,M),
p1_message('id clauses constructed'), p_message(Nodes),
p1_message('id search time'), p_message(Time),
p_message('id best clause'),
pp_dclause(RClause,M),
show_stats(Evalfn,BestLabel),
record_search_stats(RClause,Nodes,Time,M),
noset(best,M),
reinstate_values([caching,clauselength],M).
% iterative language search as described by Rui Camacho, 1996
% reduce(ils,-RClause,+Module)
% Breadth-first search with the `language` bound incremented each pass;
% iterates until the best score no longer improves.
reduce(ils,RClause,M):-
!,
retractall(M:'$aleph_search'(ils_nodes,_)),
retractall(M:'$aleph_search'(ils_selected,_)),
store_values([caching,language],M),
set(searchstrat,bf,M),
set(language,1,M),
set(caching,true,M),
asserta(M:'$aleph_search'(ils_nodes,0)),
setting(evalfn,Evalfn,M),
get_start_label(Evalfn,Label,M),
% Seed the selected clause with the saturated example (if any).
(M:'$aleph_sat'(example,example(Num,Type)) ->
M:example(Num,Type,Example),
asserta(M:'$aleph_search'(ils_selected,selected(Label,(Example:-true),
[Num-Num],[])));
asserta(M:'$aleph_search'(ils_selected,selected(Label,(false:-true),
[],[])))),
stopwatch(Start),
repeat,
setting(language,OldLang,M),
p1_message('ils language setting'), p_message(OldLang),
find_clause(bf,M),
M:'$aleph_search'(current,current(_,Nodes0,_)),
retract(M:'$aleph_search'(ils_nodes,Nodes1)),
M:'$aleph_search'(selected,selected([P,N,L,F|T],RCl,PCov,NCov)),
M:'$aleph_search'(ils_selected,selected([_,_,_,F1|_],_,_,_)),
NewLang is OldLang + 1,
Nodes2 is Nodes0 + Nodes1,
set(language,NewLang,M),
asserta(M:'$aleph_search'(ils_nodes,Nodes2)),
% Stop when the score stops improving; otherwise record the better
% clause and loop (the fail drives the repeat).
(F1 >= F -> true;
retract(M:'$aleph_search'(ils_selected,selected([_,_,_,F1|_],_,_,_))),
asserta(M:'$aleph_search'(ils_selected,selected([P,N,L,F|T],RCl,PCov,NCov))),
set(best,[P,N,L,F|T],M),
fail),
!,
stopwatch(Stop),
Time is Stop - Start,
retract(M:'$aleph_search'(ils_nodes,Nodes)),
retract(M:'$aleph_search'(ils_selected,selected(BestLabel,RClause,PCover,NCover))),
add_hyp(BestLabel,RClause,PCover,NCover,M),
p1_message('ils clauses constructed'), p_message(Nodes),
p1_message('ils search time'), p_message(Time),
p_message('ils best clause'),
pp_dclause(RClause,M),
show_stats(Evalfn,BestLabel),
record_search_stats(RClause,Nodes,Time,M),
noset(best,M),
reinstate_values([caching,language],M).
% implementation of a randomised local search for clauses
% currently, this can use either: simulated annealing with a fixed temp
% or a GSAT-like algorithm
% the choice of these is specified by the parameter: rls_type
% both annealing and GSAT employ random multiple restarts
% and a limit on the number of moves
% the number of restarts is specified by set(tries,...)
% the number of moves is specified by set(moves,...)
% annealing currently restricted to using a fixed temperature
% the temperature is specified by set(temperature,...)
% the use of a fixed temp. makes it equivalent to the Metropolis alg.
% GSAT if given a ``random-walk probability'' performs Selman et als walksat
% the walk probability is specified by set(walk,...)
% a walk probability of 0 is equivalent to doing standard GSAT
% reduce(rls,-RBest,+Module): delegates the restart loop to rls_search/6,
% possibly running restarts across multiple threads (threads setting).
reduce(rls,RBest,M):-
!,
setting(tries,MaxTries,M),
MaxTries >= 1,
store_values([caching,refine,refineop],M),
set(searchstrat,heuristic,M),
set(caching,true,M),
setting(refine,Refine,M),
% If no user refinement operator is set, use the rls-specific one.
(Refine \= false -> true; set(refineop,rls,M)),
setting(threads,Threads,M),
rls_search(Threads, MaxTries, Time, Nodes, selected(BestLabel,
RBest,PCover,NCover),M),
add_hyp(BestLabel,RBest,PCover,NCover,M),
p1_message('rls nodes constructed'), p_message(Nodes),
p1_message('rls search time'), p_message(Time),
p_message('rls best result'),
pp_dclause(RBest,M),
setting(evalfn,Evalfn,M),
show_stats(Evalfn,BestLabel),
record_search_stats(RBest,Nodes,Time,M),
noset(best,M),
reinstate_values([caching,refine,refineop],M).
% stochastic clause selection based on ordinal optimisation
% see papers by Y.C. Ho and colleagues for more details
% reduce(scs,-RBest,+Module)
% Draws a random sample of clauses (size derived from scs_percentile and
% scs_prob unless scs_sample is set) and picks the best via a 0-move rls.
reduce(scs,RBest,M):-
!,
store_values([tries,moves,rls_type,clauselength_distribution],M),
stopwatch(Start),
(setting(scs_sample,SampleSize,M) -> true;
setting(scs_percentile,K,M),
K > 0.0,
setting(scs_prob,P,M),
P < 1.0,
% Sample size needed so that with probability P the sample contains
% a clause in the top K percentile.
SampleSize is integer(log(1-P)/log(1-K/100) + 1)),
% FIX: setting/3, not setting/4 — the extra trailing M was a typo
% (every other call in this file uses setting(Name,Value,Module)).
(setting(scs_type,informed,M)->
(setting(clauselength_distribution,_D,M) -> true;
setting(clauselength,CL,M),
estimate_clauselength_distribution(CL,100,K,D,M),
% max_in_list(D,Prob-Length),
% p1_message('using clauselength distribution'),
% p_message([Prob-Length]),
% set(clauselength_distribution,[Prob-Length]));
p1_message('using clauselength distribution'),
p_message(D),
set(clauselength_distribution,D,M));
true),
set(tries,SampleSize,M),
set(moves,0,M),
set(rls_type,gsat,M),
reduce(rls,M),
stopwatch(Stop),
Time is Stop - Start,
M:'$aleph_search'(rls_nodes,Nodes),
M:'$aleph_search'(rls_selected,selected(BestLabel,RBest,_,_)),
p1_message('scs nodes constructed'), p_message(Nodes),
p1_message('scs search time'), p_message(Time),
p_message('scs best result'),
pp_dclause(RBest,M),
setting(evalfn,Evalfn,M),
show_stats(Evalfn,BestLabel),
record_search_stats(RBest,Nodes,Time,M),
p1_message('scs search time'), p_message(Time),
reinstate_values([tries,moves,rls_type,clauselength_distribution],M).
% simple association rule search
% For a much more sophisticated approach see: L. Dehaspe, PhD Thesis, 1998
% Here, simply find all rules within search that cover at least
% a pre-specificed fraction of the positive examples
% reduce(ar,-Cl,+Module): negatives are temporarily emptied so coverage
% search enumerates rules; results are collected as `good clauses'.
reduce(ar,Cl,M):-
!,
clear_cache(M),
(setting(pos_fraction,PFrac,M) -> true;
p_message('value required for pos_fraction parameter'),
fail),
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos)),
% Stash the negative atoms; restored after the search below.
retract(M:'$aleph_global'(atoms_left,atoms_left(neg,Neg))),
interval_count(Pos,P),
MinPos is PFrac*P,
store_values([minpos,evalfn,explore,caching,minacc,good],M),
set(searchstrat,bf,M),
set(minpos,MinPos,M),
set(evalfn,coverage,M),
set(explore,true,M),
set(caching,true,M),
set(minacc,0.0,M),
set(good,true,M),
asserta(M:'$aleph_global'(atoms_left,atoms_left(neg,[]))),
find_clause(bf,M),
show(good,M),
good_clauses(Cl,M),
retract(M:'$aleph_global'(atoms_left,atoms_left(neg,[]))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(neg,Neg))),
reinstate_values([minpos,evalfn,explore,caching,minacc,good],M).
% search for integrity constraints
% modelled on the work by L. De Raedt and L. Dehaspe, 1996
% reduce(ic,-Cl,+Module): allows up to `noise` violations by setting
% minscore to -noise; minpos is 0 since constraints cover no positives.
reduce(ic,Cl,M):-
!,
store_values([minpos,minscore,evalfn,explore,refineop],M),
setting(refineop,RefineOp,M),
(RefineOp = false -> set(refineop,auto,M); true),
set(minpos,0,M),
set(searchstrat,bf,M),
set(evalfn,coverage,M),
set(explore,true,M),
setting(noise,N,M),
MinScore is -N,
set(minscore,MinScore,M),
find_clause(bf,Cl,M),
reinstate_values([minpos,minscore,evalfn,explore,refineop],M).
% Plain single-pass searches: breadth-first, depth-first, heuristic.
reduce(bf,Cl,M):-
!,
find_clause(bf,Cl,M).
reduce(df,Cl,M):-
!,
find_clause(df,Cl,M).
reduce(heuristic,Cl,M):-
!,
find_clause(heuristic,Cl,M).
% find_clause(Search,M) where Search is one of bf, df, heuristic
find_clause(Search,M):-
find_clause(Search,_Cl,M).
% find_clause(+Search,-RClause,+Module)
% Core single-clause search: initialises the open list and the selected
% clause, runs search/3, then records and reports the best clause found.
find_clause(Search,RClause,M):-
set(stage,reduction,M),
set(searchstrat,Search,M),
p_message('reduce'),
reduce_prelims(L,P,N,M),
asserta(M:'$aleph_search'(openlist,[])),
get_search_settings(S,M),
% arg 4 of the settings term is SearchStrat/Evalfn.
arg(4,S,_/Evalfn),
get_start_label(Evalfn,Label,M),
(M:'$aleph_sat'(example,example(Num,Type)) ->
M:example(Num,Type,Example),
asserta(M:'$aleph_search'(selected,selected(Label,(Example:-true),
[Num-Num],[])));
asserta(M:'$aleph_search'(selected,selected(Label,(false:-true),[],[])))),
% arg 13 is the minpos setting; give up if too few positives remain.
arg(13,S,MinPos),
interval_count(P,PosLeft),
PosLeft >= MinPos,
M:'$aleph_search'(selected,selected(L0,C0,P0,N0)),
add_hyp(L0,C0,P0,N0,M),
% Establish the best label known so far (max_set, best setting, or the
% start label) to prune the search.
(M:'$aleph_global'(max_set,max_set(Type,Num,Label1,ClauseNum))->
BestSoFar = Label1/ClauseNum;
(M:'$aleph_global'(best,set(best,Label2))->
BestSoFar = Label2/0;
BestSoFar = Label/0)),
asserta(M:'$aleph_search'(best_label,BestSoFar)),
p1_message('best label so far'), p_message(BestSoFar),
arg(3,S,RefineOp),
stopwatch(StartClock),
(RefineOp = false ->
get_gains(S,0,BestSoFar,[],false,[],0,L,[1],P,N,[],1,Last,NextBest,M),
update_max_head_count(0,Last,M);
clear_cache(M),
interval_count(P,MaxPC),
asserta(M:'$aleph_local'(max_head_count,MaxPC)),
StartClause = 0-[Num,Type,[],aleph_false],
get_gains(S,0,BestSoFar,StartClause,_,_,_,L,[StartClause],
P,N,[],1,Last,NextBest,M)),
asserta(M:'$aleph_search_expansion'(1,0,1,Last)),
get_nextbest(S,_,M),
asserta(M:'$aleph_search'(current,current(1,Last,NextBest))),
search(S,Nodes,M),
stopwatch(StopClock),
Time is StopClock - StartClock,
M:'$aleph_search'(selected,selected(BestLabel,RClause,PCover,NCover)),
retract(M:'$aleph_search'(openlist,_)),
add_hyp(BestLabel,RClause,PCover,NCover,M),
p1_message('clauses constructed'), p_message(Nodes),
p1_message('search time'), p_message(Time),
p_message('best clause'),
pp_dclause(RClause,M),
show_stats(Evalfn,BestLabel),
update_search_stats(Nodes,Time,M),
record_search_stats(RClause,Nodes,Time,M),
noset(stage,M),
!.
% Fallback: the search above failed (e.g. too few positives); still
% report whatever clause is currently selected.
find_clause(_,RClause,M):-
M:'$aleph_search'(selected,selected(BestLabel,RClause,PCover,NCover)),
retract(M:'$aleph_search'(openlist,_)),
add_hyp(BestLabel,RClause,PCover,NCover,M),
p_message('best clause'),
pp_dclause(RClause,M),
(setting(evalfn,Evalfn,M) -> true; Evalfn = coverage),
show_stats(Evalfn,BestLabel),
noset(stage,M),
!.
% find_theory(Search,M) where Search is rls only at present
% find_theory(rls,-Program,+Module)
% Theory-level randomised local search: repeats find_theory1/2 for up to
% `tries` restarts, keeping the highest-scoring theory.
find_theory(rls,Program,M):-
!,
retractall(M:'$aleph_search'(rls_move,_)),
retractall(M:'$aleph_search'(rls_nodes,_)),
retractall(M:'$aleph_search'(rls_parentstats,_)),
retractall(M:'$aleph_search'(rls_selected,_)),
setting(tries,MaxTries,M),
MaxTries >= 1,
store_values([caching,store_bottom],M),
set(caching,false,M),
set(store_bottom,true,M),
M:'$aleph_global'(atoms,atoms(pos,PosSet)),
M:'$aleph_global'(atoms,atoms(neg,NegSet)),
interval_count(PosSet,P0),
interval_count(NegSet,N0),
setting(evalfn,Evalfn,M),
% Start from the empty theory labelled by the full example counts.
complete_label(Evalfn,[0-[0,0,[],false]],[P0,N0,1],Label,M),
asserta(M:'$aleph_search'(rls_selected,selected(Label,[0-[0,0,[],false]],
PosSet,NegSet))),
asserta(M:'$aleph_search'(rls_nodes,0)),
asserta(M:'$aleph_search'(rls_restart,1)),
get_search_settings(S,M),
set(best,Label,M),
stopwatch(Start),
repeat,
% Reset per-restart state before each local search.
retractall(M:'$aleph_search'(rls_parentstats,_)),
retractall(M:'$aleph_search'(rls_move,_)),
retractall(M:'$aleph_search_seen'(_,_)),
asserta(M:'$aleph_search'(rls_move,1)),
asserta(M:'$aleph_search'(rls_parentstats,stats(Label,PosSet,NegSet))),
M:'$aleph_search'(rls_restart,R),
p1_message('restart'), p_message(R),
find_theory1(rls,M),
M:'$aleph_search'(current,current(_,Nodes0,_)),
retract(M:'$aleph_search'(rls_nodes,Nodes1)),
M:'$aleph_search'(selected,selected([P,N,L,F|T],RCl,PCov,NCov)),
M:'$aleph_search'(rls_selected,selected([_,_,_,F1|_],_,_,_)),
retract(M:'$aleph_search'(rls_restart,R)),
R1 is R + 1,
asserta(M:'$aleph_search'(rls_restart,R1)),
Nodes2 is Nodes0 + Nodes1,
asserta(M:'$aleph_search'(rls_nodes,Nodes2)),
(F1 >= F -> true;
retract(M:'$aleph_search'(rls_selected,selected([_,_,_,F1|_],_,_,_))),
asserta(M:'$aleph_search'(rls_selected,selected([P,N,L,F|T],RCl,PCov,NCov))),
set(best,[P,N,L,F|T],M)),
setting(best,BestSoFar,M),
(R1 > MaxTries;discontinue_search(S,BestSoFar/_,Nodes2,M)),
!,
stopwatch(Stop),
Time is Stop - Start,
M:'$aleph_search'(rls_nodes,Nodes),
M:'$aleph_search'(rls_selected,selected(BestLabel,RBest,PCover,NCover)),
add_hyp(BestLabel,RBest,PCover,NCover,M),
p1_message('nodes constructed'), p_message(Nodes),
p1_message('search time'), p_message(Time),
p_message('best theory'),
Program=RBest,
pp_dclauses(RBest,M),
show_stats(Evalfn,BestLabel),
record_search_stats(RBest,Nodes,Time,M),
noset(best,M),
% NOTE(review): refine/refineop are reinstated here but only
% [caching,store_bottom] were stored above — confirm this asymmetry is
% intentional (reinstating an unstored value may be a harmless no-op).
reinstate_values([caching,refine,refineop,store_bottom],M).
% find_theory1(+Search,+Module)
% One restart of the theory-level search: initialise from the empty
% theory, run tsearch/3, then record and report the selected theory.
find_theory1(_,M):-
clean_up_reduce(M),
M:'$aleph_global'(atoms,atoms(pos,Pos)),
M:'$aleph_global'(atoms,atoms(neg,Neg)),
asserta(M:'$aleph_search'(openlist,[])),
asserta(M:'$aleph_search'(nextnode,none)),
stopwatch(StartClock),
get_search_settings(S,M),
arg(4,S,_/Evalfn),
interval_count(Pos,P),
interval_count(Neg,N),
complete_label(Evalfn,[0-[0,0,[],false]],[P,N,1],Label,M),
asserta(M:'$aleph_search'(selected,selected(Label,[0-[0,0,[],false]],Pos,Neg))),
get_theory_gain(S,0,Label/0,[0-[0,0,[],false]],Pos,Neg,P,N,NextBest,Last,M),
asserta(M:'$aleph_search'(current,current(0,Last,NextBest))),
get_nextbest(S,_,M),
tsearch(S,Nodes,M),
stopwatch(StopClock),
Time is StopClock - StartClock,
M:'$aleph_search'(selected,selected(BestLabel,RTheory,PCover,NCover)),
retract(M:'$aleph_search'(openlist,_)),
add_hyp(BestLabel,RTheory,PCover,NCover,M),
p1_message('theories constructed'), p_message(Nodes),
p1_message('search time'), p_message(Time),
p_message('best theory'),
pp_dclauses(RTheory,M),
show_stats(Evalfn,BestLabel),
update_search_stats(Nodes,Time,M),
record_tsearch_stats(RTheory,Nodes,Time,M).
% estimate_error_rate(+H,+Del,+N,+E,-R)
% Estimate an error rate R such that, over H hypotheses, the chance of
% observing E or fewer errors in N trials drops below Del. Bisects on
% the binomial tail probability computed by binom_lte/4.
estimate_error_rate(H,Del,N,E,R):-
Target is 1-exp(log(1-Del)/H),
estimate_error(1.0/0.0,0.0/1.0,Target,N,E,R).
% estimate_error(LoRate/LoProb, HiRate/HiProb, +Target, +N, +E, -R)
% Bisection step: narrow the bracket until the binomial tail probability
% at the midpoint is within 1e-5 of Target.
estimate_error(Lo/LoP,Hi/HiP,Target,N,E,R):-
Mid is (Lo+Hi)/2,
binom_lte(N,Mid,E,Prob),
Diff is abs(Target - Prob),
(Diff >= 0.00001 ->
% Move the bound whose side the midpoint probability falls on.
(Prob =< Target ->
estimate_error(Mid/Prob,Hi/HiP,Target,N,E,R);
estimate_error(Lo/LoP,Mid/Prob,Target,N,E,R));
R is Mid
).
% zap_rest(+Lits,+Module)
% Failure-driven loop: keep only the literals in Lits in the bottom
% clause, restricting each kept literal's dependents to Lits as well.
zap_rest(Lits,M):-
retract(M:'$aleph_sat_litinfo'(LitNum,Depth,Atom,I,O,D)),
(aleph_member1(LitNum,Lits) ->
intersect1(Lits,D,D1,_),
asserta(M:'$aleph_sat_litinfo'(LitNum,Depth,Atom,I,O,D1));
true),
fail.
zap_rest(_,_M).
% sat_prelims(+Module): reset all saturation state before building a
% new bottom clause.
sat_prelims(M):-
clean_up_sat(M),
clean_up_hypothesis(M),
reset_counts(M),
set_up_builtins(M).
% reduce_prelims(-LastLit,-PosAtoms,-NegAtoms,+Module)
% Prepare for clause search: clear previous search state and return the
% last bottom-clause literal plus the positive/negative example intervals.
reduce_prelims(L,P,N,M):-
clean_up_reduce(M),
check_posonly(M),
check_auto_refine(M),
(M:'$aleph_sat'(lastlit,L) -> true;
L = 0, asserta(M:'$aleph_sat'(lastlit,L))),
(M:'$aleph_sat'(botsize,_B) -> true;
B = 0, asserta(M:'$aleph_sat'(botsize,B))),
% With lazy evaluation or greedy cover removal, only the remaining
% positives are used; otherwise all positives.
((M:'$aleph_global'(lazy_evaluate,lazy_evaluate(_));setting(greedy,true,M))->
M:'$aleph_global'(atoms_left,atoms_left(pos,P));
M:'$aleph_global'(atoms,atoms(pos,P))),
setting(evalfn,E,M),
% posonly evaluation scores against randomly generated examples.
(E = posonly -> NType = rand; NType = neg),
M:'$aleph_global'(atoms_left,atoms_left(NType,N)),
asserta(M:'$aleph_search'(nextnode,none)).
% set_up_builtins(+Module): install the cut as a bottom-clause literal.
set_up_builtins(M):-
gen_nlitnum(Cut,M),
asserta(M:'$aleph_sat_litinfo'(Cut,0,'!',[],[],[])).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% T H R E A D S
% multi-threaded randomised local search
% rls_search(+Threads,+MaxTries,-Time,-Nodes,-Selected,+Module)
% Single-threaded randomised local search: run rls_thread/6 once per
% restart, keeping the highest-scoring clause across restarts.
rls_search(1, MaxTries, Time, Nodes, Selected,M) :-
!,
retractall(M:'$aleph_search'(rls_restart,_)),
retractall(M:'$aleph_search'(rls_nodes,_)),
retractall(M:'$aleph_search'(rls_selected,_)),
asserta(M:'$aleph_search'(rls_restart,1)),
setting(evalfn,Evalfn,M),
get_start_label(Evalfn,Label,M),
set(best,Label,M),
get_search_settings(S,M),
arg(4,S,SearchStrat/_),
% Seed the selected clause with the saturated example (if any).
(M:'$aleph_sat'(example,example(Num,Type)) ->
M:example(Num,Type,Example),
asserta(M:'$aleph_search'(rls_selected,selected(Label,
(Example:-true),[Num-Num],[])));
asserta(M:'$aleph_search'(rls_selected,selected(Label,
(false:-true),[],[])))
),
asserta(M:'$aleph_search'(rls_nodes,0)),
stopwatch(Start),
estimate_numbers(_,M),
repeat,
retract(M:'$aleph_search'(rls_restart,R)),
R1 is R + 1,
asserta(M:'$aleph_search'(rls_restart,R1)),
rls_thread(R, SearchStrat, Label, Nodes0, selected(Best,RCl,PCov,NCov),M),
Best = [_,_,_,F|_],
M:'$aleph_search'(rls_selected,selected([_,_,_,F1|_],_,_,_)),
(F1 >= F -> true;
retract(M:'$aleph_search'(rls_selected,selected([_,_,_,F1|_],
_,_,_))),
asserta(M:'$aleph_search'(rls_selected,selected(Best,RCl,
PCov,NCov))),
set(best,Best,M)
),
setting(best,BestSoFar,M),
retract(M:'$aleph_search'(rls_nodes,Nodes1)),
Nodes2 is Nodes0 + Nodes1,
asserta(M:'$aleph_search'(rls_nodes,Nodes2)),
% Stop after MaxTries restarts or when the search can be discontinued.
(R1 > MaxTries; discontinue_search(S,BestSoFar/_,Nodes2,M)),
!,
stopwatch(Stop),
Time is Stop - Start,
retractall(M:'$aleph_search'(rls_restart,_)),
retract(M:'$aleph_search'(rls_nodes,Nodes)),
retract(M:'$aleph_search'(rls_selected,Selected)).
% rls_search(+N,+MaxTries,-Time,-Nodes,-Selected,+Module)
% Multi-threaded variant: a pool of N workers consumes one
% rls_restart(R,SearchStrat,Label,M) message per restart from a shared
% queue; results are folded together by collect_results/5.
rls_search(N, MaxTries, Time, Nodes, Selected,M) :-
retractall(M:'$aleph_search'(rls_restart,_)),
retractall(M:'$aleph_search'(rls_nodes,_)),
retractall(M:'$aleph_search'(rls_selected,_)),
setting(evalfn,Evalfn,M),
get_start_label(Evalfn,Label,M),
set(best,Label,M),
get_search_settings(S,M),
arg(4,S,SearchStrat/_),
% Seed the selected clause with the saturated example (if any).
(M:'$aleph_sat'(example,example(Num,Type)) ->
M:example(Num,Type,Example),
asserta(M:'$aleph_search'(rls_selected,selected(Label,
(Example:-true),[Num-Num],[])));
asserta(M:'$aleph_search'(rls_selected,selected(Label,
(false:-true),[],[])))
),
asserta(M:'$aleph_search'(rls_nodes,0)),
estimate_numbers(_,M), % so all threads can use same estimates
thread_self(Master),
message_queue_create(Queue),
create_worker_pool(N, Master, Queue, WorkerIds,M),
forall(between(1, MaxTries, R),
thread_send_message(Queue, rls_restart(R, SearchStrat, Label,M))),
collect_results(rls_restart,MaxTries,[0,S],[Time|_],M),
kill_worker_pool(Queue, WorkerIds),
retractall(M:'$aleph_search'(rls_restart,_)),
retract(M:'$aleph_search'(rls_nodes,Nodes)),
retract(M:'$aleph_search'(rls_selected,Selected)).
% rls_thread(+Restart,+SearchStrat,+Label,-Nodes,-Selected,+Module)
% Run one rls restart: reset per-restart state, run find_clause/2, and
% return the node count and selected clause for this restart.
rls_thread(R, SearchStrat, Label, Nodes0, selected(Best,RCl,PCov,NCov),M) :-
retractall(M:'$aleph_search'(best_refinement,_)),
retractall(M:'$aleph_search'(last_refinement,_)),
retractall(M:'$aleph_search'(rls_move,_)),
retractall(M:'$aleph_search'(rls_parentstats,_)),
retractall(M:'$aleph_search_seen'(_,_)),
asserta(M:'$aleph_search'(rls_move,1)),
asserta(M:'$aleph_search'(rls_parentstats,stats(Label,[],[]))),
p1_message('restart'), p_message(R),
find_clause(SearchStrat,M),
M:'$aleph_search'(current,current(_,Nodes0,_)),
M:'$aleph_search'(selected,selected(Best,RCl,PCov,NCov)),
retractall(M:'$aleph_search'(best_refinement,_)),
retractall(M:'$aleph_search'(last_refinement,_)),
retractall(M:'$aleph_search'(rls_move,_)),
retractall(M:'$aleph_search'(rls_parentstats,_)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% T H R E A D S
% create_worker_pool(+N,+Master,+Queue,-WorkerIds,+Module)
% Spawn N worker threads (aliased worker_1..worker_N) consuming from Queue.
create_worker_pool(N, Master, Queue, WorkerIds,M) :-
create_worker_pool(1, N, Master, Queue, WorkerIds,M).
create_worker_pool(I, N, _, _, [],_M) :-
I > N, !.
create_worker_pool(I, N, Master, Queue, [Id|T],M) :-
atom_concat(worker_, I, Alias),
thread_create(worker(Queue, Master,M), Id, [alias(Alias)]),
I2 is I + 1,
create_worker_pool(I2, N, Master, Queue, T,M).
% kill_worker_pool(+Queue,+WorkerIds)
% Send each worker a termination message and a signal, join them all,
% then destroy the shared queue.
kill_worker_pool(Queue, WorkerIds) :-
p_message('Killing workers'),
forall(aleph_member(Worker, WorkerIds),
kill_worker(Queue, Worker)),
p_message('Waiting for workers'),
forall(aleph_member(Worker, WorkerIds),
thread_join(Worker, _)),
message_queue_destroy(Queue),
p_message('Ok, all done').
% kill_worker(+Queue,+Worker): the all_done message stops an idle
% worker; the signal aborts one that is mid-search.
kill_worker(Queue, Worker) :-
thread_send_message(Queue, all_done),
thread_signal(Worker, throw(surplus_to_requirements)).
% worker(+Queue,+Master,+Module)
% Worker thread main loop: take one message from Queue, process it, and
% recurse to wait for the next one.
worker(Queue, Master,M) :-
thread_get_message(Queue, Message),
work(Message, Master,M),
% FIX: keep the module argument on the recursive call — the original
% called worker/2, which does not exist, so a worker died after
% processing a single message.
worker(Queue, Master,M).
% work(+Message,+Master,+Module)
% Handle one message taken from the worker queue.
% FIX: the restart messages posted by rls_search/6 are
% rls_restart(R,SearchStrat,Label,M) — four arguments — so the head must
% match that shape; the previous 3-argument pattern could never unify
% and every restart request fell through.
work(rls_restart(R, SearchStrat, Label, M), Master, _) :-
statistics(cputime, CPU0),
rls_thread(R, SearchStrat, Label, Nodes, Selected,M),
statistics(cputime, CPU1),
CPU is CPU1 - CPU0,
% Report this restart's result back to the master thread.
thread_send_message(Master, done(CPU, Nodes, Selected)).
% all_done: terminate this worker thread.
work(all_done, _,_M) :-
thread_exit(done).
% collect_results(+Flag,+NResults,+In,-Out,+Module)
% Gather up to NResults done/3 messages from the worker threads,
% folding each into the accumulator via collect/6; stops early when a
% worker reports that the search can be discontinued.
collect_results(rls_restart,NResults,In,Out,M):-
collect_results(0,NResults,rls_restart,In,Out,M).
collect_results(R0,MaxR,Flag,In,Out,M):-
thread_get_message(Message),
collect(Flag,Message,In,Out1,Done,M),
R1 is R0 + 1,
( (Done == false,
R1 < MaxR)
% FIX: thread the module argument M on the recursive call — the
% original invoked nonexistent collect_results/5, so collection
% failed after the first result.
-> collect_results(R1,MaxR,Flag,Out1,Out,M)
; Out = Out1
).
% collect(+Flag,+DoneMessage,+In,-Out,-Done,+Module)
% Fold one worker's result into the global rls state: accumulate CPU
% time and node counts, and keep the clause if it beats the current best.
collect(rls_restart,done(CPU, Nodes, selected(Best,RCl,PCov,NCov)),[T0,S], [T1,S],Done,M) :-
T1 is CPU + T0,
Best = [_,_,_,F|_],
M:'$aleph_search'(rls_selected,selected([_,_,_,F1|_],_,_,_)),
(F1 >= F -> true;
retract(M:'$aleph_search'(rls_selected,selected(
[_,_,_,F1|_],_,_,_))),
asserta(M:'$aleph_search'(rls_selected,selected(Best,
RCl,PCov,NCov))),
set(best,Best,M)),
setting(best,BestSoFar,M),
retract(M:'$aleph_search'(rls_nodes,Nodes1)),
Nodes2 is Nodes + Nodes1,
asserta(M:'$aleph_search'(rls_nodes,Nodes2)),
% Done = true tells collect_results/6 to stop collecting early.
( discontinue_search(S,BestSoFar/_,Nodes2,M)
-> Done = true
; Done = false
).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% C O N T R O L
/**
 * induce_clauses(:Program:list) is det
 *
 * The basic theory construction predicate.
 * Constructs theories 1 clause at a time.
 * Dispatches to induce_incremental/1 when the `interactive` setting is
 * true, and to induce/1 otherwise.
 */
induce_clauses(M:Program):-
setting(interactive,true,M), !,
induce_incremental(M:Program).
induce_clauses(M:Program):-
induce(M:Program).
/**
 * induce(:Program:list) is det
 *
 * Non-interactive theory construction.
 * Constructs theories 1 clause at a time.
 * Does greedy cover removal after each clause found
 */
induce(M:Program):-
clean_up(M),
set(greedy,true,M),
retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
M:'$aleph_global'(atoms_left,atoms_left(pos,PosSet)),
PosSet \= [],
store(portray_search,M),
set(portray_search,false,M),
setting(samplesize,S,M),
setting(abduce,Abduce,M),
record_settings(M),
stopwatch(StartClock),
% Main loop: sample positives, find the best hypothesis, add it, and
% repeat until no positive examples remain uncovered.
repeat,
gen_sample(pos,S,M),
retractall(M:'$aleph_global'(besthyp,besthyp(_,_,_,_,_))),
asserta(M:'$aleph_global'(besthyp,besthyp([-inf,0,1,-inf],0,(false),[],[]))),
get_besthyp(Abduce,M),
% gcws handles exceptions via specialisation before adding.
(setting(gcws,true,M) -> sphyp(M), addgcws(M); addhyp(M)),
show_atoms_left(M),
record_atoms_left(M),
M:'$aleph_global'(atoms_left,atoms_left(pos,[])),
stopwatch(StopClock),
Time is StopClock - StartClock,
copy_theory(Program,M),
show(theory,M),
record_theory(Time,M),
noset(greedy,M),
reinstate(portray_search,M),
p1_message('time taken'), p_message(Time),
show_total_stats(M),
record_total_stats(M), !.
% copy_theory(-Program,+Module)
% Collect the current stored theory into a list of clauses.
% NOTE(review): the second aleph_member/2 call re-enumerates rule
% numbers and requires a theory/5 fact per rule — confirm this
% backtracking structure is intentional.
copy_theory(Program,M):-
M:'$aleph_global'(rules,rules(L)),
aleph_member(ClauseNum,L),
copy_theory_inner(ClauseNum,Program,M),
aleph_member(ClauseNum,L),
M:'$aleph_global'(theory,theory(ClauseNum,_,_,_,_)).
%copy_theory_eval(ClauseNum,Program,_).
copy_theory(_,_M).
% copy_theory_inner(+ClauseNum,-Program,+Module)
% Build the program list by walking rule numbers down from ClauseNum to 1.
copy_theory_inner(ClauseNum,[SubProgram|TailP],M):-
integer(ClauseNum),
ClauseNum > 0,!,
M:'$aleph_global'(theory,theory(ClauseNum,_,SubProgram,_,_)),
ClauseNum1 is ClauseNum-1,
copy_theory_inner(ClauseNum1,TailP,M).
copy_theory_inner(0,[],_M).
% copy_modes(-Modes,+Module): all mode declarations as mode(Mode,D) terms.
copy_modes(Modes,M):-
findall(mode(Mode,D),M:'$aleph_global'(mode,mode(Mode,D)),Modes).
% copy_constraints(-Constraints,+Module): all stored good clauses.
copy_constraints(Constraints,M):-
findall(Clause,M:'$aleph_good'(_,_,Clause),Constraints).
% copy_features(-Features,+Module): all stored features as Head:-Body.
copy_features(Features,M):-
findall((Head:-Body),M:'$aleph_feature'(feature,feature(_,_,_,Head,Body)),Features).
% ============= UNUSED ====================
% Retained for reference only; not called from the active code paths.
% copy_theory_eval(+ClauseNum,-Program,-Label,+Module)
% Clause 0: print the current hypothesis with its coverage label.
copy_theory_eval(0,_,Label,M):-
M:'$aleph_global'(hypothesis,hypothesis(_,Clause,_,_)), !,
label_create(Clause,Label,M),
p_message('Rule 0'),
pp_dclause(Clause,M),
extract_count(pos,Label,PC),
extract_count(neg,Label,NC),
extract_length(Label,L),
label_print_eval([PC,NC,L]),
nl.
copy_theory_eval(ClauseNum,Program,_,M):-
integer(ClauseNum),
ClauseNum > 0,
M:'$aleph_global'(theory,theory(ClauseNum,_,Clause,_,_)),
!,
copy_theory_eval_inner(Clause,Program).
copy_theory_eval(_,_,_,_M).
% copy_theory_eval_inner(+Clause,-Program)
% Normalise a clause's body into a list of literals with numbered vars.
copy_theory_eval_inner((H:-true),Program):-
!,
copy_theory_eval_inner(H,Program).
copy_theory_eval_inner((H:-B),Program):-
!,
copy_term((H:-B),(Head:-Body)),
numbervars((Head:-Body),0,_),
add_lit_to_program(Body,Program).
copy_theory_eval_inner((Lit),Program):-
!,
copy_term(Lit,Lit1),
numbervars(Lit1,0,_),
add_lit_to_program(Lit1,Program).
% add_lit_to_program(+Body,-List): flatten a conjunction into a list.
add_lit_to_program((Lit,Lits),[Lit|Program1]):-
add_lit_to_program(Lits,Program1).
add_lit_to_program((Lit),[Lit]).
% ============= /UNUSED ====================
/**
 * induce_max(:Program:list) is det
 *
 * Construct theories 1 clause at a time.
 * Does not perform greedy cover removal after each clause found.
 * Constructs unique ``maximum cover set'' solution
 * by obtaining the best clause covering each example
 */
% slow
induce_max(M:Program):-
clean_up(M),
retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
M:'$aleph_global'(atoms,atoms(pos,PosSet)),
PosSet \= [],
store(portray_search,M),
set(portray_search,false,M),
record_settings(M),
stopwatch(StartClock),
set(maxcover,true,M),
aleph_induce_max(PosSet,M),
stopwatch(StopClock),
Time is StopClock - StartClock,
copy_theory(Program,M),
show(theory,M),
record_theory(Time,M),
noset(maxcover,M),
reinstate(portray_search,M),
reinstate(greedy,M),
p1_message('time taken'), p_message(Time),
show_total_stats(M),
record_total_stats(M), !.
% aleph_induce_max(+Intervals,+Module)
% Process each Start-Finish interval of positive example numbers in turn.
aleph_induce_max([],_M).
aleph_induce_max([Start-Finish|Intervals],M):-
asserta(M:'$aleph_local'(counter,Start)),
induce_max1(Finish,M),
aleph_induce_max(Intervals,M).
% induce_max1(+Finish,+Module)
% Find the best clause for each positive example from the stored counter
% up to Finish, updating the maximum cover set after each example.
induce_max1(Finish,M):-
M:'$aleph_local'(counter,S),
S =< Finish, !,
(setting(resample,Resample,M) -> true; Resample = 1),
repeat,
retract(M:'$aleph_local'(counter,Start)),
% FIX: thread the module argument M through the three calls below —
% the original called gen_sample/3 with the wrong argument pattern and
% get_besthyp/1, update_coverset/2, which are inconsistent with the
% call sites elsewhere in this file (cf. get_besthyp(Abduce,M)).
gen_sample(Resample,pos,Start,M),
get_besthyp(false,M),
update_coverset(pos,Start,M),
Next is Start+1,
assertz(M:'$aleph_local'(counter,Next)),
Next > Finish, !,
retract(M:'$aleph_local'(counter,Next)).
% FIX: fallback clause must have arity 2 to match the head above —
% induce_max1(_). could never catch a failed induce_max1/2 call.
induce_max1(_,_M).
/**
 * induce_cover(:Program:list) is det
 *
 * Construct theories 1 clause at a time.
 * Does not perform greedy cover removal after each clause found.
 */
induce_cover(M:Program):-
clean_up(M),
retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
M:'$aleph_global'(atoms,atoms(pos,PosSet)),
PosSet \= [],
store(portray_search,M),
set(portray_search,false,M),
setting(samplesize,S,M),
setting(abduce,Abduce,M),
record_settings(M),
stopwatch(StartClock),
% Loop until every positive example is covered; unlike induce/1 the
% covered positives are not removed from the search (no greedy flag).
repeat,
gen_sample(pos,S,M),
asserta(M:'$aleph_global'(besthyp,besthyp([-inf,0,1,-inf],0,
(false),[],[]))),
get_besthyp(Abduce,M),
addhyp(M),
M:'$aleph_global'(atoms_left,atoms_left(pos,[])),
stopwatch(StopClock),
Time is StopClock - StartClock,
copy_theory(Program,M),
show(theory,M),
record_theory(Time,M),
reinstate(portray_search,M),
reinstate(greedy,M),
p1_message('time taken'), p_message(Time),
show_total_stats(M),
record_total_stats(M), !.
/**
 * induce_incremental(:Program:list) is det
 *
 * Rudimentary version of an interactive, incremental rule learner.
 *
 * 1. ask the user for an example
 *    default is to use a new positive example from previous search
 *    if user responds with Ctrl-d (eof) then search stops
 *    if user responds with "ok" then default is used
 *    otherwise user has to provide an example
 * 2. construct bottom clause using that example
 *    expects to have appropriate mode declarations
 * 3. search for the best clause C
 * 4. ask the user about C who can respond with
 *     - ok: clause added to theory
 *     - prune: statement added to prevent future
 *       clauses that are subsumed by C
 *     - overgeneral: constraint added to prevent future
 *       clauses that subsume C
 *     - overgeneral because not(E): E is added as a negative example
 *     - overspecific: C is added as new positive example
 *     - overspecific because E: E is added as a new positive example
 *     - X: where X is some aleph command like "covers"
 *     - Ctrl-d (eof): return to Step 1
 */
induce_incremental(M:Program):-
clean_up(M),
retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
store_values([interactive,portray_search,proof_strategy,mode],M),
set(portray_search,false,M),
set(proof_strategy,sld,M),
set(interactive,true,M),
record_settings(M),
stopwatch(StartClock),
% Interactive loop: eof/none from the user terminates the session.
repeat,
ask_example_web(E,M),
((E = end_of_file; E = none) -> true;
once(record_example(check,pos,E,N,M)),
retractall(M:'$aleph_global'(example_selected,
example_selected(_,_))),
asserta(M:'$aleph_global'(example_selected,
example_selected(pos,N))),
once(sat(N,M)),
once(reduce(M:_)),
once(process_hypothesis_web(M)),
fail),
!,
stopwatch(StopClock),
Time is StopClock - StartClock,
% Clauses were collected newest-first; reverse into learning order.
copy_theory(Program0,M),
reverse(Program0,Program),
show(theory,M),
show(pos,M),
show(neg,M),
show(aleph_false/0,M),
show(prune/1,M),
record_theory(Time,M),
reinstate_values([interactive,portray_search,proof_strategy,mode],M),
p1_message('time taken'), p_message(Time).
/**
 * induce_theory(:Program:list) is det
 *
 * does theory-level search
 * currently only with search = rls; and evalfn = accuracy
 * induce entire theories from batch data
 * using a randomised local search
 * currently, this can use either: simulated annealing with a fixed temp,
 * GSAT, or a WSAT-like algorithm
 * the choice of these is specified by the parameter: rls_type
 * all methods employ random multiple restarts
 * and a limit on the number of moves
 * the number of restarts is specified by aleph_set(tries,...)
 * the number of moves is specified by aleph_set(moves,...)
 * annealing currently restricted to using a fixed temperature
 * the temperature is specified by aleph_set(temperature,...)
 * the fixed temp. makes it equivalent to the Metropolis alg.
 * WSAT requires a ``random-walk probability''
 * the walk probability is specified by aleph_set(walk,...)
 * a walk probability of 0 is equivalent to doing standard GSAT
 * theory accuracy is the evaluation function
 */
% aleph_induce_theory(+SearchType,+M)
% Theory-level search when the search setting is rls; the evalfn is forced
% to accuracy for the duration of the search and then restored.
aleph_induce_theory(rls,M):-
	clean_up(M),
	retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
	store(evalfn,M),
	set(evalfn,accuracy,M),
	record_settings(M),
	find_theory(rls,_,M),
	reinstate(evalfn,M),
	show_total_stats(M),
	record_total_stats(M), !.
% catch-all: any other search type succeeds without doing anything
aleph_induce_theory(_,_M).
% aleph_induce_theory(+SearchType,-Program,+M)
% As aleph_induce_theory/2 but also returns the theory found by the
% randomised local search in Program.
aleph_induce_theory(rls,Program,M):-
	clean_up(M),
	retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
	store(evalfn,M),
	set(evalfn,accuracy,M),
	record_settings(M),
	find_theory(rls,Program,M),
	reinstate(evalfn,M),
	show_total_stats(M),
	record_total_stats(M), !.
% induce_theory(M:Program)
% Entry point for theory-level induction: dispatches on the current
% search setting (only rls is handled by aleph_induce_theory/3).
induce_theory(M:Program):-
	setting(search,Search,M),
	aleph_induce_theory(Search,Program,M).
/**
 * induce_constraints(:Constraints:list) is det
 *
 * search for logical constraints that
 * hold in the background knowledge
 * A constraint is a clause of the form aleph_false:-...
 * This is modelled on the Claudien program developed by
 * L. De Raedt and his colleagues in Leuven
 * Constraints that are ``nearly true'' can be obtained
 * by altering the noise setting
 * All constraints found are stored as `good clauses'.
 */
induce_constraints(M:Constraints):-
	clean_up(M),
	retractall(M:'$aleph_global'(search_stats,search_stats(_,_))),
	% save flags that the ic search overrides below
	store_values([portray_search,search,construct_bottom,good,goodfile],M),
	noset(goodfile,M),
	set(portray_search,false,M),
	set(construct_bottom,false,M),
	set(search,ic,M),
	set(good,true,M),
	% saturate the special uspec example and run the reduction step
	sat(uspec,0,M),
	reduce(M:_),
	copy_constraints(Constraints,M),
	show(constraints,M),
	reinstate_values([portray_search,search,construct_bottom,good,goodfile],M),
	show_total_stats(M),
	record_total_stats(M), !.
/**
 * induce_modes(:Modes:list) is det
 *
 * search for an acceptable set of mode declarations
 */
induce_modes(M:Modes):-
	clean_up(M),
	% typeoverlap is modified by search_modes/1; save and restore it
	store_values([typeoverlap],M),
	search_modes(M),
	reinstate_values([typeoverlap],M),
	copy_modes(Modes,M),
	show(modes,M),!.
/**
 * induce_features(:Features:list) is det
 *
 * search for interesting boolean features
 * each good clause found in a search constitutes a new boolean feature
 * the maximum number of features is controlled by aleph_set(max_features,F)
 * the features are constructed by doing the following:
 * while (number of features =< F) do:
 *	1. randomly select an example;
 *	2. search for good clauses using the example selected;
 *	3. construct new features using good clauses
 */
induce_features(M:Features):-
	clean_up(M),
	store_values([good,check_good,updateback,construct_features,samplesize,greedy,explore,lazy_on_contradiction],M),
	set(good,true,M),
	set(check_good,true,M),
	set(updateback,false,M),
	set(construct_features,true,M),
	set(lazy_on_contradiction,true,M),
	% exhaustive feature construction turns on exploratory search
	(setting(feature_construction,exhaustive,M) -> set(explore,true,M);
		true),
	setting(max_features,FMax,M),
	record_settings(M),
	stopwatch(StartClock),
	% remember the uncovered-atoms state so it can be restored afterwards
	M:'$aleph_global'(atoms_left,atoms_left(pos,AtomsLeft)),
	repeat,
	gen_sample(pos,0,M),
	retractall(M:'$aleph_global'(besthyp,besthyp(_,_,_,_,_))),
	asserta(M:'$aleph_global'(besthyp,besthyp([-inf,0,1,-inf],0,(false),[],[]))),
	get_besthyp(false,M),
	addhyp(M),
	show_atoms_left(M),
	record_atoms_left(M),
	% stop when enough good clauses have been found or no atoms remain
	((M:'$aleph_search'(last_good,LastGood), LastGood >= FMax);
	 M:'$aleph_global'(atoms_left,atoms_left(pos,[]))), !,
	gen_features(M),
	stopwatch(StopClock),
	Time is StopClock - StartClock,
	copy_features(Features,M),
	show(features,M),
	record_features(Time,M),
	% restore the atoms_left record saved before the loop
	retract(M:'$aleph_global'(atoms_left,atoms_left(pos,_))),
	assertz(M:'$aleph_global'(atoms_left,atoms_left(pos,AtomsLeft))),
	reinstate_values([good,check_good,updateback,construct_features,samplesize,greedy,explore,lazy_on_contradiction],M), !.
/**
 * induce_tree(:Tree:list) is det
 *
 * construct a theory using recursive partitioning.
 * rules are obtained by building a tree
 * the tree constructed can be one of 4 types
 * classification, regression, class_probability or model
 * the type is set by aleph_set(tree_type,...)
 * In addition, the following parameters are relevant
 * - aleph_set(classes,ListofClasses): when tree_type is classification
 *    or class_probability
 * - aleph_set(prune_tree,Flag): for pruning rules from a tree
 * - aleph_set(confidence,C): for pruning of rules as described by
 *   J R Quinlan in the C4.5 book
 * - aleph_set(lookahead,L): lookahead for the refinement operator to avoid
 *   local zero-gain literals
 * - aleph_set(dependent,A): argument of the dependent variable in the examples
 *
 * The basic procedure attempts to construct a tree to predict the dependent
 * variable in the examples. Note that the mode declarations must specify the
 * variable as an output argument. Paths from root to leaf constitute clauses.
 * Tree-construction is viewed as a refinement operation: any leaf can currently
 * be refined by extending the corresponding clause. The extension is done using
 * Aleph's automatic refinement operator that extends clauses within the mode
 * language. A lookahead option allows additions to include several literals.
 * Classification problems currently use entropy gain to measure worth of additions.
 * Regression and model trees use reduction in standard deviation to measure
 * worth of additions. This is not quite correct for the latter.
 * Pruning for classification is done on the final set of clauses from the tree.
 * The technique used here is the reduced-error pruning method.
 * For classification trees, this is identical to the one proposed by
 * Quinlan in C4.5: Programs for Machine Learning, Morgan Kauffmann.
 * For regression and model trees, this is done by using a pessimistic estimate
 * of the sample standard deviation. This assumes normality of observed values
 * in a leaf. This method and others have been studied by L. Torgo in
 * "A Comparative Study of Reliable Error Estimators for Pruning Regression
 * Trees"
 * Following work by F Provost and P Domingos, pruning is not employed
 * for class probability prediction.
 * Currently no pruning is performed for model trees.
 */
induce_tree(M:Program):-
	clean_up(M),
	setting(tree_type,Type,M),
	% tree construction requires the automatic refinement operator
	store_values([refine],M),
	set(refine,auto,M),
	setting(mingain,MinGain,M),
	% a non-positive mingain would admit zero-gain splits: reject it
	(MinGain =< 0.0 ->
		err_message('inappropriate setting for mingain'),
		fail;
		true
	),
	record_settings(M),
	stopwatch(StartClock),
	construct_tree(Type,M),
	stopwatch(StopClock),
	Time is StopClock - StartClock,
	copy_theory(Program0,M),
	reverse(Program0,Program),
	show(theory,M),
	record_theory(Time,M),
	reinstate_values([refine],M), !.
% utilities for the induce predicates
% randomly pick a positive example and construct bottom clause
% example is from those uncovered by current theory
% and whose bottom clause has not been stored away previously
% makes at most 100 attempts to find such an example
% rsat(+M): top-level entry; fails silently when no positive atoms remain.
rsat(M):-
	M:'$aleph_global'(atoms_left,atoms_left(pos,PosSet)),
	PosSet \= [],
	% sample exactly one example per attempt
	store(resample,M),
	set(resample,1,M),
	rsat(100,M),
	reinstate(resample,M).
% rsat(+AttemptsLeft,+M): bounded retry loop.
rsat(0,_M):- !.
rsat(N,M):-
	gen_sample(pos,1,M),
	M:'$aleph_global'(example_selected,example_selected(pos,Num)),
	% only saturate examples whose bottom clause is not already stored
	(\+(M:'$aleph_sat'(stored,stored(Num,pos,_))) ->
		!,
		retract(M:'$aleph_global'(example_selected,
					example_selected(pos,Num))),
		sat(pos,Num,M);
		N1 is N - 1,
		rsat(N1,M)).
% get_besthyp(+AbduceFlag,+M)
% Saturate and reduce the currently selected positive example, updating the
% best-hypothesis record; when AbduceFlag = true also try an abductive
% generalisation of the example. The first clause is a failure-driven loop
% (always ends in fail); the second installs the winner as the hypothesis.
get_besthyp(AbduceFlag,M):-
	retract(M:'$aleph_global'(example_selected,
				example_selected(pos,Num))),
	reset_best_label(M),	 % set-up target to beat
	sat(Num,M),
	reduce(M:_),
	update_besthyp(Num,M),
	(AbduceFlag = true ->
		M:example(Num,pos,Atom),
		abgen(Atom,AbGen,M),
		% replace the clause of the current hypothesis by its
		% abductive generalisation, keeping label and coverage
		once(retract(M:'$aleph_global'(hypothesis,
				hypothesis(Label,_,PCover,NCover)))),
		assert(M:'$aleph_global'(hypothesis,
				hypothesis(Label,AbGen,PCover,NCover))),
		update_besthyp(Num,M);
		true),
	fail.
get_besthyp(_,M):-
	retract(M:'$aleph_global'(besthyp,besthyp(L,Num,H,PC,NC))),
	H \= false, !,
	% when sampling more than one example, report the sampled clause
	((setting(samplesize,S,M),S>1)->
		setting(nodes,Nodes,M),
		show_clause(sample,L,H,Nodes,M),
		record_clause(sample,L,H,Nodes,M);
		true),
	add_hyp(L,H,PC,NC,M),
	asserta(M:'$aleph_global'(example_selected,
				example_selected(pos,Num))), !.
% no best hypothesis found: succeed without changes
get_besthyp(_,_M).
% reset_best_label(+M)
% If the stored best hypothesis (besthyp) beats the current best label —
% higher gain, or equal gain with fewer literals — make it the label to
% beat and the selected clause for the next search.
reset_best_label(M):-
	M:'$aleph_global'(besthyp,besthyp(Label1,_,Clause,P,N)),
	M:'$aleph_search'(best_label,Label/_),
	% labels are [_,_,Length,Gain|...]; gains may be unevaluated expressions
	Label = [_,_,L,GainE|_],
	Label1 = [_,_,L1,Gain1E|_],
	% Gain > Gain1, !,
	arithmetic_expression_value(GainE,Gain),
	arithmetic_expression_value(Gain1E,Gain1),
	((Gain1 > Gain);(Gain1 =:= Gain, L1 < L)), !,
	retract(M:'$aleph_search'(best_label,Label/_)),
	asserta(M:'$aleph_search'(best_label,Label1/0)),
	retractall(M:'$aleph_search'(selected,_)),
	asserta(M:'$aleph_search'(selected,selected(Label1,Clause,P,N))).
reset_best_label(_M).
% update_besthyp(+Num,+M)
% Replace the stored best hypothesis by the current hypothesis when the
% latter is better: higher gain, or equal gain with fewer literals.
% Num records which example produced the hypothesis.
update_besthyp(Num,M):-
	M:'$aleph_global'(hypothesis,hypothesis(Label,H,PCover,NCover)),
	M:'$aleph_global'(besthyp,besthyp(Label1,_,_,_,_)),
	Label = [_,_,L,GainE|_],
	Label1 = [_,_,L1,Gain1E|_],
	% Gain > Gain1, !,
	arithmetic_expression_value(GainE,Gain),
	arithmetic_expression_value(Gain1E,Gain1),
	((Gain > Gain1);(Gain =:= Gain1, L < L1)), !,
	retract(M:'$aleph_global'(besthyp,besthyp(Label1,_,_,_,_))),
	assertz(M:'$aleph_global'(besthyp,besthyp(Label,Num,H,PCover,NCover))).
update_besthyp(_,_M).
% generate a new feature from a good clause
% gen_features(+M)
% First clause: failure-driven loop over good clauses stored in the
% database ('$aleph_good'/3). Second clause: same loop over good clauses
% read back from the goodfile, when one is set. Third clause: catch-all.
gen_features(M):-
	aleph_abolish('$aleph_feature'/2,M),
	(setting(dependent,PredictArg,M) -> true; PredictArg is 0),
	(setting(minscore,FMin,M) -> true; FMin = -inf),
	M:'$aleph_good'(_,Label,Clause),
	Label = [_,_,_,FE|_],
	arithmetic_expression_value(FE,F),
	% skip clauses below the minimum score
	F >= FMin,
	split_clause(Clause,Head,Body),
	Body \= true,
	functor(Head,Name,Arity),
	functor(Template,Name,Arity),
	% copy all head args except the dependent (predicted) one
	copy_iargs(Arity,Head,Template,PredictArg),
	get_feature_class(PredictArg,Head,Body,Class,M),
	gen_feature((Template:-Body),Label,Class,M),
	fail.
gen_features(M):-
	(setting(dependent,PredictArg,M) -> true; PredictArg is 0),
	setting(good,true,M),
	setting(goodfile,File,M),
	aleph_open(File,read,Stream),
	(setting(minscore,FMin,M) -> true; FMin = -inf),
	repeat,
	read(Stream,Fact),
	(Fact = M:'$aleph_good'(_,Label,Clause) ->
		Label = [_,_,_,FE|_],
		arithmetic_expression_value(FE,F),
		F >= FMin,
		split_clause(Clause,Head,Body),
		Body \= true,
		functor(Head,Name,Arity),
		functor(Template,Name,Arity),
		copy_iargs(Arity,Head,Template,PredictArg),
		get_feature_class(PredictArg,Head,Body,Class,M),
		gen_feature((Template:-Body),Label,Class,M),
		fail;
		close(Stream), !
	).
gen_features(_M).
% get_feature_class(+Argno,+Head,+Body,-Class,+M)
% Class is the ground class value of the dependent argument if one can be
% determined; otherwise Class is left unbound (second clause).
get_feature_class(Argno,Head,Body,Class,M):-
	has_class(Argno,Head,Body,Class,M), !.
get_feature_class(_,_,_,_,_M).
% has_class(+Argno,+Head,+Body,-Class,+M)
% Succeeds when the dependent arg is ground in the head, or is bound to a
% ground value by an equality literal in the body.
has_class(Argno,Head,_,Class,_M):-
	arg(Argno,Head,Class),
	ground(Class), !.
has_class(Argno,Head,Body,Class,M):-
	arg(Argno,Head,DepVar),
	in((DepVar=Class),Body,M),
	ground(Class), !.
% ask_example(-E,+M)
% Prompt the user for the next example; the currently selected positive
% example (if any) is offered as the default, accepted by answering "ok".
ask_example(E,M):-
	(M:'$aleph_global'(example_selected,example_selected(pos,N)) ->
		M:example(N,pos,E1);
		E1 = none),
	!,
	show_options(example_selection),
	tab(4),
	write('Response '), p1_message(default:E1), write('?'), nl,
	read(Response),
	(Response = ok  -> E = E1; E = Response).
% ask_example_web(-E,+M)
% Same as ask_example/2 but shows the web menu (no Ctrl-D option).
ask_example_web(E,M):-
	(M:'$aleph_global'(example_selected,example_selected(pos,N)) ->
		M:example(N,pos,E1);
		E1 = none),
	!,
	show_options_web(example_selection),
	tab(4),
	write('Response '), p1_message(default:E1), write('?'), nl,
	read(Response),
	(Response = ok  -> E = E1; E = Response).
% process_hypothesis(+M)
% Show the current hypothesis and loop, reading user responses and acting
% on them via process_hypothesis/2, until the user ends the dialogue.
process_hypothesis(M):-
	show(hypothesis,M),
	repeat,
	show_options(hypothesis_selection),
	tab(4),
	write('Response?'), nl,
	read(Response),
	process_hypothesis(Response,M),
	(Response = end_of_file; Response = none), !.
% process_hypothesis_web(+M)
% As process_hypothesis/1 but with the web menu.
process_hypothesis_web(M):-
	show(hypothesis,M),
	repeat,
	show_options_web(hypothesis_selection),
	tab(4),
	write('Response?'), nl,
	read(Response),
	process_hypothesis(Response,M),
	(Response = end_of_file; Response = none), !.
% process_hypothesis(+Response,+M)
% Dispatch on the user's response to a proposed hypothesis.
% end_of_file / none: end the dialogue.
process_hypothesis(end_of_file,_M):-
	nl, nl, !.
process_hypothesis(none,_M):-
	nl, nl, !.
% ok: accept the clause into the theory.
process_hypothesis(ok,M):-
	!,
	update_theory(_,M),
	nl, p_message('added new clause').
% prune: add a prune/1 statement rejecting clauses subsumed by H.
process_hypothesis(prune,M):-
	!,
	retract(M:'$aleph_global'(hypothesis,hypothesis(_,H,_,_))),
	Prune = (
		hypothesis(Head,Body,_,M),
		goals_to_list(Body,BodyL),
		clause_to_list(H,HL),
		aleph_subsumes(HL,[Head|BodyL])),
	assertz(M:(prune(H):- Prune)),
	nl, p_message('added new prune statement').
% overgeneral: add a constraint rejecting clauses that subsume H.
process_hypothesis(overgeneral,M):-
	!,
	retract(M:'$aleph_global'(hypothesis,hypothesis(_,H,_,_))),
	Constraint = (
		hypothesis(Head,Body,_,M),
		goals_to_list(Body,BodyL),
		clause_to_list(H,HL),
		aleph_subsumes([Head|BodyL],HL)),
	assertz(M:(aleph_false:- Constraint)),
	nl, p_message('added new constraint').
% overgeneral because not(E): E becomes a new negative example.
process_hypothesis(overgeneral because not(E),M):-
	!,
	record_example(check,neg,E,_,M),
	nl, p_message('added new negative example').
% overspecific: the hypothesis itself becomes a new positive example.
process_hypothesis(overspecific,M):-
	!,
	retract(M:'$aleph_global'(hypothesis,hypothesis(_,H,_,_))),
	(retract(M:'$aleph_global'(example_selected,example_selected(_,_)))->
		true;
		true),
	record_example(check,pos,H,N,M),
	asserta(M:'$aleph_global'(example_selected,example_selected(pos,N))),
	nl, p_message('added new positive example').
% overspecific because E: E becomes a new positive example.
process_hypothesis(overspecific because E,M):-
	!,
	retract(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))),
	(retract(M:'$aleph_global'(example_selected,example_selected(_,_)))->
		true;
		true),
	record_example(check,pos,E,N,M),
	asserta(M:'$aleph_global'(example_selected,example_selected(pos,N))),
	nl, p_message('added new positive example').
% anything else is executed as an Aleph command in module M.
process_hypothesis(AlephCommand,M):-
	M:AlephCommand.
% show_options(+Context)
% Print the menu of valid user responses for interactive induction.
% Context is example_selection or hypothesis_selection.
show_options(example_selection):-
	nl,
	tab(4),
	write('Options:'), nl,
	tab(8),
	write('-> "ok." to accept default example'), nl,
	tab(8),
	write('-> Enter an example'), nl,
	tab(8),
	write('-> ctrl-D or "none." to end'), nl, nl.
show_options(hypothesis_selection):-
	nl,
	tab(4),
	write('Options:'), nl,
	tab(8),
	write('-> "ok." to accept clause'), nl,
	tab(8),
	write('-> "prune." to prune clause and its refinements from the search'), nl,
	tab(8),
	write('-> "overgeneral." to add clause as a constraint'), nl,
	tab(8),
	write('-> "overgeneral because not(E)." to add E as a negative example'), nl,
	tab(8),
	write('-> "overspecific." to add clause as a positive example'), nl,
	tab(8),
	write('-> "overspecific because E." to add E as a positive example'), nl,
	tab(8),
	write('-> any Aleph command'), nl,
	tab(8),
	% fixed: stray '"' before ctrl-D (was '-> "ctrl-D or "none." to end'),
	% now consistent with the example_selection menu above
	write('-> ctrl-D or "none." to end'), nl, nl.
% show_options_web(+Context)
% Web variant of show_options/1: identical menus except that the Ctrl-D
% (end-of-file) option is omitted, since it is unavailable in a web UI.
show_options_web(example_selection):-
	nl,
	tab(4),
	write('Options:'), nl,
	tab(8),
	write('-> "ok." to accept default example'), nl,
	tab(8),
	write('-> Enter an example'), nl,
	tab(8),
	write('-> "none." to end'), nl, nl.
show_options_web(hypothesis_selection):-
	nl,
	tab(4),
	write('Options:'), nl,
	tab(8),
	write('-> "ok." to accept clause'), nl,
	tab(8),
	write('-> "prune." to prune clause and its refinements from the search'), nl,
	tab(8),
	write('-> "overgeneral." to add clause as a constraint'), nl,
	tab(8),
	write('-> "overgeneral because not(E)." to add E as a negative example'), nl,
	tab(8),
	write('-> "overspecific." to add clause as a positive example'), nl,
	tab(8),
	write('-> "overspecific because E." to add E as a positive example'), nl,
	tab(8),
	write('-> any Aleph command'), nl,
	tab(8),
	write('-> "none." to end'), nl, nl.
% get_performance(+M)
% Print confusion matrices for the training set and, when test files are
% set, the test set. First clause: skip entirely for regression-style
% evaluation functions (sd/mse) where a confusion matrix is meaningless.
% The middle clauses are failure-driven (end in fail) so both reports run.
get_performance(M):-
	setting(evalfn,Evalfn,M),
	(Evalfn = sd; Evalfn = mse), !.
get_performance(M):-
	findall(Example,M:example(_,pos,Example),Pos),
	findall(Example,M:example(_,neg,Example),Neg),
	(test_ex(Pos,noshow,Tp,TotPos,M)->
		Fn is TotPos - Tp;
		TotPos = 0, Tp = 0, Fn = 0),
	(test_ex(Neg,noshow,Fp,TotNeg,M)->
		Tn is TotNeg - Fp;
		TotNeg = 0, Tn = 0, Fp = 0),
	% only report when there is at least one example
	TotPos + TotNeg > 0,
	p_message('Training set performance'),
	write_cmatrix([Tp,Fp,Fn,Tn]),
	p1_message('Training set summary'), p_message([Tp,Fp,Fn,Tn]),
	fail.
get_performance(M):-
	(setting(test_pos,PFile,M) ->
		test(PFile,noshow,Tp,TotPos,M),
		Fn is TotPos - Tp;
		TotPos = 0, Tp = 0, Fn = 0),
	(setting(test_neg,NFile,M) ->
		test(NFile,noshow,Fp,TotNeg,M),
		Tn is TotNeg - Fp;
		TotNeg = 0, Tn = 0, Fp = 0),
	TotPos + TotNeg > 0,
	p_message('Test set performance'),
	write_cmatrix([Tp,Fp,Fn,Tn]),
	p1_message('Test set summary'), p_message([Tp,Fp,Fn,Tn]),
	fail.
get_performance(_M).
% write_cmatrix(+[Tp,Fp,Fn,Tn])
% Pretty-print a 2x2 confusion matrix (predicted vs actual) with row and
% column totals, followed by the accuracy ((Tp+Tn)/Total; 0.5 when empty).
write_cmatrix([Tp,Fp,Fn,Tn]):-
	P is Tp + Fn, N is Fp + Tn,
	PP is Tp + Fp, PN is Fn + Tn,
	Total is PP + PN,
	(Total = 0 -> Accuracy is 0.5; Accuracy is (Tp + Tn)/Total),
	% column width: widest entry plus padding
	find_max_width([Tp,Fp,Fn,Tn,P,N,PP,PN,Total],0,W1),
	W is W1 + 2,
	tab(5), write(' '), tab(W), write('Actual'), nl,
	tab(5), write(' '), write_entry(W,'+'), tab(6), write_entry(W,'-'), nl,
	tab(5), write('+'),
	write_entry(W,Tp), tab(6), write_entry(W,Fp), tab(6), write_entry(W,PP), nl,
	write('Pred '), nl,
	tab(5), write('-'),
	write_entry(W,Fn), tab(6), write_entry(W,Tn), tab(6), write_entry(W,PN), nl, nl,
	tab(5), write(' '), write_entry(W,P), tab(6), write_entry(W,N),
	tab(6), write_entry(W,Total), nl, nl,
	write('Accuracy = '), write(Accuracy), nl.
% find_max_width(+Values,+WidthSoFar,-MaxWidth)
% MaxWidth is the larger of WidthSoFar and the length of the longest
% printed representation (via name/2) of any value in Values.
find_max_width([],Width,Width).
find_max_width([Value|Rest],WidthSoFar,Width):-
	name(Value,Chars),
	length(Chars,Len),
	(Len > WidthSoFar -> NewWidest = Len; NewWidest = WidthSoFar),
	find_max_width(Rest,NewWidest,Width).
% write_entry(+W,+V)
% Write V centred in a field of width W, padding both sides with
% integer((W-VL)/2) spaces (so an odd remainder loses one space).
write_entry(W,V):-
	name(V,VList),
	length(VList,VL),
	Y is integer((W-VL)/2),
	tab(Y), write(V), tab(Y).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% A B D U C T I O N
% Generalisation of an abductive explanation for a fact.
% The basic procedure is a simplified variant of S. Moyle's Alecto
% program. Alecto is described in some detail in S. Moyle,
% "Using Theory Completion to Learn a Navigation Control Program",
% Proceedings of the Twelfth International Conference on ILP (ILP2002),
% S. Matwin and C.A. Sammut (Eds), LNAI 2583, pp 182-197,
% 2003.
% Alecto does the following: for each positive example, an
% abductive explanation is obtained. This explanation is set of
% ground atoms. The union of abductive explanations from all
% positive examples is formed (this is also a set of ground atoms).
% These are then generalised to give the final theory. The
% ground atoms in an abductive explanation are obtained using
% Yamamoto's SOLD resolution or SOLDR (Skip Ordered Linear resolution for
% Definite clauses).
% One complication with abductive learning is this: for a given
% positive example to be provable, we require all the ground atoms
% in its abductive explanation to be true. Correctly therefore,
% we would need to assert the abductive explanation before
% checking the utility of any hypothesis. To avoid unnecessary
% asserts and retracts, the "pclause" trick is used here (see
% record_testclause/0).
% abgen(+Fact,+M): convenience wrapper, discards the generalisation.
abgen(Fact,M):-
	abgen(Fact,_,M).
% abgen(+Fact,-AbGen,+M)
% Failure-driven loop over abductive generalisations of Fact, keeping the
% best one (higher gain, or equal gain and shorter) in '$aleph_search'
% under key abgenhyp; the second clause installs it as the hypothesis.
abgen(Fact,AbGen,M):-
	retractall(M:'$aleph_search'(abgenhyp,hypothesis(_,_,_,_))),
	Minf is -inf,
	% seed with an unbeatable-by-nothing dummy hypothesis
	asserta(M:'$aleph_search'(abgenhyp,
			hypothesis([Minf,0,1,Minf],[false],[],[]))),
	setting(max_abducibles,Max,M),
	abgen(Fact,Max,AbGen,M),
	M:'$aleph_global'(hypothesis,hypothesis(Label,_,PCover,NCover)),
	Label = [_,_,LE,GainE|_],
	arithmetic_expression_value(LE,L),
	arithmetic_expression_value(GainE,Gain),
	M:'$aleph_search'(abgenhyp,hypothesis(Label1,_,_,_)),
	Label1 = [_,_,L1E,Gain1E|_],
	arithmetic_expression_value(L1E,L1),
	arithmetic_expression_value(Gain1E,Gain1),
	once(((Gain > Gain1); (Gain =:= Gain1, L < L1))),
	once(retract(M:'$aleph_search'(abgenhyp,hypothesis(_,_,_,_)))),
	asserta(M:'$aleph_search'(abgenhyp,
			hypothesis(Label,AbGen,PCover,NCover))),
	fail.
abgen(_,AbGen,M):-
	retractall(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))),
	M:'$aleph_search'(abgenhyp,hypothesis(Label,AbGen,PCover,NCover)),
	asserta(M:'$aleph_global'(hypothesis,
			hypothesis(Label,AbGen,PCover,NCover))).
% abgen(+Fact,+Max,-AbGen,+M)
% Obtain an abductive explanation of Fact (at most Max ground abducible
% atoms) via SOLD resolution, temporarily assert the atoms, and generalise
% them to clauses AbGen. Yap uses db references for cheap retraction.
abgen(Fact,Max,AbGen,M):-
	sold_prove(Fact,AbAtoms,M),
	ground(AbAtoms),
	length(AbAtoms,N),
	N =< Max,
	prolog_type(Prolog),
	(Prolog = yap ->
		store_abduced_atoms(AbAtoms,AssertRefs,M);
		store_abduced_atoms(AbAtoms,M)),
	store(proof_strategy,M),
	set(proof_strategy,sld,M),
	gen_abduced_atoms(AbAtoms,AbGen,M),
	reinstate(proof_strategy,M),
	(Prolog = yap ->
		erase_refs(AssertRefs);
		remove_abduced_atoms(AbAtoms,M)).
% gen_abduced_atoms(+AbAtoms,-AbGens,+M)
% Generalise each abduced atom by saturating and reducing it; atoms
% already explained by a found generalisation are dropped from the rest.
gen_abduced_atoms([],[],_M).
gen_abduced_atoms([AbAtom|AbAtoms],[AbGen|AbGens],M):-
	functor(AbAtom,Name,Arity),
	add_determinations(Name/Arity,true,M),
	sat(AbAtom,M),
	reduce(M:_),
	M:'$aleph_global'(hypothesis,hypothesis(_,AbGen,_,_)),
	remove_explained(AbAtoms,AbGen,AbAtoms1,M),
	gen_abduced_atoms(AbAtoms1,AbGens,M).
% remove_explained(+AbAtoms,+Clause,-Rest,+M)
% Rest is AbAtoms minus those atoms derivable from Clause = (Head:-Body).
remove_explained([],_,[],_M).
remove_explained([AbAtom|AbAtoms],(Head:-Body),Rest,M):-
	% double negation: test provability without binding AbAtom
	\+((\+ M:((AbAtom = Head), Body))), !,
	remove_explained(AbAtoms,(Head:-Body),Rest,M).
remove_explained([AbAtom|AbAtoms],(Head:-Body),[AbAtom|Rest],M):-
	remove_explained(AbAtoms,(Head:-Body),Rest,M).
% store_abduced_atoms(+AbAtoms,-DbRefs,+M)
% Assert each abduced atom as a pclause fact, returning database
% references (used on Yap so they can be erased cheaply later).
store_abduced_atoms([],[],_M).
store_abduced_atoms([AbAtom|AbAtoms],[DbRef|DbRefs],M):-
	assertz(M:'$aleph_search'(abduced,pclause(AbAtom,true)),DbRef),
	store_abduced_atoms(AbAtoms,DbRefs,M).
% store_abduced_atoms(+AbAtoms,+M)
% Arity-2 variant for Prologs without db references.
store_abduced_atoms([],_M).
store_abduced_atoms([AbAtom|AbAtoms],M):-
	assertz(M:'$aleph_search'(abduced,pclause(AbAtom,true))),
	store_abduced_atoms(AbAtoms,M).
% remove_abduced_atoms(+AbAtoms,+M)
% Retract the pclause facts asserted by store_abduced_atoms/2.
remove_abduced_atoms([],_M).
remove_abduced_atoms([AbAtom|AbAtoms],M):-
	retract(M:'$aleph_search'(abduced,pclause(AbAtom,true))),
	remove_abduced_atoms(AbAtoms,M).
% sold_prove(+G,-A)
% Where G is an input goal (comma separated conjunction of atoms)
% and A is a list of atoms (containing the abductive explanation).
% This procedure is due to S.Moyle
sold_prove(Goal,SkippedGoals,M):-
	soldnf_solve(Goal,Skipped,M),
	% sort also removes duplicate skipped atoms
	sort(Skipped,SkippedGoals).
% soldnf_solve(+Goal,-Skipped,+M): start with empty accumulator and
% require overall proof status true.
soldnf_solve(Goal,Skipped,M):-
	soldnf_solve(Goal,true,[],Skipped,M).
% soldnf_solve(+Goal,?Status,+SkippedSoFar,-Skipped,+M)
% SOLDNF resolution: prove Goal collecting skippable (abducible) atoms in
% the accumulator. Status is the truth status of the (sub)proof; negation
% is handled by proving the negated goal with the opposite status.
% NOTE(review): clause order and cuts are significant here — the not/1
% clauses rely on the textual order for their semantics.
soldnf_solve((Goal,Goals),Status,SkippedSoFar,Skipped,M):-
	!,
	soldnf_solve(Goal,Status1,SkippedSoFar,Skipped1,M),
	soldnf_solve(Goals,Status2,Skipped1,Skipped,M),
	conj_status(Status1,Status2,Status).
soldnf_solve(not(Goal),true,SkippedSoFar,Skipped,M):-
	soldnf_solve(Goal,false,SkippedSoFar,Skipped,M).
soldnf_solve(not(Goal),false,SkippedSoFar,Skipped,M):-
	!,
	soldnf_solve(Goal,true,SkippedSoFar,Skipped,M).
soldnf_solve(Goal,Status,SkippedSoFar,SkippedSoFar,M):-
	soldnf_builtin(Goal,M), !,
	soldnfcall(Goal,Status,M).
soldnf_solve(Goal,Status,SkippedSoFar,Skipped,M):-
	soldnf_clause(Goal,Body,M),
	soldnf_solve(Body,Status,SkippedSoFar,Skipped,M).
% abducible goal: skip it, record it as part of the explanation
soldnf_solve(Goal,true,SkippedSoFar,[Goal|SkippedSoFar],M):-
	skippable(Goal,M).
% soldnf_clause(+Goal,-Body,+M): fetch a user clause for Goal; built-ins
% have no fetchable clauses (first clause cuts and fails).
soldnf_clause(Goal,_Body,M):-soldnf_builtin(Goal,M),!,fail.
soldnf_clause(Goal,Body,M):-
	clause(M:Goal,Body).
% soldnf_builtin(+A,+M): true when A is a built-in; not/1 is handled
% specially by soldnf_solve/5 and is never treated as a built-in here.
soldnf_builtin(not(_Goal),_M):-!,fail.
soldnf_builtin(A,M):-predicate_property(M:A,built_in).
% soldnfcall(+Goal,-Status,+M): run a built-in, mapping success/failure
% to Status true/false.
soldnfcall(Goal,true,M):-
	M:Goal, !.
soldnfcall(_,false,_M).
% conj_status(+S1,+S2,-S): a conjunction is true iff both conjuncts are.
conj_status(true,true,true):- !.
conj_status(_,_,false).
% skippable(+Pred,+M): Pred's predicate indicator is declared abducible.
skippable(Pred,M):-
	functor(Pred,Name,Arity),
	M:'$aleph_global'(abducible,abducible(Name/Arity)).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% L A Z Y  E V A L U A T I O N
% lazy_evaluate_theory(+Clauses,+Lazy,+Pos,+Neg,-Theory)
% evaluate lazy preds in a set of clauses
% untested
lazy_evaluate_theory([],_,_,_,[],_M).
lazy_evaluate_theory([Refine|T],LazyPreds,Pos,Neg,[Refine1|T1],M):-
	% each entry pairs a key with [B,C,LitNums,Clause]
	Refine = A-[B,C,D,Clause],
	lazy_evaluate_refinement(D,Clause,LazyPreds,Pos,Neg,D1,Clause1,M),
	Refine1 = A-[B,C,D1,Clause1],
	lazy_evaluate_theory(T,LazyPreds,Pos,Neg,T1,M).
% lazy evaluation of literals in a refinement operation
% lazy_evaluate_refinement(+Lits,+Refine,+Lazy,+Pos,+Neg,-Lits1,-NewRefine,+M)
% When Lits is [] the clause Refine itself is decomposed into literals;
% otherwise the literal-number list is evaluated directly. The final
% clause is a catch-all returning the refinement unchanged.
lazy_evaluate_refinement([],Refine,Lazy,Pos,Neg,[],NewRefine,M):-
	clause_to_list(Refine,Lits),
	lazy_evaluate_refinement(Lits,Lazy,[],Pos,Neg,Lits1,M),
	list_to_clause(Lits1,NewRefine), !.
lazy_evaluate_refinement(Lits,_,Lazy,Pos,Neg,Lits1,NewRefine,M):-
	Lits \= [],
	lazy_evaluate_refinement(Lits,Lazy,[],Pos,Neg,Lits1,M),
	get_pclause(Lits1,[],NewRefine,_,_,_,M), !.
lazy_evaluate_refinement(Lits,Refine,_,_,_,Lits,Refine,_M).
% lazy_evaluate_refinement(+Lits,+LazyPreds,+Path,+Pos,+Neg,-Refine,+M)
% Evaluate each literal in turn, threading the growing path of
% already-evaluated literals through the recursion.
lazy_evaluate_refinement([],_,L,_,_,L,_M):- !.
lazy_evaluate_refinement([Lit|Lits],LazyPreds,Path,PosCover,NegCover,Refine,M):-
	lazy_evaluate([Lit],LazyPreds,Path,PosCover,NegCover,[Lit1],M),
	aleph_append([Lit1],Path,Path1), !,
	lazy_evaluate_refinement(Lits,LazyPreds,Path1,PosCover,NegCover,Refine,M).
% lazy evaluation of specified literals
% all #'d arguments of these literals are evaluated at reduction-time
% From Version 5 (dated Sat Nov 29 13:02:36 GMT 2003), collects both
% input and output args (previously only collected input args)
% lazy_evaluate(+LitNums,+LazyPreds,+Path,+PosCover,+NegCover,-Lits,+M)
% LitNums are bottom-clause literal numbers (when a bottom clause exists)
% or literal terms (construct_bottom = false); literals whose predicate is
% in LazyPreds are re-evaluated against the pos/neg covers.
lazy_evaluate(Lits,[],_,_,_,Lits,_M):- !.
lazy_evaluate([],_,_,_,_,[],_M):- !.
lazy_evaluate([LitNum|LitNums],LazyPreds,Path,PosCover,NegCover,Lits,M):-
	(integer(LitNum) ->
		% literal number into the stored bottom clause
		BottomExists = true,
		M:'$aleph_sat_litinfo'(LitNum,Depth,Atom,I,O,D),
		functor(Atom,Name,Arity),
		aleph_member1(Name/Arity,LazyPreds), !,
		get_pclause([LitNum|Path],[],(Lit:-(Goals)),_,_,_,M);
		% no bottom clause: LitNum is the literal itself
		BottomExists = false,
		Atom = LitNum,
		Depth = 0,
		functor(Atom,Name,Arity),
		aleph_member1(Name/Arity,LazyPreds), !,
		split_args(LitNum,_,I,O,C,M),
		D = [],
		list_to_clause([LitNum|Path],(Lit:-(Goals)))),
	goals_to_clause(Goals,Clause),
	% prove on positives (and negatives unless declared positive_only)
	lazy_prove(pos,Lit,Clause,PosCover,M),
	(M:'$aleph_global'(positive_only,positive_only(Name/Arity))->
		true;
		lazy_prove_negs(Lit,Clause,NegCover,M)),
	% gather the bindings produced for input and output argument positions
	functor(LazyLiteral,Name,Arity),
	collect_args(I,LazyLiteral,M),
	collect_args(O,LazyLiteral,M),
	lazy_evaluate1(BottomExists,Atom,Depth,I,O,C,D,LazyLiteral,NewLits,M),
	retractall(M:'$aleph_local'(lazy_evaluate,_)),
	lazy_evaluate(LitNums,LazyPreds,Path,PosCover,NegCover,NewLits1,M),
	update_list(NewLits1,NewLits,Lits).
% non-lazy literal: keep it unchanged
lazy_evaluate([LitNum|LitNums],LazyPreds,Path,PosCover,NegCover,[LitNum|Lits],M):-
	lazy_evaluate(LitNums,LazyPreds,Path,PosCover,NegCover,Lits,M).
% lazy_prove_negs(+Lit,+Clause,+NegCover,+M)
% Prove on negatives: when lazy_negs is set, use the full set of negative
% atoms instead of the supplied cover.
lazy_prove_negs(Lit,Clause,_,M):-
	M:'$aleph_global'(lazy_negs,set(lazy_negs,true)), !,
	M:'$aleph_global'(atoms,atoms(neg,NegCover)),
	lazy_prove(neg,Lit,Clause,NegCover,M).
lazy_prove_negs(Lit,Clause,NegCover,M):-
	lazy_prove(neg,Lit,Clause,NegCover,M).
% collect_args(+ArgPositions,?Literal,+M)
% For each argument position, bind that arg of Literal to [PTerms,NTerms]:
% the bindings recorded during lazy proving on positives and negatives.
collect_args([],_,_M).
collect_args([Argno/_|Args],Literal,M):-
	findall(Term,
		(M:'$aleph_local'(lazy_evaluate,eval(pos,Lit)),
		tparg(Argno,Lit,Term)),
		PTerms),
	findall(Term,
		(M:'$aleph_local'(lazy_evaluate,eval(neg,Lit)),
		tparg(Argno,Lit,Term)),
		NTerms),
	tparg(Argno,Literal,[PTerms,NTerms]),
	collect_args(Args,Literal,M).
% when construct_bottom = false
% currently do not check if user's definition of lazily evaluated
% literal corresponds to recall number in the modes
% lazy_evaluate1(+BottomExists,+Atom,+Depth,+I,+O,+C,+D,+Lit,-NewLits,+M)
% First clause (no bottom clause): run the user's definition and copy the
% resulting constant/io args. Second clause (bottom clause exists): call
% the library predicate and collect the newly created literal numbers.
lazy_evaluate1(false,Atom,_,I,O,C,_,Lit,NewLits,M):-
	functor(Atom,Name,Arity),
	p1_message('lazy evaluation'), p_message(Name),
	functor(NewLit,Name,Arity),
	findall(NewLit,(M:Lit,copy_args(Lit,NewLit,C)),NewLits),
	copy_io_args(NewLits,Atom,I,O).
lazy_evaluate1(true,Atom,Depth,I,O,_,D,Lit,NewLits,M):-
	% M:'$aleph_sat'(lastlit,_),
	call_library_pred(Atom,Depth,Lit,I,O,D,M),
	findall(LitNum,(retract(M:'$aleph_local'(lazy_evaluated,LitNum))),NewLits).
% call_library_pred(+OldLit,+Depth,+Lit,+I,+O,+D,+M)
% Repeatedly evaluate the lazily-defined predicate, at most Recall times
% (the lazy_recall declaration for its predicate indicator), counting
% calls via the '$aleph_local'(callno,_) counter.
call_library_pred(OldLit,Depth,Lit,I,O,D,M):-
	functor(OldLit,Name,Arity),
	M:'$aleph_global'(lazy_recall,lazy_recall(Name/Arity,Recall)),
	asserta(M:'$aleph_local'(callno,1)),
	p1_message('lazy evaluation'), p_message(Name),
	repeat,
	evaluate(OldLit,Depth,Lit,I,O,D,M),
	retract(M:'$aleph_local'(callno,CallNo)),
	NextCall is CallNo + 1,
	asserta(M:'$aleph_local'(callno,NextCall)),
	% stop once the recall bound is exceeded
	NextCall > Recall,
	!,
	p_message('completed'),
	retract(M:'$aleph_local'(callno,NextCall)).
% evaluate(+OldLit,+Depth,+Lit,+I,+O,+D,+M)
% One evaluation step: call the user definition of Lit, build a new
% literal with copied i/o args and constants, register it in the bottom
% clause (update_lit) and mark it lazy_evaluated exactly once.
% Second clause makes evaluation always succeed (best-effort).
evaluate(OldLit,_,Lit,I,O,D,M):-
	functor(OldLit,Name,Arity),
	functor(NewLit,Name,Arity),
	M:Lit,
	copy_args(OldLit,NewLit,I),
	copy_args(OldLit,NewLit,O),
	copy_consts(Lit,NewLit,Arity),
	update_lit(LitNum,false,NewLit,I,O,D,M),
	% record each new literal number at most once
	\+(M:'$aleph_local'(lazy_evaluated,LitNum)),
	asserta(M:'$aleph_local'(lazy_evaluated,LitNum)), !.
evaluate(_,_,_,_,_,_,_M).
% copy_io_args(+Lits,+Template,+InArgs,+OutArgs)
% Unify, for every literal in Lits, the input and output argument
% positions with the corresponding arguments of Template.
copy_io_args([],_Template,_InArgs,_OutArgs).
copy_io_args([Lit|RestLits],Template,InArgs,OutArgs):-
	copy_args(Template,Lit,InArgs),
	copy_args(Template,Lit,OutArgs),
	copy_io_args(RestLits,Template,InArgs,OutArgs).
% copy_args(+Old,?New,+ArgPositions)
% Unify each listed argument position (Pos/Type pairs) of New with the
% corresponding argument of Old.
copy_args(_,_,[]).
copy_args(Old,New,[Arg/_|T]):-
	tparg(Arg,Old,Term),
	tparg(Arg,New,Term),
	% NOTE(review): trailing cut commits after the full recursion
	copy_args(Old,New,T), !.
% copy_consts(+Old,?New,+Arity)
% Walk arguments Arity..1; any still-unbound argument of New is bound to
% aleph_const(Term) where Term is Old's corresponding argument.
copy_consts(_,_,0):- !.
copy_consts(Old,New,Arg):-
	arg(Arg,Old,Term),
	arg(Arg,New,Term1),
	var(Term1), !,
	Term1 = aleph_const(Term),
	Arg0 is Arg - 1,
	copy_consts(Old,New,Arg0).
copy_consts(Old,New,Arg):-
	Arg0 is Arg - 1,
	copy_consts(Old,New,Arg0).
% copy_modeterm(+Old,-New)
% copy term structure from Old to New
% by finding an appropriate mode declaration
copy_modeterm(Lit1,Lit2,M):-
	functor(Lit1,Name,Arity),
	find_mode(mode,Name/Arity,Mode,M),
	functor(Lit2,Name,Arity),
	copy_modeterms(Mode,Lit2,Arity),
	% double negation: check unifiability without binding Lit1/Lit2
	\+((\+ (Lit1 = Lit2))).
% find_mode(+modetype,+Name/+Arity,-Mode)
% find a mode for Name/Arity of type modetype
% (modetype is one of mode, modeh, modeb)
find_mode(mode,Name/Arity,Mode,M):-
	!,
	functor(Mode,Name,Arity),
	M:'$aleph_global'(mode,mode(_,Mode)).
find_mode(modeh,Name/Arity,Mode,M):-
	!,
	functor(Mode,Name,Arity),
	M:'$aleph_global'(modeh,modeh(_,Mode)).
find_mode(modeb,Name/Arity,Mode,M):-
	!,
	functor(Mode,Name,Arity),
	M:'$aleph_global'(modeb,modeb(_,Mode)).
% copy_modeterms(+Mode,+Lit,+Arity)
% copy all term structures in a mode template
% Arguments that are mode markers (+/-/#) are skipped; compound terms are
% rebuilt recursively so Lit gets the same structure with fresh variables.
copy_modeterms(_,_,0):- !.
copy_modeterms(Mode,Lit,Arg):-
	arg(Arg,Mode,Term),
	nonvar(Term),
	functor(Term,Name,Arity),
	\+((Name = '+'; Name = '-'; Name = '#')), !,
	functor(NewTerm,Name,Arity),
	arg(Arg,Lit,NewTerm),
	copy_modeterms(Term,NewTerm,Arity),
	Arg0 is Arg - 1,
	copy_modeterms(Mode,Lit,Arg0).
copy_modeterms(Mode,Lit,Arg):-
	Arg0 is Arg - 1,
	copy_modeterms(Mode,Lit,Arg0).
% theorem-prover for lazy evaluation of literals
% lazy_prove(+Type,+Lit,+Clause,+Intervals,+M)
% Prove Clause over the example-number Intervals of set Type (pos/neg),
% recording in '$aleph_local' the bindings obtained for Lit.
lazy_prove(Type,Lit,Clause,Intervals,M):-
	(Clause = (Head:-Body)->
		lazy_prove(Intervals,Type,Lit,Head,Body,M);
		lazy_prove(Intervals,Type,Lit,Clause,true,M)).
lazy_prove([],_,_,_,_,_M).
lazy_prove([Interval|Intervals],Type,Lit,Head,Body,M):-
	lazy_index_prove(Interval,Type,Lit,Head,Body,M),
	lazy_prove(Intervals,Type,Lit,Head,Body,M).
% lazy_index_prove(+Start-Finish,...): iterate over an example interval.
lazy_index_prove(Start-Finish,_,_,_,_,_M):-
	Start > Finish, !.
lazy_index_prove(Start-Finish,Type,Lit,Head,Body,M):-
	lazy_index_prove1(Type,Lit,Head,Body,Start,M),
	Start1 is Start + 1,
	lazy_index_prove(Start1-Finish,Type,Lit,Head,Body,M).
% bind input args of lazy literal
% each example gives a set of input bindings
% this is different from Aleph 2 where only a single binding was obtained
% (failure-driven; the catch-all clause makes it always succeed)
lazy_index_prove1(Type,Lit,Head,Body,Num,M):-
	% NOTE(review): example/3 is called unqualified here (not M:example) —
	% presumably depth_bound_call/2 resolves it in M; confirm
	depth_bound_call((example(Num,Type,Head),Body),M),
	\+(M:'$aleph_local'(lazy_evaluate,eval(Type,Lit))),
	asserta(M:'$aleph_local'(lazy_evaluate,eval(Type,Lit))),
	fail.
lazy_index_prove1(_,_,_,_,_,_M).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% S L P
% implemented as described by Muggleton, ILP-96
% condition_target(+M)
% When the condition flag is set, condition the stochastic generator on
% every example of the target predicate (failure-driven loop); otherwise
% just make sure a generator exists.
condition_target(M):-
	M:'$aleph_global'(condition,set(condition,true)),
	add_generator(M),
	M:'$aleph_global'(modeh,modeh(_,Pred)),
	functor(Pred,Name,Arity),
	p_message('conditioning'),
	make_sname(Name,SName),
	functor(SPred,SName,Arity),
	SPred =.. [_|Args],
	functor(Fact,Name,Arity),
	M:example(_,_,Fact),
	% share argument list between example fact and sampler predicate
	Fact =.. [_|Args],
	condition(SPred,M),
	fail.
condition_target(M):-
	\+(M:'$aleph_global'(condition,set(condition,true))),
	add_generator(M), !.
condition_target(_M).
% add_generator(+M)
% For every modeh declaration, ensure a sampler predicate ('*'-prefixed
% name) exists, creating one from the mode declaration when absent.
% Failure-driven over all modeh declarations.
add_generator(M):-
	M:'$aleph_global'(modeh,modeh(_,Pred)),
	functor(Pred,Name,Arity),
	make_sname(Name,SName),
	functor(SPred,SName,Arity),
	(clause(M:SPred,_)->
		true;
		add_generator(Name/Arity,M),
		p1_message('included generator'), p_message(SName/Arity)),
	fail.
add_generator(_M).
% add_generator(+Name/Arity,+M)
% Build the sampler clause: head is the '*'-prefixed predicate with the
% mode's term structure, body is the conjunction of type checks for all
% input, output and constant argument positions.
add_generator(Name/Arity,M):-
	make_sname(Name,SName),
	functor(SPred,SName,Arity),
	find_mode(modeh,Name/Arity,Mode,M),
	once(copy_modeterms(Mode,SPred,Arity)),
	split_args(Mode,Mode,Input,Output,Constants,M),
	range_restrict(Input,SPred,[],B1),
	range_restrict(Output,SPred,B1,B2),
	range_restrict(Constants,SPred,B2,B3),
	list_to_goals(B3,Body),
	% avoid asserting a duplicate sampler clause
	\+(clause(M:SPred,Body)),
	asserta(M:(SPred:-Body)),
	fail.
add_generator(_,_M).
% make_sname(+Name,-SName)
% Sampler-predicate name: Name prefixed with '*'.
make_sname(Name,SName):-
	concat(['*',Name],SName).
% range_restrict(+ArgPositions,+Pred,+Acc,-TypeChecks)
% Accumulate one Type(Arg) type-check goal per Pos/Type argument position.
range_restrict([],_,R,R).
range_restrict([Pos/Type|T],Pred,R0,R):-
	functor(TCheck,Type,1),
	tparg(Pos,Pred,X),
	arg(1,TCheck,X),
	range_restrict(T,Pred,[TCheck|R0],R).
% condition(+Fact,+M)
% Condition the SLP by proving Fact in condition mode; always succeeds.
condition(Fact,M):-
	slprove(condition,Fact,M), !.
condition(_,_M).
% sample(+Name/Arity,+N,-S,+M)
% Draw N stochastic samples of predicate Name/Arity via slprove/3,
% collecting the sampled facts (in reverse draw order) in S.
sample(_,0,[],_M):- !.
sample(Name/Arity,N,S,M):-
	functor(Pred,Name,Arity),
	retractall(M:'$aleph_local'(slp_samplenum,_)),
	retractall(M:'$aleph_local'(slp_sample,_)),
	asserta(M:'$aleph_local'(slp_samplenum,1)),
	repeat,
	slprove(stochastic,Pred,M),
	asserta(M:'$aleph_local'(slp_sample,Pred)),
	retract(M:'$aleph_local'(slp_samplenum,N1)),
	N2 is N1 + 1,
	asserta(M:'$aleph_local'(slp_samplenum,N2)),
	% stop after N draws
	N2 > N,
	!,
	retract(M:'$aleph_local'(slp_samplenum,N2)),
	functor(Fact,Name,Arity),
	findall(Fact,(retract(M:'$aleph_local'(slp_sample,Fact))),S).
% gsample(+Name/Arity,+N,+M)
% Generate N random examples (type rand) for Name/Arity. If the sampler
% predicate is defined by ground clauses, enumerate those instead
% (update_gsample). Otherwise draw stochastically via slprove/3 and
% record bookkeeping facts (size, last_example, atoms, atoms_left).
gsample(Name/Arity,_,M):-
	make_sname(Name,SName),
	functor(SPred,SName,Arity),
	clause(M:SPred,Body),
	ground((SPred:-Body)), !,
	update_gsample(Name/Arity,_,M).
gsample(_,0,_M):- !.
gsample(Name/Arity,N,M):-
	functor(Pred,Name,Arity),
	make_sname(Name,SName),
	functor(SPred,SName,Arity),
	% share the argument list so a sampled SPred instantiates Pred
	Pred =.. [_|Args],
	retractall(M:'$aleph_local'(slp_samplenum,_)),
	asserta(M:'$aleph_local'(slp_samplenum,0)),
	repeat,
	slprove(stochastic,SPred,M),
	SPred =..[_|Args],
	retract(M:'$aleph_local'(slp_samplenum,N1)),
	N2 is N1 + 1,
	asserta(M:'$aleph_local'(slp_samplenum,N2)),
	assertz(M:example(N2,rand,Pred)),
	N2 >= N,
	!,
	retract(M:'$aleph_local'(slp_samplenum,N2)),
	asserta(M:'$aleph_global'(size,size(rand,N))),
	asserta(M:'$aleph_global'(last_example,last_example(rand,N))),
	asserta(M:'$aleph_global'(atoms,atoms(rand,[1-N]))),
	asserta(M:'$aleph_global'(atoms_left,atoms_left(rand,[1-N]))).
% update_gsample(+Name/Arity,-N,+M)
% Enumerate all ground clauses of the '*'-prefixed generator and record
% each as a 'rand' example via record_example/5, counting them in
% '$aleph_local'(slp_samplenum) through a failure-driven loop.  The second
% clause then replaces the 'rand' bookkeeping facts with the final count N
% and sets the gsamplesize setting.
update_gsample(Name/Arity,_,M):-
	functor(Pred,Name,Arity),
	make_sname(Name,SName),
	functor(SPred,SName,Arity),
	retractall(M:'$aleph_global'(gsample,gsample(_))),
	retractall(M:'$aleph_local'(slp_samplenum,_)),
	asserta(M:'$aleph_local'(slp_samplenum,0)),
	% Pred shares arguments with the generator literal SPred
	SPred =.. [_|Args],
	Pred =.. [_|Args],
	clause(M:SPred,Body),
	ground((SPred:-Body)),
	record_example(check,rand,(Pred:-Body),N1,M),
	retract(M:'$aleph_local'(slp_samplenum,_)),
	asserta(M:'$aleph_local'(slp_samplenum,N1)),
	fail.
update_gsample(_,N,M):-
	M:'$aleph_local'(slp_samplenum,N),
	N > 0, !,
	retract(M:'$aleph_local'(slp_samplenum,N)),
	set(gsamplesize,N,M),
	retract(M:'$aleph_global'(atoms,atoms(rand,_))),
	retract(M:'$aleph_global'(atoms_left,atoms_left(rand,_))),
	retract(M:'$aleph_global'(last_example,last_example(rand,_))),
	assert(M:'$aleph_global'(atoms,atoms(rand,[1-N]))),
	assert(M:'$aleph_global'(atoms_left,atoms_left(rand,[1-N]))),
	assert(M:'$aleph_global'(last_example,last_example(rand,N))).
update_gsample(_,_,_M).
% slprove(+Mode,+Goal,+M)
% Meta-interpreter used for conditioning and stochastic sampling.
%   Mode = condition:  proves Goal against the clauses in M, incrementing
%                      the usage count of each clause used (inc_count/2).
%   Mode = stochastic: proves Goal by selecting a matching clause at
%                      random, with probability proportional to its
%                      stored count (SLP-style sampling).
% Built-in goals are executed directly.
slprove(_,true,_M):-
	!.
slprove(Mode,not(Goal),M):-
	slprove(Mode,Goal,M),
	!,
	fail.
slprove(Mode,(Goal1,Goal2),M):-
	!,
	slprove(Mode,Goal1,M),
	slprove(Mode,Goal2,M).
slprove(Mode,(Goal1;Goal2),M):-
	!,
	% FIX: the disjunction must be parenthesized.  The body previously
	% read "!, slprove(Mode,Goal1,M); slprove(Mode,Goal2,M)", which by
	% operator precedence is ((!, slprove(Goal1)) ; slprove(Goal2)).
	% Since ! is transparent to ;, executing the cut discarded the
	% second alternative, so Goal2 was never attempted when Goal1
	% failed.
	(	slprove(Mode,Goal1,M)
	;	slprove(Mode,Goal2,M)
	).
slprove(_,Goal,_M):-
	predicate_property(Goal,built_in), !,
	Goal.
slprove(stochastic,Goal,M):-
	% gather every matching clause with its count, renormalise the
	% counts to probabilities, then draw one clause at random
	findall(Count/Clause,
		(clause(M:Goal,Body),Clause=(Goal:-Body),find_count(Clause,Count,M)),
		ClauseCounts),
	renormalise(ClauseCounts,Normalised),
	aleph_random(X),
	rselect_clause(X,Normalised,(Goal:-Body)),
	slprove(stochastic,Body,M).
slprove(condition,Goal,M):-
	functor(Goal,Name,Arity),
	functor(Head,Name,Arity),
	clause(M:Head,Body),
	% double negation: test provability without binding Goal's variables
	\+(\+((Head=Goal,slprove(condition,Body,M)))),
	inc_count((Head:-Body),M).
% renormalise(+CountedClauses,-Normalised)
% Turn a list of Count/Clause pairs into Prob/Clause pairs, where each
% Prob is Count divided by the total of all counts.  Fails when the
% total is not strictly positive.
renormalise(CountedClauses,Normalised):-
	sum_counts(CountedClauses,Total),
	Total > 0,
	renormalise(CountedClauses,Total,Normalised).

% sum_counts(+Pairs,-Sum)
% Sum is the total of the Count fields in a list of Count/Clause pairs.
sum_counts([],0).
sum_counts([Count/_|Rest],Sum):-
	sum_counts(Rest,SumRest),
	Sum is Count + SumRest.

% renormalise(+Pairs,+Total,-Normalised)
% Divide every count by Total, preserving order and the paired clauses.
renormalise([],_,[]).
renormalise([Count/Clause|Rest],Total,[Prob/Clause|Probs]):-
	Prob is Count/Total,
	renormalise(Rest,Total,Probs).

% rselect_clause(+X,+ProbClauses,-Clause)
% Select the clause whose cumulative probability interval contains the
% random number X (walks the list, subtracting each probability).
rselect_clause(X,[Prob/Clause|_],Clause):- X =< Prob, !.
rselect_clause(X,[Prob/_|Rest],Clause):-
	X1 is X - Prob,
	rselect_clause(X1,Rest,Clause).
% find_count(+Clause,-N,+M)
% N is the stored SLP usage count for Clause (matched via copy_term so
% Clause is not bound); defaults to 1 when no count is recorded.
find_count(Clause,N,M):-
	copy_term(Clause,Clause1),
	M:'$aleph_global'(slp_count,Clause1,N), !.
find_count(_,1,_M).

% inc_count(+Clause,+M)
% Increment the stored SLP usage count for Clause; a clause seen for the
% first time gets count 2 (one more than the default of find_count/3).
inc_count(Clause,M):-
	retract(M:'$aleph_global'(slp_count,Clause,N)), !,
	N1 is N + 1,
	asserta(M:'$aleph_global'(slp_count,Clause,N1)).
inc_count(Clause,M):-
	asserta(M:'$aleph_global'(slp_count,Clause,2)).

% find_posgain(+PCover,-P,+M)
% P is the positive gain of cover set PCover: the raw interval count when
% the 'greedy' flag is set, otherwise the count of its intersection with
% the positive examples still left.
find_posgain(PCover,P,M):-
	M:'$aleph_global'(greedy,set(greedy,true)), !,
	interval_count(PCover,P).
find_posgain(PCover,P,M):-
	M:'$aleph_global'(atoms_left,atoms_left(pos,PLeft)),
	intervals_intersection(PLeft,PCover,PC),
	interval_count(PC,P).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% S E A R C H I / O
% record_clause(+Flag,+Label,+Clause,+Nodes,+M)
% Write a clause to the appropriate record stream, if one is set:
% good clauses (when the 'good' setting is true) go to the goodfile
% stream as '$aleph_good'/3 facts; other flags go to the record file via
% show_clause/5.  Succeeds silently when no stream is configured.
record_clause(good,Label,Clause,_,M):-
	setting(good,true,M),
	setting(goodfile_stream,Stream,M), !,
	set_output(Stream),
	Label = [_,_,L|_],
	aleph_writeq('$aleph_good'(L,Label,Clause)), write('.'), nl,
	flush_output(Stream),
	set_output(user_output).
record_clause(Flag,Label,Clause,Nodes,M):-
	Flag \= good,
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	show_clause(Flag,Label,Clause,Nodes,M),
	flush_output(Stream),
	set_output(user_output).
record_clause(_,_,_,_,_M).
% record_theory(+Flag,+Label,+Clauses,+Nodes,+M)
% Write the current theory to the record file, if one is set.
% FIX: removed a second, byte-identical definition of record_theory/5
% that immediately followed this one (dead duplicate), and corrected the
% argument order of the call to show_theory/5: its head is
% show_theory(Flag,Label,Clauses,Nodes,M), but it was called as
% show_theory(Label,Clauses,Nodes,Flag,M), so Flag received Label and
% the good/sample/found message was always wrong.
record_theory(Flag,Label,Clauses,Nodes,M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	show_theory(Flag,Label,Clauses,Nodes,M),
	flush_output(Stream),
	set_output(user_output).
record_theory(_,_,_,_,_M).
% record_sat_example(+N,+M)
% Log the number of the example being saturated to the record file.
record_sat_example(N,M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	p1_message('sat'), p_message(N),
	flush_output(Stream),
	set_output(user_output).
record_sat_example(_,_M).

% record_search_stats(+Clause,+Nodes,+Time,+M)
% Log search statistics (node count, time, best clause) to the record file.
record_search_stats(Clause,Nodes,Time,M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	p1_message('clauses constructed'), p_message(Nodes),
	p1_message('search time'), p_message(Time),
	p_message('best clause'),
	pp_dclause(Clause,M),
	% show(hypothesis),
	flush_output(Stream),
	set_output(user_output).
record_search_stats(_,_,_,_M).

% record_tsearch_stats(+Theory,+Nodes,+Time,+M)
% As record_search_stats/4, but for theory-level search.
record_tsearch_stats(Theory,Nodes,Time,M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	p1_message('theories constructed'), p_message(Nodes),
	p1_message('search time'), p_message(Time),
	p_message('best theory'),
	pp_dclauses(Theory,M),
	% show(hypothesis),
	flush_output(Stream),
	set_output(user_output).
record_tsearch_stats(_,_,_,_).

% record_theory(+Time,+M)
% Log the final theory (plus extra tables when 'maxcover' is set) and the
% time taken to the record file.
record_theory(Time,M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	show(theory,M),
	p1_message('time taken'), p_message(Time),
	nl,
	(M:'$aleph_global'(maxcover,set(maxcover,true))->
		show(theory/5,M), nl,
		show(max_set/4,M), nl,
		show(rules/1,M);
	true),
	flush_output(Stream),
	set_output(user_output).
record_theory(_,_M).

% record_features(+Time,+M)
% Log the constructed features and the time taken to the record file.
record_features(Time,M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	show(features,M),
	p1_message('time taken'), p_message(Time),
	flush_output(Stream),
	set_output(user_output).
record_features(_,_).

% record_settings(+M)
% Log the current settings (and, on unix, date and hostname) to the
% record file.
record_settings(M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	(M:'$aleph_global'(os,set(os,unix)) ->
		execute(date),
		execute(hostname);
		true),
	show(settings,M),
	flush_output(Stream),
	set_output(user_output).
record_settings(_M).
% show_clause(+Flag,+Label,+Clause,+Nodes,+M)
% Pretty-print a clause with its label and statistics; Flag (good/sample/
% other) selects the headline message.  Also broadcasts the clause event.
show_clause(Flag,Label,Clause,Nodes,M):-
	broadcast(clause(Flag,Label,Clause,Nodes)),
	p_message('-------------------------------------'),
	(Flag=good -> p_message('good clause');
		(Flag=sample-> p_message('selected from sample');
		p_message('found clause'))),
	pp_dclause(Clause,M),
	(setting(evalfn,Evalfn,M)-> true; Evalfn = coverage),
	show_stats(Evalfn,Label),
	p1_message('clause label'), p_message(Label),
	p1_message('clauses constructed'), p_message(Nodes),
	p_message('-------------------------------------').

% show_theory(+Flag,+Label,+Clauses,+Nodes,+M)
% As show_clause/5 but for a list of clauses (a theory).
% NOTE(review): record_theory/5 calls this as
% show_theory(Label,Clauses,Nodes,Flag,M) — arguments rotated relative to
% this head — so Flag receives Label there; verify intended order.
show_theory(Flag,Label,Clauses,Nodes,M):-
	p_message('-------------------------------------'),
	(Flag=good -> p_message('good theory');
		(Flag=sample-> p_message('selected from sample');
		p_message('found theory'))),
	pp_dclauses(Clauses,M),
	(setting(evalfn,Evalfn,M)-> true; Evalfn = accuracy),
	show_stats(Evalfn,Label),
	p1_message('theory label'), p_message(Label),
	p1_message('theories constructed'), p_message(Nodes),
	p_message('-------------------------------------').
% update_search_stats(+N,+T,+M)
% Add N nodes and T time units to the cumulative search statistics
% stored in '$aleph_global'(search_stats,...), creating them if absent.
update_search_stats(N,T,M):-
	(retract(M:'$aleph_global'(search_stats,search_stats(N0,T0))) ->
		N1 is N0 + N,
		T1 is T0 + T;
		N1 is N,
		T1 is T),
	asserta(M:'$aleph_global'(search_stats,search_stats(N1,T1))).

% record_total_stats(+M)
% Log the cumulative search statistics to the record file, if set.
record_total_stats(M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	show_total_stats(M),
	flush_output(Stream),
	set_output(user_output).
record_total_stats(_M).

% record_atoms_left(+M)
% Log the remaining positive examples to the record file, if set.
record_atoms_left(M):-
	setting(recordfile_stream,Stream,M), !,
	set_output(Stream),
	show_atoms_left(M),
	flush_output(Stream),
	set_output(user_output).
record_atoms_left(_M).
% show_total_stats(+M)
% Report the total number of clauses constructed so far, if recorded.
show_total_stats(M):-
	M:'$aleph_global'(search_stats,search_stats(Nodes,_)), !,
	p1_message('total clauses constructed'), p_message(Nodes).
show_total_stats(_M).

% show_atoms_left(+M)
% Report the number of positive examples still uncovered, together with
% a crude estimate of the time to finish: search time so far scaled by
% (examples left / examples already covered).
show_atoms_left(M):-
	M:'$aleph_global'(atoms_left,atoms_left(pos,PLeft)),
	interval_count(PLeft,NLeft),
	M:'$aleph_global'(size,size(pos,NPos)),
	M:'$aleph_global'(search_stats,search_stats(_,Time)),
	% FIX: guard the division — when no positive example has been
	% covered yet (NPos =:= NLeft) the old code raised a zero-divisor
	% error; now the clause fails and the catch-all below succeeds.
	NPos > NLeft,
	EstTime is (Time*NLeft)/(NPos - NLeft),
	p1_message('positive examples left'), p_message(NLeft),
	p1_message('estimated time to finish (secs)'), p_message(EstTime), !.
show_atoms_left(_M).
% show_stats(+Evalfn,+Label)
% Print the pos/neg cover counts from a clause label and its evaluation
% value.  For minimising evaluation functions (user, entropy, gini) the
% stored value F is negated before printing.
show_stats(Evalfn,[P,N,_,F|_]):-
	((Evalfn = user; Evalfn = entropy; Evalfn = gini) ->
		Value is -F;
		Value is F
	),
	concat(['pos cover = ',P,' neg cover = ',N],Mess),
	p1_message(Mess),
	print_eval(Evalfn,Value).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% A U T O -- R E F I N E
%
% built-in refinement operator
% gen_auto_refine(+M)
% Prepare the built-in refinement operator: compile mode and
% determination information (once; guarded by the 'autorefine' setting).
gen_auto_refine(M):-
	(setting(autorefine,true,M) -> true;
		set(autorefine,true,M),
		process_modes(M),
		process_determs(M)),
	!.
gen_auto_refine(_M).

% process_modes(+M)
% Compile mode declarations into lookup tables: first a failure-driven
% loop over all modeb declarations (process_mode/2), then one over the
% modeh declarations of determined target predicates, recording input/
% output variable information ('$aleph_has_ivar'/'$aleph_has_ovar'/
% '$aleph_has_vars').
process_modes(M):-
	once(aleph_abolish('$aleph_link_vars'/2,M)),
	once(aleph_abolish('$aleph_has_vars'/3,M)),
	once(aleph_abolish('$aleph_has_ovar'/4,M)),
	once(aleph_abolish('$aleph_has_ivar'/4,M)),
	M:'$aleph_global'(modeb,modeb(_,Mode)),
	process_mode(Mode,M),
	fail.
process_modes(M):-
	M:'$aleph_global'(determination,determination(Name/Arity,_)),
	find_mode(modeh,Name/Arity,Mode,M),
	split_args(Mode,Mode,I,O,_,M),
	functor(Lit,Name,Arity),
	copy_modeterms(Mode,Lit,Arity),
	add_ivars(Lit,I,M),
	add_ovars(Lit,O,M),
	add_vars(Lit,I,O,M),
	fail.
process_modes(_M).

% process_determs(+M)
% Compile determination declarations into '$aleph_determination'/2 facts
% mapping each target to a mode-templated body literal (failure-driven).
process_determs(M):-
	once(aleph_abolish('$aleph_determination'/2,M)),
	M:'$aleph_global'(determination,determination(Name/Arity,Name1/Arity1)),
	functor(Pred,Name1,Arity1),
	find_mode(modeb,Name1/Arity1,Mode,M),
	copy_modeterms(Mode,Pred,Arity1),
	Determ = M:'$aleph_determination'(Name/Arity,Pred),
	% avoid duplicate determination entries
	(Determ -> true; assert(Determ)),
	fail.
process_determs(_M).

% process_mode(+Mode,+M)
% Record variable-linking information for a single modeb declaration:
% asserts a '$aleph_link_vars' clause plus output/variable tables.
process_mode(Mode,M):-
	functor(Mode,Name,Arity),
	split_args(Mode,Mode,I,O,C,M),
	functor(Lit,Name,Arity),
	copy_modeterms(Mode,Lit,Arity),
	add_ioc_links(Lit,I,O,C,M),
	add_ovars(Lit,O,M),
	add_vars(Lit,I,O,M).
% add_ioc_links(+Lit,+I,+O,+C,+M)
% Assert a '$aleph_link_vars'(Lit,Lits) clause whose body checks, for a
% candidate literal Lit against literals Lits, the output (O), input (I)
% and constant (C) argument constraints built by the get_*_links helpers.
add_ioc_links(Lit,I,O,C,M):-
	Clause = ('$aleph_link_vars'(Lit,Lits):-
			aleph:var_types(Lits,VT,M),
			Body),
	get_o_links(O,Lit,VT,true,OGoals,M),
	get_i_links(I,Lit,VT,OGoals,IOGoals),
	get_c_links(C,Lit,IOGoals,Body),
	assert(M:Clause).

% add_ovars(+Lit,+O,+M)
% Record each output argument of Lit as a '$aleph_has_ovar'/4 fact
% (failure-driven; duplicates skipped).
add_ovars(Lit,O,M):-
	aleph_member(Pos/Type,O),
	tparg(Pos,Lit,V),
	(M:'$aleph_has_ovar'(Lit,V,Type,Pos)->true;
		assert(M:'$aleph_has_ovar'(Lit,V,Type,Pos))),
	fail.
add_ovars(_,_,_M).

% add_ivars(+Lit,+I,+M)
% Record each input argument of Lit as a '$aleph_has_ivar'/4 fact
% (failure-driven; duplicates skipped).
add_ivars(Lit,I,M):-
	aleph_member(Pos/Type,I),
	tparg(Pos,Lit,V),
	(M:'$aleph_has_ivar'(Lit,V,Type,Pos)->true;
		assert(M:'$aleph_has_ivar'(Lit,V,Type,Pos))),
	fail.
add_ivars(_,_,_M).

% add_vars(+Lit,+I,+O,+M)
% Record the input and output Var/Type lists of Lit as a single
% '$aleph_has_vars'/3 fact (duplicates skipped).
add_vars(Lit,I,O,M):-
	get_var_types(I,Lit,IVarTypes),
	get_var_types(O,Lit,OVarTypes),
	(M:'$aleph_has_vars'(Lit,IVarTypes,OVarTypes) -> true;
		assert(M:'$aleph_has_vars'(Lit,IVarTypes,OVarTypes))).

% get_var_types(+PosTypes,+Lit,-VarTypes)
% Map each Pos/Type to Var/Type, where Var is Lit's argument at Pos.
get_var_types([],_,[]).
get_var_types([Pos/Type|PlaceTypes],Lit,[Var/Type|Rest]):-
	tparg(Pos,Lit,Var),
	get_var_types(PlaceTypes,Lit,Rest).
% get_o_links(+PosTypes,+Lit,+VarTypes,+Acc,-Goals,+M)
% For each output argument, prefix a goal checking the variable is a
% legal output variable (either by type table or by an earlier output
% position of the same literal).
get_o_links([],_,_,Goals,Goals,_M).
get_o_links([Pos/Type|T],Lit,VarTypes,GoalsSoFar,Goals,M):-
	tparg(Pos,Lit,V),
	Goal = (aleph:aleph_output_var(V,Type,VarTypes);
		aleph:aleph_output_var(V,Type,Lit,Pos,M)),
	prefix_lits((Goal),GoalsSoFar,G1),
	get_o_links(T,Lit,VarTypes,G1,Goals,M).

% get_i_links(+PosTypes,+Lit,+VarTypes,+Acc,-Goals)
% For each input argument, prefix a goal checking the variable is an
% existing variable of the right type.
get_i_links([],_,_,Goals,Goals).
get_i_links([Pos/Type|T],Lit,VarTypes,GoalsSoFar,Goals):-
	tparg(Pos,Lit,V),
	Goal = aleph:aleph_input_var(V,Type,VarTypes),
	prefix_lits((Goal),GoalsSoFar,G1),
	get_i_links(T,Lit,VarTypes,G1,Goals).

% get_c_links(+PosTypes,+Lit,+Acc,-Goals)
% For each constant argument, prefix a goal enumerating a constant C of
% the declared type and unifying the argument with it.
get_c_links([],_,Goals,Goals).
get_c_links([Pos/Type|T],Lit,GoalsSoFar,Goals):-
	tparg(Pos,Lit,V),
	TypeFact =.. [Type,C],
	Goal = (TypeFact,V=C),
	prefix_lits((Goal),GoalsSoFar,G1),
	get_c_links(T,Lit,G1,Goals).
% aleph_input_var(+Var,+Type,+VarTypes)
% Succeeds if Var occurs in VarTypes with (unifiable, nonvar) type Type.
aleph_input_var(Var,Type,VarTypes):-
	aleph_member(Var/Type1,VarTypes),
	nonvar(Type1),
	Type = Type1.

% aleph_output_var(+Var,+Type,+VarTypes)
% As aleph_input_var/3, but the final catch-all clause makes it always
% succeed (an output variable may also be new).
aleph_output_var(Var,Type,VarTypes):-
	aleph_member(Var/Type1,VarTypes),
	nonvar(Type1),
	Type = Type1.
aleph_output_var(_,_,_).

% aleph_output_var(+Var,+Type,+Lit,+ThisPos,+M)
% Succeeds if Var is already an output variable of Lit at an earlier
% argument position than ThisPos.
aleph_output_var(Var,Type,Lit,ThisPos,M):-
	M:'$aleph_has_ovar'(Lit,Var,Type,Pos),
	Pos @< ThisPos.
/**
 * var_types(+Atoms:list,-VarTypes:list,+Module:atomic) is det
 *
 * Returns the types of variables in Atoms. Internal predicate.
 */
var_types([Head|Body],VarTypes,M):-
	hvar_types(Head,HVarTypes,M),
	bvar_types(Body,HVarTypes,BVarTypes,M),
	aleph_append(BVarTypes,HVarTypes,VarTypesList),
	sort(VarTypesList,VarTypes).

% hvar_types(+Head,-HVarTypes,+M)
% Var/Type pairs of the head literal: its recorded input and output
% variable types appended.
hvar_types(Head,HVarTypes,M):-
	M:'$aleph_has_vars'(Head,IVarTypes,OVarTypes),
	aleph_append(IVarTypes,OVarTypes,HVarTypes).

% bvar_types(+Lits,+VTSoFar,-BVarTypes,+M)
% Accumulate Var/Type pairs over body literals, checking that each
% literal's input variables already occur (consistent_vartypes/2) and
% that its output variables do not clash in type with existing ones.
% Negated literals contribute their checks but are handled by the
% second clause (the cut in the first commits once the lookup succeeds).
bvar_types([],V,V,_M).
bvar_types([Lit|Lits],VTSoFar,BVarTypes,M):-
	M:'$aleph_has_vars'(Lit,IVarTypes,OVarTypes),!,
	consistent_vartypes(IVarTypes,VTSoFar),
	\+ inconsistent_vartypes(OVarTypes,VTSoFar),
	aleph_append(OVarTypes,VTSoFar,VT1),
	bvar_types(Lits,VT1,BVarTypes,M).
bvar_types([not(Lit)|Lits],VTSoFar,BVarTypes,M):-
	M:'$aleph_has_vars'(Lit,IVarTypes,OVarTypes),
	consistent_vartypes(IVarTypes,VTSoFar),
	\+ inconsistent_vartypes(OVarTypes,VTSoFar),
	aleph_append(OVarTypes,VTSoFar,VT1),
	bvar_types(Lits,VT1,BVarTypes,M).

% consistent_vartypes(+VarTypes,+VTSoFar)
% Every Var/Type pair occurs (by strict identity) in VTSoFar.
consistent_vartypes([],_).
consistent_vartypes([Var/Type|VarTypes],VTSoFar):-
	aleph_member2(Var/Type,VTSoFar),
	consistent_vartypes(VarTypes,VTSoFar).

% inconsistent_vartypes(+VarTypes,+VTSoFar)
% Some variable occurs in VTSoFar with a different type.
inconsistent_vartypes([Var/Type|_],VTSoFar):-
	aleph_member(Var1/Type1,VTSoFar),
	Var == Var1,
	Type \== Type1, !.
inconsistent_vartypes([_|VarTypes],VTSoFar):-
	inconsistent_vartypes(VarTypes,VTSoFar).
% aleph_get_hlit(+Name/Arity,-Head,+M)
% Construct a head literal for Name/Arity from its modeh declaration,
% instantiating constant argument positions by executing the generated
% equality goals.
aleph_get_hlit(Name/Arity,Head,M):-
	functor(Head,Name,Arity),
	find_mode(modeh,Name/Arity,Mode,M),
	once(split_args(Mode,Mode,_,_,C,M)),
	copy_modeterms(Mode,Head,Arity),
	get_c_links(C,Head,true,Equalities),
	M:Equalities.

% aleph_get_lit(-Lit,+Lits,+M)
% Select a new body literal Lit (from the determinations of the head's
% predicate) that links correctly to the existing literals Lits and is
% not already among them.
aleph_get_lit(Lit,[H|Lits],M):-
	functor(H,Name,Arity),
	aleph_get_lit(Lit,Name/Arity,M),
	M:'$aleph_link_vars'(Lit,[H|Lits]),
	\+(aleph_member2(Lit,[H|Lits])).

% aleph_get_lit(-Lit,+Target,+M)
% Enumerate candidate body literals for Target from the compiled
% determination table.
aleph_get_lit(Lit,Target,M):-
	M:'$aleph_determination'(Target,Lit).

% aleph_mode_linked(+Lits)
% checks to see if a sequence of literals are within mode language
% using information compiled by process_modes/0
aleph_mode_linked([H|B],M):-
	aleph_mode_linked(B,[H],M).
aleph_mode_linked([],_,_M):- !.
aleph_mode_linked([Lit|Lits],LitsSoFar,M):-
	M:'$aleph_link_vars'(Lit,LitsSoFar),
	aleph_append([Lit],LitsSoFar,L1),
	aleph_mode_linked(Lits,L1,M).
% auto_refine(+Clause,-Refined,+M)
% Built-in refinement operator.  Refining aleph_false yields a head
% literal (from the saturated example if one exists, else from any modeh
% declaration); refining (H:-B) adds one mode-linked body literal,
% subject to clauselength, prune/1, tautology, language and newvars
% checks.  A bare head is treated as (Head:-true).
auto_refine(aleph_false,Head,M):-
	example_saturated(Example,M),
	functor(Example,Name,Arity),
	aleph_get_hlit(Name/Arity,Head,M),
	Head \== aleph_false.
auto_refine(aleph_false,Head,M):-
	M:'$aleph_global'(modeh,modeh(_,Pred)),
	functor(Pred,Name,Arity),
	aleph_get_hlit(Name/Arity,Head,M),
	Head \== aleph_false.
auto_refine((H:-B),(H1:-B1),M):-
	!,
	goals_to_list((H,B),LitList),
	setting(clauselength,L,M),
	length(LitList,ClauseLength),
	% only refine clauses strictly below the length bound
	ClauseLength < L,
	aleph_get_lit(Lit,LitList,M),
	aleph_append([Lit],LitList,LitList1),
	list_to_goals(LitList1,(H1,B1)),
	\+(M:prune((H1:-B1))),
	\+(tautology((H1:-B1),M)),
	(setting(language,Lang,M) ->
		lang_ok(Lang,H1,B1);
		true),
	(setting(newvars,NewVars,M) ->
		newvars_ok(NewVars,H1,B1);
		true).
auto_refine(Head,Clause,M):-
	auto_refine((Head:-true),Clause,M).

% refinement with lookahead
% auto_refine(+L,+Clause1,-Clause2,+M)
% Apply auto_refine/3 between 1 and L times (nondeterministically
% returning each intermediate refinement).
auto_refine(1,Clause1,Clause2,M):-
	!,
	auto_refine(Clause1,Clause2,M).
auto_refine(L,Clause1,Clause2,M):-
	L1 is L - 1,
	auto_refine(L1,Clause1,Clause,M),
	(Clause2 = Clause;
		auto_refine(Clause,Clause2,M)).
% auto_extend(+Clause,-Lit,-Extended,+M)
% Like auto_refine/3, but also returns the literal Lit that was added.
% The second clause handles a bare head (no body yet).
auto_extend((H:-B),Lit,(H1:-B1),M):-
	!,
	goals_to_list((H,B),LitList),
	setting(clauselength,L,M),
	length(LitList,ClauseLength),
	ClauseLength < L,
	aleph_get_lit(Lit,LitList,M),
	aleph_append([Lit],LitList,LitList1),
	list_to_goals(LitList1,(H1,B1)),
	(setting(language,Lang,M) ->
		lang_ok(Lang,H1,B1);
		true),
	(setting(newvars,NewVars,M) ->
		newvars_ok(NewVars,H1,B1);
		true),
	\+(tautology((H1:-B1),M)),
	\+(M:prune((H1:-B1))).
auto_extend((H),Lit,(H1:-B1),M):-
	!,
	goals_to_list(H,LitList),
	setting(clauselength,L,M),
	length(LitList,ClauseLength),
	ClauseLength < L,
	aleph_get_lit(Lit,LitList,M),
	aleph_append([Lit],LitList,LitList1),
	list_to_goals(LitList1,(H1,B1)),
	(setting(language,Lang,M) ->
		lang_ok(Lang,H1,B1);
		true),
	(setting(newvars,NewVars,M) ->
		newvars_ok(NewVars,H1,B1);
		true),
	\+(tautology((H1:-B1),M)),
	\+(M:prune((H1:-B1))).

% tautology(+Clause,+M)
% A clause is a tautology if its body contains both a literal and its
% negation (for headless aleph_false clauses), or if the head literal is
% identical to some body literal.
tautology((aleph_false:-Body),M):-
	!,
	in(Body,L1,Rest,M),
	in(Rest,not(L2),M),
	L1 == L2.
tautology((Head:-Body),M):-
	in(Body,Lit,M),
	Head == Lit, !.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% A U T O -- M O D E
% automatic inference of mode declarations given a set of
% determinations. The procedure works in two parts: (i) finding
% equivalence classes of types; and (ii) finding an input/output
% assignment.
%
% Finding equivalence classes of types is similar to
% the work of McCreath and Sharma, Proc of the 8th Australian
% Joint Conf on AI pages 75-82, 1995. However, unlike there
% types in the same equivalence class are given the same name only if
% they "overlap" significantly (the overlap of type1 with type2
% is the proportion of elements of type1 that are also elements of type2).
% Significantly here means an overlap at least some threshold
% T (set using typeoverlap, with default 0.95).
% Since this may not be perfect, modes are also produced
% for equality statements that re-introduce co-referencing amongst
% differently named types in the same equivalence class.
% The user has to however explicitly include a determination declaration for
% the equality predicate.
%
% The i/o assignment is not straightforward, as we may be dealing
% with non-functional definitions. The assignment sought here is one
% that maximises the number of input args as this gives the
% largest bottom clause. This assignment is
% is sought by means of a search procedure over mode sequences.
% Suppose we have a mode sequence M = <m1,m2,..m{i-1}> that uses the types T.
% An argument of type t in mode m{i} is an input iff t overlaps
% significantly (used in the same sense as earlier) with some type in T.
% Otherwise the argument is an output.
% The utility of each mode sequence M is f(M) = g(M) + h(M) where
% g(M) is the number of input args in M; and h(M) is a (lower) estimate
% of the number of input args in any mode sequence of which M is a prefix.
% The search strategy adopted is a simple hill-climbing one.
%
% All very complicated: there must be a simpler approach.
% Requires generative background predicates.
% search_modes(+M)
% Automatic mode inference (see comment block above): number the argument
% types of the target and its determined predicates, collect their value
% sets, merge overlapping types into equivalence classes, then infer
% input/output mode declarations plus equality and negation modes.
% Driven to completion by the trailing fail; the catch-all always
% succeeds.
search_modes(M):-
	M:'$aleph_global'(targetpred,targetpred(N/A)),
	findall(N1/A1,determinations(N/A,N1/A1,M),L),
	number_types([N/A|L],0,TypedPreds,Last),
	get_type_elements(TypedPreds,M),
	interval_to_list(1-Last,Types),
	get_type_equivalences(Types,Equiv1,M),
	merge_equivalence_classes(Equiv1,Equiv,M),
	store_type_equivalences(Equiv,M),
	setting(typeoverlap,Thresh,M),
	infer_modes(TypedPreds,Thresh,Types,Modes,M),
	infer_equalities(EqModes,M),
	Modes = [_|BodyModes],
	infer_negations(BodyModes,NegModes),
	(setting(updateback,Update,M) -> true; Update = true),
	p_message('found modes'),
	add_inferred_modes(Modes,Update,M),
	add_inferred_modes(EqModes,Update,M),
	add_inferred_modes(NegModes,Update,M),
	fail.
search_modes(_M).

% number_types(+Preds,+L0,-TypedPreds,-L1)
% Assign a distinct integer "type number" to every argument position of
% every predicate: each Name/Arity becomes a term whose arguments are
% consecutive type numbers, counting up from L0 to L1.
number_types([],Last,[],Last).
number_types([N/A|T],L0,[Pred|T1],L1):-
	functor(Pred,N,A),
	L is L0 + A,
	number_types(A,L,Pred),
	number_types(T,L,T1,L1).

% number_types(+A,+N,?Pred)
% Fill argument positions A..1 of Pred with type numbers N, N-1, ...
number_types(0,_,_):- !.
number_types(A,N,Pred):-
	arg(A,Pred,N),
	A1 is A - 1,
	N1 is N - 1,
	number_types(A1,N1,Pred).
% get_type_elements(+TypedPreds,+M)
% For each type-numbered predicate, collect the set of ground values seen
% at each argument position, both from stored examples and from calling
% the predicate directly; results go into '$aleph_search'(modes,type(...)).
get_type_elements([],_M).
get_type_elements([Pred|Preds],M):-
	functor(Pred,Name,Arity),
	functor(Template,Name,Arity),
	interval_to_list(1-Arity,AL),
	get_type_elements(M:example(_,_,Template),Template,Pred,AL,M),
	get_type_elements(Template,Template,Pred,AL,M),
	get_type_elements(Preds,M).

% get_type_elements(+Fact,+Template,+Pred,+ArgList,+M)
% Failure-driven over argument positions: gather the sorted value set of
% each argument of Fact, merging with any values already recorded for
% that type number.
get_type_elements(Fact,Template,Pred,AL,M):-
	aleph_member(Arg,AL),
	findall(Val,(M:Fact,ground(Fact),arg(Arg,Template,Val)),Vals),
	arg(Arg,Pred,Type),
	sort(Vals,SVals),
	(retract(M:'$aleph_search'(modes,type(Type,_,OtherVals))) ->
		aleph_ord_union(SVals,OtherVals,ArgVals);
		ArgVals = SVals),
	length(ArgVals,N),
	asserta(M:'$aleph_search'(modes,type(Type,N,ArgVals))),
	fail.
get_type_elements(_,_,_,_,_M).

% get_type_equivalences(+Types,-Classes,+M)
% Partition type numbers into classes of types whose value sets intersect.
get_type_equivalences([],[],_M).
get_type_equivalences([First|Rest],[Class|Classes],M):-
	get_type_equivalence(Rest,[First],Class,Left,M),
	get_type_equivalences(Left,Classes,M).

% get_type_equivalence(+Types,+ClassSoFar,-Class,-Left,+M)
% Grow one equivalence class; Left holds the types not absorbed.
get_type_equivalence([],Class1,Class,[],_):-
	sort(Class1,Class).
get_type_equivalence([Type|Rest],Class1,Class,Left,M):-
	type_equivalent(Class1,Type,M), !,
	get_type_equivalence(Rest,[Type|Class1],Class,Left,M).
get_type_equivalence([Type|Rest],Class1,Class,[Type|Left],M):-
	get_type_equivalence(Rest,Class1,Class,Left,M).
% merge_equivalence_classes(+Classes1,-Classes2,+M)
% Repeatedly merge any two classes that share an equivalent type until
% no further merges are possible.
merge_equivalence_classes([Class],[Class],_M):- !.
merge_equivalence_classes(Classes1,Classes2,M):-
	aleph_delete(Class1,Classes1,Left),
	aleph_delete(Class2,Left,Left1),
	class_equivalent(Class1,Class2,M), !,
	aleph_ord_union(Class1,Class2,NewClass),
	merge_equivalence_classes([NewClass|Left1],Classes2,M).
merge_equivalence_classes(Classes,Classes,_M).

% class_equivalent(+Class1,+Class2,+M)
% True if some type in Class1 is equivalent to a type in Class2.
class_equivalent(Class1,Class2,M):-
	aleph_member(Type1,Class1),
	type_equivalent(Class2,Type1,M), !.

% type_equivalent(+Class,+Type,+M)
% True if Type's value set intersects that of some type in Class.
type_equivalent([T1|_],T2,M):-
	M:'$aleph_search'(modes,type(T1,_,E1)),
	M:'$aleph_search'(modes,type(T2,_,E2)),
	intersects(E1,E2), !.
type_equivalent([_|T],T2,M):-
	type_equivalent(T,T2,M).

% store_type_equivalences(+Classes,+M)
% For each class, record per-type facts carrying the canonical type
% (the class head), the class size, and the value set; pairwise overlap
% ratios are stored by store_type_overlaps/5.
store_type_equivalences([],_M).
store_type_equivalences([[CType|Class]|Classes],M):-
	length([CType|Class],N),
	store_type_equivalence([CType|Class],CType,N,M),
	store_type_equivalences(Classes,M).

% store_type_equivalence(+Types,+CType,+Neq,+M)
% Rewrite each type(T,N,E) fact into type(T,CType,Neq,N,E).
store_type_equivalence([],_,_,_M).
store_type_equivalence([Type|Types],CType,Neq,M):-
	retract(M:'$aleph_search'(modes,type(Type,N,Elements))),
	store_type_overlaps(Types,Type,Elements,N,M),
	asserta(M:'$aleph_search'(modes,type(Type,CType,Neq,N,Elements))),
	store_type_equivalence(Types,CType,Neq,M).

% store_type_overlaps(+Types,+T,+E,+N,+M)
% Record typeoverlap(T,T1,O,O1): O is |E∩E1|/N, O1 is |E∩E1|/N1.
store_type_overlaps([],_,_,_,_M).
store_type_overlaps([T1|Types],T,E,N,M):-
	M:'$aleph_search'(modes,type(T1,N1,E1)),
	aleph_ord_intersection(E1,E,Int),
	length(Int,NInt),
	O is NInt/N,
	O1 is NInt/N1,
	asserta(M:'$aleph_search'(modes,typeoverlap(T,T1,O,O1))),
	store_type_overlaps(Types,T,E,N,M).
% infer_modes(+TypedPreds,+Thresh,+Types,-Modes,+M)
% Infer a mode declaration for the head predicate first, then for the
% body predicates in a hill-climbing order (see infer_ordered_modes/7).
infer_modes([Head|Rest],Thresh,Types,[Head1|Rest1],M):-
	infer_mode(Head,Thresh,head,[],Head1,Seen,M),
	aleph_delete_list(Seen,Types,TypesLeft),
	infer_ordered_modes(Rest,Thresh,body,Seen,TypesLeft,Rest1,M).

% infer_ordered_modes(+Preds,+Thresh,+Loc,+Seen,+Left,-Modes,+M)
% Greedy ordering: score remaining predicates, pick the cheapest, infer
% its mode, and recurse with the updated seen/left type sets.
infer_ordered_modes([],_,_,_,_,[],_M):- !.
infer_ordered_modes(L,Thresh,Loc,Seen,Left,[Mode|Rest],M):-
	score_modes(L,Thresh,Seen,Left,ScoredPreds,M),
	keysort(ScoredPreds,[_-Pred|_]),
	infer_mode(Pred,Thresh,Loc,Seen,Mode,Seen1,M),
	aleph_delete(Pred,L,L1),
	aleph_delete_list(Seen1,Left,Left1),
	infer_ordered_modes(L1,Thresh,Loc,Seen1,Left1,Rest,M).

% score_modes(+Preds,+Thresh,+Seen,+Left,-Scored,+M)
% Cost of a predicate is -(G+H): G counts argument types already covered
% by Seen (inputs), H is a lower estimate of future input args gained.
score_modes([],_,_,_,[],_M).
score_modes([Pred|Preds],Thresh,Seen,Left,[Cost-Pred|Rest],M):-
	Pred =.. [_|Types],
	evaluate_backward(Types,Thresh,Seen,G,M),
	aleph_delete_list(Types,Left,Left1),
	estimate_forward(Seen,Thresh,Left1,H0,M),
	estimate_forward(Types,Thresh,Left1,H1,M),
	Diff is H1 - H0,
	(Diff < 0 -> H is 0; H is Diff),
	Cost is -(G + H),
	score_modes(Preds,Thresh,Seen,Left,Rest,M).

% evaluate_backward(+Types,+Thresh,+Seen,-Score,+M)
% Count (as 1.0 each) the types whose best overlap with Seen reaches
% Thresh.
evaluate_backward([],_,_,0.0,_M).
evaluate_backward([Type|Types],Thresh,Seen,Score,M):-
	best_overlap(Seen,Type,_,Overlap,M),
	(Overlap >= Thresh -> Score1 = 1.0; Score1 = 0.0),
	evaluate_backward(Types,Thresh,Seen,Score2,M),
	Score is Score1 + Score2.

% estimate_forward(+Types,+Thresh,+Left,-Score,+M)
% Sum over Types of the number of remaining types that overlap them
% at least Thresh.
estimate_forward([],_,_,0.0,_M).
estimate_forward([Type|Types],Thresh,Left,Score,M):-
	estimate_forward1(Left,Thresh,Type,S1,M),
	estimate_forward(Types,Thresh,Left,S2,M),
	Score is S1 + S2.
estimate_forward1([],_,_,0.0,_M).
estimate_forward1([T1|Types],Thresh,T,Score,M):-
	type_overlap(T1,T,O1,M),
	(O1 >= Thresh -> S1 is 1.0; S1 is 0.0),
	estimate_forward1(Types,Thresh,T,S2,M),
	Score is S1 + S2.
% infer_mode(+Pred,+Thresh,+Loc,+Seen0,-InferredMode,-Seen,+M)
% Build the mode term for a type-numbered predicate: modeh(*,...) for
% the target predicate, mode(*,...) otherwise; Seen is Seen0 plus the
% predicate's types.
infer_mode(Pred,Thresh,Loc,Seen0,InferredMode,Seen,M):-
	Pred =.. [Name|Types],
	infer_mode1(Types,Thresh,Loc,Seen0,Modes,M),
	Mode =.. [Name|Modes],
	length(Types,Arity),
	(M:'$aleph_global'(targetpred,targetpred(Name/Arity)) ->
		InferredMode = modeh(*,Mode);
		InferredMode = mode(*,Mode)),
	aleph_ord_union(Seen0,Types,Seen).

% infer_mode1(+Types,+Thresh,+Loc,+Seen,-Modes,+M)
% Decide +/- for each argument type: an argument whose best overlap with
% a seen type reaches Thresh becomes an input (+) named after the seen
% type's canonical name; otherwise it keeps its own type name and is an
% input in the head, an output in the body.  The typemapped/3 facts
% record the chosen renaming.
infer_mode1([],_,_,_,[],_M).
infer_mode1([Type|Types],Thresh,Loc,Seen,[Mode|Modes],M):-
	best_overlap(Seen,Type,Best,Overlap,M),
	(Overlap >= Thresh ->
		M:'$aleph_search'(modes,typemapped(Best,_,NewType)),
		asserta(M:'$aleph_search'(modes,typemapped(Type,Best,NewType))),
		concat([type,NewType],Name),
		Mode = +Name;
		(Overlap > 0.0 ->
			asserta(M:'$aleph_search'(modes,typemapped(Type,Best,Type)));
			asserta(M:'$aleph_search'(modes,typemapped(Type,Type,Type)))),
		concat([type,Type],Name),
		(Loc = head -> Mode = +Name; Mode = -Name)
	),
	infer_mode1(Types,Thresh,Loc,Seen,Modes,M).

% best_overlap(+Seen,+Type,-Best,-Overlap,+M)
% Best is the seen type with the highest overlap with Type; 0.0 when
% Seen is empty.
best_overlap([T1],T,T1,O,M):-
	!,
	type_overlap(T,T1,O,M).
best_overlap([T1|Types],T,Best,O,M):-
	type_overlap(T,T1,O1,M),
	best_overlap(Types,T,T2,O2,M),
	(O2 > O1 -> O is O2, Best = T2; O is O1, Best = T1).
best_overlap([],T,T,0.0,_M).

% type_overlap(+T,+T1,-O,+M)
% Look up the stored overlap of T with T1 (facts are stored with the
% smaller type number first); defaults to 0.0.
type_overlap(T,T1,O,M):-
	T > T1, !,
	(M:'$aleph_search'(modes,typeoverlap(T1,T,_,O)) -> true; O = 0.0).
type_overlap(T,T1,O,M):-
	(M:'$aleph_search'(modes,typeoverlap(T,T1,O,_)) -> true; O = 0.0).
% infer_equalities(-EqModes,+M)
% Collect mode(1,=) declarations re-introducing co-reference between
% differently named types of the same class (pairwise_equality/2) and
% grounding equalities (+T = #T).
infer_equalities(EqModes,M):-
	findall(mode(1,(Eq)),(pairwise_equality(Eq,M);grounding_equality(Eq,M)),
		EqL),
	sort(EqL,EqModes).

% infer_negations(+BodyModes,-NegModes)
% Add mode(1,not(P)) for every body mode whose arguments contain no
% output (-) argument.
infer_negations([],[]).
infer_negations([mode(_,Pred)|Modes],NegModes):-
	Pred =.. [_|Args],
	aleph_member1(-_,Args), !,
	infer_negations(Modes,NegModes).
infer_negations([mode(_,Pred)|Modes],[mode(1,not(Pred))|NegModes]):-
	infer_negations(Modes,NegModes).

% pairwise_equality(-Eq,+M)
% Equality mode between two distinct mapped type names in one class.
pairwise_equality((+N1 = +N2),M):-
	M:'$aleph_search'(modes,typemapped(_,Best,T1)),
	M:'$aleph_search'(modes,typemapped(Best,_,T2)),
	T1 \== T2,
	concat([type,T1],N1),
	concat([type,T2],N2).

% grounding_equality(-Eq,+M)
% Equality mode grounding a type to a constant of the same type.
grounding_equality((+N1 = #N1),M):-
	M:'$aleph_search'(modes,typemapped(T1,_,T1)),
	concat([type,T1],N1).

% add_inferred_modes(+Modes,+Flag,+M)
% Print each inferred mode; when Flag = true also execute it (asserting
% the declaration in M's background).
add_inferred_modes([],_,_M).
add_inferred_modes([Mode|Modes],Flag,M):-
	write(Mode), nl,
	(Flag = true -> M:Mode; true),
	add_inferred_modes(Modes,Flag,M).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% S T O C H A S T I C S E A R C H
% sample_clauses(+N,-Clauses)
% return sample of at most N legal clauses from hypothesis space
% If a bottom clause exists then
% Each clause is drawn randomly. The length of the clause is
% determined by:
% (a) user-specified distribution over clauselengths
% using set(clauselength_distribution,Distribution);
% Distribution is a list of the form p1-1, p2-2,...
% specifying that clauselength 1 has prob p1, etc.
% Note: sum pi must = 1. This is not checked; or
% (b) uniform distribution over all legal clauses.
% (if clauselength_distribution is not set)
% this uses a Monte-Carlo estimate of the number of
% legal clauses in the hypothesis space
% If a bottom clause does not exist, then legal clauses are constructed
% using the mode declarations. Only option (a) is allowed. If
% clauselength_distribution is not set, then a uniform distribution over
% lengths is assumed.
% Each element of Clauses is of the form L-[E,T,Lits,Clause] where
% L is the clauselength; E,T are example number and type (pos, neg) used
% to build the bottom clause; Lits contains the literal numbers in the
% bottom clause for Clause. If no bottom clause then E,T = 0 and Lits = []
% Clauses is in ascending order of clause length
% sample_clauses(+N,-Clauses,+M)
% Return a sample of at most N legal clauses (see large comment above):
% dispatch on whether a bottom clause is being constructed.
sample_clauses(N,Clauses,M):-
	setting(construct_bottom,Bottom,M),
	sample_nclauses(Bottom,N,Clauses,M).

% sample_nclauses(+Bottom,+N,-Clauses,+M)
% Bottom = false: sample directly from the mode language, using the
% clauselength_distribution setting or a uniform one over 1..clauselength.
% Otherwise: sample from the bottom clause, bounding the sample by a
% Monte-Carlo estimate of the space size when no distribution is set.
sample_nclauses(false,N,Clauses,M):-
	!,
	gen_auto_refine(M),
	(setting(clauselength_distribution,D,M) -> true;
		setting(clauselength,CL,M),
		Uniform is 1.0/CL,
		distrib(1-CL,Uniform,D)),
	sample_nclauses_using_modes(N,D,CList,M),
	remove_alpha_variants(CList,CList1),
	keysort(CList1,Clauses).
sample_nclauses(_,N,Clauses,M):-
	retractall(M:'$aleph_sat'(random,rselect(_))),
	(M:'$aleph_sat'(example,example(_,_)) -> true; rsat(M)),
	setting(clauselength,CL,M),
	% Universe is either a clauselength distribution (if set) or the
	% estimated number of legal clauses in the space
	(setting(clauselength_distribution,Universe,M) ->
		Sample is N;
		estimate_numbers(CL,1,400,Universe,M),
		(N > Universe -> Sample is Universe; Sample is N)),
	get_clause_sample(Sample,Universe,CL,CList,M),
	keysort(CList,Clauses).

% sample_nclauses_using_modes(+N,+D,-Clauses)
% get upto N legal clauses using mode declarations
% and distribution D over clauselengths
sample_nclauses_using_modes(0,_,[],_M):- !.
sample_nclauses_using_modes(N,D,[Clause|Rest],M):-
	legal_clause_using_modes(100,D,Clause,M),
	N1 is N - 1,
	sample_nclauses_using_modes(N1,D,Rest,M).
% legal_clause_using_modes(+N,+D,-Clause,M)
% make at most N attempts to obtain a legal clause Clause
% from mode language using distribution D over clauselengths
% if all N attempts fail, then just return most general clause
legal_clause_using_modes(N,D,L-[0,0,[],Clause],M):-
	N > 0,
	sample_clause_using_modes(D,L,Clause,M),
	\+(M:prune(Clause)),
	split_clause(Clause,Head,Body),
	(setting(language,Lang,M) ->
		lang_ok(Lang,Head,Body);
		true),
	(setting(newvars,NewVars,M) ->
		newvars_ok(NewVars,Head,Body);
		true),
	!.
legal_clause_using_modes(N,D,Clause,M):-
	N > 1,
	N1 is N - 1,
	legal_clause_using_modes(N1,D,Clause,M), !.
legal_clause_using_modes(_,_,1-[0,0,[],Clause],M):-
	% fallback: a most-general clause of length 1
	sample_clause_using_modes([1.0-1],1,Clause,M).

% sample_clause_using_modes(+D,-L,-Clause,+M)
% Draw a clause: pick a random head from the refinements of aleph_false,
% draw a length L from distribution D, then extend the head L-1 times.
sample_clause_using_modes(D,L,Clause,M):-
	findall(H,auto_refine(aleph_false,H,M),HL),
	HL \= [],
	random_select(Head,HL,_),
	draw_element(D,L),
	(L = 1 -> Clause = Head;
		L1 is L - 1,
		sample_clause_using_modes(L1,Head,Clause,M)).

% sample_clause_using_modes(+N,+ClauseSoFar,-Clause,+M)
% Extend ClauseSoFar by N randomly chosen refinements; stops early
% (returning the clause built so far) when no refinement exists.
sample_clause_using_modes(N,ClauseSoFar,Clause,M):-
	findall(C,auto_refine(ClauseSoFar,C,M),CL),
	CL \= [], !,
	(N = 1 -> random_select(Clause,CL,_);
		random_select(C1,CL,_),
		N1 is N - 1,
		sample_clause_using_modes(N1,C1,Clause,M)).
sample_clause_using_modes(_,Clause,Clause,_M).
% get_clause_sample(+N,+U,+CL,-Clauses,M)
% get upto N legal clauses of at most length CL drawn from universe U
% U is either the total number of legal clauses
% or a distribution over clauselengths
% the clauses are constructed by drawing randomly from bottom
get_clause_sample(0,_,_,[],_):- !.
get_clause_sample(N,Universe,CL,[L-[E,T,C1,C]|Clauses],M):-
	% pick a target length: via a random clause number when Universe is
	% a count, or by drawing from the length distribution otherwise
	(number(Universe) ->
		get_rrandom(Universe,ClauseNum),
		num_to_length(ClauseNum,CL,L,M),
		UpperLim is CL;
		draw_element(Universe,L),
		UpperLim is L),
	draw_legalclause_wo_repl(L,UpperLim,C,C1,M), !,
	M:'$aleph_sat'(example,example(E,T)),
	N1 is N - 1,
	get_clause_sample(N1,Universe,CL,Clauses,M).
get_clause_sample(N,Universe,CL,Clauses,M):-
	% drawing failed at this length: try again with a smaller budget
	N1 is N - 1,
	get_clause_sample(N1,Universe,CL,Clauses,M).

% draw_legalclause_wo_repl(+L,+CL,-C,-Lits,M)
% randomly draw without replacement a legal clause of length >= L and =< CL
% also returns literals from bottom used to construct clause
draw_legalclause_wo_repl(L,CL,C,C1,M):-
	L =< CL,
	randclause_wo_repl(L,C,legal,C1,M), !.
draw_legalclause_wo_repl(L,CL,C,C1,M):-
	L < CL,
	L1 is L + 1,
	draw_legalclause_wo_repl(L1, CL,C,C1,M).
% estimate_clauselength_distribution(+L,+T,+K,-D,M)
% for each clauselength l <= L, estimate the probability of
% drawing a good clause
% here, a ``good clause'' is one that is in the top K-percentile of clauses
% estimation is by Monte Carlo using at most T trials
% probabilities are normalised to add to 1
estimate_clauselength_distribution(L,T,K,D,M):-
	% reuse a previously cached distribution for this example
	M:'$aleph_sat'(example,example(Type,Example)),
	M:'$aleph_sat'(random,clauselength_distribution(Type,Example,L,T,K,D)), !.
estimate_clauselength_distribution(L,T,K,D,M):-
	setting(evalfn,Evalfn,M),
	estimate_clauselength_scores(L,T,Evalfn,[],S,M),
	select_good_clauses(S,K,Good),
	estimate_frequency(L,Good,Freq),
	normalise_distribution(Freq,D),
	(M:'$aleph_sat'(example,example(Type,Example)) ->
		asserta(M:'$aleph_sat'(random,clauselength_distribution(Type,
					Example,L,T,K,D)));
		true).

% estimate_clauselength_scores(+L,+T,+Evalfn,+Acc,-Scores,+M)
% For each length L down to 1: force sampling at that length, sample T
% clauses, and accumulate their -Score-Length pairs.
estimate_clauselength_scores(0,_,_,S,S,_):- !.
estimate_clauselength_scores(L,T,Evalfn,S1,S,M):-
	set(clauselength_distribution,[1.0-L],M),
	p1_message('Estimate scores of clauses with length'), p_message(L),
	sample_clauses(T,Clauses,M),
	estimate_scores(Clauses,Evalfn,S1,S2,M),
	L1 is L - 1,
	estimate_clauselength_scores(L1,T,Evalfn,S2,S,M).

% estimate_scores(+Clauses,+Evalfn,+Acc,-Scores,+M)
% Label each sampled clause, compute its evaluation value, and add
% -Val-L to the accumulator (negated so keysort puts best first).
estimate_scores([],_,S,S,_M):- !.
estimate_scores([L-[_,_,_,C]|Rest],Evalfn,S1,S,M):-
	label_create(C,Label,M),
	extract_count(pos,Label,PC),
	extract_count(neg,Label,NC),
	complete_label(Evalfn,C,[PC,NC,L],[_,_,_,Val|_],M),
	estimate_scores(Rest,Evalfn,[-Val-L|S1],S,M).
% ``good'' clauses are defined to be those in the top K-percentile
% policy on ties is to include them
% select_good_clauses(+Scored,+K,-Good)
% Sort the score-length pairs and keep the best K percent, extending
% past the cut-off to include all entries tied on score.
select_good_clauses(S,K,Good):-
	keysort(S,S1),
	length(S1,Total),
	N is integer(K*Total/100),
	select_good_clauses(S1,N,[],Good).

% select_good_clauses(+Sorted,+N,+Acc,-Good)
% Take N entries, delegating runs of equal scores to the /7 helper so
% that ties are taken together.
select_good_clauses([],_,Good,Good):- !.
select_good_clauses(_,N,Good,Good):- N =< 0, !.
select_good_clauses([Score-X|T],N,GoodSoFar,Good):-
	select_good_clauses(T,Score,N,[Score-X|GoodSoFar],N0,Good1,T1),
	N1 is N0 - 1,
	select_good_clauses(T1,N1,Good1,Good).

% select_good_clauses(+Rest,+Score,+N,+Acc,-N0,-Good1,-Left)
% Consume all leading entries with the same Score, decrementing N.
select_good_clauses([],_,N,G,N,G,[]):- !.
select_good_clauses([Score-X|T],Score,N,GoodSoFar,N0,Good1,T1):-
	!,
	N1 is N - 1,
	select_good_clauses(T,Score,N1,[Score-X|GoodSoFar],N0,Good1,T1).
select_good_clauses(L,_,N,G,N,G,L).
estimate_frequency(0,_,[]).
estimate_frequency(L,Good,[N-L|T]):-
count_frequency(Good,L,N),
L1 is L - 1,
estimate_frequency(L1,Good,T).
count_frequency([],_,0).
count_frequency([Entry|T],X,N):-
count_frequency(T,X,N1),
(Entry = _-X -> N is N1 + 1; N is N1).
% estimate total number of legal clauses in space
% bounded by bot
% entry point: saturates an example if none is saturated, then runs the
% Monte Carlo estimate with 1 trial of sample size 400 up to the
% clauselength setting
estimate_numbers(Total,M):-
(M:'$aleph_sat'(example,example(_,_)) -> true; rsat),
setting(clauselength,CL,M),
estimate_numbers(CL,1,400,Total,M).
% estimate_numbers(+L,+Trials,+Sample,-T,M)
% estimate total number of legal clauses of length <= L in space
% bounded by bot
% estimated number is cached for future use
% estimation is by Monte Carlo, averaged over Trials trials
% with given sample size
% first clause: reuse cached total for the same example and parameters
estimate_numbers(L,Trials,Sample,Total,M):-
M:'$aleph_sat'(example,example(Type,Example)),
M:'$aleph_sat'(random,sample(Type,Example,L,Trials,Sample)),
M:'$aleph_sat'(random,hypothesis_space(Total)), !.
% second clause: recompute, replacing any stale cached values
estimate_numbers(L,Trials,Sample,Total,M):-
retractall(M:'$aleph_sat'(random,sample(_,_,_,_,_))),
retractall(M:'$aleph_sat'(random,hypothesis_space(_))),
estimate_numbers(L,Trials,Sample,0,Total,M),
asserta(M:'$aleph_sat'(random,hypothesis_space(Total))),
M:'$aleph_sat'(example,example(Type,Example)),
asserta(M:'$aleph_sat'(random,sample(Type,Example,L,Trials,Sample))).
% estimate_numbers(+L,+Trials,+Sample,+TotalSoFar,-Total)
% estimate the number of legal clauses of length <= L
% estimated number of legal clauses at each length are cached for future use
% TotalSoFar is an accumulator of the number legal clauses so far
% Total is the cumulative total of the number of legal clauses
estimate_numbers(0,_,_,T,T,_M):- !.
estimate_numbers(L,Trials,Sample,TotalSoFar,T,M):-
retractall(M:'$aleph_sat'(random,number_of_clauses(L,_))),
estimate_number(Trials,Sample,L,T0,M),
asserta(M:'$aleph_sat'(random,number_of_clauses(L,T0))),
L1 is L - 1,
T1 is T0 + TotalSoFar,
estimate_numbers(L1,Trials,Sample,T1,T,M).
% estimate_number(+T,+S,+L,-N,M)
% monte carlo estimate of number of clauses of length L
% estimate formed from average over T trials with sample S
% no clause can be longer than the bottom clause, so the estimate is 0
estimate_number(_,_,L,0,M):-
M:'$aleph_sat'(lastlit,Last),
Last < L, !.
estimate_number(T,S,L,N,M):-
T > 0,
p1_message('Estimate legal clauses with length'), p_message(L),
estimate_number(T,S,0,L,Total,M),
N is float(Total/T),
concat(['trials=',T,' sample=', S, ' estimate=', N],Mess),
p_message(Mess).
% estimate_number(+T,+S,+Acc,+L,-N,M): sum the per-trial estimates
estimate_number(1,S,Total,L,N,M):-
!,
estimate_number(L,S,N1,M),
N is Total + N1.
estimate_number(T,S,Total,L,N,M):-
p_message('New Trial'),
estimate_number(L,S,N1,M),
Total1 is Total + N1,
T1 is T - 1,
estimate_number(T1,S,Total1,L,N,M).
% estimate_number(+L,+S,-N)
% estimate the number of legal clauses of length L in the search space
% estimation based on sample size S
estimate_number(1,_,1,_M):- !.
estimate_number(L,S,N,M):-
estimate_proportion(S,L,legal,P,_,M),
M:'$aleph_sat'(lastlit,Last),
total_clauses(L,Last,Total),
N is float(P*Total).
% estimate_proportion(+N,+L,+S,-P,-Clauses,M)
% estimate prop. of at most N random clauses of length L and status S
% clauses are generated without replacement
% S is one of legal or illegal depending on whether C is inside or
% outside the mode language provided
% Clauses is the list of at most N def. clauses
% If S is a variable then clauses can be legal or illegal
% Thus estimate_proportion(10000,2,S,P,C,M) returns the
% proportion and list of 2 literal clauses which are either
% legal or illegal in a sample of at most 10000
% Keeps legal clauses obtained in rselect_legal for later use
estimate_proportion(0,_,_,0,[],_M):- !.
estimate_proportion(N,L,S,P,Clauses,M):-
retractall(M:'$aleph_sat'(random,rselect(_))),
retractall(M:'$aleph_sat'(random,rselect_legal(L,_,_,_,_))),
get_random_wo_repl(N,L,Clauses,M),
length(Clauses,Total),
count_clause_status(Clauses,S,A,_),
% guard against an empty sample to avoid division by zero
(Total = 0 -> P = 0; P is A/Total),
M:'$aleph_sat'(example,example(E,T)),
retractall(M:'$aleph_sat'(random,rselect(_))),
store_legal_clauses(Clauses,L,E,T,M).
% get_random_wo_repl(+N,+L,-List,M)
% randomly construct at most N definite clauses of length L
% returns Status/Clause list where Status is one of legal/illegal
% BUG FIX: the N = 0 base case was written with arity 5
% (get_random_wo_repl(0,_,[],_,_M)) and so could never match the
% arity-4 recursive call below; the sample-size bound N was therefore
% never enforced and sampling only stopped when randclause_wo_repl
% failed (for L = 1, where it always succeeds, this could loop).
get_random_wo_repl(0,_,[],_M):- !.
get_random_wo_repl(N,L,[S/[C,C1]|Clauses],M):-
randclause_wo_repl(L,C,S,C1,M), !,
N1 is N - 1,
get_random_wo_repl(N1,L,Clauses,M).
% clause generation failed: return the clauses obtained so far
get_random_wo_repl(_,_,[],_M).
% print_distribution
% tabulate the cached per-length clause-count estimates, then report
% the cached total hypothesis-space size (0 if none has been computed)
print_distribution(M):-
write('Clause Length'), tab(8), write('Estimated number of clauses'), nl,
write('_____________'), tab(8), write('___________________________'), nl,
findall(L-N,M:'$aleph_sat'(random,number_of_clauses(L,N)),List),
sort(List,List1),
aleph_member(L-N,List1),
write(L), tab(20), write(N), nl,
% failure-driven loop over all cached lengths
fail.
print_distribution(M):-
nl,
write('Estimated size of hypothesis space = '),
(M:'$aleph_sat'(random,hypothesis_space(S)) -> true; S = 0),
write(S), write(' clauses'), nl.
% count_clause_status(+List,+Status,-C1,-C2)
% count number of clauses in List with status Status
% C1 is the number of such clauses
% C2 is the number of clauses with some other status
% if Status is unbound, no counting is done (C1 left unbound, C2 = 0)
count_clause_status(_,S,_,0):-
var(S), !.
count_clause_status(Clauses,S,A,B):-
count_clause_status1(Clauses,S,A,B).
count_clause_status1([],_,0,0):- !.
count_clause_status1([S1/_|T],S,A,B):-
count_clause_status1(T,S,A1,B1),
(S == S1 -> A is A1 + 1, B is B1; A is A1, B is B1 + 1).
% store_legal_clauses(+List,+L,+E,+T)
% store all legal clauses of length L obtained with bottom clause for
% example E of type T
% useful later when a random legal clause of length L is required
store_legal_clauses([],_,_,_,_M).
store_legal_clauses([S/[C,C1]|Clauses],L,E,T,M):-
(S == legal ->
asserta(M:'$aleph_sat'(random,rselect_legal(L,E,T,C,C1)));
true),
store_legal_clauses(Clauses,L,E,T,M).
% randclause_wo_repl(+L,-C,-S,-Lits)
% as randclause/4 but ensures that clause obtained is without replacement
% only makes at most 100 attempts to find such a clause
% also returns lits from bottom clause selected
% if all attempts fail, then return the most general clause
randclause_wo_repl(L,C,S,C1,M):-
randclause_wo_repl(100,L,C,S,C1,M).
% randclause_wo_repl(+N,+L,-C,-S,-Lits,M): N attempts remaining
randclause_wo_repl(N,L,C,S,C1,M):-
N > 0,
randclause(L,C,S,C1,M), % if not accounting for variable renamings
% copy_term(C,C1), % if accounting for variable renamings
% numbervars(C1,0,_), % if accounting for variable renamings
% reject user-pruned clauses and those outside language/newvars limits
\+(M:prune(C)),
split_clause(C,Head,Body),
(setting(language,Lang,M) ->
lang_ok(Lang,Head,Body);
true),
(setting(newvars,NewVars,M) ->
newvars_ok(NewVars,Head,Body);
true),
% the rselect record marks clauses already drawn (no replacement)
\+(M:'$aleph_sat'(random,rselect(C1))), !,
asserta(M:'$aleph_sat'(random,rselect(C1))).
% attempt failed: retry with one fewer attempt remaining
randclause_wo_repl(N,L,C,S,C1,M):-
N > 0,
N1 is N - 1,
randclause_wo_repl(N1,L,C,S,C1,M), !.
% all attempts exhausted: fall back to the most general (unit) clause
randclause_wo_repl(_,1,C,S,C1,M):-
randclause(1,C,S,C1,M). % if not accounting for variable renamings
% copy_term(C,C1), % if accounting for variable renamings
% numbervars(C1,0,_), % if accounting for variable renamings
% randclause(+L,-C,-S,-Lits,M)
% returns definite clause C of length L with status S comprised of Lits
% drawn at random from the bottom clause
% also returns the literals in the bottom clause that were selected
% body literals of C are randomly selected from the bottom clause
% S is one of legal or illegal depending on whether C is inside or
% outside the mode language provided
% needs a bottom clause to be constructed before it is meaningful
% this can be done with the sat predicate for eg: sat(1)
% if set(store_bottom,true) then use stored bottom clause instead
% if S is legal, then checks to see if previously generated legal
% clauses exist for this bottom clause (these would have been generated
% when trying to estimate the number of legal clause at each length)
% length 1: the clause is just the head literal (always legal)
randclause(1,C,legal,[1],M):-
!,
bottom_key(_,_,Key,_,M),
(Key = false ->
get_pclause([1],[],C,_,_,_,M);
get_pclause([1],Key,[],C,_,_,_,M)).
% reuse a cached legal clause of this length, consuming it from the DB
randclause(L,C,Status,Lits,M):-
Status == legal,
M:'$aleph_sat'(example,example(E,T)),
retract(M:'$aleph_sat'(random,rselect_legal(L,E,T,C,Lits))).
% can do things more efficiently if we want to generate legal clauses only
randclause(L,C,Status,Lits,M):-
Status == legal, !,
bottom_key(_,_,Key,_,M),
(Key = false ->
M:'$aleph_sat_litinfo'(1,_,_,_,_,D);
M:'$aleph_sat_litinfo'(1,Key,_,_,_,_,D)),
L1 is L - 1,
% retry until a legal body is drawn from the head's dependents
repeat,
randselect1(L1,Key,D,[1],BodyLits,M),
Lits = [1|BodyLits],
clause_status(Lits,Key,[],legal,legal,M), !,
(Key = false ->
get_pclause(Lits,[],C,_,_,_,M);
get_pclause(Lits,Key,[],C,_,_,_,M)).
% general case: draw body literals uniformly and match the requested status
randclause(L,C,Status,Lits,M):-
L1 is L - 1,
bottom_key(_,_,Key,_,M),
(Key = false ->
M:'$aleph_sat'(lastlit,Last);
M:'$aleph_sat'(lastlit,Key,Last)),
repeat,
randselect(L1,Last,Key,[],BodyLits,M),
aleph_append(BodyLits,[1],Lits),
clause_status(Lits,Key,[],legal,Status1,M),
Status1 = Status, !,
(Key = false ->
get_pclause(Lits,[],C,_,_,_,M);
get_pclause(Lits,Key,[],C,_,_,_,M)).
% clause_status(+Lits,+LitsSoFar,+StatusSoFar,-Status,M)
% compute status of a clause
% Lits is the lits left to add to the clause
% LitsSoFar is the lits in the clause so far
% StatusSoFar is the Status of the clause so far
% if a literal to be added contains unbound input vars then
% status is illegal
clause_status(Lits,LitsSoFar,Status1,Status2,M):-
bottom_key(_,_,Key,_,M),
clause_status(Lits,Key,LitsSoFar,Status1,Status2,M).
clause_status([],_,_,S,S,_M):- !.
clause_status([Lit|Lits],Key,LitsSoFar,S,S1,M):-
% Lit is legal here iff all its input vars are outputs of earlier lits
get_ovars(LitsSoFar,Key,[],OVars,M),
get_ivars([Lit],Key,[],IVars,M),
aleph_subset1(IVars,OVars), !,
aleph_append([Lit],LitsSoFar,Lits1),
clause_status(Lits,Key,Lits1,S,S1,M).
% some input var was unbound: the whole clause is illegal
clause_status(_,_,_,_,illegal,_M).
% randselect(+L,+Last,+Key,+LitsSoFar,-Lits,M)
% randomly select L distinct literals to give Lits
% Last is the last literal number in the bottom clause
% LitsSoFar is the literals selected so far
randselect(0,_,_,_,[],_M):- !.
% stop early when all body literals (2..Last) have been taken
randselect(_,Last,_,LitsSoFar,[],_M):-
length(LitsSoFar,L1),
L1 is Last - 1, !.
randselect(L,Last,Key,LitsSoFar,[LitNum|Lits],M):-
get_rand_lit(Last,Key,LitsSoFar,LitNum,M),
L1 is L - 1,
randselect(L1,Last,Key,[LitNum|LitsSoFar],Lits,M).
% randselect1(+L,+Key,+Avail,+LitsSoFar,-Lits,M)
% randomly select L distinct literals from Avail to give Lits
% LitsSoFar is the literals selected so far
randselect1(0,_,_,_,[],_M):- !.
randselect1(_,_,[],_,[],_M):- !.
randselect1(L,Key,Avail,LitsSoFar,[LitNum|Lits],M):-
random_select(LitNum,Avail,Left),
(Key = false ->
M:'$aleph_sat_litinfo'(LitNum,_,_,_,_,D);
M:'$aleph_sat_litinfo'(LitNum,Key,_,_,_,_,D)),
% grow the pool with LitNum's dependents, minus lits already taken
update_list(D,Left,Left1),
aleph_delete_list([LitNum|LitsSoFar],Left1,Avail1),
L1 is L - 1,
randselect1(L1,Key,Avail1,[LitNum|LitsSoFar],Lits,M).
% get_rand_lit(+Last,+Key,+LitsSoFar,-LitNum,M)
% randomly select a literal number from 2 - Last
% and not in list LitsSoFar
% 2 because 1 is reserved for head literal
get_rand_lit(Last,Key,LitsSoFar,LitNum,M):-
repeat,
get_rand_lit(Last,Key,LitNum,M),
\+(aleph_member(LitNum,LitsSoFar)),
!.
% have to use repeat/0 in case literal number from random no generator
% no longer exists in lits database
get_rand_lit(Last,Key,LitNum,M):-
repeat,
get_random(Last,LitNum),
LitNum > 1,
(Key = false ->
M:'$aleph_sat_litinfo'(LitNum,_,_,_,_,_);
M:'$aleph_sat_litinfo'(LitNum,Key,_,_,_,_,_)), !.
% total_clauses(+L,+N1,-N2)
% total number of clauses of length L is N2
% constructed from bottom clause of length N1
% i.e. N2 = (N1-1)*(N1-2)*...*(N1-L+1) as a float, with N2 = 1.0 for L = 1
total_clauses(1,_,1.0):- !.
total_clauses(Len,BotLen,Count):-
Len0 is Len - 1,
BotLen0 is BotLen - 1,
total_clauses(Len0,BotLen0,Count0),
Count is Count0*BotLen0.
% num_to_length(+N,+CL,-L,M)
% find length of clause numbered N
% clause length should be =< CL
num_to_length(N,_,1,_M):- N =< 1.0, !.
num_to_length(N,CL,L,M):-
num_to_length1(2,CL,N,1.0,L,M).
% num_to_length1(+L,+CL,+N,+TotalSoFar,-Length,M)
% walk the cached per-length clause counts, accumulating totals until
% the running total covers clause number N
num_to_length1(L,CL,_,_,CL,_M):-
L >= CL, !.
num_to_length1(L,CL,N,TotalSoFar,Length,M):-
M:'$aleph_sat'(random,number_of_clauses(L,T)),
NClauses is TotalSoFar + T,
(N =< NClauses ->
(T < 1.0 -> Length is L - 1; Length = L) ;
L1 is L + 1,
num_to_length1(L1,CL,N,NClauses,Length,M)).
% refinement operator for randomised local search
% Type is one of clauses or theories
% starting point for a clause-space search: draw a fresh random clause
rls_refine(clauses,_-[_,_,_,aleph_false],Clause,M):-
!,
sample_clauses(1,[Clause],M),
\+(old_move(clauses,Clause,M)).
% one local move in clause space; the move counter lives in the database
% under '$aleph_search'(rls_move,_) and is bounded by the moves setting
rls_refine(clauses,Clause1,Clause2,M):-
setting(moves,Max,M),
MaxMoves is Max,
once(retract(M:'$aleph_search'(rls_move,Mov))),
Mov =< MaxMoves,
p1_message('move'), p_message(Mov),
Mov1 is Mov + 1,
asserta(M:'$aleph_search'(rls_move,Mov1)),
clause_move(Move,Clause1,Clause2,M),
p_message(Move),
\+(old_move(clauses,Clause2,M)).
% starting point for a theory-space search: add a clause to the empty theory
rls_refine(theories,[_-[_,_,_,aleph_false]],Theory,M):-
!,
once(theory_move(add_clause,[],Theory,M)),
\+(old_move(theories,Theory,M)).
% one local move in theory space
% BUG FIX: this clause previously used the variable M both as the module
% qualifier and as the move counter (retract(M:'$aleph_search'(rls_move,M))),
% so the counter was unified with the module name and the bound test
% Mov =< MaxMoves could never work as intended; the counter is now kept
% in Mov/Mov1, mirroring the clause-space clause above.
rls_refine(theories,Theory1,Theory2,M):-
setting(moves,MaxMoves,M),
once(retract(M:'$aleph_search'(rls_move,Mov))),
Mov =< MaxMoves,
p1_message('move'), p_message(Mov),
Mov1 is Mov + 1,
asserta(M:'$aleph_search'(rls_move,Mov1)),
theory_move(_,Theory1,Theory2,M),
\+(old_move(theories,Theory2,M)).
% clause_move(+Type,+C1,-C2,M)
% local moves from clause C1 to give C2
% A move is:
% a) delete a literal from C1 (Type = delete_lit)
% b) add a legal literal to C1 (Type = add_lit)
% clauses are represented as Length-[Example,Type,BottomLits,Clause];
% BottomLits is [] when no bottom clause is in use (auto_refine mode)
clause_move(delete_lit,C1,C2,M):-
C1 = L-[E,T,Lits,Clause],
(Lits = [H|Rest] ->
% bottom-clause mode: drop a body literal, keep the head, and check
% that the result is still legal and not user-pruned
aleph_delete(_,Rest,Left),
Lits1 = [H|Left],
bottom_key(E,T,Key,_,M),
clause_status(Lits1,Key,[],legal,legal,M),
L1 is L - 1,
(Key = false ->
get_pclause(Lits1,[],Clause1,_,_,_,M);
get_pclause(Lits1,Key,[],Clause1,_,_,_,M)),
\+(M:prune(Clause1)) ;
% no bottom clause: delete directly from the clause body, requiring
% the result to remain mode-linked
clause_to_list(Clause,[Head|Body]),
aleph_delete(_,Body,Left),
aleph_mode_linked([Head|Left],M),
list_to_clause([Head|Left],Clause1),
\+(M:prune(Clause1)),
L1 is L - 1,
Lits1 = []),
C2 = L1-[E,T,Lits1,Clause1].
clause_move(add_lit,C1,C2,M):-
C1 = L-[E,T,Lits,Clause],
setting(clauselength,CL,M),
L < CL,
(Lits = [] ->
% no bottom clause: rely on the user-supplied refinement operator
auto_refine(Clause,Clause1,M),
L1 is L + 1,
Lits1 = [];
% bottom-clause mode: add a dependent of some selected literal that
% is not already in the clause, and check legality and pruning
aleph_delete(Lit,Lits,Left),
bottom_key(E,T,Key,_,M),
(Key = false ->
M:'$aleph_sat_litinfo'(Lit,_,_,_,_,D);
M:'$aleph_sat_litinfo'(Lit,Key,_,_,_,_,D)),
aleph_member(Lit1,D),
\+(aleph_member(Lit1,Left)),
aleph_append([Lit1],Lits,Lits1),
clause_status(Lits1,Key,[],legal,legal,M),
L1 is L + 1,
(Key = false ->
get_pclause(Lits1,[],Clause1,_,_,_,M);
get_pclause(Lits1,Key,[],Clause1,_,_,_,M)),
\+(M:prune(Clause1))),
C2 = L1-[E,T,Lits1,Clause1].
% theory_move(+Type,+T1,-T2,M)
% local moves from theory T1 to give T2
% A move is:
% a) delete a clause from T1 (Type = delete_clause)
% b) add a legal clause to T1 (Type = add_clause)
% c) delete a literal from a clause in T1 (Type = delete_lit)
% d) add a legal literal to a clause in T1 (Type = add_lit)
theory_move(delete_clause,T1,T2,_M):-
aleph_delete(_,T1,T2),
% never move to the empty theory
T2 \= [].
theory_move(add_clause,T1,T2,M):-
setting(clauses,Max,M),
length(T1,L),
L < Max,
sample_clauses(1,[Clause],M),
aleph_append([Clause],T1,T2).
% the literal-level moves delegate to clause_move on one chosen clause
theory_move(delete_lit,T1,T2,M):-
aleph_delete(Clause,T1,T),
clause_move(delete_lit,Clause,Clause1,M),
aleph_append([Clause1],T,T2).
theory_move(add_lit,T1,T2,M):-
aleph_delete(Clause,T1,T),
clause_move(add_lit,Clause,Clause1,M),
aleph_append([Clause1],T,T2).
% old_move(+Type,+Move,M)
% succeeds iff Move (a clause or theory) has been seen before;
% otherwise records it in '$aleph_search_seen' and fails, so callers
% use \+ old_move(...) to accept only new moves
old_move(clauses,N-[_,_,L,C],M):-
% only cache short clauses (up to cache_clauselength, default 3)
(setting(cache_clauselength,N1,M) -> true; N1 = 3),
N =< N1,
% hash the clause canonically: by sorted bottom-lit numbers when
% available, otherwise by the sorted, numbervar'd literal list
(L = [] ->
clause_to_list(C,C1),
sort(C1,Hash),
numbervars(Hash,0,_);
sort(L,Hash)),
(M:'$aleph_search_seen'(N,Hash) ->
p_message('old move'),
true;
asserta(M:'$aleph_search_seen'(N,Hash)), !,
fail).
old_move(theories,T,M):-
% remove_alpha_variants(T,T1),
% NOTE(review): _Hash is never bound below, so the seen-record is
% asserted with an unbound second argument and any later theory of
% the same length N will match as an ``old move''. This looks like a
% lost hash computation -- TODO confirm against upstream Aleph.
numbervars(T,0,_),
length(T,N),
(M:'$aleph_search_seen'(N,_Hash) ->
p_message('old move'),
true;
asserta(M:'$aleph_search_seen'(N,_Hash)), !,
fail).
% extract_clauses_with_length(+Labelled,-Pairs)
% strip the [Example,Type,Lits,Clause] labels from a list of sampled
% clauses, keeping only Length-Clause pairs
extract_clauses_with_length([],[]).
extract_clauses_with_length([Len-[_,_,_,Cl]|Rest],[Len-Cl|Pairs]):-
extract_clauses_with_length(Rest,Pairs).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% U T I L I T I E S
% concatenate elements of a list into an atom
concat([Atom],Atom):- !.
concat([H|T],Atom):-
concat(T,AT),
% name/2 converts atoms/numbers to and from character-code lists
name(AT,L2),
name(H,L1),
aleph_append(L2,L1,L),
name(Atom,L).
% split_clause(+Clause,-Head,-Body)
% decompose a clause in any of its representations ((H:-B), list, unit)
split_clause((Head:-true),Head,true):- !.
split_clause((Head:-Body1),Head,Body2):- !, Body1 = Body2.
split_clause([Head|T],Head,T):- !.
split_clause([Head],Head,[true]):- !.
split_clause(Head,Head,true).
% strip_true(+Clause,-Clause1): drop a redundant ``:- true'' body
strip_true((Head:-true),Head):- !.
strip_true(Clause,Clause).
% pretty print a definite clause
% dispatches on the portray_literals setting to choose plain or
% user-portrayed output
pp_dclause(Clause,M):-
(M:'$aleph_global'(portray_literals,set(portray_literals,true))->
pp_dclause(Clause,true,M);
pp_dclause(Clause,false,M)).
% pretty print a set of definite clauses
pp_dclauses(Theory,M):-
aleph_member(_-[_,_,_,Clause],Theory),
pp_dclause(Clause,M),
% failure-driven loop over all clauses in the theory
fail.
pp_dclauses(_,_M):- nl.
% pp_dclause(+Clause,+Pretty,M): Pretty is true/false
pp_dclause((H:-true),Pretty,M):-
!,
pp_dclause(H,Pretty,M).
pp_dclause((H:-B),Pretty,M):-
!,
% copy before numbervars so the caller's clause is not instantiated
copy_term((H:-B),(Head:-Body)),
numbervars((Head:-Body),0,_),
aleph_portray(Head,Pretty,M),
(Pretty = true ->
write(' if:');
write(' :-')),
nl,
M:'$aleph_global'(print,set(print,N)),
print_lits(Body,Pretty,1,N,M).
% unit clause
pp_dclause((Lit),Pretty,M):-
copy_term(Lit,Lit1),
numbervars(Lit1,0,_),
aleph_portray(Lit1,Pretty,M),
write('.'), nl.
% pretty print a definite clause list: head of list is + literal
pp_dlist([],_M):- !.
pp_dlist(Clause,M):-
(M:'$aleph_global'(portray_literals,set(portray_literals,true))->
pp_dlist(Clause,true,M);
pp_dlist(Clause,false,M)).
pp_dlist(Clause,Pretty,M):-
copy_term(Clause,[Head1|Body1]),
numbervars([Head1|Body1],0,_),
aleph_portray(Head1,Pretty,M),
(Body1 = [] ->
write('.'), nl;
(Pretty = true ->
write(' if:');
write(' :-')),
nl,
M:'$aleph_global'(print,set(print,N)),
print_litlist(Body1,Pretty,1,N,M)).
% print_litlist(+Lits,+Pretty,+LitNum,+LastLit,M)
% print a list of body literals, LastLit literals per line
print_litlist([],_,_,_,_M).
print_litlist([Lit],Pretty,LitNum,_,M):-
!,
print_lit(Lit,Pretty,LitNum,LitNum,'.',_,M).
print_litlist([Lit|Lits],Pretty,LitNum,LastLit,M):-
print_lit(Lit,Pretty,LitNum,LastLit,', ',NextLit,M),
print_litlist(Lits,Pretty,NextLit,LastLit,M).
% print_lits(+Body,+Pretty,+LitNum,+LastLit,M)
% as print_litlist but for a comma-conjunction body
print_lits((Lit,Lits),Pretty,LitNum,LastLit,M):-
!,
(Pretty = true ->
Sep = ' and ';
Sep = ', '),
print_lit(Lit,Pretty,LitNum,LastLit,Sep,NextLit,M),
print_lits(Lits,Pretty,NextLit,LastLit,M).
print_lits((Lit),Pretty,LitNum,_,M):-
print_lit(Lit,Pretty,LitNum,LitNum,'.',_,M).
% print_lit(+Lit,+Pretty,+LitNum,+LastLit,+Sep,-NextLit,M)
% print one literal followed by Sep; start a new line after LastLit
print_lit(Lit,Pretty,LitNum,LastLit,Sep,NextLit,M):-
(LitNum = 1 -> tab(3);true),
aleph_portray(Lit,Pretty,M), write(Sep),
(LitNum=LastLit-> nl,NextLit=1; NextLit is LitNum + 1).
% p1_message(+Mess): print ``[Mess] '' without a trailing newline
p1_message(Mess):-
format('[~w] ',[Mess]).
% p_message(+Mess): print ``[Mess]'' followed by a newline
p_message(Mess):-
format('[~w]~n',[Mess]).
% err_message(+Mess): print Mess flagged as an error
err_message(Mess):-
p1_message('error'), p_message(Mess).
% aleph_delete_all(+X,+List,-List1): remove every element == X
aleph_delete_all(_,[],[]).
aleph_delete_all(X,[Y|T],T1):-
X == Y, !,
aleph_delete_all(X,T,T1).
aleph_delete_all(X,[Y|T],[Y|T1]):-
aleph_delete_all(X,T,T1).
% aleph_delete_list(+Xs,+List,-List1): delete each member of Xs once
aleph_delete_list([],L,L).
aleph_delete_list([H1|T1],L1,L):-
aleph_delete(H1,L1,L2), !,
aleph_delete_list(T1,L2,L).
% element absent: skip it
aleph_delete_list([_|T1],L1,L):-
aleph_delete_list(T1,L1,L).
/**
 * aleph_delete(-El:term,-List:list,-Rest:list) is nondet
 *
 * Deletes element from list.
 */
aleph_delete(H,[H|T],T).
aleph_delete(H,[H1|T],[H1|T1]):-
aleph_delete(H,T,T1).
% aleph_delete1(+X,+List,-List1): delete first unifying occurrence (det)
aleph_delete1(H,[H|T],T):- !.
aleph_delete1(H,[H1|T],[H1|T1]):-
aleph_delete1(H,T,T1).
% aleph_delete0: as aleph_delete1 but succeeds with [] on empty list
aleph_delete0(_,[],[]).
aleph_delete0(H,[H|T],T):- !.
aleph_delete0(H,[H1|T],[H1|T1]):-
aleph_delete0(H,T,T1).
% aleph_append(+A,+B,-C): C is B with A appended (note argument order)
aleph_append(A,[],A):-!.
aleph_append(A,[H|T],[H|T1]):-
aleph_append(A,T,T1).
% aleph_remove_nth(+N,+List1,-Elem,-List2)
% remove the nth elem from a List
aleph_remove_nth(1,[H|T],H,T):- !.
aleph_remove_nth(N,[H|T],X,[H|T1]):-
N1 is N - 1,
aleph_remove_nth(N1,T,X,T1).
% aleph_remove_n(+N,+List1,-List2,-List3)
% remove the n elems from List1 into List2. List3 is the rest of List1
aleph_remove_n(0,L,[],L):- !.
aleph_remove_n(_,[],[],[]):- !.
aleph_remove_n(N,[H|T],[H|T1],L):-
N1 is N - 1,
aleph_remove_n(N1,T,T1,L).
% aleph_rpermute(+List1,-List2)
% randomly permute the elements of List1 into List2
aleph_rpermute(List1,List2):-
length(List1,N1),
aleph_rpermute(List1,N1,List2).
aleph_rpermute([],0,[]):- !.
aleph_rpermute(L1,N1,[X|Rest]):-
% draw one element uniformly from the N1 remaining and recurse
get_random(N1,R),
aleph_remove_nth(R,L1,X,L2),
N2 is N1 - 1,
aleph_rpermute(L2,N2,Rest).
% aleph_rsample(+N,+List1,-List2)
% randomly sample N elements from List1 into List2
aleph_rsample(N,List1,List2):-
length(List1,N1),
aleph_rsample(N,N1,List1,List2).
% sample size covers the whole list: return it unchanged
aleph_rsample(N,N1,L,L):- N >= N1, !.
% selection sampling: keep head with probability SampleSize/Total
aleph_rsample(SampleSize,Total,[X|L1],[X|L2]):-
get_random(Total,R),
R =< SampleSize, !,
SampleSize0 is SampleSize - 1,
Total0 is Total - 1,
aleph_rsample(SampleSize0,Total0,L1,L2).
aleph_rsample(SampleSize,Total,[_|L1],L2):-
Total0 is Total - 1,
aleph_rsample(SampleSize,Total0,L1,L2).
% get_first_n(+N,+List1,-List2)
% get the first n elements in List1
get_first_n(0,_,[]):- !.
get_first_n(_,[],[]):- !.
get_first_n(N,[H|T],[H|T1]):-
N1 is N - 1,
get_first_n(N1,T,T1).
% erase_refs(+List)
% erase database references: only works for Yap
erase_refs([]).
erase_refs([DbRef|DbRefs]):-
erase(DbRef),
erase_refs(DbRefs).
% max_in_list(+List,-Max)
% return largest element in a list (standard order of terms)
max_in_list([X],X):- !.
max_in_list([X|T],Z):-
max_in_list(T,Y),
(X @> Y -> Z = X; Z = Y).
% min_in_list(+List,-Min)
% return smallest element in a list (standard order of terms)
min_in_list([X],X):- !.
min_in_list([X|T],Z):-
min_in_list(T,Y),
(X @> Y -> Z = Y; Z = X).
% remove_alpha_variants(+List1,-List2):-
% remove alphabetic variants from List1 to give List2
remove_alpha_variants([],[]).
remove_alpha_variants([X|Y],L):-
aleph_member(X1,Y),
alphabetic_variant(X,X1), !,
remove_alpha_variants(Y,L).
remove_alpha_variants([X|Y],[X|L]):-
remove_alpha_variants(Y,L).
% alphabetic_variant(+Term1,+Term2)
% true if Term1 is the alphabetic variant of Term2
alphabetic_variant(Term1,Term2):-
% copy jointly so shared variables stay shared, then compare ground forms
copy_term(Term1/Term2,T1/T2),
numbervars(T1,0,_),
numbervars(T2,0,_),
T1 = T2.
% tparg(+TermPlace,+Term1,?Term2)
% return Term2 at position specified by TermPlace in Term1
tparg([Place],Term,Arg):-
!,
arg(Place,Term,Arg).
tparg([Place|Places],Term,Arg):-
arg(Place,Term,Term1),
tparg(Places,Term1,Arg).
% aleph_member1(+X,+List): membership with unification, deterministic
aleph_member1(H,[H|_]):- !.
aleph_member1(H,[_|T]):-
aleph_member1(H,T).
% aleph_member2(+X,+List): membership by strict identity (==)
aleph_member2(X,[Y|_]):- X == Y, !.
aleph_member2(X,[_|T]):-
aleph_member2(X,T).
% aleph_member3(?X,+A-B): X enumerates the integers A..B
aleph_member3(A,A-B):- A =< B.
aleph_member3(X,A-B):-
A < B,
A1 is A + 1,
aleph_member3(X,A1-B).
% aleph_member(?X,?List): standard nondeterministic member/2
aleph_member(X,[X|_]).
aleph_member(X,[_|T]):-
aleph_member(X,T).
% aleph_reverse(+L1,-L2): reverse a list using an accumulator
aleph_reverse(L1, L2) :- revzap(L1, [], L2).
revzap([X|L], L2, L3) :- revzap(L, [X|L2], L3).
revzap([], L, L).
% goals_to_clause(+Goals,-Clause): first goal becomes the head
goals_to_clause((Head,Body),(Head:-Body)):- !.
goals_to_clause(Head,Head).
/**
 * clause_to_list(+Cl:term,-List:list) is det
 *
 * From a clause to a list
 */
clause_to_list((Head:-true),[Head]):- !.
clause_to_list((Head:-Body),[Head|L]):-
!,
goals_to_list(Body,L).
clause_to_list(Head,[Head]).
% extend_clause(+Clause,+Lit,-Clause1): add Lit at the end of the body
extend_clause(false,Lit,(Lit)):- !.
extend_clause((Head:-Body),Lit,(Head:-Body1)):-
!,
app_lit(Lit,Body,Body1).
extend_clause(Head,Lit,(Head:-Lit)).
% app_lit(+L,+Body,-Body1): append literal L to a comma-conjunction
app_lit(L,(L1,L2),(L1,L3)):-
!,
app_lit(L,L2,L3).
app_lit(L,L1,(L1,L)).
% prefix_lits(+L,+Body,-Body1): prepend conjunction L to Body
prefix_lits(L,true,L):- !.
prefix_lits(L,L1,((L),L1)).
% get_goaldiffs(+G1,+G2,-Diffs): Diffs is the suffix of G2 after
% the common prefix shared with G1
get_goaldiffs((G1,G2),(G1,G3),Diffs):-
!,
get_goaldiffs(G2,G3,Diffs).
get_goaldiffs(true,G,G):- !.
get_goaldiffs(G1,(G1,G2),G2).
% nlits(+Clause,-N): number of literals in a clause or conjunction
nlits((_:-B),N):-
!,
nlits(B,N1),
N is N1 + 1.
nlits((_,Lits),N):-
!,
nlits(Lits,N1),
N is N1 + 1.
nlits(_,1).
% list_to_clause(+List,-Clause): inverse of clause_to_list
list_to_clause([Goal],(Goal:-true)):- !.
list_to_clause([Head|Goals],(Head:-Body)):-
list_to_goals(Goals,Body).
% list_to_goals(+List,-Goals): build a comma-conjunction from a list
list_to_goals([Goal],Goal):- !.
list_to_goals([Goal|Goals],(Goal,Goals1)):-
list_to_goals(Goals,Goals1).
/**
 * goals_to_list(-Goals:term,-List:list) is det
 *
 * Converts a conjunction of goals to a list
 *
 */
goals_to_list((true,Goals),T):-
!,
goals_to_list(Goals,T).
goals_to_list((Goal,Goals),[Goal|T]):-
!,
goals_to_list(Goals,T).
goals_to_list(true,[]):- !.
goals_to_list(Goal,[Goal]).
% get_litnums(+First,+Last,-LitNums,M)
% get list of Literal numbers in the bottom clause
get_litnums(LitNum,Last,[],_M):-
LitNum > Last, !.
get_litnums(LitNum,Last,[LitNum|LitNums],M):-
M:'$aleph_sat_litinfo'(LitNum,_,_,_,_,_), !,
NextLit is LitNum + 1,
get_litnums(NextLit,Last,LitNums,M).
% number with no litinfo record: skip it
get_litnums(LitNum,Last,LitNums,M):-
NextLit is LitNum + 1,
get_litnums(NextLit,Last,LitNums,M).
% get_clause(+LitNum,+Last,+TVSoFar,-FAtoms,M)
% collect the flattened atoms of lits LitNum..Last, threading the
% integer-variable substitution TVSoFar through
get_clause(LitNum,Last,_,[],_M):-
LitNum > Last, !.
get_clause(LitNum,Last,TVSoFar,[FAtom|FAtoms],M):-
M:'$aleph_sat_litinfo'(LitNum,_,Atom,_,_,_), !,
get_flatatom(Atom,TVSoFar,FAtom,TV1),
NextLit is LitNum + 1,
get_clause(NextLit,Last,TV1,FAtoms,M).
get_clause(LitNum,Last,TVSoFar,FAtoms,M):-
NextLit is LitNum + 1,
get_clause(NextLit,Last,TVSoFar,FAtoms,M).
% get_flatatom(+Atom,+TVSoFar,-FAtom,-TV1)
% replace integer placeholders in Atom by Prolog variables
get_flatatom(not(Atom),TVSoFar,not(FAtom),TV1):-
!,
get_flatatom(Atom,TVSoFar,FAtom,TV1).
get_flatatom(Atom,TVSoFar,FAtom,TV1):-
functor(Atom,Name,Arity),
functor(FAtom,Name,Arity),
flatten_args(Arity,Atom,FAtom,TVSoFar,TV1).
% get_pclause(+LitNums,+TVSoFar,-Clause,-TV,-Length,-LastDepth,M)
% build a clause from bottom-clause literal numbers (no bottom key);
% Length counts non-equality literals, LastDepth is the depth of the
% last literal
get_pclause([LitNum],TVSoFar,Clause,TV,Length,LastDepth,M):-
!,
get_pclause1([LitNum],TVSoFar,TV,Clause,Length,LastDepth,M).
get_pclause([LitNum|LitNums],TVSoFar,Clause,TV,Length,LastDepth,M):-
get_pclause1([LitNum],TVSoFar,TV1,Head,Length1,_,M),
get_pclause1(LitNums,TV1,TV,Body,Length2,LastDepth,M),
Clause = (Head:-Body),
Length is Length1 + Length2.
get_pclause1([LitNum],TVSoFar,TV1,Lit,Length,LastDepth,M):-
!,
M:'$aleph_sat_litinfo'(LitNum,LastDepth,Atom,_,_,_),
get_flatatom(Atom,TVSoFar,Lit,TV1),
functor(Lit,Name,_),
% equality literals do not count towards clause length
(Name = '='-> Length = 0; Length = 1).
get_pclause1([LitNum|LitNums],TVSoFar,TV2,(Lit,Lits1),Length,LastDepth,M):-
M:'$aleph_sat_litinfo'(LitNum,_,Atom,_,_,_),
get_flatatom(Atom,TVSoFar,Lit,TV1),
get_pclause1(LitNums,TV1,TV2,Lits1,Length1,LastDepth,M),
functor(Lit,Name,_),
(Name = '='-> Length = Length1; Length is Length1 + 1).
% get_pclause/8 and get_pclause1/8: as above, but looking up litinfo
% records stored under an explicit bottom-clause Key
get_pclause([LitNum],Key,TVSoFar,Clause,TV,Length,LastDepth,M):-
!,
get_pclause1([LitNum],Key,TVSoFar,TV,Clause,Length,LastDepth,M).
get_pclause([LitNum|LitNums],Key,TVSoFar,Clause,TV,Length,LastDepth,M):-
get_pclause1([LitNum],Key,TVSoFar,TV1,Head,Length1,_,M),
get_pclause1(LitNums,Key,TV1,TV,Body,Length2,LastDepth,M),
Clause = (Head:-Body),
Length is Length1 + Length2.
get_pclause1([LitNum],Key,TVSoFar,TV1,Lit,Length,LastDepth,M):-
!,
M:'$aleph_sat_litinfo'(LitNum,Key,LastDepth,Atom,_,_,_),
get_flatatom(Atom,TVSoFar,Lit,TV1),
functor(Lit,Name,_),
(Name = '='-> Length = 0; Length = 1).
get_pclause1([LitNum|LitNums],Key,TVSoFar,TV2,(Lit,Lits1),Length,LastDepth,M):-
M:'$aleph_sat_litinfo'(LitNum,Key,_,Atom,_,_,_),
get_flatatom(Atom,TVSoFar,Lit,TV1),
get_pclause1(LitNums,Key,TV1,TV2,Lits1,Length1,LastDepth,M),
functor(Lit,Name,_),
(Name = '='-> Length = Length1; Length is Length1 + 1).
% flatten_args(+Arg,+Atom,?FAtom,+TV,-TV1)
% fill in args Arg..1 of FAtom from Atom, mapping integer placeholders
% to Prolog variables via the Term/Var substitution list TV
flatten_args(0,_,_,TV,TV):- !.
flatten_args(Arg,Atom,FAtom,TV,TV1):-
arg(Arg,Atom,Term),
Arg1 is Arg - 1,
(Term = aleph_const(Const) ->
% wrapped constant: copy it through unchanged
arg(Arg,FAtom,Const),
flatten_args(Arg1,Atom,FAtom,TV,TV1);
(integer(Term) ->
% integer placeholder: look up (or create) its variable
update(TV,Term/Var,TV0),
arg(Arg,FAtom,Var),
flatten_args(Arg1,Atom,FAtom,TV0,TV1);
% compound term: flatten its arguments recursively
(functor(Term,Name,Arity),
functor(FTerm,Name,Arity),
arg(Arg,FAtom,FTerm),
flatten_args(Arity,Term,FTerm,TV,TV0),
flatten_args(Arg1,Atom,FAtom,TV0,TV1)
)
)
).
% returns intersection of S1, S2 and S1-Intersection
intersect1(Elems,[],[],Elems):- !.
intersect1([],_,[],[]):- !.
intersect1([Elem|Elems],S2,[Elem|Intersect],ElemsLeft):-
aleph_member1(Elem,S2), !,
intersect1(Elems,S2,Intersect,ElemsLeft).
intersect1([Elem|Elems],S2,Intersect,[Elem|ElemsLeft]):-
intersect1(Elems,S2,Intersect,ElemsLeft).
% aleph_subset1(+Sub,+Set): every element of Sub is in Set
aleph_subset1([],_).
aleph_subset1([Elem|Elems],S):-
aleph_member1(Elem,S), !,
aleph_subset1(Elems,S).
% aleph_subset2(?Sub,+Set): enumerate subsequences of Set
aleph_subset2([X|Rest],[X|S]):-
aleph_subset2(Rest,S).
aleph_subset2(S,[_|S1]):-
aleph_subset2(S,S1).
aleph_subset2([],[]).
% two sets are equal
equal_set([],[]).
equal_set([H|T],S):-
aleph_delete1(H,S,S1),
equal_set(T,S1), !.
% uniq_insert(+Order,+H,+List,-List1)
% insert H into an Order-sorted list, ignoring duplicates
uniq_insert(_,X,[],[X]).
uniq_insert(descending,H,[H1|T],[H,H1|T]):-
H @> H1, !.
uniq_insert(ascending,H,[H1|T],[H,H1|T]):-
H @< H1, !.
% already present: leave the list unchanged
uniq_insert(_,H,[H|T],[H|T]):- !.
uniq_insert(Order,H,[H1|T],[H1|T1]):-
!,
uniq_insert(Order,H,T,T1).
% quicksort(+Order,+List,-Sorted): Order is ascending/descending;
% duplicates are kept
quicksort(_,[],[]).
quicksort(Order,[X|Tail],Sorted):-
partition(X,Tail,Small,Big),
quicksort(Order,Small,SSmall),
quicksort(Order,Big,SBig),
(Order=ascending-> aleph_append([X|SBig],SSmall,Sorted);
aleph_append([X|SSmall],SBig,Sorted)).
% partition(+Pivot,+List,-Small,-Big): split around Pivot (@> order)
partition(_,[],[],[]).
partition(X,[Y|Tail],[Y|Small],Big):-
X @> Y, !,
partition(X,Tail,Small,Big).
partition(X,[Y|Tail],Small,[Y|Big]):-
partition(X,Tail,Small,Big).
% update_list(+Xs,+List,-List1): insert each X of Xs if not present
update_list([],L,L).
update_list([H|T],L,Updated):-
update(L,H,L1), !,
update_list(T,L1,Updated).
% update(+List,+H,-List1): add H at the end unless already present
update([],H,[H]).
update([H|T],H,[H|T]):- !.
update([H1|T],H,[H1|T1]):-
update(T,H,T1).
% checks if 2 sets intersect
intersects(S1,S2):-
aleph_member(Elem,S1), aleph_member1(Elem,S2), !.
% checks if bitsets represented as lists of intervals intersect
intervals_intersects([L1-L2|_],I):-
intervals_intersects1(L1-L2,I), !.
intervals_intersects([_|I1],I):-
intervals_intersects(I1,I).
% intervals_intersects1(+Interval,+Intervals): Interval overlaps some member
intervals_intersects1(L1-_,[M1-M2|_]):-
L1 >= M1, L1 =< M2, !.
intervals_intersects1(L1-L2,[M1-_|_]):-
M1 >= L1, M1 =< L2, !.
intervals_intersects1(L1-L2,[_|T]):-
intervals_intersects1(L1-L2,T).
% checks if bitsets represented as lists of intervals intersect
% returns first intersection
intervals_intersects([L1-L2|_],I,I1):-
intervals_intersects1(L1-L2,I,I1), !.
intervals_intersects([_|ILeft],I,I1):-
intervals_intersects(ILeft,I,I1).
intervals_intersects1(I1,[I2|_],I):-
interval_intersection(I1,I2,I), !.
intervals_intersects1(I1,[_|T],I):-
intervals_intersects1(I1,T,I).
% interval_intersection(+I1,+I2,-I): I is the overlap of I1 and I2;
% the four clauses cover containment and partial-overlap cases
interval_intersection(L1-L2,M1-M2,L1-L2):-
L1 >= M1, L2 =< M2, !.
interval_intersection(L1-L2,M1-M2,M1-M2):-
M1 >= L1, M2 =< L2, !.
interval_intersection(L1-L2,M1-M2,L1-M2):-
L1 >= M1, M2 >= L1, M2 =< L2, !.
interval_intersection(L1-L2,M1-M2,M1-L2):-
M1 >= L1, M1 =< L2, L2 =< M2, !.
%most of the time no intersection, so optimise on that
% optimisation by James Cussens
% intervals_intersection(+I1,+I2,-I): I is the interval list containing
% exactly the numbers present in both sorted interval lists I1 and I2
intervals_intersection([],_,[]).
intervals_intersection([A-B|T1],[C-D|T2],X) :-
!,
(A > D ->
intervals_intersection([A-B|T1],T2,X);
(C > B ->
intervals_intersection(T1,[C-D|T2],X);
(B > D ->
(C > A ->
X=[C-D|Y];
X=[A-D|Y]
),
intervals_intersection([A-B|T1],T2,Y);
(C > A ->
X=[C-B|Y];
X=[A-B|Y]
),
intervals_intersection(T1,[C-D|T2],Y)
)
)
).
intervals_intersection(_,[],[]).
% finds length of intervals in a list
interval_count([],0).
interval_count([L1-L2|T],N):-
N1 is L2 - L1 + 1,
interval_count(T,N2),
N is N1 + N2.
% also accept the I/Type form used for example sets
interval_count(I/_,N):-
interval_count(I,N).
% interval_select(+N,+List1,-Elem)
% select the Nth elem from an interval list
interval_select(N,[A-B|_],X):-
N =< B - A + 1, !,
X is A + N - 1.
interval_select(N,[A-B|T],X):-
N1 is N - (B - A + 1),
interval_select(N1,T,X).
% interval_sample(+N,List1,-List2)
% get a random sample of N elements from List1
interval_sample(N,List1,List2):-
intervals_to_list(List1,L1),
aleph_rsample(N,L1,L2),
list_to_intervals(L2,List2).
% convert list to intervals
list_to_intervals(List,Intervals):-
sort(List,List1),
list_to_intervals1(List1,Intervals).
list_to_intervals1([],[]).
list_to_intervals1([Start|T],[Start-Finish|I1]):-
list_to_interval(Start,T,Finish,T1),
list_to_intervals1(T1,I1).
% list_to_interval(+Finish,+Rest,-Finish1,-Rest1): extend a run of
% consecutive numbers to its end
list_to_interval(Finish,[],Finish,[]).
list_to_interval(Finish,[Next|T],Finish,[Next|T]):-
Next - Finish > 1,
!.
list_to_interval(_,[Start|T],Finish,Rest):-
list_to_interval(Start,T,Finish,Rest).
% converts an interval-list into a list of (sorted) numbers
intervals_to_list(L,L1):-
intervals_to_list(L,[],L0),
sort(L0,L1), !.
intervals_to_list([],L,L).
intervals_to_list([Interval|Intervals],L1,L2):-
interval_to_list(Interval,L1,L),
intervals_to_list(Intervals,L,L2).
% converts an interval into a list
interval_to_list(Start-Finish,[]):-
Start > Finish, !.
interval_to_list(Start-Finish,[Start|T]):-
Start1 is Start+1,
interval_to_list(Start1-Finish,T).
% converts an interval into a list
% with an accumulator list. Result will be in reverse order
interval_to_list(Start-Finish,L,L):-
Start > Finish, !.
interval_to_list(Start-Finish,L,L1):-
Start1 is Start+1,
interval_to_list(Start1-Finish,[Start|L],L1).
% interval_subsumes(+I1,+I2)
% checks to see if interval I1 subsumes I2
interval_subsumes(Start1-Finish1,Start2-Finish2):-
Start1 =< Start2,
Finish1 >= Finish2.
% interval_subtract(+I1,+I2,-Is): remove I2 from I1, leaving 0, 1 or 2
% residual intervals (I2 is assumed to lie within I1)
interval_subtract(Start1-Finish1,Start1-Finish1,[]):- !.
interval_subtract(Start1-Finish1,Start1-Finish2,[S2-Finish1]):-
!,
S2 is Finish2 + 1.
interval_subtract(Start1-Finish1,Start2-Finish1,[Start1-S1]):-
!,
S1 is Start2 - 1.
interval_subtract(Start1-Finish1,Start2-Finish2,[Start1-S1,S2-Finish1]):-
S1 is Start2 - 1,
S2 is Finish2 + 1,
S1 >= Start1, Finish1 >= S2, !.
% code for set manipulation utilities
% taken from the Yap library
% aleph_ord_subtract(+Set1,+Set2,?Difference)
% is true when Difference contains all and only the elements of Set1
% which are not also in Set2.
aleph_ord_subtract(Set1,[],Set1) :- !.
aleph_ord_subtract([],_,[]) :- !.
aleph_ord_subtract([Head1|Tail1],[Head2|Tail2],Difference) :-
compare(Order,Head1,Head2),
aleph_ord_subtract(Order,Head1,Tail1,Head2,Tail2,Difference).
% aleph_ord_subtract/6: dispatch on the standard order of the two heads.
aleph_ord_subtract(=,_, Tail1,_, Tail2,Difference) :-
aleph_ord_subtract(Tail1,Tail2,Difference).
aleph_ord_subtract(<,Head1,Tail1,Head2,Tail2,[Head1|Difference]) :-
aleph_ord_subtract(Tail1,[Head2|Tail2],Difference).
aleph_ord_subtract(>,Head1,Tail1,_, Tail2,Difference) :-
aleph_ord_subtract([Head1|Tail1],Tail2,Difference).
% aleph_ord_disjoint(+Set1,+Set2)
% is true when the two ordered sets have no element in common. If the
% arguments are not ordered,I have no idea what happens.
aleph_ord_disjoint([],_) :- !.
aleph_ord_disjoint(_,[]) :- !.
aleph_ord_disjoint([Head1|Tail1],[Head2|Tail2]) :-
compare(Order,Head1,Head2),
aleph_ord_disjoint(Order,Head1,Tail1,Head2,Tail2).
% deliberately no clause for Order = '=': a common element means failure.
aleph_ord_disjoint(<,_,Tail1,Head2,Tail2) :-
aleph_ord_disjoint(Tail1,[Head2|Tail2]).
aleph_ord_disjoint(>,Head1,Tail1,_,Tail2) :-
aleph_ord_disjoint([Head1|Tail1],Tail2).
% aleph_ord_union(+Set1,+Set2,?Union)
% is true when Union is the union of Set1 and Set2. Note that when
% something occurs in both sets,we want to retain only one copy.
aleph_ord_union(Set1,[],Set1) :- !.
aleph_ord_union([],Set2,Set2) :- !.
aleph_ord_union([Head1|Tail1],[Head2|Tail2],Union) :-
compare(Order,Head1,Head2),
aleph_ord_union(Order,Head1,Tail1,Head2,Tail2,Union).
aleph_ord_union(=,Head, Tail1,_, Tail2,[Head|Union]) :-
aleph_ord_union(Tail1,Tail2,Union).
aleph_ord_union(<,Head1,Tail1,Head2,Tail2,[Head1|Union]) :-
aleph_ord_union(Tail1,[Head2|Tail2],Union).
aleph_ord_union(>,Head1,Tail1,Head2,Tail2,[Head2|Union]) :-
aleph_ord_union([Head1|Tail1],Tail2,Union).
% aleph_ord_union(+Set1,+Set2,?Union,?Difference)
% is true when Union is the union of Set1 and Set2 and Difference is the
% difference between Set2 and Set1.
aleph_ord_union(Set1,[],Set1,[]) :- !.
aleph_ord_union([],Set2,Set2,Set2) :- !.
aleph_ord_union([Head1|Tail1],[Head2|Tail2],Union,Diff) :-
compare(Order,Head1,Head2),
aleph_ord_union(Order,Head1,Tail1,Head2,Tail2,Union,Diff).
aleph_ord_union(=,Head, Tail1,_, Tail2,[Head|Union],Diff) :-
aleph_ord_union(Tail1,Tail2,Union,Diff).
aleph_ord_union(<,Head1,Tail1,Head2,Tail2,[Head1|Union],Diff) :-
aleph_ord_union(Tail1,[Head2|Tail2],Union,Diff).
aleph_ord_union(>,Head1,Tail1,Head2,Tail2,[Head2|Union],[Head2|Diff]) :-
aleph_ord_union([Head1|Tail1],Tail2,Union,Diff).
% aleph_ord_intersection(+Set1,+Set2,?Intersection)
% Intersection holds the elements common to both ordered sets.
aleph_ord_intersection(_,[],[]) :- !.
aleph_ord_intersection([],_,[]) :- !.
aleph_ord_intersection([Head1|Tail1],[Head2|Tail2],Intersection) :-
compare(Order,Head1,Head2),
aleph_ord_intersection(Order,Head1,Tail1,Head2,Tail2,Intersection).
aleph_ord_intersection(=,Head,Tail1,_,Tail2,[Head|Intersection]) :-
aleph_ord_intersection(Tail1,Tail2,Intersection).
aleph_ord_intersection(<,_,Tail1,Head2,Tail2,Intersection) :-
aleph_ord_intersection(Tail1,[Head2|Tail2],Intersection).
aleph_ord_intersection(>,Head1,Tail1,_,Tail2,Intersection) :-
aleph_ord_intersection([Head1|Tail1],Tail2,Intersection).
% aleph_ord_subset(+Set1,+Set2)
% true when every element of ordered Set1 occurs in ordered Set2.
aleph_ord_subset([], _) :- !.
aleph_ord_subset([Head1|Tail1], [Head2|Tail2]) :-
compare(Order, Head1, Head2),
aleph_ord_subset(Order, Head1, Tail1, Head2, Tail2).
% deliberately no clause for Order = '<': such an element cannot be in Set2.
aleph_ord_subset(=, _, Tail1, _, Tail2) :-
aleph_ord_subset(Tail1, Tail2).
aleph_ord_subset(>, Head1, Tail1, _, Tail2) :-
aleph_ord_subset([Head1|Tail1], Tail2).
% vars_in_term(+Terms,+Acc,-Vars)
% Vars is the sorted list of all variables occurring in a list of
% terms, together with those already in Acc.
vars_in_term([],Vars,Vars1):- sort(Vars,Vars1), !.
vars_in_term([Var|T],VarsSoFar,Vars):-
var(Var), !,
vars_in_term(T,[Var|VarsSoFar],Vars).
vars_in_term([Term|T],VarsSoFar,Vars):-
Term =.. [_|Terms], !,
vars_in_term(Terms,VarsSoFar,V1),
vars_in_term(T,V1,Vars).
vars_in_term([_|T],VarsSoFar,Vars):-
vars_in_term(T,VarsSoFar,Vars).
% occurs_in(+Vars,+Goal)
% true if some variable of Vars occurs in a literal of Goal
% (Goal may be a conjunction of literals).
occurs_in(Vars,(Lit,_)):-
occurs_in(Vars,Lit), !.
occurs_in(Vars,(_,Lits)):-
!,
occurs_in(Vars,Lits).
occurs_in(Vars,Lit):-
functor(Lit,_,Arity),
occurs1(Vars,Lit,1,Arity).
% occurs1(+Vars,+Lit,+Argno,+MaxArgs)
% scan arguments Argno..MaxArgs of Lit for a variable in Vars
% (comparison by identity, ==).
occurs1(Vars,Lit,Argno,MaxArgs):-
Argno =< MaxArgs,
arg(Argno,Lit,Term),
vars_in_term([Term],[],Vars1),
aleph_member(X,Vars), aleph_member(Y,Vars1),
X == Y, !.
occurs1(Vars,Lit,Argno,MaxArgs):-
Argno < MaxArgs,
Next is Argno + 1,
occurs1(Vars,Lit,Next,MaxArgs).
% declare_dynamic(+Name/Arity,+M)
% declare the predicate Name/Arity dynamic in module M.
declare_dynamic(Name/Arity,M):-
M:dynamic(Name/Arity).
% aleph_abolish(+Name/Arity,+M)
% remove all clauses of Name/Arity in module M: retract all clauses
% when the predicate is dynamic, abolish it otherwise.
aleph_abolish(Name/Arity,M):-
functor(Pred,Name,Arity),
(predicate_property(M:Pred,dynamic) ->
retractall(M:Pred);
abolish(M:Name/Arity)).
% AXO: removed because it is annoying and serves no purpose
% aleph_open(+File,+Mode,-Stream)
% like open/3, but fails quietly (instead of raising an error) when a
% file opened for reading does not exist.
aleph_open(File,read,Stream):-
!,
(exists(File) ->
open(File,read,Stream);
fail).
aleph_open(File,Mode,Stream):-
open(File,Mode,Stream).
% clean_up(+M)
% remove all temporary state created during initialisation,
% saturation and reduction in module M.
clean_up(M):-
clean_up_init(M),
clean_up_sat(M),
clean_up_reduce(M).
% remove state recorded at initialisation time.
clean_up_init(M):-
aleph_abolish('$aleph_good'/3,M),
retractall(M:'$aleph_search'(last_good,_)),
aleph_abolish('$aleph_feature'/2,M).
% remove state recorded during saturation.
clean_up_sat(M):-
aleph_abolish('$aleph_sat'/2,M),
aleph_abolish('$aleph_local'/2,M),
aleph_abolish('$aleph_sat_atom'/2,M),
aleph_abolish('$aleph_sat_ovars'/2,M),
aleph_abolish('$aleph_sat_ivars'/2,M),
aleph_abolish('$aleph_sat_varscopy'/3,M),
aleph_abolish('$aleph_sat_varequiv'/3,M),
aleph_abolish('$aleph_sat_terms'/4,M),
aleph_abolish('$aleph_sat_vars'/4,M),
aleph_abolish('$aleph_sat_litinfo'/6,M),
retractall(M:'$aleph_search'(pclause,_)),
garbage_collect.
% remove state recorded during reduction (clause search).
clean_up_reduce(M):-
aleph_abolish('$aleph_local'/2,M),
clean_up_search(M),
retractall(M:'$aleph_search'(pclause,_)),
garbage_collect.
% remove all bookkeeping facts of the current search.
clean_up_search(M):-
retractall(M:'$aleph_search'(bad,_)),
retractall(M:'$aleph_search'(best,_)),
retractall(M:'$aleph_search'(best_label,_)),
retractall(M:'$aleph_search'(clauseprior,_)),
retractall(M:'$aleph_search'(covers,_)),
retractall(M:'$aleph_search'(coversn,_)),
retractall(M:'$aleph_search'(current,_)),
retractall(M:'$aleph_search'(label,_)),
retractall(M:'$aleph_search'(modes,_)),
retractall(M:'$aleph_search'(nextnode,_)),
retractall(M:'$aleph_search'(openlist,_)),
retractall(M:'$aleph_search'(pclause,_)),
retractall(M:'$aleph_search'(selected,_)),
retractall(M:'$aleph_search_seen'(_,_)),
retractall(M:'$aleph_search_expansion'(_,_,_,_)),
retractall(M:'$aleph_search_gain'(_,_,_,_)),
retractall(M:'$aleph_search_node'(_,_,_,_,_,_,_,_)).
% clean_up_examples(+M)
% remove example bookkeeping for every example type.
clean_up_examples(M):-
clean_up_examples(pos,M),
clean_up_examples(neg,M),
clean_up_examples(rand,M).
% remove tree-learning state.
% NOTE(review): "tre" is presumably a typo for "tree"; name kept since
% callers elsewhere in the file may use it - confirm before renaming.
clean_up_tre(M):-
retractall(M:'$aleph_search'(tree,_)),
retractall(M:'$aleph_search'(tree_startdistribution,_)),
retractall(M:'$aleph_search'(tree_leaf,_)),
retractall(M:'$aleph_search'(tree_lastleaf,_)),
retractall(M:'$aleph_search'(tree_newleaf,_)),
retractall(M:'$aleph_search'(tree_besterror,_)),
retractall(M:'$aleph_search'(tree_gain,_)).
% clean_up_examples(+Type,+M)
% remove example bookkeeping for one example type.
clean_up_examples(Type,M):-
retractall(M:'$aleph_global'(size,size(Type,_))),
retractall(M:'$aleph_global'(atoms,atoms(Type,_))),
retractall(M:'$aleph_global'(atoms_left,atoms_left(Type,_))),
retractall(M:'$aleph_global'(last_example,last_example(Type,_))).
% remove the current hypothesis.
clean_up_hypothesis(M):-
retractall(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))).
% depth_bound_call(+Goal,+M)
% call Goal under the depth bound given by the current depth setting;
% the actual bounded call of an atomic goal is depth_bound_call/3
% (defined elsewhere in this file).
depth_bound_call(G,M):-
M:'$aleph_global'(depth,set(depth,D)),
call_with_depth_bound(G,D,M).
% call_with_depth_bound(+Goal,+Depth,+M)
% unfold clause/conjunction structure and bound each atomic goal.
call_with_depth_bound((H:-B),D,M):-
!,
call_with_depth_bound((H,B),D,M).
call_with_depth_bound((A,B),D,M):-
!,
depth_bound_call(A,D,M),
call_with_depth_bound(B,D,M).
call_with_depth_bound(A,D,M):-
depth_bound_call(A,D,M).
% binom_lte(+N,+P,+O,-Prob)
% cumulative binomial probability Prob = P(X =< O) for N trials
% with success probability P.
binom_lte(_,_,O,0.0):- O < 0, !.
binom_lte(N,P,O,Prob):-
binom(N,P,O,Prob1),
O1 is O - 1,
binom_lte(N,P,O1,Prob2),
Prob is Prob1 + Prob2, !.
% binom(+N,+P,+O,-Prob)
% point binomial probability Prob = P(X = O).
binom(N,_,O,0.0):- O > N, !.
binom(N,P,O,Prob):-
aleph_choose(N,O,C),
E1 is P^O,
P2 is 1 - P,
O2 is N - O,
E2 is P2^O2,
Prob is C*E1*E2, !.
% aleph_choose(+N,+I,-V)
% V is the binomial coefficient C(N,I); recurses on the smaller of
% I and N-I for efficiency.
aleph_choose(N,I,V):-
NI is N-I,
(NI > I -> pfac(N,NI,I,V) ; pfac(N,I,NI,V)).
% pfac(+N,+I,+C,-F)
% helper for aleph_choose: F = (N/C)*((N-1)/(C-1))*... down to I.
pfac(0,_,_,1).
pfac(1,_,_,1).
pfac(N,N,_,1).
pfac(N,I,C,F):-
N1 is N-1,
C1 is C-1,
pfac(N1,I,C1,N1F),
F1 is N/C,
F is N1F*F1.
% record_example(+Check,+Type,+Example,-N)
% records Example of type Type
% if Check = check, then checks to see if example exists
% also updates number of related databases accordingly
% if Check = nocheck then no check is done
% returns example number N and Flag
% if Flag = new then example is a new example of Type
record_example(check,Type,Example,N1,M):-
(once(M:example(N1,Type,Example)) -> true;
record_example(nocheck,Type,Example,N1,M),
% extend the atoms/atoms_left interval lists with the new example
(retract(M:'$aleph_global'(atoms,atoms(Type,Atoms))) ->
true;
Atoms = []),
(retract(M:'$aleph_global'(atoms_left,atoms_left(Type,AtomsLeft)))->
true;
AtomsLeft = []),
(retract(M:'$aleph_global'(last_example,last_example(Type,_))) ->
true;
true),
update(Atoms,N1-N1,NewAtoms),
update(AtomsLeft,N1-N1,NewAtomsLeft),
asserta(M:'$aleph_global'(atoms,atoms(Type,NewAtoms))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(Type,
NewAtomsLeft))),
asserta(M:'$aleph_global'(last_example,last_example(Type,N1)))),
!.
record_example(nocheck,Type,Example,N1,M):-
(retract(M:'$aleph_global'(size,size(Type,N)))->
true;
N is 0),
N1 is N + 1,
asserta(M:'$aleph_global'(size,size(Type,N1))),
% non-negative examples are skolemized; negatives are stored as-is
(Type \= neg ->
setting(skolemvars,Sk1,M),
skolemize(Example,Fact,Body,Sk1,SkolemVars),
record_skolemized(Type,N1,SkolemVars,Fact,Body,M),
(Sk1 = SkolemVars -> true;
set(skolemvars,SkolemVars,M));
split_clause(Example,Head,Body),
record_nskolemized(Type,N1,Head,Body,M)), !.
% record_targetpred(+M)
% record every background predicate that has a determination as a
% target predicate, adding a "pclause" test clause for each
% (failure-driven loop over '$aleph_local'(backpred,...)).
record_targetpred(M):-
retract(M:'$aleph_local'(backpred,Name/Arity)),
once(M:'$aleph_global'(determination,determination(Name/Arity,_))),
asserta(M:'$aleph_global'(targetpred,targetpred(Name/Arity))),
record_testclause(Name/Arity,M),
fail.
record_targetpred(_M).
% check_recursive_calls(+M)
% when a target predicate determines itself, enable recursion and
% record a clause allowing recursive calls to the saturated examples.
check_recursive_calls(M):-
M:'$aleph_global'(targetpred,targetpred(Name/Arity)),
M:'$aleph_global'(determination,determination(Name/Arity,Name/Arity)),
record_recursive_sat_call(Name/Arity,M),
set(recursion,true,M),
fail.
check_recursive_calls(_M).
% check_posonly(+M)
% for posonly learning: require a modeh declaration and generate the
% random sample used as pseudo-negatives (unless one already exists).
check_posonly(M):-
M:'$aleph_global'(size,size(rand,N)),
N > 0, !.
check_posonly(M):-
setting(evalfn,posonly,M),
\+(M:'$aleph_global'(modeh,modeh(_,_))),
p1_message('error'),
p_message('missing modeh declaration in posonly mode'), !,
fail.
check_posonly(M):-
% NOTE(review): '$aleph_global'(slp_count,_,_) is arity 3 here while
% all other '$aleph_global' records are arity 2 - confirm intended.
retractall(M:'$aleph_global'(slp_count,_,_)),
retractall(M:'$aleph_local'(slp_sample,_)),
retractall(M:'$aleph_local'(slp_samplenum,_)),
setting(evalfn,posonly,M),
setting(gsamplesize,S,M),
condition_target(M),
M:'$aleph_global'(targetpred,targetpred(Name/Arity)),
gsample(Name/Arity,S,M), !.
check_posonly(_M).
% check_prune_defs(+M)
% note whether the user has supplied a prune/1 definition.
check_prune_defs(M):-
clause(M:prune(_),_), !,
set(prune_defs,true,M).
check_prune_defs(_M).
% check_auto_refine(+M)
% switch to automatic refinement when no bottom clause will be built.
check_auto_refine(M):-
(setting(construct_bottom,reduction,M);setting(construct_bottom,false,M)),
\+(setting(autorefine,true,M)), !,
(setting(refine,user,M) -> true; set(refine,auto,M)).
check_auto_refine(_M).
% check_user_search(+M)
% with a user-defined cost that does not need cover computation,
% clause evaluation can be done lazily.
check_user_search(M):-
setting(evalfn,user,M),
\+(cost_cover_required(M)),
set(lazy_on_cost,true,M), !.
check_user_search(_M).
% check_abducibles(+M)
% add test and abduction clauses for every declared abducible.
check_abducibles(M):-
M:'$aleph_global'(abducible,abducible(Name/Arity)),
record_testclause(Name/Arity,M),
record_abclause(Name/Arity,M),
fail.
check_abducibles(_M).
% cost_cover_required(+M)
% true if the user-defined cost/3 inspects the cover label or the
% cost inside its body, so covers must actually be computed.
cost_cover_required(M):-
clause(M:cost(_,Label,Cost),Body),
vars_in_term([Label],[],Vars),
(occurs_in(Vars,p(Cost)); occurs_in(Vars,Body)), !.
% set_lazy_recalls(+M)
% for each lazily evaluated predicate, record the maximum recall over
% all of its mode declarations ('*' counts as 100); failure-driven.
set_lazy_recalls(M):-
M:'$aleph_global'(lazy_evaluate,lazy_evaluate(Name/Arity)),
functor(Pred,Name,Arity),
% asserta('$aleph_global'(lazy_recall,lazy_recall(Name/Arity,1))),
asserta(M:'$aleph_global'(lazy_recall,lazy_recall(Name/Arity,0))),
M:'$aleph_global'(mode,mode(Recall,Pred)),
M:'$aleph_global'(lazy_recall,lazy_recall(Name/Arity,N)),
(Recall = '*' -> RecallNum = 100; RecallNum = Recall),
RecallNum > N,
retract(M:'$aleph_global'(lazy_recall,lazy_recall(Name/Arity,N))),
asserta(M:'$aleph_global'(lazy_recall,lazy_recall(Name/Arity,RecallNum))),
fail.
set_lazy_recalls(_M).
% set_lazy_on_contradiction(+P,+N,+M)
% enable lazy_on_contradiction unless explicitly disabled; only
% worthwhile when there are at least 100 examples in total.
set_lazy_on_contradiction(_,_,M):-
M:'$aleph_global'(lazy_on_contradiction,set(lazy_on_contradiction,false)), !.
set_lazy_on_contradiction(P,N,M):-
Tot is P + N,
Tot >= 100, !,
set(lazy_on_contradiction,true,M).
set_lazy_on_contradiction(_,_,_M).
% The "pclause" trick: much more effective with the use of recorded/3
% clause for testing partial clauses obtained in search
% only needed when learning recursive theories or
% proof_strategy is not restricted_sld.
% record_testclause(+Name/Arity,+M)
% assert a clause for Name/Arity whose body runs the currently
% recorded partial clause ('$aleph_search'(pclause,...)).
record_testclause(Name/Arity,M):-
functor(Head,Name,Arity),
Clause = (Head:-
'$aleph_search'(pclause,pclause(Head,Body)),
Body, !),
assertz(M:Clause).
% The "pclause" trick for abducible predicates
% record_abclause(+Name/Arity,+M)
record_abclause(Name/Arity,M):-
functor(Head,Name,Arity),
Clause = (Head:-
'$aleph_search'(abduced,pclause(Head,Body)),
Body, !),
assertz(M:Clause).
% clause for incorporating recursive calls into bottom clause
% this is done by allowing calls to the positive examples
% record_recursive_sat_call(+Name/Arity,+M)
record_recursive_sat_call(Name/Arity,M):-
functor(Head,Name,Arity),
Clause = (Head:-
'$aleph_global'(stage,set(stage,saturation)),
'$aleph_sat'(example,example(Num,Type)),
example(Num1,Type,Head),
Num1 \= Num, !), % to prevent tautologies
assertz(M:Clause).
% skolemize(+Clause,-SHead,-SBody,+Start,-SkolemVars)
% ground a copy of Clause by numbering its variables ('$VAR'/1 terms)
% starting at Start; SBody is the body as a list of literals.
skolemize((Head:-Body),SHead,SBody,Start,SkolemVars):-
!,
copy_term((Head:-Body),(SHead:-Body1)),
numbervars((SHead:-Body1),Start,SkolemVars),
goals_to_list(Body1,SBody).
skolemize(UnitClause,Lit,[],Start,SkolemVars):-
copy_term(UnitClause,Lit),
numbervars(Lit,Start,SkolemVars).
% skolemize(+UnitClause,-Lit): numbering starts at 0.
skolemize(UnitClause,Lit):-
skolemize(UnitClause,Lit,[],0,_).
% record_nskolemized(+Type,+N1,+Head,+Body,+M)
% store a non-skolemized (negative) example as example/3, keeping the
% body when there is one.
record_nskolemized(Type,N1,Head,true,M):-
!,
assertz(M:example(N1,Type,Head)).
record_nskolemized(Type,N1,Head,Body,M):-
assertz(M:(example(N1,Type,Head):-Body)).
% record_skolemized(+Type,+N1,+SkolemVars,+Head,+Body,+M)
% store a skolemized example and add its body literals as background.
record_skolemized(Type,N1,SkolemVars,Head,Body,M):-
assertz(M:example(N1,Type,Head)),
functor(Head,Name,Arity),
update_backpreds(Name/Arity,M),
add_backs(Body,M),
add_skolem_types(SkolemVars,Head,Body,M).
% add_backs(+Lits,+M)
% assert each body literal as (dynamic) background knowledge.
add_backs([],_M).
add_backs([Lit|Lits],M):-
asserta(M:'$aleph_global'(back,back(Lit))),
functor(Lit,Name,Arity),
declare_dynamic(Name/Arity,M),
assertz(M:Lit),
add_backs(Lits,M).
% add_skolem_types(+SkolemVars,+Head,+Body,+M)
% assert type facts for skolem constants appearing in Head/Body,
% as dictated by the mode declarations.
add_skolem_types(10000,_,_,_M):- !. % no new skolem variables
add_skolem_types(_,Head,Body,M):-
add_skolem_types([Head],M),
add_skolem_types(Body,M).
% add_skolem_types(+Lits,+M): per-literal traversal.
add_skolem_types([],_M).
add_skolem_types([Lit|Lits],M):-
functor(Lit,PSym,Arity),
get_modes(PSym/Arity,L,M),
add_skolem_types1(L,Lit,M),
add_skolem_types(Lits,M).
% add_skolem_types1(+Modes,+Fact,+M): per-mode traversal.
add_skolem_types1([],_,_M).
add_skolem_types1([Lit|Lits],Fact,M):-
split_args(Lit,_,I,O,C,M),
add_skolem_types2(I,Fact,M),
add_skolem_types2(O,Fact,M),
add_skolem_types2(C,Fact,M),
add_skolem_types1(Lits,Fact,M).
% add_skolem_types2(+ArgPositions,+Literal,+M)
% assert Type(Arg) once for each typed argument position.
add_skolem_types2([],_,_M).
add_skolem_types2([Pos/Type|Rest],Literal,M):-
tparg(Pos,Literal,Arg),
SkolemType =.. [Type,Arg],
(M:'$aleph_global'(back,back(SkolemType))-> true;
asserta(M:'$aleph_global'(back,back(SkolemType))),
asserta(M:SkolemType)),
add_skolem_types2(Rest,Literal,M).
% copy_args(+Term1,+Term2,+From,+To)
% unify argument positions From..To of Term1 with the corresponding
% argument positions of Term2.
copy_args(_,_,I,Max):-
I > Max, !.
copy_args(Src,Dst,I,Max):-
arg(I,Src,A),
arg(I,Dst,A),
I1 is I + 1,
copy_args(Src,Dst,I1,Max).
% copy_iargs(+Arg,+Old,+New,+Out)
% unify argument positions Arg down to 1 of Old with those of New,
% skipping the argument at position Out.
copy_iargs(0,_,_,_):- !.
copy_iargs(Arg,Old,New,Arg):-
!,
Arg1 is Arg - 1,
copy_iargs(Arg1,Old,New,Arg).
copy_iargs(Arg,Old,New,Out):-
arg(Arg,Old,Val),
arg(Arg,New,Val),
Arg1 is Arg - 1,
copy_iargs(Arg1,Old,New,Out).
% index_clause(+Clause,-NextClause,-IndexedClause,+M)
% assign the next clause number to Clause; unit clauses (body true)
% are stored as their head only.
index_clause((Head:-true),NextClause,(Head),M):-
!,
retract(M:'$aleph_global'(last_clause,last_clause(ClauseNum))),
NextClause is ClauseNum + 1,
asserta(M:'$aleph_global'(last_clause,last_clause(NextClause))).
index_clause(Clause,NextClause,Clause,M):-
retract(M:'$aleph_global'(last_clause,last_clause(ClauseNum))),
NextClause is ClauseNum + 1,
asserta(M:'$aleph_global'(last_clause,last_clause(NextClause))).
% update_backpreds(+Name/Arity,+M)
% remember Name/Arity as a head (target) predicate, at most once.
update_backpreds(Name/Arity,M):-
M:'$aleph_local'(backpred,Name/Arity), !.
update_backpreds(Name/Arity,M):-
assertz(M:'$aleph_local'(backpred,Name/Arity)).
% reset_counts(+M): reset last term/variable counters for saturation.
reset_counts(M):-
retractall(M:'$aleph_sat'(lastterm,_)),
retractall(M:'$aleph_sat'(lastvar,_)),
asserta(M:'$aleph_sat'(lastterm,0)),
asserta(M:'$aleph_sat'(lastvar,0)), !.
% reset the number of successes for a literal: cut to avoid useless backtrack
reset_succ(M):-
retractall(M:'$aleph_local'(last_success,_)),
asserta(M:'$aleph_local'(last_success,0)), !.
% skolem_var(+Var,+M)
% Var is already a skolem constant (atom whose name starts with '$',
% char code 36), or gets bound to a fresh '$N' atom.
skolem_var(Var,_M):-
atomic(Var), !,
name(Var,[36|_]).
skolem_var(Var,M):-
gen_var(Num,M),
name(Num,L),
name(Var,[36|L]).
% gen_var(-Var1,+M): next fresh variable number (counting up from 0).
gen_var(Var1,M):-
retract(M:'$aleph_sat'(lastvar,Var0)), !,
Var1 is Var0 + 1,
asserta(M:'$aleph_sat'(lastvar,Var1)).
gen_var(0,M):-
asserta(M:'$aleph_sat'(lastvar,0)).
% copy_var(+OldVar,-NewVar,+Depth,+M)
% create a fresh copy of a saturation variable, recording the copy
% relation at the given depth.
copy_var(OldVar,NewVar,Depth,M):-
gen_var(NewVar,M),
M:'$aleph_sat_vars'(OldVar,TNo,_,_),
asserta(M:'$aleph_sat_vars'(NewVar,TNo,[],[])),
asserta(M:'$aleph_sat_varscopy'(NewVar,OldVar,Depth)).
% gen_litnum(-Lit1,+M): next literal number (counting up from 0).
gen_litnum(Lit1,M):-
retract(M:'$aleph_sat'(lastlit,Lit0)), !,
Lit1 is Lit0 + 1,
asserta(M:'$aleph_sat'(lastlit,Lit1)).
gen_litnum(0,M):-
asserta(M:'$aleph_sat'(lastlit,0)).
% gen_nlitnum(-Lit1,+M): next negative-literal number (down from -1).
gen_nlitnum(Lit1,M):-
retract(M:'$aleph_sat'(lastnlit,Lit0)), !,
Lit1 is Lit0 - 1,
asserta(M:'$aleph_sat'(lastnlit,Lit1)).
gen_nlitnum(-1,M):-
asserta(M:'$aleph_sat'(lastnlit,-1)).
% generate a new feature number
% provided it is less than the maximum number of features allowed
gen_featurenum(Feature1,M):-
M:'$aleph_feature'(last_feature,Feature0), !,
Feature1 is Feature0 + 1,
setting(max_features,FMax,M),
Feature1 =< FMax,
retract(M:'$aleph_feature'(last_feature,Feature0)),
asserta(M:'$aleph_feature'(last_feature,Feature1)).
gen_featurenum(1,M):-
asserta(M:'$aleph_feature'(last_feature,1)).
% gen_lits(+Lits,-Nums,+M): number each literal, recording empty
% litinfo entries for later completion.
gen_lits([],[],_M).
gen_lits([Lit|Lits],[LitNum|Nums],M):-
gen_litnum(LitNum,M),
asserta(M:'$aleph_sat_litinfo'(LitNum,0,Lit,[],[],[])),
gen_lits(Lits,Nums,M).
% update_theory(-ClauseIndex,+M)
% move the current hypothesis into the theory under a new clause
% number; with lazy_on_cost its label is recomputed first.
update_theory(ClauseIndex,M):-
retract(M:'$aleph_global'(hypothesis,hypothesis(OldLabel,Hypothesis,
OldPCover,OldNCover))),
index_clause(Hypothesis,ClauseIndex,Clause,M),
% Seed is the selected example, else the first atom of the positive
% cover (bound later through the partial list PCover).
(M:'$aleph_global'(example_selected,example_selected(_,Seed))-> true;
PCover = [Seed-_|_]),
(setting(lazy_on_cost,true,M) ->
nlits(Clause,L),
label_create(Clause,Label,M),
extract_pos(Label,PCover),
extract_neg(Label,NCover),
interval_count(PCover,PC),
interval_count(NCover,NC),
setting(evalfn,Evalfn,M),
complete_label(Evalfn,Clause,[PC,NC,L],NewLabel,M),
assertz(M:'$aleph_global'(theory,theory(ClauseIndex,
NewLabel/Seed,Clause,
PCover,NCover)));
assertz(M:'$aleph_global'(theory,theory(ClauseIndex,
OldLabel/Seed,Clause,
OldPCover,OldNCover)))),
add_clause_to_background(ClauseIndex,M).
% add_clause_to_background(+ClauseIndex,+M)
% add the indexed theory clause to the background provided it meets
% the minpos/minscore thresholds and updateback is not false.
add_clause_to_background(ClauseIndex,M):-
M:'$aleph_global'(theory,theory(ClauseIndex,Label/_,Clause,_,_)),
(setting(minpos,PMin,M) -> true; PMin = 1),
Label = [PC,_,_,F|_],
PC >= PMin,
setting(minscore,MinScore,M),
F >= MinScore, !,
(retract(M:'$aleph_global'(rules,rules(Rules)))->
asserta(M:'$aleph_global'(rules,rules([ClauseIndex|Rules])));
asserta(M:'$aleph_global'(rules,rules([ClauseIndex])))),
(setting(updateback,Update,M) -> true; Update = true),
(Update = true -> assertz(M:Clause); true), !.
add_clause_to_background(_,_M).
% rm_seeds(+M)
% install the current hypothesis as a theory clause and remove the
% examples it covers from the atoms left.
rm_seeds(M):-
update_theory(ClauseIndex,M), !,
M:'$aleph_global'(theory,theory(ClauseIndex,_,_,PCover,NCover)),
rm_seeds(pos,PCover,M),
(setting(evalfn,posonly,M) -> rm_seeds(rand,NCover,M); true),
M:'$aleph_global'(atoms_left,atoms_left(pos,PLeft)),
interval_count(PLeft,PL),
p1_message('atoms left'), p_message(PL),
!.
rm_seeds(_M).
% rm_seeds(+Type,+RmIntervals,+M)
% remove covered examples from atoms_left; in exhaustive feature
% construction only the seed example is removed.
rm_seeds(pos,PCover,M):-
setting(construct_features,true,M),
setting(feature_construction,exhaustive,M), !,
retract(M:'$aleph_global'(atoms_left,atoms_left(pos,OldIntervals))),
(M:'$aleph_global'(example_selected,example_selected(_,Seed))-> true;
PCover = [Seed-_|_]),
rm_seeds1([Seed-Seed],OldIntervals,NewIntervals),
assertz(M:'$aleph_global'(atoms_left,atoms_left(pos,NewIntervals))).
rm_seeds(Type,RmIntervals,M) :-
retract(M:'$aleph_global'(atoms_left,atoms_left(Type,OldIntervals))),
rm_seeds1(RmIntervals,OldIntervals,NewIntervals),
assertz(M:'$aleph_global'(atoms_left,atoms_left(Type,NewIntervals))).
% rm_seeds1(+RmIntervals,+OldIntervals,-NewIntervals)
rm_seeds1([],Done,Done).
rm_seeds1([Start-Finish|Rest],OldIntervals,NewIntervals) :-
rm_interval(Start-Finish,OldIntervals,MidIntervals),!,
rm_seeds1(Rest,MidIntervals,NewIntervals).
% update lower estimate on maximum size cover set for an atom
% update_coverset(+Type,+?,+M)
update_coverset(Type,_,M):-
M:'$aleph_global'(hypothesis,hypothesis(Label,_,PCover,_)),
Label = [_,_,_,GainE|_],
arithmetic_expression_value(GainE,Gain),
worse_coversets(PCover,Type,Gain,Worse,M),
(Worse = [] -> true;
update_theory(NewClause,M),
update_coversets(Worse,NewClause,Type,Label,M)).
% revise coversets of previous atoms
% worse_coversets(+Intervals,+Type,+Gain,-Worse,+M)
% Worse lists covered atoms whose recorded max_set gain is below Gain;
% empty unless the maxcover setting is true.
worse_coversets(_,_,_,[],M):-
\+(M:'$aleph_global'(maxcover,set(maxcover,true))), !.
worse_coversets([],_,_,[],_M).
worse_coversets([Interval|Intervals],Type,Gain,Worse,M):-
worse_coversets1(Interval,Type,Gain,W1,M),
worse_coversets(Intervals,Type,Gain,W2,M),
aleph_append(W2,W1,Worse), !.
% worse_coversets1(+Interval,+Type,+Gain,-Worse,+M): per-atom scan.
worse_coversets1(Start-Finish,_,_,[],_M):-
Start > Finish, !.
worse_coversets1(Start-Finish,Type,Gain,Rest,M):-
M:'$aleph_global'(max_set,max_set(Type,Start,Label1,_)),
Label1 = [_,_,_,Gain1E|_],
arithmetic_expression_value(Gain1E,Gain1),
Gain1 >= Gain, !,
Next is Start + 1,
worse_coversets1(Next-Finish,Type,Gain,Rest,M), !.
worse_coversets1(Start-Finish,Type,Gain,[Start|Rest],M):-
Next is Start + 1,
worse_coversets1(Next-Finish,Type,Gain,Rest,M), !.
% update_coversets(+Atoms,+ClauseNum,+Type,+Label,+M)
% record ClauseNum/Label as the new max_set entry for each atom.
update_coversets([],_,_,_,_M).
update_coversets([Atom|Atoms],ClauseNum,Type,Label,M):-
(retract(M:'$aleph_global'(max_set,max_set(Type,Atom,_,_)))->
true;
true),
asserta(M:'$aleph_global'(max_set,max_set(Type,Atom,Label,ClauseNum))),
update_coversets(Atoms,ClauseNum,Type,Label,M), !.
% rm_intervals(+ToRemove,+Intervals,-Result)
rm_intervals([],I,I).
rm_intervals([I1|I],Intervals,Result):-
rm_interval(I1,Intervals,Intervals1),
rm_intervals(I,Intervals1,Result), !.
% rm_interval(+I1,+Intervals,-Result)
% remove the sub-range I1 from every overlapping interval.
rm_interval(_,[],[]).
rm_interval(I1,[Interval|Rest],Intervals):-
interval_intersection(I1,Interval,I2), !,
interval_subtract(Interval,I2,I3),
rm_interval(I1,Rest,I4),
aleph_append(I4,I3,Intervals).
rm_interval(I1,[Interval|Rest],[Interval|Intervals]):-
rm_interval(I1,Rest,Intervals).
% gen_sample(+Type,+N,M)
% select N random samples from the set of examples uncovered. Type is one of pos/neg
% if N = 0 returns first example in Set
% resamples the same example R times where set(resample,R)
gen_sample(Type,0,M):-
!,
M:'$aleph_global'(atoms_left,atoms_left(Type,[ExampleNum-_|_])),
retractall(M:'$aleph_global'(example_selected,example_selected(_,_))),
p1_message('select example'), p_message(ExampleNum),
(setting(resample,Resample,M) -> true; Resample = 1),
gen_sample(Resample,Type,ExampleNum,M).
gen_sample(Type,SampleSize,M):-
M:'$aleph_global'(atoms_left,atoms_left(Type,Intervals)),
% p1_message('select from'), p_message(Intervals),
interval_count(Intervals,AtomsLeft),
N is min(AtomsLeft,SampleSize),
assertz(M:'$aleph_local'(sample_num,0)),
retractall(M:'$aleph_global'(example_selected,example_selected(_,_))),
(setting(resample,Resample,M) -> true; Resample = 1),
% failure-driven loop: keep drawing random, not-yet-selected examples
% until N distinct examples have been recorded
repeat,
M:'$aleph_local'(sample_num,S1),
S is S1 + 1,
(S =< N ->
get_random(AtomsLeft,INum),
select_example(INum,0,Intervals,ExampleNum),
\+(M:'$aleph_global'(example_selected,
example_selected(Type,ExampleNum))),
p1_message('select example'), p_message(ExampleNum),
retract(M:'$aleph_local'(sample_num,S1)),
assertz(M:'$aleph_local'(sample_num,S)),
gen_sample(Resample,Type,ExampleNum,M),
fail;
retract(M:'$aleph_local'(sample_num,S1))), !.
% gen_sample(+R,+Type,+ExampleNum,+M)
% record ExampleNum as selected R times (resampling).
gen_sample(0,_,_,_M):- !.
gen_sample(R,Type,ExampleNum,M):-
assertz(M:'$aleph_global'(example_selected,
example_selected(Type,ExampleNum))),
R1 is R - 1,
gen_sample(R1,Type,ExampleNum,M).
% select_example(+N,+Seen,+Intervals,-Example)
% map ordinal N (with Seen examples already counted) onto the concrete
% example number inside a list of Start-Finish intervals.
select_example(N,Seen,[Lo-Hi|_],Example):-
N =< Seen + Hi - Lo + 1, !,
Example is N - Seen + Lo - 1.
select_example(N,Seen,[Lo-Hi|More],Example):-
Seen1 is Seen + Hi - Lo + 1,
select_example(N,Seen1,More,Example).
% get_random(+Last,-Num)
% get a random integer between 1 and Last
% (rounds aleph_random's float and clamps the result into 1..Last)
get_random(Last,INum):-
aleph_random(X),
INum1 is integer(X*Last + 0.5),
(INum1 = 0 ->
INum = 1;
(INum1 > Last ->
INum = Last;
INum = INum1
)
).
% get_rrandom(+Last,-Num)
% get a random floating point number between 1 and Last
get_rrandom(Last,Num):-
aleph_random(X),
Num is X*Last.
% distrib(+Interval,+Prob,-Distrib)
% build a discrete distribution Distrib by pairing every element of
% the Lo-Hi interval with the same probability Prob (as Prob-Elem).
distrib(Lo-Hi,_,[]):- Lo > Hi, !.
distrib(Lo-Hi,Prob,[Prob-Lo|Pairs]):-
Next is Lo + 1,
distrib(Next-Hi,Prob,Pairs).
% draw_element(+D,-E)
% draw element E using distribution D
% D is a list specifying the probability of each element E
% in the form p1-e1, p2-e2, ... ,pn-en
% proportions pi are normalised to add to 1
draw_element(D,E):-
normalise_distribution(D,Distr),
aleph_random(X),
draw_element(Distr,0,X,E).
% draw_element(+Distr,+CumProb,+X,-E)
% walk the cumulative distribution until random value X is covered.
draw_element([P1-E1|T],CumProb,X,E):-
CumProb1 is CumProb + P1,
(X =< CumProb1 -> E = E1;
draw_element(T,CumProb1,X,E)).
% normalise_distribution(+D,-Distr)
% scale the proportions so they sum to 1 and sort by key;
% left unchanged if the sum is 0.
normalise_distribution(D,Distr):-
key_sum(D,Sum),
(0.0 is float(Sum) -> Distr = D;
normalise_distribution(D,Sum,D1),
keysort(D1,Distr)).
% key_sum(+Pairs,-Sum): float sum of the keys of a Key-Value list.
key_sum([],0.0).
key_sum([K1-_|T],Sum):-
key_sum(T,S1),
Sum is float(K1 + S1).
% normalise_distribution(+Pairs,+Sum,-Scaled): divide each key by Sum.
normalise_distribution([],_,[]).
normalise_distribution([K1-X1|T],Sum,[K2-X1|T1]):-
K2 is K1/Sum,
normalise_distribution(T,Sum,T1).
% random_select(-Num,+List1,-List2)
% randomly remove an element Num from List1 to give List2
random_select(X,[X],[]):- !.
random_select(X,L,Left):-
length(L,N),
N > 0,
get_random(N,I),
aleph_remove_nth(I,L,X,Left).
% random_nselect(+Num,+List1,-List2)
% randomly remove Num elements from List1 to give List2
% (stops early when List1 is exhausted)
random_nselect(0,_,[]):- !.
random_nselect(_,[],[]):- !.
random_nselect(N,List1,[X|List2]):-
random_select(X,List1,Left),
N1 is N - 1,
random_nselect(N1,Left,List2).
% random_select_from_intervals(-Num,+IList)
% randomly select an element from an interval list
random_select_from_intervals(N,IList):-
interval_count(IList,L),
get_random(L,X),
interval_select(X,IList,N).
% normal(+Mean,+Sigma,-X)
% sample X from a normal distribution with the given mean and sigma.
normal(Mean,Sigma,X):-
std_normal(X1),
X is Mean + Sigma*X1.
% get_normal(+N,+Mean,+Sigma,-Xs): list of N independent normal samples.
get_normal(0,_,_,[]):- !.
get_normal(N,Mean,Sigma,[X|Xs]):-
N > 0,
normal(Mean,Sigma,X),
N1 is N - 1,
get_normal(N1,Mean,Sigma,Xs).
% Polar method for generating random variates
% from a standard normal distribution.
% From A.M. Law and W.D. Kelton, "Simulation Modeling and Analysis",
% McGraw-Hill,2000
std_normal(X):-
aleph_random(U1),
aleph_random(U2),
V1 is 2*U1 - 1,
V2 is 2*U2 - 1,
W is V1^2 + V2^2,
(W > 1 -> std_normal(X);
Y is sqrt((-2.0*log(W))/W),
X is V1*Y).
% Approximate method for computing the chi-square value
% given the d.f. and probability (to the right). Uses
% a normal approximation and Monte-Carlo simulation.
% The normal approximation used is the one proposed by
% E.B. Wilson and M.M. Hilferty (1931). "The distribution of chi-square"
% PNAS, 17, 684.
% Monte-Carlo simulation uses 1000 trials.
% chi_square(+DF,+Prob,-ChisqVal)
chi_square(DF,Prob,ChisqVal):-
DF > 0,
Mean is 1 - 2/(9*DF),
Sigma is sqrt(2/(9*DF)),
NTrials is 1000,
get_normal(NTrials,Mean,Sigma,X),
sort(X,Z),
ProbLeft is 1.0 - Prob,
Index is integer(ProbLeft*NTrials),
(Index > NTrials ->
aleph_remove_nth(NTrials,Z,Val,_);
aleph_remove_nth(Index,Z,Val,_)),
ChisqVal is DF*(Val^3).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% L A B E L S A N D E V A L F N S
%
% label_create(+Clause,-Label,+M)
% Label is [PCover,NCover,Length] for Clause over all positive
% examples and all negative (or rand, in posonly mode) examples.
label_create(Clause,Label,M):-
M:'$aleph_global'(last_example,last_example(pos,Last1)),
Type1 = pos,
(setting(evalfn,posonly,M) ->
M:'$aleph_global'(last_example,last_example(rand,Last2)),
Type2 = rand;
M:'$aleph_global'(last_example,last_example(neg,Last2)),
Type2 = neg),
label_create(Clause,Type1,[1-Last1],Type2,[1-Last2],Label,M).
% label_create(+Type,+Clause,-Label,+M)
% label over all examples of a single type.
label_create(Type,Clause,Label,M):-
M:'$aleph_global'(last_example,last_example(Type,Last)),
label_create(Clause,Type,[1-Last],Label,M).
% label_create(+Clause,+Type1,+Set1,+Type2,+Set2,-Label,+M)
% prove Clause against both example sets (via the pclause trick)
% and assemble the resulting label.
label_create(Clause,Type1,Set1,Type2,Set2,Label,M):-
split_clause(Clause,Head,Body),
nlits((Head,Body),Length),
assertz(M:'$aleph_search'(pclause,pclause(Head,Body))),
setting(depth,Depth,M),
setting(prooftime,Time,M),
setting(proof_strategy,Proof,M),
prove(Depth/Time/Proof,Type1,(Head:-Body),Set1,Cover1,_,M),
prove(Depth/Time/Proof,Type2,(Head:-Body),Set2,Cover2,_,M),
retractall(M:'$aleph_search'(pclause,_)),
assemble_label(Cover1,Cover2,Length,Label), !.
% label_create(+Clause,+Type,+Set,-Label,+M)
% like label_create/7, but for a single example set of type Type;
% the unknown positions of the label are filled with 'unknown'.
label_create(Clause,Type,Set,Label,M):-
split_clause(Clause,Head,Body),
assertz(M:'$aleph_search'(pclause,pclause(Head,Body))),
setting(depth,Depth,M),
setting(prooftime,Time,M),
setting(proof_strategy,Proof,M),
% BUGFIX: the clause passed to prove/7 was (Head:-Body,M), which
% spliced the module atom M into the body as a goal; it must be
% (Head:-Body), as in label_create/7 above.
prove(Depth/Time/Proof,Type,(Head:-Body),Set,Cover,_,M),
retractall(M:'$aleph_search'(pclause,_)),
(Type = pos ->
assemble_label(Cover,unknown,unknown,Label);
assemble_label(unknown,Cover,unknown,Label)).
% label_pcover(+Label,-P): positive cover of Label as a number list.
label_pcover(Label,P):-
extract_cover(pos,Label,P).
% label_ncover(+Label,-N): negative cover of Label as a number list.
label_ncover(Label,N):-
extract_cover(neg,Label,N).
% label_union(+Label1,+Label2,-Label)
% merge the covers and add the lengths of two labels.
label_union([],Label,Label):- !.
label_union(Label,[],Label):- !.
label_union(Label1,Label2,Label):-
extract_cover(pos,Label1,Pos1),
extract_cover(pos,Label2,Pos2),
extract_cover(neg,Label1,Neg1),
extract_cover(neg,Label2,Neg2),
extract_length(Label1,L1),
extract_length(Label2,L2),
update_list(Pos2,Pos1,Pos),
update_list(Neg2,Neg1,Neg),
Length is L1 + L2,
list_to_intervals(Pos,PCover),
list_to_intervals(Neg,NCover),
assemble_label(PCover,NCover,Length,Label).
% label_print_examples(+Type,+Label,+M): show the examples covered.
label_print_examples(Type,Label,M):-
extract_cover(Type,Label,C),
examples(Type,C,M).
% label_print_eval(+Label,+M): print the coverage evaluation of Label.
label_print_eval([],_M):- !.
label_print_eval(Label,M):-
Eval = coverage,
evalfn(Eval,Label,Val,M),
print_eval(Eval,Val).
% print_eval(+Evalfn,+Val): print "Name Val" for an evaluation function.
print_eval(Evalfn,Val):-
evalfn_name(Evalfn,Name),
p1_message(Name), p_message(Val).
% eval_rule(+ClauseNum,-Label,+M)
% evaluate and print the coverage of a single rule: rule 0 is the
% current hypothesis, positive numbers index theory clauses.
eval_rule(0,Label,M):-
M:'$aleph_global'(hypothesis,hypothesis(_,Clause,_,_)), !,
label_create(Clause,Label,M),
p_message('Rule 0'),
pp_dclause(Clause,M),
extract_count(pos,Label,PC),
extract_count(neg,Label,NC),
extract_length(Label,L),
label_print_eval([PC,NC,L],M),
nl.
eval_rule(ClauseNum,Label,M):-
integer(ClauseNum),
ClauseNum > 0,
M:'$aleph_global'(theory,theory(ClauseNum,_,Clause,_,_)),
!,
label_create(Clause,Label,M),
extract_count(pos,Label,PC),
extract_count(neg,Label,NC),
concat(['Rule ',ClauseNum],RuleTag),
(setting(evalfn,posonly,M) ->
concat(['Pos cover = ',PC,' Rand cover = ',NC],CoverTag);
concat(['Pos cover = ',PC,' Neg cover = ',NC],CoverTag)),
p1_message(RuleTag), p_message(CoverTag),
pp_dclause(Clause,M),
setting(verbosity,V,M),
% at verbosity >= 2 also list the examples covered
(V >= 2 ->
p_message('positive examples covered'),
label_print_examples(pos,Label,M),
p_message('negative examples covered'),
label_print_examples(neg,Label,M);
true),
nl.
eval_rule(_,_,_M).
% evalfn(+Label,-Val,+M)
% evaluate Label with the currently configured evaluation function,
% defaulting to coverage when the evalfn setting is absent.
evalfn(Label,Val,M):-
% BUGFIX: the else-branch read "Eval=coverage,M", calling the module
% atom M as a goal; only the unification was intended.
(setting(evalfn,Eval,M)->true;Eval=coverage),
evalfn(Eval,Label,Val,M).
% evalfn_name(+Evalfn,-PrintableName)
% human-readable name for each evaluation function.
evalfn_name(compression,'compression').
evalfn_name(coverage,'pos-neg').
evalfn_name(accuracy,'accuracy').
evalfn_name(wracc,'novelty').
evalfn_name(laplace,'laplace estimate').
evalfn_name(pbayes,'pseudo-bayes estimate').
evalfn_name(auto_m,'m estimate').
evalfn_name(mestimate,'m estimate').
evalfn_name(mse,'mse').
evalfn_name(posonly,'posonly bayes estimate').
evalfn_name(entropy,'entropy').
evalfn_name(gini,'gini value').
evalfn_name(sd,'standard deviation').
evalfn_name(user,'user defined cost').
% evalfn(+Evalfn,+[P,N,L|_],-Val,+M)
% value of evaluation function Evalfn given positive cover P,
% negative cover N and clause length L; P = -inf marks a pruned clause.
evalfn(compression,[P,N,L|_],Val,_M):-
(P = -inf -> Val is -inf;
Val is P - N - L + 1), !.
evalfn(coverage,[P,N,_|_],Val,_M):-
(P = -inf -> Val is -inf;
Val is P - N), !.
evalfn(laplace,[P,N|_],Val,_M):-
(P = -inf -> Val is 0.5;
Val is (P + 1) / (P + N + 2)), !.
% the evaluation function below is due to Steve Moyle's implementation
% of the work by Lavrac, Flach and Zupan
evalfn(wracc,[P,N|_],Val,M):-
(M:'$aleph_search'(clauseprior,Total-[P1-pos,_]) ->
Val is P/Total - (P1/Total)*((P+N)/Total);
Val is -0.25), !.
evalfn(entropy,[P,N|_],Val,_M):-
(P = -inf -> Val is 1.0;
% BUGFIX: bracket the disjunction so the P = 0 case also yields
% Val = 0.0; previously "((P is 0); (N is 0) -> ...)" parsed as
% "(P is 0) ; ((N is 0) -> ...)" because ';' (priority 1100)
% binds looser than '->' (1050), leaving Val unbound when P = 0.
((P is 0; N is 0) -> Val is 0.0;
Total is P + N,
P1 is P/Total,
Q1 is 1-P1,
Val is -(P1*log(P1) + Q1*log(Q1))/log(2)
)
), !.
evalfn(gini,[P,N|_],Val,_M):-
(P = -inf -> Val is 1.0;
Total is P + N,
P1 is P/Total,
Val is 2*P1*(1-P1)), !.
evalfn(accuracy,[P,N|_],Val,_M):-
(P = -inf -> Val is 0.5;
Val is P / (P + N)), !.
% the evaluation functions below are due to James Cussens
evalfn(pbayes,[P,N|_],Val,M):-
(P = -inf -> Val is 0.5;
Acc is P/(P+N),
setting(prior,PriorD,M),
normalise_distribution(PriorD,NPriorD),
aleph_member1(Prior-pos,NPriorD),
(0 is Prior-Acc ->
Val=Prior;
K is (Acc*(1 - Acc)) / ((Prior-Acc)^2 ),
Val is (P + K*Prior) / (P + N + K))), !.
evalfn(posonly,[P,0,L|_],Val,M):-
M:'$aleph_global'(size,size(rand,RSize)),
Val is log(P) + log(RSize+2.0) - (L+1)/P, !.
evalfn(auto_m,[P,N|_],Val,M):-
(P = -inf -> Val is 0.5;
Cover is P + N,
setting(prior,PriorD,M),
normalise_distribution(PriorD,NPriorD),
aleph_member1(Prior-pos,NPriorD),
K is sqrt(Cover),
Val is (P + K*Prior) / (Cover+K)), !.
evalfn(mestimate,[P,N|_],Val,M):-
(P = -inf -> Val is 0.5;
Cover is P + N,
setting(prior,PriorD,M),
normalise_distribution(PriorD,NPriorD),
aleph_member1(Prior-pos,NPriorD),
(setting(m,MM,M) -> K = MM; K is sqrt(Cover)),
Val is (P + K*Prior) / (Cover+K)), !.
% fallback: any other evaluation function scores -inf.
evalfn(_,_,X,_M):- X is -inf.
% assemble_label(+P,+N,+L,-Label): pack cover counts into a label list.
assemble_label(P,N,L,[P,N,L]).
% extract_cover(+Type,+Label,-List): expand the interval representation
% of the pos/neg cover in Label into an explicit list of example numbers.
extract_cover(pos,[P,_,_],P1):-
intervals_to_list(P,P1), !.
extract_cover(neg,[_,N,_],N1):-
intervals_to_list(N,N1),!.
% NOTE(review): clause below is extract_cover/2 while the others are /3;
% verify a caller actually uses the 2-argument form.
extract_cover(_,[]).
% extract_count(+Type,+Label,-Count): count examples in the cover.
extract_count(pos,[P,_,_],P1):-
interval_count(P,P1), !.
extract_count(neg,[_,N,_],N1):-
interval_count(N,N1), !.
% NOTE(review): fallback exists only for neg; a pos label that fails the
% first clause has no default — confirm this is intended.
extract_count(neg,_,0).
% Positional accessors into a label [P,N,L|_].
extract_pos([P|_],P).
extract_neg([_,N|_],N).
extract_length([_,_,L|_],L).
% get_start_label(+Evalfn,-Label,+M)
% Initial label/score used to seed a search, per evaluation function.
get_start_label(_,[0,0,0,F],M):-
(setting(interactive,true,M); setting(search,ic,M)), !,
F is -inf.
get_start_label(user,[1,0,2,F],_M):- !, F is -inf.
get_start_label(entropy,[1,0,2,-0.5],_M):- !.
get_start_label(gini,[1,0,2,-0.5],_M):- !.
get_start_label(wracc,[1,0,2,-0.25],_M):- !.
get_start_label(Evalfn,[1,0,2,Val],M):-
evalfn(Evalfn,[1,0,2],Val,M).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% I / O S T U F F
% read_all(+Prefix)
% read background and examples
% read_all(M:Prefix): initialise module M, then load Prefix.b, Prefix.f
% and Prefix.n (see construct_name/4 for the extension logic).
read_all(M:Prefix):-
initialize(M),
read_all(Prefix,Prefix,Prefix,M).
% read_all/2 and read_all/3 largely
% provided by Stasinos Konstantopoulos and Mark Reid
% NOTE(review): the clause below calls read_all/3, which is not defined
% in this region of the file; it may be missing the module argument
% (read_all(BPrefix,EPrefix,EPrefix,M)) — verify against the full file.
read_all(BPrefix,EPrefix):-
read_all(BPrefix,EPrefix,EPrefix).
% read_all(+Back,+Pos,+Neg,+M)
% Full load sequence: clean state, consult background, consult examples,
% then run the post-load consistency checks.
read_all(Back,Pos,Neg,M):-
clean_up(M),
reset(M),
read_background(Back,M),
read_examples(Pos,Neg,M),
record_targetpred(M),
check_recursive_calls(M),
check_prune_defs(M),
check_user_search(M),
check_posonly(M),
check_auto_refine(M),
check_abducibles(M).
% read_background(+Back,+M)
% Consult the background file (name built via construct_name/4) into M
% and broadcast that the background has been loaded.
read_background(Back,M):-
construct_name(background,Back,File,M),
consult(M:File),
broadcast(background(loaded)).
% read_examples(+Pos,+Neg,+M)
% Load positive and negative example files. If train_pos/train_neg are
% already set, those file names are used verbatim (extensions disabled);
% otherwise the names are derived from Pos/Neg and recorded.
% Afterwards: set lazy recalls, compute the class prior from the
% pos/neg sizes (unless a prior is already set) and reset counters.
read_examples(Pos,Neg,M):-
(setting(train_pos,PosF,M) ->
set(use_file_extensions,false,M),
read_examples_files(pos,PosF,_,M),
noset(use_file_extensions,M);
read_examples_files(pos,Pos,PosF,M),
set(train_pos,PosF,M)
),
(setting(train_neg,NegF,M) ->
set(use_file_extensions,false,M),
read_examples_files(neg,NegF,_,M),
noset(use_file_extensions,M);
read_examples_files(neg,Neg,NegF,M),
set(train_neg,NegF,M)
),
M:'$aleph_global'(size,size(pos,P)),
M:'$aleph_global'(size,size(neg,N)),
set_lazy_recalls(M),
(setting(prior,_,M) -> true;
normalise_distribution([P-pos,N-neg],Prior),
set(prior,Prior,M)
),
reset_counts(M),
asserta(M:'$aleph_global'(last_clause,last_clause(0))),
broadcast(examples(loaded)).
% aleph_read_pos_examples(+Type,+M)
% Reset the example bookkeeping for Type and record the size/interval/
% last-example facts.
% NOTE(review): size(Type,0) is asserted and then immediately re-read,
% so N is 0 and Ex = [] unless examples are recorded in between; the
% commented-out code below suggests this is debug scaffolding — verify
% intended behaviour against the callers.
aleph_read_pos_examples(Type,M) :-
broadcast(background(loaded)),
clean_up_examples(Type,M),
asserta(M:'$aleph_global'(size,size(Type,0))),
M:'$aleph_global'(size,size(Type,N)),
(N > 0 -> Ex = [1-N]; Ex = []),
asserta(M:'$aleph_global'(atoms,atoms(Type,Ex))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(Type,Ex))),
asserta(M:'$aleph_global'(last_example,last_example(Type,N))).
% aleph_read_neg_examples(+Type,+M): as above for negative examples.
aleph_read_neg_examples(Type,M) :-
clean_up_examples(Type,M),
asserta(M:'$aleph_global'(size,size(Type,0))),
/*
record_example(nocheck,neg,eastbound(west1),_),
record_example(nocheck,neg,eastbound(west2),_),
record_example(nocheck,neg,eastbound(west3),_),
record_example(nocheck,neg,eastbound(west4),_),
record_example(nocheck,neg,eastbound(west5),N1),
*/
%my_record_examples(Type),
%findall(C,M:inc(C),L),
M:'$aleph_global'(size,size(Type,N)),
(N > 0 -> Ex = [1-N]; Ex = []),
asserta(M:'$aleph_global'(atoms,atoms(Type,Ex))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(Type,Ex))),
asserta(M:'$aleph_global'(last_example,last_example(Type,N))).
% read_examples_files(+Type,+Name,-F,+M)
% Read examples of Type from one file or a list of files; after reading,
% record the covering interval [1-N] and the index of the last example.
read_examples_files(Type,Name,F,M):-
clean_up_examples(Type,M),
asserta(M:'$aleph_global'(size,size(Type,0))),
(Name = [_|_] ->
read_examples_from_files(Name,Type,F,M);
read_examples_from_file(Type,Name,F,M)),
M:'$aleph_global'(size,size(Type,N)),
(N > 0 -> Ex = [1-N]; Ex = []),
asserta(M:'$aleph_global'(atoms,atoms(Type,Ex))),
asserta(M:'$aleph_global'(atoms_left,atoms_left(Type,Ex))),
asserta(M:'$aleph_global'(last_example,last_example(Type,N))).
% read_examples_from_files(+Names,+Type,-FileNames,+M)
% Map read_examples_from_file/4 over a list of file name prefixes.
read_examples_from_files([],_,[],_M).
read_examples_from_files([Name|Files],Type,[FileName|FileNames],M):-
read_examples_from_file(Type,Name,FileName,M),
read_examples_from_files(Files,Type,FileNames,M).
% read_examples_from_file(+Type,+Name,-File,+M)
% Repeat/fail loop: read terms from File until end_of_file, recording
% each as an example. The final clause returns '?' when the file could
% not be opened (the open failure message has already been printed).
read_examples_from_file(Type,Name,File,M):-
construct_name(Type,Name,File,M),
(aleph_open(File,read,Stream) ->
concat(['consulting ',Type, ' examples'],Mess),
p1_message(Mess), p_message(File);
p1_message('cannot open'), p_message(File),
fail),
repeat,
read(Stream,Example),
(Example=end_of_file-> close(Stream);
record_example(nocheck,Type,Example,_,M),
fail),
!.
read_examples_from_file(_,_,'?',_M).
% construct_name(+Type,+Prefix,-Name,+M)
% Build a file name from Prefix plus the Type-specific extension,
% unless use_file_extensions is false (then the prefix is used as-is).
construct_name(_,Name,Name,M):-
setting(use_file_extensions,false,M), !.
construct_name(Type,Prefix,Name,_M):-
name(Prefix,PString),
file_extension(Type,SString),
aleph_append(SString,PString,FString),
name(Name,FString).
% file_extension(+Type,-SuffixCodes): conventional Aleph extensions.
file_extension(pos,Suffix):- name('.f',Suffix).
file_extension(neg,Suffix):- name('.n',Suffix).
file_extension(background,Suffix):- name('.b',Suffix).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% M I S C. D E F I N I T I O N S
% execute(+C): run shell command C via system/1; the second clause makes
% the predicate always succeed, silently ignoring command failure.
execute(C):-
system(C), !.
execute(_).
% store critical values of current search state
% store(searchstate,M): snapshot atoms_left, sizes, noise and minacc
% into '$aleph_global'(save, save(searchstate,...)) records so that
% reinstate(searchstate,M) can restore them later. Each component is
% saved only if currently present.
store(searchstate,M):-
!,
retractall(M:'$aleph_global'(save,save(searchstate,_))),
(M:'$aleph_global'(atoms_left,atoms_left(pos,PosLeft)) ->
asserta(M:'$aleph_global'(save,
save(searchstate,atoms_left(pos,PosLeft))));
true),
(M:'$aleph_global'(atoms_left,atoms_left(neg,NegLeft)) ->
asserta(M:'$aleph_global'(save,
save(searchstate,atoms_left(neg,NegLeft))));
true),
(M:'$aleph_global'(size,size(pos,PSize)) ->
asserta(M:'$aleph_global'(save,
save(searchstate,size(pos,PSize))));
true),
(M:'$aleph_global'(size,size(neg,NSize)) ->
asserta(M:'$aleph_global'(save,
save(searchstate,size(neg,NSize))));
true),
(M:'$aleph_global'(noise,set(noise,Noise)) ->
asserta(M:'$aleph_global'(save,
save(searchstate,set(noise,Noise))));
true),
(M:'$aleph_global'(minacc,set(minacc,MinAcc)) ->
asserta(M:'$aleph_global'(save,
save(searchstate,set(minacc,MinAcc))));
true).
% store current bottom clause
% store(bottom,M): if the store_bottom parameter is set, save all the
% info associated with the current bottom clause (see store_bottom/1).
% FIX: the original called store_bottom/0, which is not defined; the
% bottom-saving predicate in this file is store_bottom/1, taking the
% input module M.
store(bottom,M):-
!,
(M:'$aleph_global'(store_bottom,set(store_bottom,true)) ->
store_bottom(M);
true).
% store(+Parameter,+M): save the current value of a parameter (or the
% atom 'unknown' if unset) so reinstate/2 can restore it.
store(Parameter,M):-
(M:'$aleph_global'(Parameter,set(Parameter,Value)) -> true; Value = unknown),
retractall(M:'$aleph_global'(save,save(Parameter,_))),
asserta(M:'$aleph_global'(save,save(Parameter,Value))).
% store values of a list of parameters
store_values([],_M).
store_values([Parameter|T],M):-
store(Parameter,M),
store_values(T,M).
% store all relevant info related to current bottom
% details are stored in 5 idbs:
% 1. bottom: points to 2 other idbs sat_X_n and lits_X_N
% 2. sat_X_N: where X is the type of the current example and N the number
% this contains misc stuff recorded by sat/2 for use by reduce/1
% 3. lits_X_N: contains the lits in bottom
% 4. ovars_X_N: contains output vars of lits in bottom
% 5. ivars_X_N: contains input vars of lits in bottom
% store_bottom(+M): failure-driven copy of every '$aleph_sat*' record
% into its Key-indexed stored form.
% FIX: all reads of '$aleph_sat', '$aleph_sat_ivars', '$aleph_sat_ovars'
% and '$aleph_sat_litinfo' are now qualified with M, matching how these
% records are asserted here and read everywhere else in this file; the
% unqualified calls consulted the wrong module's database.
store_bottom(M):-
bottom_key(Num,Type,Key,true,M),
asserta(M:'$aleph_sat'(stored,stored(Num,Type,Key))),
M:'$aleph_sat'(lastterm,LastTerm),
% NOTE(review): key below is 'lasterm' (not 'lastterm') — kept as-is;
% confirm the corresponding reader uses the same spelling.
asserta(M:'$aleph_sat'(lasterm,Key,LastTerm)),
M:'$aleph_sat'(lastvar,LastVar),
asserta(M:'$aleph_sat'(lastvar,Key,LastVar)),
M:'$aleph_sat'(botsize,BotSize),
asserta(M:'$aleph_sat'(botsize,Key,BotSize)),
M:'$aleph_sat'(lastlit,LastLit),
asserta(M:'$aleph_sat'(lastlit,Key,LastLit)),
M:'$aleph_sat'(hovars,HOVars),
asserta(M:'$aleph_sat'(hovars,Key,HOVars)),
M:'$aleph_sat'(hivars,HIVars),
asserta(M:'$aleph_sat'(hivars,Key,HIVars)),
M:'$aleph_sat'(eq,Eq),
asserta(M:'$aleph_sat'(eq,Key,Eq)),
M:'$aleph_sat_ivars'(Lit,IVars),
asserta(M:'$aleph_sat_ivars'(Lit,Key,IVars)),
M:'$aleph_sat_ovars'(Lit,OVars),
asserta(M:'$aleph_sat_ovars'(Lit,Key,OVars)),
M:'$aleph_sat_litinfo'(Lit,Depth,Atom,I,O,D),
asserta(M:'$aleph_sat_litinfo'(Lit,Key,Depth,Atom,I,O,D)),
fail.
store_bottom(_M).
% reinstate(searchstate,M): restore the search-state snapshot taken by
% store(searchstate,M), then discard the saved records.
reinstate(searchstate,M):-
!,
retractall(M:'$aleph_global'(atoms_left,atoms_left(_,_))),
retractall(M:'$aleph_global'(size,size(_,_))),
(M:'$aleph_global'(save,save(searchstate,atoms_left(pos,PosLeft))) ->
asserta(M:'$aleph_global'(atoms_left,atoms_left(pos,PosLeft)));
true),
(M:'$aleph_global'(save,save(searchstate,atoms_left(neg,NegLeft))) ->
asserta(M:'$aleph_global'(atoms_left,atoms_left(neg,NegLeft)));
true),
(M:'$aleph_global'(save,save(searchstate,size(pos,PSize))) ->
asserta(M:'$aleph_global'(size,size(pos,PSize)));
true),
(M:'$aleph_global'(save,save(searchstate,size(neg,NSize))) ->
asserta(M:'$aleph_global'(size,size(neg,NSize)));
true),
(M:'$aleph_global'(save,save(searchstate,set(noise,Noise))) ->
set(noise,Noise,M);
true),
(M:'$aleph_global'(save,save(searchstate,set(minacc,MinAcc))) ->
set(minacc,MinAcc,M);
true),
retractall(M:'$aleph_global'(save,save(searchstate,_))).
% reinstate(+Parameter,+M): restore a single saved parameter value;
% 'unknown' means the parameter was unset when stored.
reinstate(Parameter,M):-
retract(M:'$aleph_global'(save,save(Parameter,Value))), !,
(Value = unknown -> noset(Parameter,M); set(Parameter,Value,M)).
reinstate(_,_M).
% reinstate list of values of parameters
reinstate_values([],_M).
reinstate_values([Parameter|T],M):-
reinstate(Parameter,M),
reinstate_values(T,M).
% reinstate all saved values
% reinstate_values(+M): repeat/fail loop that retracts and restores
% every remaining save(Parameter,Value) record; the final clause makes
% the predicate succeed when there is nothing to restore.
reinstate_values(M):-
reinstate_file_streams(M),
M:'$aleph_global'(save,save(_,_)),
repeat,
retract(M:'$aleph_global'(save,save(Parameter,Value))),
(Value = unknown -> noset(Parameter,M) ; set(Parameter,Value,M)),
\+(M:'$aleph_global'(save,save(_,_))),
!.
reinstate_values(_M).
% reinstate_file_streams(+M): re-apply recordfile/goodfile settings so
% their output streams are re-opened (set/3 has that side effect);
% failure-driven so both clauses run.
reinstate_file_streams(M):-
setting(recordfile,File,M),
set(recordfile,File,M),
fail.
reinstate_file_streams(M):-
setting(goodfile,File,M),
set(goodfile,File,M),
fail.
reinstate_file_streams(_M).
% bottom_key(?N,?T,-Key,-Flag)
% returns key that indexes bottom clause info for example N of type T
% Flag is one of "true" or "false" depending on whether bottom
% requires storing
% If N and T are unbound they are taken from the current saturated
% example. Key is the atom T_N when store_bottom is set and the bottom
% is not already stored; otherwise Key is 'false'.
bottom_key(N,T,Key,Flag,M):-
((var(N),var(T)) ->
M:'$aleph_sat'(example,example(N,T));
true),
(setting(store_bottom,true,M) ->
(M:'$aleph_sat'(stored,stored(N,T,Key)) ->
Flag = false;
concat([T,'_',N],Key),
Flag = true
);
Key = false,
Flag = false).
/**
 * aleph_set(:Parameter:atomic,+Value:term) is det
 *
 * Sets the value of a parameter.
 */
aleph_set(M:Variable,Value):-
set(Variable,Value,M).
% set(+Variable,+Value,+M)
% Validate, normalise inf/+inf/-inf to float infinities, record the
% setting in M, broadcast it, and run any parameter-specific side
% effects (special_consideration/3).
set(Variable,Value,M):-
check_setting(Variable,Value),
(Value = inf -> V is inf;
(Value = +inf -> V is inf;
(Value = -inf -> V is -inf; V = Value)
)
),
retractall(M:'$aleph_global'(Variable,set(Variable,_))),
assertz(M:'$aleph_global'(Variable,set(Variable,V))),
broadcast(set(Variable,V)),
special_consideration(Variable,Value,M).
/**
 * aleph_setting(:Parameter:atomic,+Value:term) is det
 *
 * Reads the value of a parameter.
 */
aleph_setting(M:Variable,Value):-
setting(Variable,Value,M).
% setting(+Variable,-Value,+M): explicit setting first, else default.
setting(Variable,Value,M):-
nonvar(Variable),
M:'$aleph_global'(Variable,set(Variable,Value1)), !,
Value = Value1.
setting(Variable,Value,_M):-
default_setting(Variable,Value).
% noset(+Variable,+M): remove a setting, undo its side effects, and
% restore the default value if one exists.
noset(M:Variable):-
noset(Variable,M).
noset(Variable,M):-
nonvar(Variable),
retract(M:'$aleph_global'(Variable,set(Variable,Value))), !,
rm_special_consideration(Variable,Value,M),
set_default(Variable,M).
noset(_,_M).
/**
 * man(-Manual:URL) is det
 *
 * returns manual URL
 *
 */
man(M):-
aleph_manual(M).
% determinations(?Pred1,?Pred2,+M): enumerate recorded determinations.
determinations(Pred1,Pred2,M):-
M:'$aleph_global'(determination,determination(Pred1,Pred2)).
% determination(+Pred1,+Pred2,+M): record a determination (no-op if it
% already exists); adding one invalidates automatic refinement, hence
% noset(autorefine,M).
determination(Pred1,Pred2,M):-
nonvar(Pred1),
M:'$aleph_global'(determination,determination(Pred1,Pred2)), !.
determination(Pred1,Pred2,M):-
noset(autorefine,M),
assertz(M:'$aleph_global'(determination,determination(Pred1,Pred2))),
(nonvar(Pred1) ->
update_backpreds(Pred1,M);
true).
/**
 * abducible(:Pred:term) is det
 *
 * Pred is of the form N/A, where the atom N is the name of the predicate, and A its arity.
 * Specifies that ground atoms with symbol N/A can be abduced if required.
 */
abducible(Mod:Nm/Ar):-
abducible(Nm/Ar,Mod).
% abducible(+N/A,+Mod): record the declaration on Mod's blackboard.
abducible(Nm/Ar,Mod):-
Fact = abducible(Nm/Ar),
assertz(Mod:'$aleph_global'(abducible,Fact)).
/**
 * commutative(:Pred:term) is det
 *
 * Pred is of the form N/A, where the atom N is the name of the predicate, and A its arity.
 * Specifies that literals with symbol N/A are commutative.
 */
commutative(Mod:Nm/Ar):-
commutative(Nm/Ar,Mod).
% commutative(+N/A,+Mod): record the declaration on Mod's blackboard.
commutative(Nm/Ar,Mod):-
Fact = commutative(Nm/Ar),
assertz(Mod:'$aleph_global'(commutative,Fact)).
/**
 * symmetric(:Pred:term) is det
 *
 * Pred is of the form N/A, where the atom N is the name of the predicate, and A its arity.
 * Specifies that literals with symbol N/A are symmetric.
 */
symmetric(Mod:Nm/Ar):-
symmetric(Nm/Ar,Mod).
% symmetric(+N/A,+Mod): record the declaration on Mod's blackboard.
symmetric(Nm/Ar,Mod):-
Fact = symmetric(Nm/Ar),
assertz(Mod:'$aleph_global'(symmetric,Fact)).
/**
 * lazy_evaluate(:Pred:term) is det
 *
 * Pred V is of the form N/A, where the atom N is the name of the predicate, and A its arity.
 * Specifies that outputs and constants for literals with symbol N/A are to be evaluated
 * lazily during the search. This is particularly useful if the constants required
 * cannot be obtained from the bottom clause constructed by using a single example.
 * During the search, the literal is called with a list containing a pair of lists for each
 * input argument representing `positive' and `negative' substitutions obtained
 * for the input arguments of the literal. These substitutions are obtained by executing
 * the partial clause without this literal on the positive and negative examples.
 * The user needs to provide a definition capable of processing a call with a list of
 * list-pairs in each argument, and how the outputs are to be computed from such information.
 * For further details see A. Srinivasan and R. Camacho, Experiments in numerical reasoning with
 * ILP, Jnl. Logic Programming.
 */
lazy_evaluate(M:Name/Arity):-
lazy_evaluate(Name/Arity,M).
lazy_evaluate(Name/Arity,M):-
assertz(M:'$aleph_global'(lazy_evaluate,lazy_evaluate(Name/Arity))).
/**
 * model(:Pred:term) is det
 *
 * Pred is of the form N/A, where the atom N is the name of the predicate, and A its arity.
 * Specifies that predicate N/A will be used to construct and execute models
 * in the leaves of model trees. This automatically results in predicate N/A being
 * lazily evaluated (see lazy_evaluate/1).
 */
model(M:Name/Arity):-
model(Name/Arity,M).
model(Name/Arity,M):-
assertz(M:'$aleph_global'(model,model(Name/Arity))).
/**
 * positive_only(:Pred:term) is det
 *
 * Pred is of the form N/A, where the atom N is the name of the predicate,
 * and A its arity. States that only positive substitutions are required
 * during lazy evaluation of literals with symbol N/A.
 * This saves some theorem-proving effort.
 */
positive_only(M:Name/Arity):-
positive_only(Name/Arity,M).
positive_only(Name/Arity,M):-
assertz(M:'$aleph_global'(positive_only,positive_only(Name/Arity))).
/**
 * mode(:Recall:int,+PredicateMode:term) is det
 *
 * Declare the mode of call for predicates that can appear in any clause hypothesised by Aleph
 */
mode(M:Recall,Pred):-
mode(Recall,Pred,M).
% mode/3 declares the template for both head and body positions.
mode(Recall,Pred,M):-
modeh(Recall,Pred,M),
modeb(Recall,Pred,M).
% modes(?N/A,?Mode,+M): enumerate recorded modeh/modeb declarations
% whose template has functor N/A.
modes(N/A,Mode,M):-
Mode = modeh(_,Pred),
M:'$aleph_global'(modeh,Mode),
functor(Pred,N,A).
modes(N/A,Mode,M):-
Mode = modeb(_,Pred),
M:'$aleph_global'(modeb,Mode),
functor(Pred,N,A).
/**
 * modeh(:Recall:int,+PredicateMode:term) is det
 *
 * Recall is one of: a positive integer or *. Mode is a mode template as in a mode/2 declaration.
 * Declares a mode for the head of a hypothesised clause. Required when evalfn is posonly.
 */
modeh(M:Recall,Pred):-
modeh(Recall,Pred,M).
% modeh/3: no-op if the mode already exists; otherwise record it,
% disable automatic refinement and register the head predicate.
modeh(Recall,Pred,M):-
(M:'$aleph_global'(mode,mode(Recall,Pred)) -> true;
noset(autorefine,M),
assertz(M:'$aleph_global'(modeh,modeh(Recall,Pred))),
assertz(M:'$aleph_global'(mode,mode(Recall,Pred))),
functor(Pred,Name,Arity),
update_backpreds(Name/Arity,M)).
/**
 * modeb(:Recall:int,+PredicateMode:term) is det
 *
 * Recall is one of: a positive integer or *. Mode is a mode template as in a mode/2 declaration.
 * Declares a mode for a literal in the body of a hypothesised clause.
 */
modeb(M:Recall,Pred):-
modeb(Recall,Pred,M).
% modeb/3: record a body mode (and its generic mode/2 form) if new.
modeb(Recall,Pred,M):-
(M:'$aleph_global'(modeb,modeb(Recall,Pred)) -> true;
noset(autorefine,M),
assertz(M:'$aleph_global'(modeb,modeb(Recall,Pred))),
(M:'$aleph_global'(mode,mode(Recall,Pred)) -> true;
assertz(M:'$aleph_global'(mode,mode(Recall,Pred))))).
% add_determinations(+PSym,Stratified)
% add determination declarations for a background predicate
% these are obtained from the determinations of the target predicate
% If Stratified is true then only stratified definitions are allowed
add_determinations(PSym,Stratified,M):-
M:'$aleph_global'(targetpred,targetpred(Target)),
determinations(Target,OtherPred,M),
(Stratified = true -> OtherPred \= Target; true),
determination(PSym,OtherPred,M),
fail.
add_determinations(_,_,_M).
% add_modes(+PSym)
% add modes declarations for a (new) predicate
% these are obtained from the modes of the target predicate
% Failure-driven: for every mode of the target, build the same mode
% with the new predicate name and call modeh/3 or modeb/3 via =.. .
add_modes(Name/_,M):-
M:'$aleph_global'(targetpred,targetpred(Target)),
modes(Target,Mode,M),
Mode =.. [ModeType,Recall,TargetMode],
TargetMode =.. [_|Args],
PredMode =.. [Name|Args],
NewMode =.. [ModeType,Recall,PredMode,M],
call(NewMode),
fail.
add_modes(_,_M).
% feature(?Id,-Feature,+M): retrieve a stored feature as (Template:-Body).
feature(Id,Feature,M):-
M:'$aleph_feature'(feature,feature(Id,_,_,Template,Body)),
Feature = (Template:-Body).
% gen_feature(+Feature,+Label,+Class,+M): store a new feature clause
% under a freshly generated feature number.
gen_feature(Feature,Label,Class,M):-
nonvar(Feature), !,
gen_featurenum(Id,M),
split_clause(Feature,Template,Body),
assertz(M:'$aleph_feature'(feature,feature(Id,Label,Class,Template,Body))).
/**
 * show(+V:atomic) is det
 *
 * Different values of V result in showing the following.
 * - bottom Current bottom clause.
 * - constraints Constraints found by induce_constraints.
 * - determinations Current determination declarations.
 * - features Propositional features constructed from good clauses found so far.
 * - gcws Hypothesis constructed by the gcws procedure.
 * - good Good clauses found in searches conducted so far (good clauses all have a utility above that specified by minscore).
 * - hypothesis Current hypothesised clause.
 * - modes Current mode declarations (including all modeh and modeb declarations).
 * - modehs Current modeh declarations.
 * - modebs Current modeb declarations.
 * - neg Current negative examples.
 * - pos Current positive examples.
 * - posleft Positive examples not covered by theory so far.
 * - rand Current randomly-generated examples (used when evalfn is posonly).
 * - search Current search (requires definition for portray(search)).
 * - settings Current parameter settings.
 * - sizes Current sizes of positive and negative examples.
 * - theory Current theory constructed.
 * - test_neg Examples in the file associated with the parameter test_neg.
 * - test_pos Examples in the file associated with the parameter test_pos.
 * - train_neg Examples in the file associated with the parameter train_neg.
 * - train_pos Examples in the file associated with the parameter train_pos.
 * - Name/Arity Current definition of the predicate Name/Arity.
 */
show(M:S):-
show(S,M).
% show(settings,M): failure-driven print of all settings, sorted.
show(settings,M):-
nl,
p_message('settings'),
findall(P-V,M:'$aleph_global'(P,set(P,V)),L),
sort(L,L1),
aleph_member(Parameter-Value,L1),
tab(8), write(Parameter=Value), nl,
fail.
show(determinations,M):-
nl,
p_message('determinations'),
show_global(determination,determination(_,_),M).
show(modes,M):-
nl,
p_message('modes'),
show_global(mode,mode(_,_),M).
show(modehs,M):-
nl,
p_message('modehs'),
show_global(modeh,modeh(_,_),M).
show(modebs,M):-
nl,
p_message('modebs'),
show_global(modeb,modeb(_,_),M).
show(sizes,M):-
nl,
p_message('sizes'),
show_global(size,size(_,_),M).
% show(bottom,M): print the current bottom clause (only if verbosity > 0).
show(bottom,M):-
nl,
p_message('bottom clause'),
setting(verbosity,V,M),
V > 0,
M:'$aleph_sat'(lastlit,Last),
get_clause(1,Last,[],FlatClause,M),
pp_dlist(FlatClause,M).
% show(theory,M): failure-driven print/evaluation of each theory clause,
% then (second clause) the overall performance summary.
show(theory,M):-
nl,
p_message('theory'),
nl,
M:'$aleph_global'(rules,rules(L)),
aleph_reverse(L,L1),
aleph_member(ClauseNum,L1),
M:'$aleph_global'(theory,theory(ClauseNum,_,_,_,_)),
eval_rule(ClauseNum,_,M),
% pp_dclause(Clause),
fail.
show(theory,M):-
get_performance(M).
% show(pos,M) / show(neg,M): list the examples; greedy-related state is
% saved and restored around the listing.
show(pos,M):-
nl,
p_message('positives'),
store(greedy,M),
examples(pos,_,M),
reinstate(greedy,M),
fail.
% show(posleft,M): positives not entailed by the current theory.
show(posleft,M):-
nl,
p_message('positives left'),
M:example(_,pos,Atom),
\+(Atom),
write(Atom), write('.'), nl,
fail.
show(neg,M):-
nl,
p_message('negatives'),
store(greedy,M),
examples(neg,_,M),
reinstate(greedy,M),
fail.
show(rand,M):-
nl,
p_message('random'),
examples(rand,_,M),
fail.
show(uspec,M):-
nl,
p_message('uspec'),
examples(uspec,_,M),
fail.
show(gcws,M):-
nl,
p_message('gcws hypothesis'),
M:'$aleph_search'(gcwshyp,hypothesis(_,C,_,_)),
pp_dclause(C,M),
fail.
show(abgen,M):-
nl,
p_message('abduced hypothesis'),
M:'$aleph_search'(abgenhyp,hypothesis(_,AbGen,_,_)),
aleph_member(C,AbGen),
pp_dclause(C,M),
fail.
% show(hypothesis,M) / show(search,M): delegate to the portray hooks.
show(hypothesis,M):-
setting(portray_hypothesis,Pretty,M),
aleph_portray(hypothesis,Pretty,M),
fail.
show(search,M):-
setting(portray_search,Pretty,M),
aleph_portray(search,Pretty,M).
% show(good,M): print good clauses with score >= minscore — first from
% the in-memory '$aleph_good' records, then (second clause) from the
% goodfile, if one is set.
show(good,M):-
setting(good,true,M),
nl,
p_message('good clauses'),
(setting(minscore,FMin,M) -> true; FMin is -inf),
setting(evalfn,Evalfn,M),
M:'$aleph_good'(_,Label,Clause),
Label = [_,_,_,F|_],
F >= FMin,
pp_dclause(Clause,M),
show_stats(Evalfn,Label),
fail.
show(good,M):-
setting(good,true,M),
setting(goodfile,File,M),
aleph_open(File,read,Stream),
(setting(minscore,FMin,M) -> true; FMin is -inf),
setting(evalfn,Evalfn,M),
repeat,
read(Stream,Fact),
% NOTE(review): this match expects the terms in goodfile to be written
% module-qualified (M:'$aleph_good'(...)) — verify against the writer.
(Fact = M:'$aleph_good'(_,Label,Clause) ->
Label = [_,_,_,F|_],
F >= FMin,
show_stats(Evalfn,Label),
pp_dclause(Clause,M),
fail;
close(Stream), !
).
% show(features,M): generate features from good clauses if none exist
% yet, then print each with its statistics.
show(features,M):-
setting(evalfn,Evalfn,M),
(M:'$aleph_feature'(feature,_) -> true;
gen_features(M)),
p_message('features from good clauses'),
M:'$aleph_feature'(feature,feature(Id,Label,_,Head,Body)),
show_stats(Evalfn,Label),
pp_dclause(feature(Id,(Head:-Body)),M),
fail.
% show(constraints,M): good clauses whose head is 'false' (constraints),
% scored against the noise bound; then the aleph_false/0 definition.
show(constraints,M):-
setting(good,true,M),
nl,
p_message('constraints'),
setting(noise,N,M),
FMin is -N,
M:'$aleph_good'(_,Label,Clause),
split_clause(Clause,false,_),
Label = [_,_,_,F],
F >= FMin,
pp_dclause(Clause,M),
show_stats(coverage,Label),
fail.
show(constraints,M):-
show(aleph_false/0,M).
% show(Name/Arity,M): list the current definition of Name/Arity,
% skipping the temporary pclause added during search.
show(Name/Arity,M):-
functor(Pred,Name,Arity),
%current_predicate(M:Name,Pred),
nl,
p1_message('definition'), p_message(Name/Arity),
clause(M:Pred,Body),
\+(in(Body,'$aleph_search'(pclause,pclause(_,_)),M)),
pp_dclause((Pred:-Body),M),
fail.
% Train/test example listings delegate to the portray hook.
show(train_pos,M):-
setting(portray_examples,Pretty,M),
aleph_portray(train_pos,Pretty,M).
show(train_neg,M):-
setting(portray_examples,Pretty,M),
aleph_portray(train_neg,Pretty,M).
show(test_pos,M):-
setting(portray_examples,Pretty,M),
aleph_portray(test_pos,Pretty,M).
show(test_neg,M):-
setting(portray_examples,Pretty,M),
aleph_portray(test_neg,Pretty,M).
% Catch-all: unknown show targets succeed silently.
show(_,_M).
% settings(+M): convenience wrapper for show(settings,M).
settings(M):-
show(settings,M).
/**
 * good_clauses(:GoodClauses:list) is det
 *
 * Good clauses found in searches conducted so far (good clauses all have a utility
 * above that specified by minscore).
 */
good_clauses(M:GC):-
good_clauses(GC,M).
good_clauses(GC,M):-
(setting(minscore,FMin,M) -> true; FMin is -inf),
findall(Clause,
(M:'$aleph_good'(_,Label,Clause),
Label = [_,_,_,F|_],
F >= FMin),GC).
% examples(?Type,?List)
% show all examples numbers in List of Type
% Failure-driven: prints every example of Type whose number is in List.
examples(Type,List,M):-
setting(portray_literals,Pretty,M),
M:example(Num,Type,Atom),
aleph_member1(Num,List),
aleph_portray(Atom,Pretty,M), write('.'), nl,
fail.
examples(_,_,_M).
/**
 * bottom(:BottomClause:term) is det
 *
 * BottomClause is the current bottom clause.
 */
bottom(M:Clause):-
bottom(Clause,M).
% bottom(-Clause,+M): rebuild the bottom clause from the stored literals
% 1..Last and convert the literal list into a clause term.
bottom(Clause,M):-
M:'$aleph_sat'(lastlit,Last),
get_clause(1,Last,[],ClauseList,M),
list_to_clause(ClauseList,Clause).
% posleft(-List)
% returns positive examples left to be covered
posleft(PList,M):-
M:'$aleph_global'(atoms_left,atoms_left(pos,PosLeft)),
intervals_to_list(PosLeft,PList).
% write_rules/0 due to Mark Reid
% write_rules(+M): write the current theory to the file named by the
% rulefile parameter; succeeds silently when the parameter is unset.
% FIX: the original called write_rules(File) — i.e. write_rules/1 again
% with the file name in the module-argument position, so nothing was
% ever written. The file writer is write_rules/2 (cf. the parallel
% write_features/1 below, which correctly calls write_features/2).
write_rules(M):-
setting(rulefile,File,M),
write_rules(File,M), !.
write_rules(_M).
% write_features(+M): write current features to the file named by the
% featurefile parameter; succeeds silently when the parameter is unset.
write_features(M):-
setting(featurefile,File,M),
write_features(File,M), !.
write_features(_M).
% write_rules(+File,+M)
% Write every rule of the current theory to File (newest-first list is
% reversed so rules appear in discovery order).
% FIX: the output stream is now closed after writing — the original
% only flushed it, leaking the file handle (compare write_features/2,
% which does close its stream). close/1 also implies a flush.
write_rules(File,M):-
aleph_open(File,write,Stream),
set_output(Stream),
M:'$aleph_global'(rules,rules(L)),
aleph_reverse(L,L1),
write_rule(L1,M),
set_output(user_output),
close(Stream).
% write_rule(+RuleIds,+M): failure-driven print of each theory clause.
write_rule(Rules,M):-
aleph_member(RuleId,Rules),
M:'$aleph_global'(theory,theory(RuleId,_,Rule,_,_)),
pp_dclause(Rule,M),
fail.
write_rule(_,_M).
% write_features(+File,+M): dump all stored '$aleph_feature'/2 clauses
% to File via listing/1, restoring user_output afterwards.
write_features(File,M):-
aleph_open(File,write,Stream),
set_output(Stream),
listing(M:'$aleph_feature'/2),
close(Stream),
set_output(user_output).
write_features(_,_M).
% best_hypothesis(-Head,-Body,-[P,N,L],+M): decompose the clause most
% recently selected by the search, with its label.
best_hypothesis(Head1,Body1,[P,N,L],M):-
M:'$aleph_search'(selected,selected([P,N,L|_],Clause,_,_)),
split_clause(Clause,Head2,Body2), !,
Head1 = Head2, Body1 = Body2.
/**
 * hypothesis(:Head:term,-Body:term,-Label:list) is det
 *
 * Head is the head of the current hypothesised clause.
 * Body is the body of the current hypothesised clause.
 * Label is the list [P,N,L] where P is the positive examples covered by the
 * hypothesised clause, N is the negative examples covered by the
 * hypothesised clause, and L is the number of literals in the
 * hypothesised clause.
 *
 */
hypothesis(M:Head1,Body1,Label):-
hypothesis(Head1,Body1,Label,M).
/**
 * hypothesis(-Head:term,-Body:term,-Label:list,+Module:atomic) is det
 *
 * Head is the head of the current hypothesised clause.
 * Body is the body of the current hypothesised clause.
 * Label is the list [P,N,L] where P is the positive examples covered by the
 * hypothesised clause, N is the negative examples covered by the
 * hypothesised clause, and L is the number of literals in the
 * hypothesised clause. Module is the module of the input file.
 * Internal predicates.
 */
% First clause: the transient pclause added during search takes
% precedence; second clause: the recorded hypothesis (which may be a
% single clause or a list of clauses, enumerated on backtracking).
hypothesis(Head1,Body1,Label,M):-
M:'$aleph_search'(pclause,pclause(Head2,Body2)), !,
Head1 = Head2, Body1 = Body2,
get_hyp_label((Head2:-Body2),Label,M).
hypothesis(Head1,Body1,Label,M):-
M:'$aleph_global'(hypothesis,hypothesis(_,Theory,_,_)),
(Theory = [_|_] -> aleph_member(Clause,Theory);
Theory = Clause),
split_clause(Clause,Head2,Body2),
Head1 = Head2, Body1 = Body2,
get_hyp_label((Head2:-Body2),Label,M).
/**
 * rdhyp(:V:var) is det
 *
 * Read a hypothesised clause from the user.
 * Internal predicate, to be called as `rdhyp/0`.
 *
 */
% Clears any previous hypothesis/cover records, reads a clause from the
% current input, installs it via add_hyp/2 and displays it.
rdhyp(M:_):-
retractall(M:'$aleph_search'(pclause,_)),
retractall(M:'$aleph_search'(covers,_)),
retractall(M:'$aleph_search'(coversn,_)),
read(Clause),
add_hyp(Clause,M),
nl,
show(hypothesis,M).
/**
 * addhyp_i(:V:var) is det
 *
 * Add current hypothesised clause to theory.
 * If a search is interrupted, then the current best hypothesis will be added to the theory.
 * Internal predicate, to be called as `addhyp/0`.
 *
 */
addhyp_i(M:_):-
addhyp(M).
% addhyp(+M): three cases, tried in order —
% 1. the hypothesis is a list of clauses: add the whole theory;
% 2. a single hypothesis exists: remove seeds and update coversets;
% 3. fall back to the clause selected by the last search.
addhyp(M):-
M:'$aleph_global'(hypothesis,hypothesis(Label,Theory,PCover,NCover)),
Theory = [_|_], !,
add_theory(Label,Theory,PCover,NCover,M).
addhyp(M):-
M:'$aleph_global'(hypothesis,hypothesis(Label,_,PCover,_)), !,
rm_seeds(M),
worse_coversets(PCover,pos,Label,Worse,M),
(Worse = [] -> true;
M:'$aleph_global'(last_clause,last_clause(NewClause)),
update_coversets(Worse,NewClause,pos,Label,M)), !.
addhyp(M):-
M:'$aleph_search'(selected,selected(Label,RClause,PCover,NCover)), !,
add_hyp(Label,RClause,PCover,NCover,M),
rm_seeds(M),
worse_coversets(PCover,pos,Label,Worse,M),
(Worse = [] -> true;
M:'$aleph_global'(last_clause,last_clause(NewClause)),
update_coversets(Worse,NewClause,pos,Label,M)), !.
% add bottom clause as hypothesis
% provided minacc, noise and search constraints are met
% otherwise the example saturated is added as hypothesis
add_bottom(Bottom,M):-
retractall(M:'$aleph_search'(selected,selected(_,_,_,_))),
bottom(Bottom,M),
add_hyp(Bottom,M),
M:'$aleph_global'(hypothesis,hypothesis(Label,Clause,_,_)),
(clause_ok(Clause,Label,M) -> true;
M:'$aleph_sat'(example,example(Num,Type)),
M:example(Num,Type,Example),
retract(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))),
setting(evalfn,Evalfn,M),
complete_label(Evalfn,Example,[1,0,1],Label1),
asserta(M:'$aleph_global'(hypothesis,hypothesis(Label1,(Example:-true),[Num-Num],[])))).
% specialise a hypothesis by recursive construction of
% abnormality predicates
/**
 * sphyp_i(:V:var) is det
 *
 * Specialise a hypothesis by recursive construction of
 * abnormality predicates.
 * Internal predicate, to be called as `sphyp/0`.
 */
sphyp_i(M:_):-
sphyp(M).
% sphyp(+M): move the current hypothesis into the sphyp slot, run the
% GCWS procedure around a saved search state, then restore the
% hypothesis record unchanged.
sphyp(M):-
retractall(M:'$aleph_search'(sphyp,hypothesis(_,_,_,_))),
retractall(M:'$aleph_search'(gcwshyp,hypothesis(_,_,_,_))),
retract(M:'$aleph_global'(hypothesis,
hypothesis([P,N,L|T],Clause,PCover,NCover))),
asserta(M:'$aleph_search'(sphyp,hypothesis([P,N,L|T],Clause,PCover,NCover))),
store(searchstate,M),
gcws(M),
retractall(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))),
asserta(M:'$aleph_global'(hypothesis,
hypothesis([P,N,L|T],Clause,PCover,NCover))),
reinstate(searchstate,M).
/**
 * addgcws_i(:V:var) is det
 *
 * Add hypothesis constructed by performing GCWS to theory.
 * Internal predicate, to be called as `addgcws/0`.
 *
 */
addgcws_i(M:_):-
addgcws(M).
% addgcws(+M): retract-and-reassert keeps the gcwshyp record intact
% while addhyp/1 consumes the current hypothesis.
addgcws(M):-
retract(M:'$aleph_search'(gcwshyp,hypothesis(Label,C,P,N))), !,
asserta(M:'$aleph_search'(gcwshyp,hypothesis(Label,C,P,N))),
addhyp(M),
add_gcws(M).
/**
 * rmhyp_i(:V:var) is det
 *
 * Remove the current hypothesised clause from theory
 * Internal predicate, to be called as `rmhyp/0`.
 *
 */
rmhyp_i(M:_):-
rmhyp(M).
% rmhyp(+M): stash the pclause (or, failing that, the hypothesis) in
% '$aleph_local' so it can be restored; always succeeds.
rmhyp(M):-
retract(M:'$aleph_search'(pclause,pclause(Head,Body))),
asserta(M:'$aleph_local'(pclause,pclause(Head,Body))), !.
rmhyp(M):-
retract(M:'$aleph_global'(hypothesis,hypothesis(Label,Clause1,P,N))),
asserta(M:'$aleph_local'(hypothesis,hypothesis(Label,Clause1,P,N))), !.
rmhyp(_).
/**
 * covers(-P:int) is det
 *
 * Show positive examples covered by hypothesised clause.
 */
covers(M:PC):-
get_hyp(Hypothesis,M),
label_create(Hypothesis,Label,M),
extract_cover(pos,Label,P),
examples(pos,P,M),
length(P,PC),
p1_message('examples covered'),
p_message(PC),
retractall(M:'$aleph_search'(covers,_)),
asserta(M:'$aleph_search'(covers,covers(P,PC))).
/**
 * coversn(-N:int) is det
 *
 * Show negative examples covered by hypothesised clause.
 */
coversn(M:NC):-
get_hyp(Hypothesis,M),
label_create(Hypothesis,Label,M),
extract_cover(neg,Label,N),
examples(neg,N,M),
length(N,NC),
p1_message('examples covered'),
p_message(NC),
retractall(M:'$aleph_search'(coversn,_)),
asserta(M:'$aleph_search'(coversn,coversn(N,NC))).
% covers(-Number)
% as in covers/0, but first checks if being done
% within a greedy search
% FIX: in the greedy branch the lookup used key 'atoms' with an
% atoms_left(...) term — a combination that is never asserted (records
% are stored as '$aleph_global'(atoms_left,atoms_left(Type,_)); see
% read_examples/3), so the greedy branch always failed. Key corrected
% to atoms_left.
covers(P,M):-
get_hyp(Hypothesis,M),
(setting(greedy,true,M) ->
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos));
M:'$aleph_global'(atoms,atoms(pos,Pos))),
label_create(Hypothesis,pos,Pos,Label,M),
retractall(M:'$aleph_search'(covers,_)),
extract_pos(Label,PCover),
interval_count(PCover,P),
asserta(M:'$aleph_search'(covers,covers(PCover,P))).
% coversn(-Number)
% as in coversn/0, but first checks if being done
% within a greedy search
% FIX: in the non-greedy branch the lookup used key 'atoms_left' with
% an atoms(...) term — a combination that is never asserted (atoms(...)
% terms are stored under key 'atoms'; see read_examples/3). Key
% corrected to atoms.
coversn(N,M):-
get_hyp(Hypothesis,M),
(setting(greedy,true,M) ->
M:'$aleph_global'(atoms_left,atoms_left(neg,Neg));
M:'$aleph_global'(atoms,atoms(neg,Neg))),
label_create(Hypothesis,neg,Neg,Label,M),
retractall(M:'$aleph_search'(coversn,_)),
extract_neg(Label,NCover),
interval_count(NCover,N),
% NOTE(review): the stored functor 'coverns' (sic) is kept unchanged —
% consumers elsewhere match this historical spelling; verify before
% renaming.
asserta(M:'$aleph_search'(coversn,coverns(NCover,N))).
% covers(-List,-Number)
% as in covers/1, but returns list of examples covered and their count
% FIX: same key/term mismatch as covers/2 — the greedy branch used key
% 'atoms' with an atoms_left(...) term, which is never asserted; key
% corrected to atoms_left (see read_examples/3 for the assert sites).
covers(PList,P,M):-
get_hyp(Hypothesis,M),
(setting(greedy,true,M) ->
M:'$aleph_global'(atoms_left,atoms_left(pos,Pos));
M:'$aleph_global'(atoms,atoms(pos,Pos))),
label_create(Hypothesis,pos,Pos,Label,M),
retractall(M:'$aleph_search'(covers,_)),
extract_pos(Label,PCover),
intervals_to_list(PCover,PList),
length(PList,P),
asserta(M:'$aleph_search'(covers,covers(PCover,P))).
% coversn(-List,-Number)
% as in coversn/1, but returns list of examples covered and their count
% FIX: same key/term mismatch as coversn/2 — the non-greedy branch used
% key 'atoms_left' with an atoms(...) term, which is never asserted;
% key corrected to atoms (see read_examples/3 for the assert sites).
coversn(NList,N,M):-
get_hyp(Hypothesis,M),
(setting(greedy,true,M) ->
M:'$aleph_global'(atoms_left,atoms_left(neg,Neg));
M:'$aleph_global'(atoms,atoms(neg,Neg))),
label_create(Hypothesis,neg,Neg,Label,M),
retractall(M:'$aleph_search'(coversn,_)),
extract_neg(Label,NCover),
intervals_to_list(NCover,NList),
length(NList,N),
% NOTE(review): stored functor 'coverns' (sic) kept unchanged — see
% coversn/2.
asserta(M:'$aleph_search'(coversn,coverns(NCover,N))).
/**
 * example_saturated(:Ex:term) is det
 *
 * Ex is a positive example. This is the current example saturated.
 *
 */
example_saturated(M:Example):-
example_saturated(Example,M).
example_saturated(Example,M):-
M:'$aleph_sat'(example,example(Num,Type)),
M:example(Num,Type,Example).
% reset(+M): wipe all global state and examples, seed the example DB
% with the sentinel uspec example, and restore all default settings.
reset(M):-
clean_up(M),
clear_cache(M),
aleph_abolish('$aleph_global'/2,M),
aleph_abolish(example/3,M),
assert(M:example(0,uspec,aleph_false)),
set_default(_,M),
!.
% Generic timing routine due to Mark Reid.
% Under cygwin, cputime cannot be trusted
% so walltime is used instead. To use cputime, set the body of this
% predicate to "Time is cputime".
% stopwatch(-Time): CPU time consumed by this process (seconds).
stopwatch(Time) :-
	Time is cputime.
% statistics(walltime,[Time|_]).
% wallclock(-Time): wall-clock timestamp from the 'real_time' statistic
% (presumably seconds since the epoch under SWI-Prolog -- TODO confirm).
wallclock(Time):-
	statistics(real_time,[Time|_]).
% time(+Goal,+N,-[Mean,Sd]): run Goal N times (via time_loop/3) and
% return the mean and sample standard deviation of the wall-clock durations.
time(P,N,[Mean,Sd]):-
	time_loop(N,P,Times),
	mean(Times,Mean),
	sd(Times,Sd).
test_ex(Exs,Flag,N,T,M):-
retractall(M:'$aleph_local'(covered,_)),
retractall(M:'$aleph_local'(total,_)),
asserta(M:'$aleph_local'(covered,0)),
asserta(M:'$aleph_local'(total,0)),
test_ex1(Exs,Flag,M),
retract(M:'$aleph_local'(covered,N)),
retract(M:'$aleph_local'(total,T)).
test_ex1(Exs,Flag,M):-
setting(portray_examples,Pretty,M),
member(Example,Exs),
retract(M:'$aleph_local'(total,T0)),
T1 is T0 + 1,
asserta(M:'$aleph_local'(total,T1)),
(once(depth_bound_call(Example,M)) ->
(Flag = show ->
p1_message(covered),
aleph_portray(Example,Pretty,M),
nl;
true);
(Flag = show ->
p1_message('not covered'),
aleph_portray(Example,Pretty,M),
nl;
true),
fail),
retract(M:'$aleph_local'(covered,N0)),
N1 is N0 + 1,
asserta(M:'$aleph_local'(covered,N1)),
fail.
test_ex1(_,_,_).
test(F,Flag,N,T,M):-
retractall(M:'$aleph_local'(covered,_)),
retractall(M:'$aleph_local'(total,_)),
asserta(M:'$aleph_local'(covered,0)),
asserta(M:'$aleph_local'(total,0)),
(F = [_|_] ->
test_files(F,Flag,M);
test_file(F,Flag,M)
),
retract(M:'$aleph_local'(covered,N)),
retract(M:'$aleph_local'(total,T)).
test_files([],_,_M).
test_files([File|Files],Flag,M):-
test_file(File,Flag,M),
test_files(Files,Flag,M).
test_file('?',_,_M):- !.
test_file(File,Flag,M):-
setting(portray_examples,Pretty,M),
aleph_open(File,read,Stream), !,
repeat,
read(Stream,Example),
(Example = end_of_file -> close(Stream);
retract(M:'$aleph_local'(total,T0)),
T1 is T0 + 1,
asserta(M:'$aleph_local'(total,T1)),
(once(depth_bound_call(Example,M)) ->
(Flag = show ->
p1_message(covered),
aleph_portray(Example,Pretty,M),
nl;
true);
(Flag = show ->
p1_message('not covered'),
aleph_portray(Example,Pretty,M),
nl;
true),
fail),
retract(M:'$aleph_local'(covered,N0)),
N1 is N0 + 1,
asserta(M:'$aleph_local'(covered,N1)),
fail),
!.
test_file(File,_,_M):-
p1_message('cannot open'), p_message(File).
in(false,_,_M):-
!,
fail.
in(bottom,Lit,M):-
!,
M:'$aleph_sat'(lastlit,Last),
get_clause(1,Last,[],FlatClause),
aleph_member(Lit,FlatClause).
in((Head:-true),Head,_M):- !.
in((Head:-Body),L,M):-
!,
in((Head,Body),L,M).
in((L1,_),L1,_M).
in((_,R),L,M):-
!,
in(R,L,M).
in(L,L,_M).
in((L1,L),L1,L,_M).
in((L1,L),L2,(L1,Rest),M):-
!,
in(L,L2,Rest,M).
in(L,L,true,_M).
/**
* random(-X:term,+Dist:term) is det
*
* draw a random number from a distribution
*/
random(X,normal(Mean,Sigma)):-
var(X), !,
normal(Mean,Sigma,X).
random(X,normal(_,_)):-
!,
number(X).
% X >= Mean - 3*Sigma,
% X =< Mean + 3*Sigma.
random(X,Distr):-
Distr = [_|_],
var(X), !,
draw_element(Distr,X1),
X = X1.
random(X,Distr):-
Distr = [_|_],
nonvar(X), !,
aleph_member(Prob-X,Distr),
Prob > 0.0.
% mean(+Values,-Mean): arithmetic mean of a non-empty list of numbers.
mean(Values,Mean):-
	length(Values,Count),
	sum(Values,Total),
	Mean is Total/Count.
% sd(+Values,-Sd): sample standard deviation of a list of numbers;
% defined as 0.0 for a singleton list.
sd(Values,Sd):-
	length(Values,Count),
	(Count = 1 -> Sd = 0.0;
		sum(Values,Total),
		sumsq(Values,TotalSq),
		Sd is sqrt(TotalSq/(Count-1) - (Total*Total)/(Count*(Count-1)))).
% sum(+Values,-Total): sum of a list of numbers (right fold, preserving
% the original accumulation order).
sum([],0).
sum([V|Rest],Total):-
	sum(Rest,RestTotal),
	Total is V + RestTotal.
% sumsq(+Values,-Total): sum of squares of a list of numbers.
sumsq([],0).
sumsq([V|Rest],Total):-
	sumsq(Rest,RestTotal),
	Total is V*V + RestTotal.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% auxilliary definitions for some of the above
set_default(A,M):-
default_setting(A,B),
set(A,B,M),
fail.
set_default(_,_M).
default_setting(A,B):-
set_def(A,_,_,_,B,_),
B \= ''.
% special case for threads as only SWI supports it
check_setting(threads,B):-
set_def(threads,_,_,Dom,_,_),
check_legal(Dom,B),
prolog_type(P),
(B > 1 ->
(P = swi -> true;
err_message(set(threads,B)),
fail
);
true
), !.
check_setting(A,B):-
set_def(A,_,_,Dom,_,_), !,
(check_legal(Dom,B) -> true;
err_message(set(A,B))).
check_setting(_,_).
check_legal(int(L)-int(U),X):-
!,
number(L,IL),
number(U,IU),
number(X,IX),
IX >= IL,
IX =< IU.
check_legal(float(L)-float(U),X):-
!,
number(L,FL),
number(U,FU),
number(X,FX),
FX >= FL,
FX =< FU.
check_legal([H|T],X):-
!,
aleph_member1(X,[H|T]).
/* AXO: Tolto perche infastidisce e non serve */
check_legal(read(filename),X):-
X \= '?',
!,
exists(X).
/* il commento finiva qua */
check_legal(_,_).
number(+inf,Inf):-
Inf is inf, !.
number(-inf,MInf):-
MInf is -inf, !.
number(X,Y):-
Y is X, !.
setting_definition(A,B,C,D,E,F1):-
set_def(A,B,C,D,E,F),
(F = noshow -> F1 = dontshow; F = F1).
% set_def(Parameter,Class,TextDescr,Type,Default,Flag)
set_def(abduce, search-search_strategy,
'Abduce Atoms and Generalise',
[true, false], false,
show).
set_def(best, search-search_space,
'Label to beat',
prolog_term,'',
show).
set_def(cache_clauselength, miscellaneous,
'Maximum Length of Cached Clauses',
int(1)-int(+inf), 3,
show).
set_def(caching, miscellaneous,
'Cache Clauses in Search',
[true, false], false,
show).
set_def(check_redundant, miscellaneous,
'Check for Redundant Literals',
[true, false], false,
show).
set_def(check_good, miscellaneous,
'Check good clauses for duplicates',
[true, false], false,
show).
set_def(check_useless, saturation,
'Remove I/O unconnected Literals',
[true, false], false,
show).
set_def(classes, tree,
'Class labels',
prolog_term,'',
show).
% Fixed typo in the user-visible description ('Probablity' -> 'Probability').
set_def(clauselength_distribution, search-search_strategy,
	'Probability Distribution over Clauses',
	prolog_term,'',
	show).
set_def(clauselength, search-search_space,
'Maximum Clause Length',
int(1)-int(+inf), 4,
show).
set_def(clauses, search-search_space,
'Maximum Clauses per Theory',
int(1)-int(+inf),'',
show).
set_def(condition, evaluation,
'Condition SLP',
[true, false], false,
show).
set_def(confidence, tree,
'Confidence for Rule Pruning',
float(0.0)-float(1.0), 0.95,
show).
set_def(construct_bottom, saturation,
'Build a bottom clause',
[saturation, reduction, false], saturation,
show).
set_def(depth, miscellaneous,
'Theorem Proving Depth',
int(1)-int(+inf), 10,
show).
set_def(evalfn, evaluation,
'Evaluation Function',
[coverage, compression, posonly, pbayes, accuracy, laplace,
auto_m, mestimate, mse, entropy, gini, sd, wracc, user], coverage,
show).
set_def(explore, search-search_space,
'Exhaustive Search of all alternatives',
[true, false], false,
show).
set_def(good, miscellaneous,
'Store good clauses',
[true, false], false,
show).
set_def(goodfile, miscellaneous,
'File of good clauses',
write(filename),'',
show).
set_def(gsamplesize, evaluation,
'Size of random sample',
int(1)-int(+inf), 100,
show).
set_def(i, saturation,
'bound layers of new variables',
int(1)-int(+inf), 2,
show).
set_def(interactive, search-search_strategy,
'Interactive theory construction',
[true, false], false,
show).
set_def(language, search-search_space,
'Maximum occurrence of any predicate symbol in a clause',
int(1)-int(+inf), +inf,
show).
set_def(lazy_negs, evaluation,
'Lazy theorem proving on negative examples',
[true, false], false,
show).
set_def(lazy_on_contradiction, evaluation,
'Lazy theorem proving on contradictions',
[true, false], false,
show).
set_def(lazy_on_cost, evaluation,
'Lazy theorem proving on cost',
[true, false], false,
show).
set_def(lookahead, search-search_space,
'Lookahead for automatic refinement operator',
int(1)-int(+inf), 1,
show).
set_def(m, evaluation,
'M-estimate',
float(0.0)-float(+inf),'',
show).
set_def(max_abducibles, search-search_space,
'Maximum number of atoms in an abductive explanation',
int(1)-int(+inf), 2,
show).
set_def(max_features, miscellaneous,
'Maximum number of features to be constructed',
int(1)-int(+inf), +inf,
show).
set_def(minacc, evaluation,
'Minimum clause accuracy',
float(0.0)-float(1.0), 0.0,
show).
set_def(mingain, tree,
'Minimum expected gain',
float(0.000001)-float(+inf), 0.05,
show).
set_def(minpos, evaluation,
'Minimum pos covered by a clause',
int(0)-int(+inf), 1,
show).
set_def(minposfrac, evaluation,
'Minimum proportion of positives covered by a clause',
float(0.0)-float(1.0), 0,
show).
set_def(minscore, evaluation,
'Minimum utility of an acceptable clause',
float(-inf)-float(+inf), -inf,
show).
set_def(moves, search-search_strategy,
'Number of moves in a randomised local search',
int(0)-int(+inf), 5,
show).
set_def(newvars, search-search_space,
'Existential variables in a clause',
int(0)-int(+inf), +inf,
show).
set_def(nodes, search-search_space,
'Nodes to be explored in the search',
int(1)-int(+inf), 5000,
show).
set_def(noise, evaluation,
'Maximum negatives covered',
int(0)-int(+inf), 0,
show).
set_def(nreduce_bottom, saturation,
'Negative examples based reduction of bottom clause',
[true, false], false,
show).
set_def(openlist, search-search_space,
'Beam width in a greedy search',
int(1)-int(+inf), +inf,
show).
set_def(optimise_clauses, miscellaneous,
'Perform query Optimisation',
[true, false], false,
show).
set_def(permute_bottom, saturation,
'Randomly permute order of negative literals in the bottom clause',
[true, false], false,
show).
set_def(portray_examples, miscellaneous,
'Pretty print examples',
[true, false], false,
show).
set_def(portray_hypothesis, miscellaneous,
'Pretty print hypotheses',
[true, false], false,
show).
set_def(portray_literals, miscellaneous,
'Pretty print literals',
[true, false], false,
show).
set_def(portray_search, miscellaneous,
'Pretty print search',
[true, false], false,
show).
set_def(print, miscellaneous,
'Literals printed per line',
int(1)-int(+inf), 4,
show).
set_def(prior, miscellaneous,
'Prior class distribution',
prolog_term,'',
show-ro).
set_def(proof_strategy, miscellaneous,
'Current proof strategy',
[restricted_sld, sld, user], restricted_sld,
show).
set_def(prooftime, miscellaneous,
'Theorem proving time',
float(0.0)-float(+inf), +inf,
show).
set_def(prune_tree, tree,
'Tree pruning',
[true, false], false,
show).
set_def(recordfile, miscellaneous,
'Log filename',
write(filename),'',
show).
set_def(record, miscellaneous,
'Log to file',
[true, false], false,
show).
set_def(refineop, search-search_strategy,
'Current refinement operator',
[user, auto, scs, false],'',
show-ro).
set_def(refine, search-search_strategy,
'Nature of customised refinement operator',
[user, auto, scs, false], false,
show).
set_def(resample, search-search_strategy,
'Number of times to resample an example',
int(1)-int(+inf), 1,
show).
set_def(rls_type, search-search_strategy,
'Type of randomised local search',
[gsat, wsat, rrr, anneal], gsat,
show).
set_def(rulefile, miscellaneous,
'Rule file',
write(filename),'',
show).
set_def(samplesize, search-search_strategy,
'Size of sample',
int(0)-int(+inf), 0,
show).
set_def(scs_percentile, search-search_strategy,
'Percentile of good clauses for SCS search',
float(0.0)-float(100.0),'',
show).
set_def(scs_prob, search-search_strategy,
'Probability of getting a good clause in SCS search',
float(0.0)-float(1.0),'',
show).
set_def(scs_sample, search-search_strategy,
'Sample size in SCS search',
int(1)-int(+inf), '',
show).
set_def(search, search-search_strategy,
'Search Strategy',
[bf, df, heuristic, ibs, ils, rls, scs, id, ic, ar, false], bf,
show).
set_def(searchstrat, search-search_strategy,
'Current Search Strategy',
[bf, df, heuristic, ibs, ils, rls, scs, id, ic, ar], bf,
show-ro).
set_def(searchtime, search-search_strategy,
'Search time in seconds',
float(0.0)-float(+inf), +inf,
show).
set_def(skolemvars, miscellaneous,
'Counter for non-ground examples',
int(1)-int(+inf), 10000,
show).
% Fixed typo in the user-visible description ('co-refencing' -> 'co-referencing').
set_def(splitvars, saturation,
	'Split variable co-referencing',
	[true, false], false,
	show).
set_def(stage, miscellaneous,
'Aleph processing mode',
[saturation, reduction, command], command,
show-ro).
set_def(store_bottom, saturation,
'Store bottom',
[true, false], false,
show).
set_def(subsample, search-search_strategy,
'Subsample for evaluating a clause',
[true,false], false,
show).
set_def(subsamplesize, search-search_strategy,
'Size of subsample for evaluating a clause',
int(1)-int(+inf), +inf,
show).
set_def(temperature, search-search_strategy,
'Temperature for randomised search annealing',
float(0.0)-float(+inf), '',
show).
set_def(test_neg, miscellaneous,
'Negative examples for testing theory',
read(filename),'',
show).
set_def(test_pos, miscellaneous,
'Positive examples for testing theory',
read(filename),'',
show).
set_def(threads, miscellaneous,
'Number of threads',
int(1)-int(+inf), 1,
show).
set_def(train_neg, miscellaneous,
'Negative examples for training',
read(filename),'',
show).
set_def(train_pos, miscellaneous,
'Positive examples for training',
read(filename),'',
show).
set_def(tree_type, tree,
'Type of tree to construct',
[classification, class_probability, regression, model], '',
show).
set_def(tries, search-search_strategy,
'Number of restarts for a randomised search',
int(1)-int(+inf), 10,
show).
set_def(typeoverlap, miscellaneous,
'Type overlap for induce_modes',
float(0.0)-float(1.0), 0.95,
show).
set_def(uniform_sample, search-search_strategy,
'Distribution to draw clauses from randomly',
[true, false], false,
show).
set_def(updateback, miscellaneous,
'Update background knowledge with clauses found on search',
[true, false], true,
noshow).
set_def(verbosity, miscellaneous,
'Level of verbosity',
int(1)-int(+inf), 1,
show).
set_def(version, miscellaneous,
'Aleph version',
int(0)-int(+inf), 5,
show-ro).
set_def(walk, search-search_strategy,
'Random walk probability for Walksat',
float(0.0)-float(1.0), '',
show).
% the following needed for compatibility with P-Progol
special_consideration(search,ida,M):-
set(search,bf,M), set(evalfn,coverage,M), !.
special_consideration(search,compression,M):-
set(search,heuristic,M), set(evalfn,compression,M), !.
special_consideration(search,posonly,M):-
set(search,heuristic,M), set(evalfn,posonly,M), !.
special_consideration(search,user,M):-
set(search,heuristic,M), set(evalfn,user,M), !.
special_consideration(refine,Refine,M):-
set(refineop,Refine,M), !.
special_consideration(refineop,auto,M):-
gen_auto_refine(M), !.
special_consideration(portray_literals,true,M):-
set(print,1,M), !.
special_consideration(record,true,M):-
noset(recordfile_stream,M),
(setting(recordfile,F,M) ->
aleph_open(F,append,Stream),
set(recordfile_stream,Stream,M);
true), !.
special_consideration(record,false,M):-
noset(recordfile_stream,M), !.
special_consideration(recordfile,File,M):-
noset(recordfile_stream,M),
(setting(record,true,M) ->
aleph_open(File,append,Stream),
set(recordfile_stream,Stream,M);
true), !.
special_consideration(good,true,M):-
noset(goodfile_stream,M),
(setting(goodfile,F,M) ->
aleph_open(F,append,Stream),
set(goodfile_stream,Stream,M);
true), !.
special_consideration(good,false,M):-
noset(goodfile_stream,M), !.
special_consideration(goodfile,File,M):-
noset(goodfile_stream,M),
(setting(good,true,M) ->
aleph_open(File,append,Stream),
set(goodfile_stream,Stream,M);
true), !.
special_consideration(minscore,_,M):-
aleph_abolish('$aleph_feature'/2,M), !.
special_consideration(_,_,_M).
rm_special_consideration(portray_literals,_,M):-
set_default(print,M), !.
rm_special_consideration(refine,_,M):-
set_default(refineop,M), !.
rm_special_consideration(record,_,M):-
noset(recordfile_stream,M), !.
rm_special_consideration(recordfile_stream,_,M):-
(setting(recordfile_stream,S,M) -> close(S); true), !.
rm_special_consideration(good,_,M):-
noset(goodfile_stream,M), !.
rm_special_consideration(goodfile_stream,_,M):-
(setting(goodfile_stream,S,M) -> close(S); true), !.
rm_special_consideration(_,_,_M).
get_hyp((Head:-Body),M):-
M:'$aleph_search'(pclause,pclause(Head,Body)), !.
get_hyp(Hypothesis,M):-
M:'$aleph_global'(hypothesis,hypothesis(_,Hypothesis,_,_)).
add_hyp(end_of_file,_M):- !.
add_hyp(Clause,M):-
nlits(Clause,L),
label_create(Clause,Label,M),
extract_count(pos,Label,PCount),
extract_count(neg,Label,NCount),
retractall(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))),
extract_pos(Label,P),
extract_neg(Label,N),
setting(evalfn,Evalfn,M),
complete_label(Evalfn,Clause,[PCount,NCount,L],Label1,M),
asserta(M:'$aleph_global'(hypothesis,hypothesis(Label1,Clause,P,N))).
add_hyp(Label,Clause,P,N,M):-
retractall(M:'$aleph_global'(hypothesis,hypothesis(_,_,_,_))),
asserta(M:'$aleph_global'(hypothesis,hypothesis(Label,Clause,P,N))).
add_theory(Label,Theory,PCover,NCover,M):-
aleph_member(C,Theory),
add_hyp(Label,C,PCover,NCover,M),
update_theory(_,M),
fail.
add_theory(_,_,PCover,NCover,M):-
rm_seeds(pos,PCover,M),
(setting(evalfn,posonly,M) -> rm_seeds(rand,NCover,M); true),
M:'$aleph_global'(atoms_left,atoms_left(pos,PLeft)),
interval_count(PLeft,PL),
p1_message('atoms left'), p_message(PL), !.
add_gcws(M):-
retract(M:'$aleph_search'(gcwshyp,hypothesis(L,C,P,N))),
asserta(M:'$aleph_global'(hypothesis,hypothesis(L,C,P,N))),
update_theory(_,M),
fail.
add_gcws(_M).
restorehyp(M):-
retract(M:'$aleph_local'(pclause,pclause(Head,Body))),
assertz(M:'$aleph_search'(pclause,pclause(Head,Body))), !.
restorehyp(M):-
retract(M:'$aleph_local'(hypothesis,hypothesis(Label,Clause1,P,N))),
asserta(M:'$aleph_global'(hypothesis,hypothesis(Label,Clause1,P,N))), !.
restorehyp(_).
% get_hyp_label(+Clause,?Label,+Module)
% Label = [P,N,L]: positive cover, negative cover and literal count of
% the current hypothesis.  Succeeds immediately when Label is unbound.
get_hyp_label(_,Label,_M):- var(Label), !.
get_hyp_label((_:-Body),[P,N,L],M):-
	nlits(Body,L1),
	L is L1 + 1,
	% use cached cover counts when available, otherwise recompute them
	(M:'$aleph_search'(covers,covers(_,P))-> true;
		covers(_,M),
		M:'$aleph_search'(covers,covers(_,P))),
	% NOTE(review): the first lookup uses the misspelt functor 'coverns',
	% matching what coversn/2 asserts; keep the two sites in sync if the
	% spelling is ever corrected.
	(M:'$aleph_search'(coversn,coverns(_,N))-> true;
		coversn(_,M),
		M:'$aleph_search'(coversn,coversn(_,N))).
% show_global(+Key,+Pred,+Module)
% Print every '$aleph_global' fact matching Key/Pred, one term per line,
% via a failure-driven loop; always succeeds.
show_global(Key,Pred,M):-
	M:'$aleph_global'(Key,Pred),
	copy_term(Pred,Pred1), numbervars(Pred1,0,_),
	aleph_writeq(Pred1), write('.'), nl,
	fail.
show_global(_,_,_M).
aleph_portray(hypothesis,true,M):-
M:aleph_portray(hypothesis), !.
aleph_portray(hypothesis,false,M):-
p_message('hypothesis'),
hypothesis(Head,Body,_,M),
pp_dclause((Head:-Body),M), !.
aleph_portray(_,hypothesis,_M):- !.
aleph_portray(search,true,M):-
M:aleph_portray(search), !.
aleph_portray(search,_,_M):- !.
aleph_portray(train_pos,true,M):-
M:aleph_portray(train_pos), !.
aleph_portray(train_pos,_,M):-
!,
setting(train_pos,File,M),
show_file(File).
aleph_portray(train_neg,true,M):-
M:aleph_portray(train_neg), !.
aleph_portray(train_neg,_,M):-
!,
setting(train_neg,File,M),
show_file(File).
aleph_portray(test_pos,true,M):-
M:aleph_portray(test_pos), !.
aleph_portray(test_pos,_,M):-
!,
setting(test_pos,File,M),
show_file(File).
aleph_portray(test_neg,true,M):-
M:aleph_portray(test_neg), !.
aleph_portray(test_neg,_,M):-
!,
setting(test_neg,File,M),
show_file(File).
aleph_portray(Lit,true,M):-
M:aleph_portray(Lit), !.
aleph_portray(Lit,_,_M):-
aleph_writeq(Lit).
aleph_writeq(Lit):-
write_term(Lit,[numbervars(true),quoted(true)]).
show_file(File):-
aleph_open(File,read,Stream),
repeat,
read(Stream,Clause),
(Clause = end_of_file -> close(Stream), !
;
writeq(Clause), write('.'), nl,
fail).
time_loop(0,_,[]):- !.
time_loop(N,P,[T|Times]):-
wallclock(S),
P,
wallclock(F),
T is F - S,
N1 is N - 1,
time_loop(N1,P,Times).
% list_profile: print the call count of every procedure recorded by the
% Prolog profiler, least frequently called first.
list_profile :-
	% get number of calls for each profiled procedure
	findall(D-P,profile_data(P,calls,D),LP),
	% sort them
	sort(LP,SLP),
	% and output (note: the most often called predicates will come last)
	write_profile_data(SLP).
% write_profile_data(+CountPredPairs): print one "Pred: Count" line per pair.
write_profile_data([]).
write_profile_data([D-P|SLP]) :-
	% just swap the two calls to get most often called predicates first.
	format('~w: ~w~n', [P,D]),
	write_profile_data(SLP).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% F I N A L C O M M A N D S
:- multifile sandbox:safe_meta/2.

% Declare the Aleph meta-predicates that may be called from within
% SWI-Prolog's sandboxed environment (e.g. SWISH).
% (review) removed a duplicated sandbox:safe_meta(aleph:addgcws_i(_), []) fact.
sandbox:safe_meta(aleph:induce(_), []).
sandbox:safe_meta(aleph:induce_tree(_), []).
sandbox:safe_meta(aleph:induce_max(_), []).
sandbox:safe_meta(aleph:induce_cover(_), []).
sandbox:safe_meta(aleph:induce_incremental(_), []).
sandbox:safe_meta(aleph:induce_clauses(_), []).
sandbox:safe_meta(aleph:induce_theory(_), []).
sandbox:safe_meta(aleph:induce_modes(_), []).
sandbox:safe_meta(aleph:induce_features(_), []).
sandbox:safe_meta(aleph:induce_constraints(_), []).
sandbox:safe_meta(aleph:sat(_), []).
sandbox:safe_meta(aleph:aleph_set(_,_), []).
sandbox:safe_meta(aleph:aleph_setting(_,_), []).
sandbox:safe_meta(aleph:noset(_), []).
sandbox:safe_meta(aleph:model(_), []).
sandbox:safe_meta(aleph:mode(_,_), []).
sandbox:safe_meta(aleph:modeh(_,_), []).
sandbox:safe_meta(aleph:modeb(_,_), []).
sandbox:safe_meta(aleph:show(_), []).
sandbox:safe_meta(aleph:hypothesis(_,_,_), []).
sandbox:safe_meta(aleph:rdhyp(_), []).
sandbox:safe_meta(aleph:addhyp_i(_), []).
sandbox:safe_meta(aleph:sphyp_i(_), []).
sandbox:safe_meta(aleph:covers(_), []).
sandbox:safe_meta(aleph:coversn(_), []).
sandbox:safe_meta(aleph:reduce(_), []).
sandbox:safe_meta(aleph:abducible(_), []).
sandbox:safe_meta(aleph:bottom(_), []).
sandbox:safe_meta(aleph:commutative(_), []).
sandbox:safe_meta(aleph:symmetric(_), []).
sandbox:safe_meta(aleph:lazy_evaluate(_), []).
sandbox:safe_meta(aleph:positive_only(_), []).
sandbox:safe_meta(aleph:example_saturated(_), []).
sandbox:safe_meta(aleph:addgcws_i(_), []).
sandbox:safe_meta(aleph:rmhyp_i(_), []).
sandbox:safe_meta(aleph:good_clauses(_), []).
| TeamSPoon/logicmoo_workspace | packs_lib/aleph/prolog/aleph.pl | Perl | mit | 368,896 |
use strict;
use Irssi;
use vars qw($VERSION %IRSSI);
use MIME::Base64;
use Authen::SASL "Perl";
$VERSION = "1.1";
%IRSSI = (
authors => ['Mantas Mikulėnas',
'Michael Tharp',
'Jilles Tjoelker'],
contact => ['grawity@gmail.com',
'gxti@partiallystapled.com'],
name => 'cap_authen_sasl.pl',
description => 'Implements SASL authentication using Authen::SASL for use with charybdis ircds, and enables CAP MULTI-PREFIX',
license => 'GNU General Public License',
url => 'http://sasl.charybdis.be/',
);
my %sasl_auth = ();
sub timeout;
# Fired on Irssi's 'server connected' signal; begin IRCv3 capability
# negotiation by asking the server for its capability list.  Only IRC
# connections are handled.
sub server_connected {
	my ($server) = @_;
	return unless $server->{chat_type} eq 'IRC';
	$server->send_raw_now('CAP LS');
}
# Handler for raw "CAP" replies from the server (IRCv3 capability
# negotiation).  Dispatches on the subcommand: LS (capabilities offered),
# ACK (capabilities enabled), NAK (request refused), LIST (currently
# enabled).  CAP END is sent when nothing further is required so that
# connection registration can proceed.
sub event_cap {
	my ($server, $args, $nick, $address) = @_;
	my ($subcmd, $caps, $tosend);
	$tosend = '';
	if ($args =~ /^\S+ (\S+) :(.*)$/) {
		$subcmd = uc $1;
		# pad with spaces so each capability can be matched as / name /
		$caps = ' '.$2.' ';
		if ($subcmd eq 'LS') {
			$tosend .= ' multi-prefix' if $caps =~ / multi-prefix /i;
			# only request sasl if credentials were configured for this network
			$tosend .= ' sasl' if $caps =~ / sasl /i && defined($sasl_auth{$server->{tag}});
			$tosend =~ s/^ //;
			$server->print('', "CLICAP: supported by server:$caps");
			if (!$server->{connected}) {
				if ($tosend eq '') {
					# nothing we want; end negotiation immediately
					$server->send_raw_now("CAP END");
				} else {
					$server->print('', "CLICAP: requesting: $tosend");
					$server->send_raw_now("CAP REQ :$tosend");
				}
			}
			#Irssi::signal_stop();
		} elsif ($subcmd eq 'ACK') {
			$server->print('', "CLICAP: now enabled:$caps");
			if ($caps =~ / sasl /i) {
				# sasl accepted: build the Authen::SASL client state for this
				# network and start the AUTHENTICATE exchange
				my $sasl = $sasl_auth{$server->{tag}};
				$sasl->{buffer} = "";
				$sasl->{obj} = Authen::SASL->new($sasl->{mech},
					callback => {
						user => $sasl->{user},
						pass => $sasl->{password}
					})->client_new("host", $server->{address});
				$sasl->{started} = 0;
				if($sasl->{obj}) {
					$server->send_raw_now("AUTHENTICATE " . $sasl->{mech});
					# give up after 10s if the server never answers (see timeout())
					Irssi::timeout_add_once(10*1000, \&timeout, $server->{tag});
				}else{
					$server->print('', 'SASL: attempted to start unknown mechanism "' . $sasl->{mech} . '"');
				}
			}
			elsif (!$server->{connected}) {
				$server->send_raw_now("CAP END");
			}
			#Irssi::signal_stop();
		} elsif ($subcmd eq 'NAK') {
			$server->print('', "CLICAP: refused:$caps");
			if (!$server->{connected}) {
				$server->send_raw_now("CAP END");
			}
			#Irssi::signal_stop();
		} elsif ($subcmd eq 'LIST') {
			$server->print('', "CLICAP: currently enabled:$caps");
			#Irssi::signal_stop();
		}
	}
}
# Handler for the server's "AUTHENTICATE" challenges.  Base64 payloads are
# transported in 400-byte chunks in both directions; this sub reassembles
# the incoming challenge, feeds it to the Authen::SASL client, and sends
# back the (re-chunked) response.
sub event_authenticate {
	my ($server, $args, $nick, $address) = @_;
	my $sasl = $sasl_auth{$server->{tag}};
	return unless $sasl && $sasl->{obj};
	$sasl->{buffer} .= $args;
	# a chunk of exactly 400 bytes means more data follows; keep buffering
	return if length($args) == 400;
	# a lone '+' denotes an empty payload
	my $in = $sasl->{buffer} eq '+' ? '' : decode_base64($sasl->{buffer});
	my $out;
	if (!$sasl->{started}) {
		if ($in) {
			# server sent initial data, so the client must not also have
			# first-sender data -- otherwise the exchange is inconsistent
			$out = $sasl->{obj}->client_start();
			if ($out) {
				$server->print("", "SASL: Sanity check: both server and client want to go first", "CLIENTERROR");
				return sasl_abort($server);
			}
			$out = $sasl->{obj}->client_step($in);
		} else {
			$out = $sasl->{obj}->client_start();
		}
		$sasl->{started} = 1;
	} else {
		$out = $sasl->{obj}->client_step($in);
	}
	# encode the reply; an empty reply must be transmitted as '+'
	$out = ($out // '') eq '' ? '+' : encode_base64($out, '');
	# split our reply into 400-byte chunks as the protocol requires
	while(length $out >= 400) {
		my $subout = substr($out, 0, 400, '');
		$server->send_raw_now("AUTHENTICATE $subout");
	}
	if(length $out) {
		$server->send_raw_now("AUTHENTICATE $out");
	}else{ # Last piece was exactly 400 bytes, we have to send some padding to indicate we're done
		$server->send_raw_now("AUTHENTICATE +");
	}
	$sasl->{buffer} = '';
	Irssi::signal_stop();
}
# Handler for SASL result numerics (903-907): show the server's message
# and, if registration is still in progress, end capability negotiation.
sub event_saslend {
	my ($server, $args, $nick, $address) = @_;
	my $data = $args;
	# strip the leading target nick and optional ':' from the numeric
	$data =~ s/^\S+ :?//;
	# need this to see it, ?? -- jilles
	$server->print('', $data);
	if (!$server->{connected}) {
		$server->send_raw_now("CAP END");
	}
}
# Handler for numeric 900 (logged in): report the account name we are now
# identified as.  CAP END is deliberately left to the 903 handler.
sub event_sasl_authed {
	my ($server, $args, $nick, $address) = @_;
	my ($mynick, $mynuh, $authcid, $text) = split / /, $args, 4;
	$server->print("", "Authenticated as $authcid ($mynuh)");
	# CAP END will be sent by 903
}
# One-shot timer started when AUTHENTICATE begins: if the connection has
# still not completed registration, abandon SASL so registration can proceed.
sub timeout {
	my ($tag) = @_;
	my $server = Irssi::server_find_tag($tag);
	if($server and !$server->{connected}) {
		$server->print('', "SASL: authentication timed out");
		$server->send_raw_now("CAP END");
	}
}
# Abort an in-progress SASL exchange ("AUTHENTICATE *") and end negotiation.
sub sasl_abort {
	my ($server) = @_;
	$server->send_raw_now("AUTHENTICATE *");
	$server->send_raw_now("CAP END");
}
# True when Authen::SASL can construct a client for the given mechanism
# name; the eval{} turns any constructor error into undef (-> false).
sub has_mech {
	return defined eval {Authen::SASL->new(shift)->client_new};
}
# /sasl -- dispatch to a subcommand (set/show/save/load); with no
# arguments, fall back to showing the current configuration.
sub cmd_sasl {
	my ($data, $server, $item) = @_;
	if ($data ne '') {
		Irssi::command_runsub ('sasl', $data, $server, $item);
	} else {
		cmd_sasl_show(@_);
	}
}
# /sasl set <net> <user> <password> <mechanism> -- add or update an entry;
# /sasl set <net> -- delete the entry for <net>.
sub cmd_sasl_set {
	my ($data, $server, $item) = @_;
	if (my($net, $u, $p, $m) = $data =~ /^(\S+) (\S+) (\S+) (\S+)$/) {
		$m = uc $m;
		if(has_mech $m) {
			$sasl_auth{$net}{user} = $u;
			$sasl_auth{$net}{password} = $p;
			$sasl_auth{$net}{mech} =$m;
			# never echo the password back; it is masked as '*'
			Irssi::print("SASL: added $net: [$m] $sasl_auth{$net}{user} *");
		}else{
			Irssi::print("SASL: unknown mechanism $m");
		}
	} elsif ($data =~ /^(\S+)$/) {
		# $net declared in the if() condition above is still in scope here
		$net = $1;
		if (defined($sasl_auth{$net})) {
			delete $sasl_auth{$net};
			Irssi::print("SASL: deleted $net");
		} else {
			Irssi::print("SASL: no entry for $net");
		}
	} else {
		Irssi::print("SASL: usage: /sasl set <net> <user> <password or keyfile> <mechanism>");
	}
}
# /sasl show -- list every configured network with its mechanism and
# username; passwords are masked as '*'.
sub cmd_sasl_show {
	my @networks = keys %sasl_auth;
	unless (@networks) {
		Irssi::print("SASL: no networks defined");
		return;
	}
	for my $network (@networks) {
		my $entry = $sasl_auth{$network};
		Irssi::print("SASL: $network: [$entry->{mech}] $entry->{user} *");
	}
}
# /sasl save -- write the configured credentials to ~/.irssi/sasl.auth,
# one tab-separated "net user password mech" line per network.
# Fixes: the open failure was silently ignored, the credential file was
# created with the default umask (plaintext passwords potentially
# world-readable), and the write handle was closed without error checking.
sub cmd_sasl_save {
	#my ($data, $server, $item) = @_;
	my $file = Irssi::get_irssi_dir()."/sasl.auth";
	open my $fh, ">", $file or do {
		Irssi::print("SASL: can't write $file: $!");
		return;
	};
	# the file holds plaintext credentials; keep it owner-readable only
	chmod 0600, $file;
	for my $net (keys %sasl_auth) {
		printf $fh ("%s\t%s\t%s\t%s\n", $net, $sasl_auth{$net}{user}, $sasl_auth{$net}{password}, $sasl_auth{$net}{mech});
	}
	# buffered write errors only surface at close
	close $fh or Irssi::print("SASL: error writing $file: $!");
	Irssi::print("SASL: auth saved to $file");
}
# /sasl load -- (re)populate %sasl_auth from ~/.irssi/sasl.auth; lines are
# tab-separated "net user password mech", mechanism defaulting to PLAIN.
# Fix: an unreadable file was silently ignored; a missing file is still
# treated as normal (first run), but other errors are now reported.
sub cmd_sasl_load {
	#my ($data, $server, $item) = @_;
	my $file = Irssi::get_irssi_dir()."/sasl.auth";
	open my $fh, "<", $file or do {
		# %! autoloads Errno; ENOENT just means nothing was saved yet
		Irssi::print("SASL: can't read $file: $!") unless $!{ENOENT};
		return;
	};
	%sasl_auth = ();
	while (<$fh>) {
		chomp;
		my ($net, $u, $p, $m) = split (/\t/, $_, 4);
		$m ||= "PLAIN";
		if(has_mech(uc $m)) {
			$sasl_auth{$net}{user} = $u;
			$sasl_auth{$net}{password} = $p;
			$sasl_auth{$net}{mech} = uc $m;
		}else{
			Irssi::print("SASL: unknown mechanism $m");
		}
	}
	close $fh;
	Irssi::print("SASL: auth loaded from $file");
}
#sub cmd_sasl_mechanisms {
# Irssi::print("SASL: mechanisms supported: " . join(" ", keys %mech));
#}
Irssi::signal_add_first('server connected', \&server_connected);
Irssi::signal_add('event cap', \&event_cap);
Irssi::signal_add('event authenticate', \&event_authenticate);
# 900 nick nick!user@host authcid :Logged in
# 903 nick :SASL auth successful
Irssi::signal_add('event 900', \&event_sasl_authed);
Irssi::signal_add('event 903', \&event_saslend);
Irssi::signal_add('event 904', \&event_saslend);
Irssi::signal_add('event 905', \&event_saslend);
Irssi::signal_add('event 906', \&event_saslend);
Irssi::signal_add('event 907', \&event_saslend);
Irssi::command_bind('sasl', \&cmd_sasl);
Irssi::command_bind('sasl load', \&cmd_sasl_load);
Irssi::command_bind('sasl save', \&cmd_sasl_save);
Irssi::command_bind('sasl set', \&cmd_sasl_set);
Irssi::command_bind('sasl show', \&cmd_sasl_show);
#rssi::command_bind('sasl mechanisms', \&cmd_sasl_mechanisms);
cmd_sasl_load();
# vim: ts=4
| grawity/hacks | irc/irssi/cap_authen_sasl.pl | Perl | mit | 7,455 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/Q713JNUf8G/asia. Olson data version 2016a
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Asia::Ashgabat;
$DateTime::TimeZone::Asia::Ashgabat::VERSION = '1.95';
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Asia::Ashgabat::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY, # utc_start
60694517188, # utc_end 1924-05-01 20:06:28 (Thu)
DateTime::TimeZone::NEG_INFINITY, # local_start
60694531200, # local_end 1924-05-02 00:00:00 (Fri)
14012,
0,
'LMT',
],
[
60694517188, # utc_start 1924-05-01 20:06:28 (Thu)
60888139200, # utc_end 1930-06-20 20:00:00 (Fri)
60694531588, # local_start 1924-05-02 00:06:28 (Fri)
60888153600, # local_end 1930-06-21 00:00:00 (Sat)
14400,
0,
'ASHT',
],
[
60888139200, # utc_start 1930-06-20 20:00:00 (Fri)
62490596400, # utc_end 1981-03-31 19:00:00 (Tue)
60888157200, # local_start 1930-06-21 01:00:00 (Sat)
62490614400, # local_end 1981-04-01 00:00:00 (Wed)
18000,
0,
'ASHT',
],
[
62490596400, # utc_start 1981-03-31 19:00:00 (Tue)
62506404000, # utc_end 1981-09-30 18:00:00 (Wed)
62490618000, # local_start 1981-04-01 01:00:00 (Wed)
62506425600, # local_end 1981-10-01 00:00:00 (Thu)
21600,
1,
'ASHST',
],
[
62506404000, # utc_start 1981-09-30 18:00:00 (Wed)
62522132400, # utc_end 1982-03-31 19:00:00 (Wed)
62506422000, # local_start 1981-09-30 23:00:00 (Wed)
62522150400, # local_end 1982-04-01 00:00:00 (Thu)
18000,
0,
'ASHT',
],
[
62522132400, # utc_start 1982-03-31 19:00:00 (Wed)
62537940000, # utc_end 1982-09-30 18:00:00 (Thu)
62522154000, # local_start 1982-04-01 01:00:00 (Thu)
62537961600, # local_end 1982-10-01 00:00:00 (Fri)
21600,
1,
'ASHST',
],
[
62537940000, # utc_start 1982-09-30 18:00:00 (Thu)
62553668400, # utc_end 1983-03-31 19:00:00 (Thu)
62537958000, # local_start 1982-09-30 23:00:00 (Thu)
62553686400, # local_end 1983-04-01 00:00:00 (Fri)
18000,
0,
'ASHT',
],
[
62553668400, # utc_start 1983-03-31 19:00:00 (Thu)
62569476000, # utc_end 1983-09-30 18:00:00 (Fri)
62553690000, # local_start 1983-04-01 01:00:00 (Fri)
62569497600, # local_end 1983-10-01 00:00:00 (Sat)
21600,
1,
'ASHST',
],
[
62569476000, # utc_start 1983-09-30 18:00:00 (Fri)
62585290800, # utc_end 1984-03-31 19:00:00 (Sat)
62569494000, # local_start 1983-09-30 23:00:00 (Fri)
62585308800, # local_end 1984-04-01 00:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62585290800, # utc_start 1984-03-31 19:00:00 (Sat)
62601022800, # utc_end 1984-09-29 21:00:00 (Sat)
62585312400, # local_start 1984-04-01 01:00:00 (Sun)
62601044400, # local_end 1984-09-30 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62601022800, # utc_start 1984-09-29 21:00:00 (Sat)
62616747600, # utc_end 1985-03-30 21:00:00 (Sat)
62601040800, # local_start 1984-09-30 02:00:00 (Sun)
62616765600, # local_end 1985-03-31 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62616747600, # utc_start 1985-03-30 21:00:00 (Sat)
62632472400, # utc_end 1985-09-28 21:00:00 (Sat)
62616769200, # local_start 1985-03-31 03:00:00 (Sun)
62632494000, # local_end 1985-09-29 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62632472400, # utc_start 1985-09-28 21:00:00 (Sat)
62648197200, # utc_end 1986-03-29 21:00:00 (Sat)
62632490400, # local_start 1985-09-29 02:00:00 (Sun)
62648215200, # local_end 1986-03-30 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62648197200, # utc_start 1986-03-29 21:00:00 (Sat)
62663922000, # utc_end 1986-09-27 21:00:00 (Sat)
62648218800, # local_start 1986-03-30 03:00:00 (Sun)
62663943600, # local_end 1986-09-28 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62663922000, # utc_start 1986-09-27 21:00:00 (Sat)
62679646800, # utc_end 1987-03-28 21:00:00 (Sat)
62663940000, # local_start 1986-09-28 02:00:00 (Sun)
62679664800, # local_end 1987-03-29 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62679646800, # utc_start 1987-03-28 21:00:00 (Sat)
62695371600, # utc_end 1987-09-26 21:00:00 (Sat)
62679668400, # local_start 1987-03-29 03:00:00 (Sun)
62695393200, # local_end 1987-09-27 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62695371600, # utc_start 1987-09-26 21:00:00 (Sat)
62711096400, # utc_end 1988-03-26 21:00:00 (Sat)
62695389600, # local_start 1987-09-27 02:00:00 (Sun)
62711114400, # local_end 1988-03-27 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62711096400, # utc_start 1988-03-26 21:00:00 (Sat)
62726821200, # utc_end 1988-09-24 21:00:00 (Sat)
62711118000, # local_start 1988-03-27 03:00:00 (Sun)
62726842800, # local_end 1988-09-25 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62726821200, # utc_start 1988-09-24 21:00:00 (Sat)
62742546000, # utc_end 1989-03-25 21:00:00 (Sat)
62726839200, # local_start 1988-09-25 02:00:00 (Sun)
62742564000, # local_end 1989-03-26 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62742546000, # utc_start 1989-03-25 21:00:00 (Sat)
62758270800, # utc_end 1989-09-23 21:00:00 (Sat)
62742567600, # local_start 1989-03-26 03:00:00 (Sun)
62758292400, # local_end 1989-09-24 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62758270800, # utc_start 1989-09-23 21:00:00 (Sat)
62773995600, # utc_end 1990-03-24 21:00:00 (Sat)
62758288800, # local_start 1989-09-24 02:00:00 (Sun)
62774013600, # local_end 1990-03-25 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62773995600, # utc_start 1990-03-24 21:00:00 (Sat)
62790325200, # utc_end 1990-09-29 21:00:00 (Sat)
62774017200, # local_start 1990-03-25 03:00:00 (Sun)
62790346800, # local_end 1990-09-30 03:00:00 (Sun)
21600,
1,
'ASHST',
],
[
62790325200, # utc_start 1990-09-29 21:00:00 (Sat)
62806050000, # utc_end 1991-03-30 21:00:00 (Sat)
62790343200, # local_start 1990-09-30 02:00:00 (Sun)
62806068000, # local_end 1991-03-31 02:00:00 (Sun)
18000,
0,
'ASHT',
],
[
62806050000, # utc_start 1991-03-30 21:00:00 (Sat)
62821778400, # utc_end 1991-09-28 22:00:00 (Sat)
62806068000, # local_start 1991-03-31 02:00:00 (Sun)
62821796400, # local_end 1991-09-29 03:00:00 (Sun)
18000,
1,
'ASHST',
],
[
62821778400, # utc_start 1991-09-28 22:00:00 (Sat)
62824190400, # utc_end 1991-10-26 20:00:00 (Sat)
62821792800, # local_start 1991-09-29 02:00:00 (Sun)
62824204800, # local_end 1991-10-27 00:00:00 (Sun)
14400,
0,
'ASHT',
],
[
62824190400, # utc_start 1991-10-26 20:00:00 (Sat)
62831455200, # utc_end 1992-01-18 22:00:00 (Sat)
62824204800, # local_start 1991-10-27 00:00:00 (Sun)
62831469600, # local_end 1992-01-19 02:00:00 (Sun)
14400,
0,
'TMT',
],
[
62831455200, # utc_start 1992-01-18 22:00:00 (Sat)
DateTime::TimeZone::INFINITY, # utc_end
62831473200, # local_start 1992-01-19 03:00:00 (Sun)
DateTime::TimeZone::INFINITY, # local_end
18000,
0,
'TMT',
],
];
# Olson/IANA tz database release this span table was generated from.
sub olson_version {'2016a'}
# Non-zero => the zone has DST transitions; presumably the value is the
# transition count used by the generator -- confirm against the code generator.
sub has_dst_changes {11}
# NOTE(review): appears to be the last year covered by the precomputed spans;
# confirm against DateTime::TimeZone internals (generated code).
sub _max_year {2026}
# Class::Singleton hook: build the singleton instance from the span table.
sub _new_instance {
    return shift->_init( @_, spans => $spans );
}
1;
| jkb78/extrajnm | local/lib/perl5/DateTime/TimeZone/Asia/Ashgabat.pm | Perl | mit | 7,405 |
% Autor: Jan Bartels, Patrick Steinhauer
% Datum: 22.03.2016
% transportiere(+AnzahlScheiben, +Von, +Hilfe, +Nach)
% Prints the sequence of moves solving the Towers of Hanoi puzzle for
% AnzahlScheiben discs, moving them from peg Von to peg Nach using the
% middle peg as intermediate storage.
%
% Fixes: write(.) was a syntax error (an unquoted "." is the clause
% terminator); it must be the quoted atom write('.'). Also removed the
% leftover debug output write('@ '), writeln(ObereScheiben).
%
% Base case: a single disc is moved directly.
transportiere(1, Von, _, Nach) :-
    write('Bringe eine Scheibe von '), write(Von), write(' nach '), write(Nach), write('.'), nl.
% Recursive case: park the upper N-1 discs on the middle peg, move the
% bottom disc, then move the parked discs on top of it.
transportiere(AnzahlScheiben, Anfang, Mitte, Ende) :-
    AnzahlScheiben > 1,
    ObereScheiben is AnzahlScheiben - 1,
    transportiere(ObereScheiben, Anfang, Ende, Mitte),
    transportiere(1, Anfang, Mitte, Ende),
    transportiere(ObereScheiben, Mitte, Anfang, Ende).
| Blackgen/HAW-IS | Aufgabe 1/Hanoi.pl | Perl | mit | 492 |
#!/usr/bin/perl
# CGI script: dump all environment variables as an HTML table.
#
# Fixes: the heredoc used the non-identifier delimiter "1" (invalid in
# modern Perl) -- replaced with a quoted delimiter; added strict/warnings
# and a lexical loop variable; env values (notably QUERY_STRING, which is
# attacker controlled) are now HTML-escaped to prevent reflected XSS.
use strict;
use warnings;

# Minimal HTML escaping; enough for text placed inside <td> cells.
sub html_escape {
    my ($text) = @_;
    $text =~ s/&/&amp;/g;
    $text =~ s/</&lt;/g;
    $text =~ s/>/&gt;/g;
    $text =~ s/"/&quot;/g;
    return $text;
}

print <<"HEADER";
Content-type:text/html\n\n
<html>
<body>
<table cellpadding=5 border=1>
<tr>
<th>ENV_VARIABLES</th>
<th>VALUE</th>
</tr>
HEADER
foreach my $name (sort keys %ENV) {
    print "<tr><td>", html_escape($name), "</td><td>", html_escape($ENV{$name}), "</td></tr>";
}
print "</table></body></html>";
| KarthikNayak/VTU | CSE/7thSem/web-lab/5/5a.pl | Perl | mit | 287 |
#!/usr/bin/env perl
#---------------------------------------------------------------------------------------------------
# This script will create a new gallery taking photos with their full resolution and scaling them
# down to an acceptable level for reasonably fast presentation per web browser.
#
# Arguments: <source dir> <target dir> [<opt>]
#   Any non-empty <opt> skips gallery (re)creation and only refreshes
#   thumbnails/images.
#---------------------------------------------------------------------------------------------------
use strict;
use warnings;

# Fix: use scalar element access $ARGV[i]; the original @ARGV[i] one-element
# slices happen to work but warn under 'use warnings'.
my $SRC = $ARGV[0];
my $TGT = $ARGV[1];
my $OPT = defined $ARGV[2] ? $ARGV[2] : '';  # default '' so 'eq' tests never warn

my $WIDTH    = 1000;                                 # width (px) of the scaled web images
my $HTTP     = "http://paushaus.dyndns.org/photos";  # gallery base URL
my $TEMPLATE = "/home/paus/www/photos/template";     # skeleton gallery to copy

my ($dir,$dir1,$dir2);
my $cmd;
my @f;

# The last two path components of the target are the part of the path that
# is visible through HTTP and double as the gallery title.
@f    = split('/',$TGT);
$dir1 = pop(@f);
$dir2 = pop(@f);
$dir  = "$dir2/$dir1";

# Make sure the per-photo orientation file exists
# (one "<file> [<ImageMagick options>]" entry per line).
if (-e "$SRC/orientation.txt") {
    printf "Configuration file for photo orientations exists.\n";
}
else {
    printf "Create configuration file for photo orientations.\n";
    $cmd  = "/bin/ls -1 $SRC | grep jpg > o.txt;";
    $cmd .= "sudo mv o.txt $SRC/orientation.txt";
    printf "$cmd\n";
    system($cmd);
}

# Unless an option was given, create the gallery skeleton (or refresh the
# templates when the target already exists).
if ($OPT eq "") {
    @f = split("/",$TGT); pop(@f);
    my $stub = join("/",@f);
    if (-d "$TGT") {
        printf "INFO: directory ($stub) already exists... update styles etc.\n";
        $cmd = "cp $TEMPLATE/index.html $TEMPLATE/buildgallery.php $TGT/";
        printf "UPDATE TEMPLATES: $cmd\n";
        system($cmd);
    }
    else {
        $cmd = "mkdir -p $stub";
        system("$cmd");
        # create copy of the template directory and set access rights correctly
        $cmd = "cp -r $TEMPLATE $TGT";
        printf "COPY TEMPLATE: $cmd\n";
        system($cmd);
        $cmd = "chmod 777 $TGT/gallery.xml $TGT/thumbs";
        printf "ADJUST ACCESS: $cmd\n";
        system($cmd);
    }
}

# Generate a thumbnail and a scaled image for every jpg in the source dir.
# Fix: lexical directory handle plus a proper guard -- the original used a
# bareword handle and kept running the loop after a failed opendir.
if (opendir(my $src_dh, $SRC)) {
    foreach my $file (grep(/\.jpg$/, readdir($src_dh))) {
        # Per-photo ImageMagick options (e.g. rotation) from orientation.txt.
        my $opt = `grep $file $SRC/orientation.txt`; chop $opt;
        @f = split(" ",$opt); shift(@f);  # drop the leading file name
        $opt = join(" ",@f);
        printf "jpg file: $file (options: $opt)\n";
        # make the new thumbnail
        if (-e "$TGT/thumbs/$file") {
            printf " Thumbnail already exists.\n";
        }
        else {
            $cmd = "convert $SRC/$file $opt -thumbnail 200 $TGT/thumbs/$file";
            printf " $cmd\n";
            system($cmd);
        }
        # make the new image
        if (-e "$TGT/images/$file") {
            printf " Picture already exists.\n";
        }
        else {
            $cmd = "convert $SRC/$file $opt -resize $WIDTH $TGT/images/$file";
            printf " $cmd\n";
            system($cmd);
        }
    }
    closedir($src_dh);
}
else {
    printf " cannot open $SRC\n";
}

# Stamp the gallery title into the freshly copied templates (first run only).
if ($OPT eq '') {
    printf "Update title for the gallery.\n";
    $cmd = "cd $TGT; repstr \"XX--TITLE--XX\" \"$dir\" buildgallery.php gallery.xml index.html";
    system($cmd);
    $cmd = "cd $TGT; chmod 777 gallery.xml";
    system($cmd);
}

# Hit buildgallery.php once over HTTP so the gallery XML gets generated.
$cmd = "wget $HTTP/$dir/buildgallery.php -O test.txt; rm test.txt";
printf "GENERATE GALLERY: $cmd\n";
system($cmd);

exit 0;
| cpausmit/Config | bin/photos/createGallery.pl | Perl | mit | 2,890 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Utils::SchemaConversion - Utility module for Vega schema conversion script
=head1 SYNOPSIS
my $serverroot = '/path/to/ensembl';
my $conversion =
Bio::EnsEMBL::Utils::SchemaConversion->new($serverroot);
# parse common options
$conversion->conv_usage->parse_common_options;
# convert from schema 19 to 20+
$conversion->do_conversion()
=head1 DESCRIPTION
This module is a helper module for database conversion, for
both vega-vega and ensembl-vega schemas. It provides a wrapper
around SeqStoreConverter::BasicConverter and the species specific
methods therein. Also provides access to helper functions in
Bio::EnsEMBL::Utils::ConversionSupport
=head1 METHODS
=cut
package Bio::EnsEMBL::Utils::SchemaConversion;
use strict;
use warnings;
use Bio::EnsEMBL::Utils::ConversionSupport;
use Data::Dumper;
=head2 new
Example : $conversion->Bio::EnsEMBL::Utils::SchemaConversion->new($serverroot);
Description : Constructor, including an instance of a Bio::EnsEMBL::Utils::ConversionSupport
object. Parses input file and checks input with user
Return type : Bio::EnsEMBL::Utils::SchemaConversion object
Exceptions : thrown if $Siteroot not passed over
Caller : $Siteroot/utils/vega_schema_conversion
=cut
sub new {
    my $class = shift;
    my $support = shift;    # serverroot path, handed through to ConversionSupport
    my $self = {};
    bless ($self,$class);
    # Embed a ConversionSupport helper; all option handling and logging is
    # delegated to it via the conv_support() accessor.
    $self->{config} = Bio::EnsEMBL::Utils::ConversionSupport->new($support);
    $self->conv_support->parse_common_options;
    # Conversion-specific command line options (Getopt::Long specs).
    $self->conv_support->parse_extra_options('do_vega_sc=s',
                                             'do_ens_sc=s',
                                             'source_db=s',
                                             'core_sql=s',
                                             'vega_sql=s',
                                             'patch_sql=s',
                                             'force=s',
                                             'do_features=s');
    #check input and show help
    # conv_usage() prints usage and exits, so each failed check is fatal.
    $self->conv_usage() if ($self->conv_support->param("help"));
    $self->conv_usage("configuration file needed") unless ($self->conv_support->param("conffile"));
    $self->conv_usage("password for database access needed") unless ($self->conv_support->param("pass"));
    # Exactly one of the two conversion modes must be requested.
    $self->conv_usage("can only do conversion to ensembl OR Vega, not both") if ($self->conv_support->param('do_vega_sc') && $self->conv_support->param('do_ens_sc'));
    $self->conv_usage("You need to do vega->veg or ensembl->vega conversion") unless ($self->conv_support->param('do_vega_sc') || $self->conv_support->param('do_ens_sc'));
    # ask user to confirm parameters to proceed
    $self->conv_support->allowed_params('conffile',
                                        'do_vega_sc',
                                        'do_ens_sc',
                                        'host',
                                        'port',
                                        'user',
                                        'pass',
                                        'source_db',
                                        'dbname',
                                        'force',
                                        'do_features',
                                        'verbose',
                                        'logpath',
                                        'logfile',
                                        'core_sql',
                                        'vega_sql',
                                        'patch_sql');
    $self->conv_support->confirm_params;
    return $self;
}
=head2 conv_support
Example : $conversion->conv_support;
Description : Provides access to Bio::EnsEMBL::Utils::ConversionSupport methods
Return type : Bio::EnsEMBL::Utils::ConversionSuppor object
Exceptions : none
Caller : general
=cut
# Accessor for the embedded Bio::EnsEMBL::Utils::ConversionSupport helper
# created in new(); gives callers access to option parsing/logging utilities.
sub conv_support {
    my ($self) = @_;
    return $self->{config};
}
=head2 conv_obj
Example : $conversion->conv_obj;
Description : Provides access to SeqStoreConverter::BasicConverter methods
Return type : SeqStoreConverter::BasicConverter object
Exceptions : none
Caller : general
=cut
# Accessor for the species-specific SeqStoreConverter object; populated by
# choose_conversion_type(), so this returns undef before that method has run.
sub conv_obj {
    my ($self) = @_;
    return $self->{'converter_object'};
}
=head2 species_alias
Example : $self->species_alias
Description : examines name of source database to determine which conversion module to use
Return type : string
Exceptions : die if wrong species name used
Caller : $self
=cut
# Map a source database name to the species part of the conversion module
# name. Patterns are tried in the original order; dies (caught nowhere -
# intentionally fatal) when no pattern matches.
sub species_alias {
    my ($self, $name) = @_;
    my @known = (
        [ qr/canis/ => 'CanisFamiliaris' ],
        [ qr/homo/  => 'HomoSapiens'     ],
        [ qr/mus/   => 'MusMusculus'     ],
        [ qr/danio/ => 'DanioRerio'      ],
        ##hack - should use own modules
        [ qr/sus/   => 'HomoSapiens'     ],
    );
    foreach my $entry (@known) {
        return $entry->[1] if $name =~ $entry->[0];
    }
    die "invalid name of source database, please check configuration file";
}
=head2 choose_conversion_type
Example : $conversion->choose_conversion_type
Description : compares conversion type (ensembl or vega) and species type with
available modules and chooses that to use for the conversion. Stores
a converter object within the caller
Return type : none
Exceptions : none
Caller : $Siteroot/utils/vega_schema_conversion
=cut
sub choose_conversion_type {
    my $self = shift;
    my $converter;
    my $species;
    # Derive the species module name (e.g. 'HomoSapiens') from the source db name.
    $species = $self->species_alias($self->conv_support->param('source_db'));
    if ($self->conv_support->param('do_vega_sc')) {
        # Vega conversion: try the vega-specific subclass first, falling back
        # to BasicConverter when no such module exists.
        $species = "vega::".$species;
        eval "require SeqStoreConverter::$species"; ## no critic
        if($@) {
            # NOTE(review): the stray "\" before " for" below is a harmless
            # escaped space, but looks unintended.
            warn("Could not require conversion module SeqStoreConverter::$species\ for vega conversion\n" .
                 "Using SeqStoreConverter::BasicConverter instead:\n$@");
            require SeqStoreConverter::BasicConverter;
            $species = "BasicConverter";
        }
        else {
            warn "Using conversion module SeqStoreConverter::$species for vega conversion\n";
        }
    }
    else {
        # Ensembl conversion: same require-or-fallback dance, without the
        # vega:: namespace prefix.
        eval "require SeqStoreConverter::$species"; ## no critic
        if($@) {
            warn("Could not require conversion module SeqStoreConverter::$species for Ensembl conversion\n" .
                 "Using SeqStoreConverter::BasicConverter instead:\n$@");
            require SeqStoreConverter::BasicConverter;
            $species = "BasicConverter";
        }
        else {
            warn "Using conversion module SeqStoreConverter::$species for Ensembl conversion\n";
        }
        # No vega-specific tables are created for an Ensembl conversion.
        $self->conv_support->param('vega_sql',0);
    }
    # Instantiate the chosen converter (positional constructor arguments).
    $converter = "SeqStoreConverter::$species"->new
        ( $self->conv_support->param('user'),
          $self->conv_support->param('pass'),
          $self->conv_support->param('host').':'.$self->conv_support->param('port'),
          $self->conv_support->param('source_db'),
          $self->conv_support->param('dbname'),
          $self->conv_support->param('core_sql'),
          $self->conv_support->param('vega_sql'),
          $self->conv_support->param('force'),
          $self->conv_support->param('verbose'),
          '',
        );
    # Cache it for later access via conv_obj().
    $self->{'converter_object'} = $converter;
}
=head2 do_conversion
Example : $conversion->do_conversion
Description : does the database conversion
Return type : none
Exceptions : none
Caller : $Siteroot/utils/vega_schema_conversion
=cut
sub do_conversion {
    my $self= shift;
    $self->conv_obj->debug( "\n\n*** converting " . $self->conv_obj->source . " to " .
                            $self->conv_obj->target() . " ***");
    # The order of these steps matters: later steps read tables populated by
    # earlier ones (e.g. genes need seq_regions/assembly in place first).
    $self->conv_obj->transfer_meta();
    $self->conv_obj->create_coord_systems();
    $self->conv_obj->create_seq_regions();
    $self->conv_obj->create_assembly();
    $self->conv_obj->create_attribs();
    $self->conv_obj->set_top_level();
    $self->conv_obj->transfer_dna();
    $self->conv_obj->back_patch_schema();
    $self->conv_obj->transfer_genes();
    $self->conv_obj->transfer_prediction_transcripts();
    # Align features are bulky, so they are only copied on request.
    if ($self->conv_support->param('do_features')) {
        $self->conv_obj->transfer_features();
    }
    #use this for both ensembl and vega for now,
    #but might need changing when vega gets eg transcript modified dates
    $self->conv_obj->transfer_vega_stable_ids();
    $self->conv_obj->copy_other_tables();
    $self->conv_obj->copy_repeat_consensus();
    $self->conv_obj->create_meta_coord();
    # Extra steps that only apply to vega-to-vega conversions.
    if ($self->conv_support->param('do_vega_sc')) {
        $self->conv_obj->copy_other_vega_tables();
        $self->conv_obj->update_clone_info();
        $self->conv_obj->remove_supercontigs();
        $self->conv_obj->copy_internal_clone_names();
        $self->conv_obj->copy_assembly_exception;
    }
}
=head2 make_schema_up_to_date
Example : $conversion->make_schema_up_to_date
Description : patches schema to latest version
Return type : none
Exceptions : none
Caller : $conversion
=cut
# Patch the converted target database to the latest schema version by piping
# the patch SQL file through the mysql client.
#
# Fix: the return value of system() was ignored, so a failed patch went
# unnoticed and left the target database half-converted; it now dies with
# the mysql exit code.
# NOTE(review): the password appears on the mysql command line and is thus
# visible in the process list; consider --defaults-extra-file. Left as-is.
sub make_schema_up_to_date {
    my $self = shift;
    $self->conv_obj->debug ("\nPatching schema to latest version\n");
    my $user   = $self->conv_obj->user;
    my $pass   = $self->conv_obj->password;
    my $port   = $self->conv_obj->port;
    my $host   = $self->conv_obj->host;
    my $target = $self->conv_obj->target;
    my $patch_schema = $self->conv_support->param('patch_sql');
    my $cmd = "/usr/local/mysql/bin/mysql -u $user -p$pass -P $port -h $host $target < $patch_schema";
    system ($cmd) == 0
        or die "Schema patching with $patch_schema failed (exit code " . ($? >> 8) . ")\n";
}
=head2 conv_usage
Example : $conversion->conv_usage("message")
Description : prints usage information and exits
Return type : none
Exceptions : none
Caller : $Siteroot/utils/vega_schema_conversion
=cut
# Print usage information (optionally preceded by an error message) to STDERR
# and exit. Invoked for --help and for every option validation failure.
#
# Fixes to the user-facing help text: "priveleges" -> "privileges", and the
# usage line referred to the wrong script ("conversion_densities.pl"; the
# documented caller is utils/vega_schema_conversion).
sub conv_usage {
    my $self = shift;
    my $msg = shift;

    print STDERR "\nMSG: $msg\n" if($msg);
    print STDERR <<EOF;
** Source and target databases must be on the same mysql instance

usage: ./vega_schema_conversion <options>

options: --conf <conf_file>  configuration file (uses conf/Conversion.ini by default):
         fields:
           do_vega_sc (do vega conversion: 0 or 1)
           do_ens_sc (do ensembl conversion: 0 or 1)
           user (a mysql db user with read/write privileges)
           host (eg ecs3f)
           port (eg 3310)
           source_db (schema 19 source database)
           dbname (schema 20+ target database)
           force (overwrite existing target database: 0 or 1)
           verbose (print out debug statements: 0 or 1)
           logpath (location of log file)
           do_features (transfer dna- and protein-align features, for debugging: 0 or 1)
           core_sql (location of ensembl schema creation script: ensembl/sql/table.sql)
           vega_sql (location of creation script for additional vega tables: ensembl/sql/vega_specific_tables.sql)
           patch_sql (location of schema patching script: ensembl/sql/vega_latest_schema.sql)

         --log name of log_file
         --help display this message
EOF
    exit;
}
1;
| james-monkeyshines/ensembl | modules/Bio/EnsEMBL/Utils/SchemaConversion.pm | Perl | apache-2.0 | 11,670 |
## Configuration file for the Epo Low Coverage pipeline
package Bio::EnsEMBL::Compara::PipeConfig::EpoLowCoverage_conf;
use strict;
use warnings;
use base ('Bio::EnsEMBL::Compara::PipeConfig::ComparaGeneric_conf'); # All Hive databases configuration files should inherit from HiveGeneric, directly or indirectly
# Default configuration for the 35-way EPO low coverage pipeline. Every value
# here can be overridden on the init_pipeline.pl command line; some entries
# (e.g. the *_mlss_id and work_dir self-references) MUST be supplied there.
sub default_options {
    my ($self) = @_;
    return {
        'ensembl_cvs_root_dir' => $ENV{'HOME'}.'/src/ensembl_main/',

        'release'       => 63,
        'prev_release'  => 62,
        'release_suffix'=> '', # set it to '' for the actual release
        'pipeline_name' => 'LOW35_'.$self->o('release').$self->o('release_suffix'), # name used by the beekeeper to prefix job names on the farm

        #location of new pairwise mlss if not in the pairwise_default_location eg:
        #'pairwise_exception_location' => { 517 => 'mysql://ensro@compara4/kb3_hsap_nleu_lastz_62'},
        'pairwise_exception_location' => { 521 => 'mysql://ensro@compara1/kb3_hsap_mluc_lastz_63'},

        # The hive/production database this pipeline runs in.
        'pipeline_db' => {
            -host   => 'compara1',
            -port   => 3306,
            -user   => 'ensadmin',
            -pass   => $self->o('password'),
            -dbname => $ENV{USER}.'_epo_35way_'.$self->o('release').$self->o('release_suffix'),
        },

        #Location of compara db containing most pairwise mlss ie previous compara
        'live_compara_db' => {
            -host   => 'ens-livemirror',
            -port   => 3306,
            -user   => 'ensro',
            -pass   => '',
            -dbname => 'ensembl_compara_62',
            -driver => 'mysql',
        },

        #Location of compara db containing the high coverage alignments
        'epo_db' => {
            -host   => 'compara3',
            -port   => 3306,
            -user   => 'ensro',
            -pass   => '',
            -dbname => 'sf5_63compara_ortheus12way',
            -driver => 'mysql',
        },

        # Compara master, used to populate the new database.
        master_db => {
            -host   => 'compara1',
            -port   => 3306,
            -user   => 'ensadmin',
            -pass   => $self->o('password'),
            -dbname => 'sf5_ensembl_compara_master',
            -driver => 'mysql',
        },

        'populate_new_database_program' => $self->o('ensembl_cvs_root_dir')."/ensembl-compara/scripts/pipeline/populate_new_database.pl",

        # Registry servers searched (in order) for the core databases.
        'reg1' => {
            -host       => 'ens-staging1',
            -port       => 3306,
            -user       => 'ensro',
            -pass       => '',
            -db_version => $self->o('release'),
        },
        'reg2' => {
            -host       => 'ens-staging2',
            -port       => 3306,
            -user       => 'ensro',
            -pass       => '',
            -db_version => $self->o('release'),
        },
        'live_db' => {
            -host       => 'ens-livemirror',
            -port       => 3306,
            -user       => 'ensro',
            -pass       => '',
            -db_version => $self->o('prev_release'),
        },

        # Self-referential: these four mlss ids must be given on the command line.
        'low_epo_mlss_id'  => $self->o('low_epo_mlss_id'),  #mlss_id for low coverage epo alignment
        'high_epo_mlss_id' => $self->o('high_epo_mlss_id'), #mlss_id for high coverage epo alignment
        'ce_mlss_id'       => $self->o('ce_mlss_id'),       #mlss_id for low coverage constrained elements
        'cs_mlss_id'       => $self->o('cs_mlss_id'),       #mlss_id for low coverage conservation scores

        'master_db_name' => 'sf5_ensembl_compara_master',
        'ref_species'    => 'homo_sapiens', #ref species for pairwise alignments
        'max_block_size' => 1000000,        #max size of alignment before splitting

        'pairwise_default_location' => $self->dbconn_2_url('live_compara_db'), #default location for pairwise alignments

        # GERP conservation scoring setup.
        'gerp_version'      => '2.1', #gerp program version
        'gerp_program_file' => '/software/ensembl/compara/gerp/GERPv2.1', #gerp program
        'gerp_window_sizes' => '[1,10,100,500]', #gerp window sizes
        'no_gerp_conservation_scores' => 0, #Not used in productions but is a valid argument

        'species_tree_file' => $self->o('ensembl_cvs_root_dir').'/ensembl-compara/scripts/pipeline/species_tree_blength.nh', #location of full species tree, will be pruned
        'newick_format'     => 'simple',
        'work_dir'          => $self->o('work_dir'), #location to put pruned tree file
    };
}
# No pipeline-specific tables are needed here: return a (shallow) copy of the
# inherited database and hive table creation commands.
sub pipeline_create_commands {
    my $self = shift;
    my @commands = @{ $self->SUPER::pipeline_create_commands };
    return \@commands;
}
# Parameter values visible to all analyses; individual analyses and jobs may
# override them via their own parameters{} or input_id{}.
sub pipeline_wide_parameters {
    my $self = shift;
    # 'pipeline_name' is essential for the beekeeper to work correctly.
    return { 'pipeline_name' => $self->o('pipeline_name') };
}
# LSF resource classes, keyed by the numeric class id referenced from the
# analyses in pipeline_analyses().
sub resource_classes {
    my ($self) = @_;
    my %classes = (
        0 => { -desc => 'default, 8h', 'LSF' => '' },
        1 => { -desc => 'urgent',      'LSF' => '-q yesterday' },
        2 => { -desc => 'compara1',    'LSF' => '-R"select[compara1<800] rusage[compara1=10:duration=3]"' },
    );
    return \%classes;
}
# Definition of every analysis in the pipeline, their parameters and the
# dataflow/funnel wiring between them.
#
# Fix: removed a stray debug statement (print "pipeline_analyses\n";) that
# wrote to stdout every time the config was loaded, plus a stale
# commented-out variable.
sub pipeline_analyses {
    my ($self) = @_;
    return [
        # ---------------------------------------------[Turn all tables except 'genome_db' to InnoDB]---------------------------------------------
        { -logic_name => 'innodbise_table_factory',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::JobFactory',
          -parameters => {
              'inputquery'      => "SELECT table_name FROM information_schema.tables WHERE table_schema ='".$self->o('pipeline_db','-dbname')."' AND table_name!='genome_db' AND engine='MyISAM' ",
              'fan_branch_code' => 2,
          },
          -input_ids => [{}],
          -flow_into => {
              2 => [ 'innodbise_table' ],
              1 => [ 'populate_new_database' ],
          },
        },
        { -logic_name => 'innodbise_table',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::SqlCmd',
          -parameters => {
              'sql' => "ALTER TABLE #table_name# ENGINE='InnoDB'",
          },
          -hive_capacity => 10,
        },
        # ---------------------------------------------[Run poplulate_new_database.pl script ]---------------------------------------------------
        { -logic_name => 'populate_new_database',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
          -parameters => {
              'program'    => $self->o('populate_new_database_program'),
              'master'     => $self->o('master_db_name'),
              'mlss_id'    => $self->o('low_epo_mlss_id'),
              'ce_mlss_id' => $self->o('ce_mlss_id'),
              'cs_mlss_id' => $self->o('cs_mlss_id'),
              'cmd'        => "#program# --master " . $self->dbconn_2_url('master_db') . " --new " . $self->dbconn_2_url('pipeline_db') . " --mlss #mlss_id# --mlss #ce_mlss_id# --mlss #cs_mlss_id# ",
          },
          -wait_for  => [ 'innodbise_table_factory', 'innodbise_table' ],
          -flow_into => {
              1 => [ 'load_genomedb_factory' ],
          },
        },
        # ---------------------------------------------[Load GenomeDB entries from master+cores]--------------------------------------------------
        { -logic_name => 'load_genomedb_factory',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::LoadGenomedbFactory',
          -parameters => {
              'compara_db' => $self->o('master_db'), # that's where genome_db_ids come from
              'mlss_id'    => $self->o('low_epo_mlss_id'),
          },
          -wait_for  => [ 'innodbise_table_factory', 'innodbise_table' ],
          -flow_into => {
              2 => ['load_genomedb' ],
              1 => [ 'set_internal_ids' ],
          },
        },
        { -logic_name => 'load_genomedb',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::LoadOneGenomeDB',
          -parameters => {
              'registry_dbs' => [ $self->o('reg1'), $self->o('reg2'), $self->o('live_db')],
              # 'registry_dbs' => [ $self->o('live_db'), $self->o('reg1'), $self->o('reg2')],
          },
          -hive_capacity => 1, # they are all short jobs, no point doing them in parallel
        },
        # ------------------------------------------------------[Set internal ids ]---------------------------------------------------------------
        # Offset the auto_increment counters so low-coverage rows cannot clash
        # with the imported high-coverage ids.
        { -logic_name => 'set_internal_ids',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::SqlCmd',
          -parameters => {
              'low_epo_mlss_id' => $self->o('low_epo_mlss_id'),
              'sql' => [
                  'ALTER TABLE genomic_align_block AUTO_INCREMENT=#expr(($low_epo_mlss_id * 10**10) + 1)expr#',
                  'ALTER TABLE genomic_align AUTO_INCREMENT=#expr(($low_epo_mlss_id * 10**10) + 1)expr#',
                  'ALTER TABLE genomic_align_group AUTO_INCREMENT=#expr(($low_epo_mlss_id * 10**10) + 1)expr#',
                  'ALTER TABLE genomic_align_tree AUTO_INCREMENT=#expr(($low_epo_mlss_id * 10**10) + 1)expr#',
              ],
          },
          -wait_for  => [ 'load_genomedb' ], # have to wait until genome_db table has been populated
          -flow_into => {
              1 => [ 'ImportAlignment' , 'make_species_tree', 'CreateDefaultPairwiseMlss'],
          },
        },
        # -------------------------------------------------------------[Load species tree]--------------------------------------------------------
        { -logic_name => 'make_species_tree',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::MakeSpeciesTree',
          -parameters => { },
          -input_ids  => [
              {'blength_tree_file' => $self->o('species_tree_file'), 'newick_format' => 'simple' }, #species_tree
              {'newick_format' => 'njtree' }, #taxon_tree
          ],
          -hive_capacity => -1, # to allow for parallelization
          -flow_into => {
              3 => { 'mysql:////meta' => { 'meta_key' => 'taxon_tree', 'meta_value' => '#species_tree_string#' } },
              4 => { 'mysql:////meta' => { 'meta_key' => 'tree_string', 'meta_value' => '#species_tree_string#' } },
          },
        },
        # -----------------------------------[Create a list of pairwise mlss found in the default compara database]-------------------------------
        { -logic_name => 'CreateDefaultPairwiseMlss',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::EpoLowCoverage::CreateDefaultPairwiseMlss',
          -parameters => {
              'new_method_link_species_set_id'  => $self->o('low_epo_mlss_id'),
              'base_method_link_species_set_id' => $self->o('high_epo_mlss_id'),
              'pairwise_default_location'       => $self->o('pairwise_default_location'),
              'base_location'                   => $self->dbconn_2_url('epo_db'),
              'reference_species'               => $self->o('ref_species'),
              'fan_branch_code'                 => 3,
          },
          -flow_into => {
              3 => [ 'mysql:////meta' ],
          }
        },
        # ------------------------------------------------[Import the high coverage alignments]---------------------------------------------------
        { -logic_name => 'ImportAlignment',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::EpoLowCoverage::ImportAlignment',
          -parameters => {
              'method_link_species_set_id' => $self->o('high_epo_mlss_id'),
              'from_db_url'                => $self->dbconn_2_url('epo_db'),
          },
          -wait_for  => [ 'CreateDefaultPairwiseMlss', 'make_species_tree'],
          -flow_into => {
              1 => [ 'create_low_coverage_genome_jobs' ],
          },
        },
        # ------------------------------------------------------[Low coverage alignment]----------------------------------------------------------
        { -logic_name => 'create_low_coverage_genome_jobs',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::JobFactory',
          -parameters => {
              'inputquery'      => 'SELECT genomic_align_block_id FROM genomic_align ga LEFT JOIN dnafrag USING (dnafrag_id) WHERE method_link_species_set_id=' . $self->o('high_epo_mlss_id') . ' AND genome_db_id <> 63 GROUP BY genomic_align_block_id',
              'fan_branch_code' => 2,
          },
          -flow_into => {
              1 => [ 'delete_alignment' ],
              2 => [ 'LowCoverageGenomeAlignment' ],
          }
        },
        { -logic_name => 'LowCoverageGenomeAlignment',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::EpoLowCoverage::LowCoverageGenomeAlignment',
          -parameters => {
              'max_block_size'               => $self->o('max_block_size'),
              'method_link_species_set_id'   => $self->o('low_epo_mlss_id'),
              'reference_species'            => $self->o('ref_species'),
              'pairwise_exception_location'  => $self->o('pairwise_exception_location'),
              'pairwise_default_location'    => $self->o('pairwise_default_location'),
          },
          -batch_size    => 5,
          -hive_capacity => 30,
          -flow_into => {
              2 => [ 'Gerp' ],
          },
        },
        # ---------------------------------------------------------------[Gerp]-------------------------------------------------------------------
        { -logic_name      => 'Gerp',
          -module          => 'Bio::EnsEMBL::Compara::RunnableDB::GenomicAlignBlock::Gerp',
          -program_version => $self->o('gerp_version'),
          -program_file    => $self->o('gerp_program_file'),
          -parameters      => {'window_sizes' => $self->o('gerp_window_sizes') },
          -hive_capacity   => 600,
        },
        # ---------------------------------------------------[Delete high coverage alignment]-----------------------------------------------------
        { -logic_name => 'delete_alignment',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::SqlCmd',
          -parameters => {
              'sql' => [
                  'DELETE genomic_align_tree FROM genomic_align_tree LEFT JOIN genomic_align_group USING (node_id) LEFT JOIN genomic_align USING (genomic_align_id) WHERE method_link_species_set_id=' . $self->o('high_epo_mlss_id'),
                  'DELETE genomic_align_group FROM genomic_align_group LEFT JOIN genomic_align using (genomic_align_id) WHERE method_link_species_set_id=' . $self->o('high_epo_mlss_id'),
                  'DELETE FROM genomic_align WHERE method_link_species_set_id=' . $self->o('high_epo_mlss_id'),
                  'DELETE FROM genomic_align_block WHERE method_link_species_set_id=' . $self->o('high_epo_mlss_id'),
              ],
          },
          #-input_ids => [{}],
          -wait_for  => [ 'LowCoverageGenomeAlignment', 'Gerp' ],
          -flow_into => {
              1 => [ 'UpdateMaxAlignmentLength' ],
          },
        },
        # ---------------------------------------------------[Update the max_align data in meta]--------------------------------------------------
        { -logic_name => 'UpdateMaxAlignmentLength',
          -module     => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::UpdateMaxAlignmentLength',
          -flow_into => {
              1 => [ 'create_neighbour_nodes_jobs_alignment' ],
          },
        },
        # --------------------------------------[Populate the left and right node_id of the genomic_align_tree table]-----------------------------
        { -logic_name => 'create_neighbour_nodes_jobs_alignment',
          -module     => 'Bio::EnsEMBL::Hive::RunnableDB::JobFactory',
          -parameters => {
              'inputquery'      => 'SELECT root_id FROM genomic_align_tree WHERE parent_id = 0',
              'fan_branch_code' => 2,
          },
          -flow_into => {
              1 => [ 'ConservationScoreHealthCheck' ],
              2 => [ 'SetNeighbourNodes' ],
          }
        },
        { -logic_name => 'SetNeighbourNodes',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::EpoLowCoverage::SetNeighbourNodes',
          -parameters => {
              'method_link_species_set_id' => $self->o('low_epo_mlss_id')
          },
          -batch_size    => 10,
          -hive_capacity => 15,
        },
        # -----------------------------------------------------------[Run healthcheck]------------------------------------------------------------
        { -logic_name => 'ConservationScoreHealthCheck',
          -module     => 'Bio::EnsEMBL::Compara::RunnableDB::HealthCheck',
          -wait_for   => [ 'SetNeighbourNodes' ],
          -input_ids  => [
              {'test'   => 'conservation_jobs',
               'params' => {'logic_name'=>'Gerp','method_link_type'=>'EPO_LOW_COVERAGE'},
              },
              {'test'   => 'conservation_scores',
               'params' => {'method_link_species_set_id'=>$self->o('cs_mlss_id')},
              },
          ],
        },
    ];
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-compara/modules/Bio/EnsEMBL/Compara/PipeConfig/EpoLowCoverage_conf.pm | Perl | apache-2.0 | 15,887 |
package AcdWideWeb::Main;
use Mojo::Base 'Mojolicious::Controller';
# Render the default welcome page.
#
# Renders this action's template (example/welcome.html.ep by Mojolicious
# convention) with a greeting message placed in the stash.
sub welcome {
    my $self = shift;

    # Render template "example/welcome.html.ep" with message.
    # Fixed: the original ended the render() call with "),", which is not a
    # valid statement terminator; it must be ");".
    $self->render(
        message => 'Welcome to the Mojolicious real-time web framework!');
}
1;
| ivanoff/ACDwide | opt/ACDwide/web/lib/AcdWideWeb/Main.pm | Perl | apache-2.0 | 257 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveLincRNAFinder -
=head1 SYNOPSIS
Find RNAseq models that don't overlap with protein coding (models) predictions and store them as lincRNA candidates (rnaseq)
=head1 DESCRIPTION
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveLincRNAFinder;
use warnings;
use strict;
use Bio::EnsEMBL::Analysis;
use Bio::EnsEMBL::Analysis::Runnable::lincRNAFinder;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils qw(id coord_string lies_inside_of_slice);
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::GeneUtils;
use Bio::EnsEMBL::Analysis::Tools::LincRNA qw(get_genes_of_biotypes_by_db_hash_ref) ;
use parent ('Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBaseRunnableDB');
=head2 new
Arg [1] : Bio::EnsEMBL::Analysis::RunnableDB::lincRNAFinder
Function : instantiates a lincRNAFinder object and reads and checks the config
file
Returntype: Bio::EnsEMBL::Analysis::RunnableDB::lincRNAFinder
Exceptions:
Example :
=cut
# Fetch input genes and build the lincRNAFinder runnable.
#
# Reads cDNA (set 1) and protein-coding (set 2) genes from the configured
# databases, flattens each multi-transcript gene into single-transcript
# genes, and hands both sets to a lincRNAFinder runnable stored on $self.
# No return value; the runnable is registered via $self->runnable().
sub fetch_input {
my ($self) = @_;
# This call will set the config file parameters. Note this will set REFGB (which overrides the
# value in $self->db and OUTDB
$self->create_analysis;
# Open the output database connection now so write_output() can reuse it.
my $dba = $self->hrdb_get_dba( $self->param('output_db') );
$self->hrdb_set_con( $dba, 'output_db' );
# get cdnas and convert them to single transcript genes. The convert_to_single_transcript_gene located: ensembl-analysis/modules/Bio/EnsEMBL/Analysis/Tools/GeneBuildUtils/GeneUtils.pm
my $new_cdna = $self->get_genes_of_biotypes_by_db_hash_ref( $self->NEW_SET_1_CDNA );
my @single_transcript_cdnas = map { @{ convert_to_single_transcript_gene($_) } } @$new_cdna;
# get protein_coding genes and convert them to single transcript genes
my $new_set_prot = $self->get_genes_of_biotypes_by_db_hash_ref( $self->NEW_SET_2_PROT );
my @single_trans_pc = map { @{ convert_to_single_transcript_gene($_) } } @$new_set_prot;
# create runnable
my $runnable = Bio::EnsEMBL::Analysis::Runnable::lincRNAFinder->new(
-query => $self->query,
-analysis => $self->analysis,
);
# add hash-keys and hash-values directly to the $runnable hashref. quicker than using constructors...
$runnable->set_1_cdna_genes( \@single_transcript_cdnas );
$runnable->set_2_prot_genes( \@single_trans_pc );
$runnable->ignore_strand( $self->CDNA_CODING_GENE_CLUSTER_IGNORE_STRAND ); # NOTE(review): original comment says this flag is not currently working and needs to change -- confirm before relying on it
$runnable->find_single_exon_candidates($self->FIND_SINGLE_EXON_LINCRNA_CANDIDATES );
$runnable->maximum_translation_length_ratio($self->MAXIMUM_TRANSLATION_LENGTH_RATIO );
$runnable->max_translations_stored_per_gene($self->MAX_TRANSLATIONS_PER_GENE );
$self->runnable($runnable);
}
# Store each output gene in the output database.
#
# For every gene produced by the runnable: verify it has transcripts,
# strip database-specific adaptors/dbIDs, attach this module's analysis,
# set the configured output biotype, and store it via the GeneAdaptor.
# Throws if a gene has no transcripts.
sub write_output {
my ($self) = @_;
my $adaptor = $self->hrdb_get_con('output_db')->get_GeneAdaptor;
print "Final output is: \n";
# String concatenation puts the arrayref deref in scalar context, i.e. the gene count.
print "have " . @{ $self->output } . " genes to write\n";
my $analysis = $self->analysis;
GENE: foreach my $gene ( @{ $self->output } ) {
if ( !defined $gene->get_all_Transcripts ) {
$self->throw(" gene does not have any transcripts ....\n");
}
# NOTE(review): empty_Gene and attach_Analysis_to_Gene are not in any explicit
# import list above; presumably exported by default from GeneUtils -- confirm.
empty_Gene($gene);
attach_Analysis_to_Gene($gene, $analysis);
$gene->biotype( $self->OUTPUT_BIOTYPE );
$adaptor->store($gene);
}
}
#######
#CHECKS
#######
=head2
Arg [1] : Bio::EnsEMBL::Analysis::RunnableDB::GeneBuilder
Arg [2] : Varies, tends to be boolean, a string, a arrayref or a hashref
Function : Getter/Setter for config variables
Returntype: again varies
Exceptions:
Example :
=cut
# Config accessors: each gets/sets one pipeline parameter via $self->param.
# Called with an argument, the value is stored; either way the current value
# is returned. All accessors now use a defined() test: the original mixed
# plain truth tests (NEW_SET_1_CDNA, NEW_SET_2_PROT, OUTPUT_DB) with
# defined() tests, which silently ignored defined-but-false values such as
# 0 or ''. Normalising to defined() is backward compatible.
sub NEW_SET_1_CDNA {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'NEW_SET_1_CDNA', $arg );
}
return $self->param('NEW_SET_1_CDNA');
}
sub NEW_SET_2_PROT {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'NEW_SET_2_PROT', $arg );
}
return $self->param('NEW_SET_2_PROT');
}
sub OUTPUT_DB {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'OUTPUT_DB', $arg );
}
return $self->param('OUTPUT_DB');
}
sub OUTPUT_BIOTYPE {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'OUTPUT_BIOTYPE', $arg );
}
return $self->param('OUTPUT_BIOTYPE');
}
sub DEBUG_OUTPUT_DB {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'DEBUG_OUTPUT_DB', $arg );
}
return $self->param('DEBUG_OUTPUT_DB');
}
sub MAX_TRANSLATIONS_PER_GENE {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'MAX_TRANSLATIONS_PER_GENE', $arg );
}
return $self->param('MAX_TRANSLATIONS_PER_GENE');
}
sub MAXIMUM_TRANSLATION_LENGTH_RATIO {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'MAXIMUM_TRANSLATION_LENGTH_RATIO', $arg );
}
return $self->param('MAXIMUM_TRANSLATION_LENGTH_RATIO');
}
sub WRITE_DEBUG_OUTPUT {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'WRITE_DEBUG_OUTPUT', $arg );
}
return $self->param('WRITE_DEBUG_OUTPUT');
}
sub CDNA_CODING_GENE_CLUSTER_IGNORE_STRAND {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'CDNA_CODING_GENE_CLUSTER_IGNORE_STRAND', $arg );
}
return $self->param('CDNA_CODING_GENE_CLUSTER_IGNORE_STRAND');
}
sub FIND_SINGLE_EXON_LINCRNA_CANDIDATES {
my ( $self, $arg ) = @_;
if ( defined $arg ) {
$self->param( 'FIND_SINGLE_EXON_LINCRNA_CANDIDATES', $arg );
}
return $self->param('FIND_SINGLE_EXON_LINCRNA_CANDIDATES');
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/RunnableDB/HiveLincRNAFinder.pm | Perl | apache-2.0 | 6,326 |
use Bio::DB::Sam;
use strict;
use warnings;
# Copyright {2015} Yuxiang Tan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Split a BAM file into four output BAMs by SAM flag:
#   paired.bam        - both mates mapped
#   singleton.bam     - exactly one mate mapped
#   unmapped.bam      - both mates unmapped
#   sec_alignment.bam - secondary alignments (flag 0x100)
# Also writes per-category read counts to PROCESS_DIR/stats.txt.

# All three arguments are required.
die "Usage: perl split_bam_file.pl FILE_DIR BAM_FILE PROCESS_DIR" if @ARGV < 3;
my $FILEDIR    = $ARGV[0];
my $BAMFILE    = $ARGV[1];
my $PROCESSDIR = $ARGV[2];

# Make the processing directory if it does not exist yet.
if (!(-d $PROCESSDIR)) {
    print 'making' . $PROCESSDIR . '... ';
    unless (mkdir $PROCESSDIR) {
        die "Unable to create $PROCESSDIR\n";
    }
}

# Open the input BAM and copy its header to every output file.
my $bam    = Bio::DB::Bam->open($FILEDIR . $BAMFILE);
my $header = $bam->header;

my $singleton = Bio::DB::Bam->open($PROCESSDIR . 'singleton.bam', "w");
my $paired    = Bio::DB::Bam->open($PROCESSDIR . 'paired.bam',    "w");
my $unmapped  = Bio::DB::Bam->open($PROCESSDIR . 'unmapped.bam',  "w");
# Reads that have a secondary alignment location.
my $secondary = Bio::DB::Bam->open($PROCESSDIR . 'sec_alignment.bam', "w");

my $status_code = $singleton->header_write($header);
$status_code = $paired->header_write($header);
$status_code = $unmapped->header_write($header);
$status_code = $secondary->header_write($header);

my $bytes;
my $flag;
# Counters: [0] paired, [1] singleton, [2] unmapped, [3] secondary.
# (The original initialised only three slots but used four.)
my @counter = (0, 0, 0, 0);

# Read alignment records one by one and dispatch on the SAM flag bits.
while (my $align = $bam->read1) {
    $flag = $align->flag;
    if ($flag & 256) {
        # 0x100 set: secondary alignment
        $bytes = $secondary->write1($align);
        $counter[3]++;
    }
    else {
        # 0x4 = this segment unmapped, 0x8 = mate unmapped
        if ($flag & 4) {
            if ($flag & 8) {
                # 0x4 == 1 & 0x8 == 1: neither mate mapped
                $bytes = $unmapped->write1($align);
                $counter[2]++;
            }
            else {
                # 0x4 == 1 & 0x8 == 0: only the mate mapped
                $bytes = $singleton->write1($align);
                $counter[1]++;
            }
        }
        else {
            if ($flag & 8) {
                # 0x4 == 0 & 0x8 == 1: only this segment mapped
                $bytes = $singleton->write1($align);
                $counter[1]++;
            }
            else {
                # 0x4 == 0 & 0x8 == 0: both mates mapped
                $bytes = $paired->write1($align);
                $counter[0]++;
            }
        }
    }
}

# Write the per-category counts into stats.txt (lexical handle, checked close).
my $out = $PROCESSDIR . 'stats.txt';
open(my $stats_fh, '>', $out) or die "Cannot write $out: $!.\n";
print $stats_fh 'Singletons: ', $counter[1], ' ';
print $stats_fh 'Paired: ', $counter[0], ' ';
print $stats_fh 'Unmapped: ', $counter[2], ' ';
print $stats_fh 'Secondary alignment: ', $counter[3], ' ';
close $stats_fh or die "Cannot close $out: $!.\n";
| yuxiangtan/QueryFuse | QueryFuse_v1/split_bam.file.no_sec_align.pl | Perl | apache-2.0 | 2,961 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::fortinet::fortigate::mode::signatures;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use POSIX qw(mktime floor);
# Declare the four signature-age counters (av, avet, ips, ipset).
#
# Each counter exposes a human-readable age string plus the raw age in
# seconds ('value'); thresholds apply to the raw value and a perfdata
# metric "<name>_update" (seconds) is emitted for each. The four counter
# specifications are identical apart from the name, so they are built in
# a loop instead of being written out four times.
sub set_counters {
    my ($self, %options) = @_;

    my @signature_types = ('av', 'avet', 'ips', 'ipset');

    # One top-level counter group per signature type, each with its own
    # prefix callback (prefix_av_output, prefix_avet_output, ...).
    $self->{maps_counters_type} = [
        map { { name => $_, type => 0, cb_prefix_output => 'prefix_' . $_ . '_output' } }
            @signature_types
    ];

    foreach my $type (@signature_types) {
        $self->{maps_counters}->{$type} = [
            { label => $type, set => {
                    key_values => [ { name => 'human' }, { name => 'value' } ],
                    threshold_use => 'value_absolute',
                    output_template => "last refresh is: '%s'",
                    perfdatas => [
                        { label => $type . '_update', value => 'value_absolute',
                          template => '%d', min => 0, unit => 's' },
                    ],
                }
            },
        ];
    }
}
# Human-readable prefixes for each counter group's output line.
# These callbacks ignore their arguments and return a constant string.
sub prefix_av_output    { return 'AV Signature '; }
sub prefix_avet_output  { return 'AV Extended Signature '; }
sub prefix_ips_output   { return 'IPS Signature '; }
sub prefix_ipset_output { return 'IPS Extended Signature '; }
# Constructor: delegates to the counter-template base class and registers
# this mode's (currently empty) extra command-line options.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
# No mode-specific options beyond the generic warning-*/critical-* ones
# generated from the counter labels.
$options{options}->add_options(arguments => {
});
return $self;
}
# Extract the "(YYYY-MM-DD hh:mm)" timestamp embedded in a signature
# version string and convert it to a local-time epoch via POSIX::mktime.
#
# Arguments (hash): date => signature version string.
# Returns the epoch in seconds, or undef when the string does not contain
# a parseable timestamp. The original read $1..$5 without checking that
# the match succeeded, so a malformed string would silently reuse stale
# capture variables from a previous match.
sub get_epoch_from_signature {
    my ($self, %options) = @_;

    my ($year, $month, $day, $hour, $min) =
        ($options{date} =~ /\((\d{4})-(\d{2})-(\d{2})\s(\d{2}):(\d{2})\)/);
    # Callers use the result in scalar arithmetic; undef signals "no date".
    return undef if !defined $year;

    return mktime(0, $min, $hour, $day, $month - 1, $year - 1900);
}
# Collect the four signature timestamps over SNMP and compute their ages.
#
# Fetches fgSysVersionAv/Ips/AvEt/IpsEt from the FORTINET-FORTIGATE-MIB,
# converts each embedded date to an epoch, and stores per-type hashes
# { human => pretty string, value => age in seconds } on $self for the
# counter framework to evaluate.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{snmp} = $options{snmp};

    # Ordered (counter name => OID) pairs; the OID order is preserved in
    # the get_leef request exactly as in the original implementation.
    my @signatures = (
        [ av    => '.1.3.6.1.4.1.12356.101.4.2.1.0' ],
        [ ips   => '.1.3.6.1.4.1.12356.101.4.2.2.0' ],
        [ avet  => '.1.3.6.1.4.1.12356.101.4.2.3.0' ],
        [ ipset => '.1.3.6.1.4.1.12356.101.4.2.4.0' ],
    );

    my $result = $self->{snmp}->get_leef(
        oids => [ map { $_->[1] } @signatures ],
        nothing_quit => 1
    );

    my $now = time();
    foreach my $entry (@signatures) {
        my ($name, $oid) = @$entry;
        # Age in seconds since this signature database was last refreshed.
        my $age = $now - $self->get_epoch_from_signature(date => $result->{$oid});
        # NOTE(review): relies on centreon::plugins::misc being loaded by the
        # plugin framework; this module does not "use" it directly -- confirm.
        $self->{$name} = {
            human => centreon::plugins::misc::change_seconds(value => $age, start => 'h'),
            value => $age
        };
    }
}
1;
__END__
=head1 MODE
Check last update/refresh of av and ips signatures
=over 8
=item B<--filter-counters>
Only display some counters (regexp can be used).
Example: --filter-counters='^av$'
=item B<--warning-*> B<--critical-*>
Thresholds (in seconds).
Can be: 'av', 'ips', 'avet', 'ipset'.
=back
=cut
| Sims24/centreon-plugins | centreon/common/fortinet/fortigate/mode/signatures.pm | Perl | apache-2.0 | 6,090 |
# Request class for the SWF RespondDecisionTaskCompleted API call.
# Generated code: each Moose attribute maps onto a field of the JSON
# request body via its NameInRequest trait / request_name.
package Paws::SimpleWorkflow::RespondDecisionTaskCompleted;
use Moose;
# Optional list of decisions produced while processing the decision task.
has Decisions => (is => 'ro', isa => 'ArrayRef[Paws::SimpleWorkflow::Decision]', traits => ['NameInRequest'], request_name => 'decisions' );
# Optional user-defined context attached to the workflow execution.
has ExecutionContext => (is => 'ro', isa => 'Str', traits => ['NameInRequest'], request_name => 'executionContext' );
# Opaque task token from the DecisionTask; mandatory for the call.
has TaskToken => (is => 'ro', isa => 'Str', traits => ['NameInRequest'], request_name => 'taskToken' , required => 1);
use MooseX::ClassAttribute;
# Class-level call metadata consumed by the Paws request machinery.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'RespondDecisionTaskCompleted');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::API::Response');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::SimpleWorkflow::RespondDecisionTaskCompleted - Arguments for method RespondDecisionTaskCompleted on Paws::SimpleWorkflow
=head1 DESCRIPTION
This class represents the parameters used for calling the method RespondDecisionTaskCompleted on the
Amazon Simple Workflow Service service. Use the attributes of this class
as arguments to method RespondDecisionTaskCompleted.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to RespondDecisionTaskCompleted.
As an example:
$service_obj->RespondDecisionTaskCompleted(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 Decisions => ArrayRef[L<Paws::SimpleWorkflow::Decision>]
The list of decisions (possibly empty) made by the decider while
processing this decision task. See the docs for the Decision structure
for details.
=head2 ExecutionContext => Str
User defined context to add to workflow execution.
=head2 B<REQUIRED> TaskToken => Str
The C<taskToken> from the DecisionTask.
C<taskToken> is generated by the service and should be treated as an
opaque value. If the task is passed to another process, its
C<taskToken> must also be passed. This enables it to provide its
progress and respond with results.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method RespondDecisionTaskCompleted in L<Paws::SimpleWorkflow>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/SimpleWorkflow/RespondDecisionTaskCompleted.pm | Perl | apache-2.0 | 2,603 |
# Generated SOAP complex-type class for AssetPage (AdWords API v201809).
# Represents a paged subset of Assets as returned by AssetService#get.
package Google::Ads::AdWords::v201809::AssetPage;
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace of the cm (campaign management) service group.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' };
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
# This type carries no XML attributes, hence no attribute class.
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use base qw(Google::Ads::AdWords::v201809::Page);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out attribute storage (Class::Std::Fast), one hash per field.
my %totalNumEntries_of :ATTR(:get<totalNumEntries>);
my %Page__Type_of :ATTR(:get<Page__Type>);
my %entries_of :ATTR(:get<entries>);
# Register field order, storage, XSD types and XML element names with the
# SOAP::WSDL typelib factory.
__PACKAGE__->_factory(
[ qw( totalNumEntries
Page__Type
entries
) ],
{
'totalNumEntries' => \%totalNumEntries_of,
'Page__Type' => \%Page__Type_of,
'entries' => \%entries_of,
},
{
'totalNumEntries' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
'Page__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'entries' => 'Google::Ads::AdWords::v201809::Asset',
},
{
'totalNumEntries' => 'totalNumEntries',
'Page__Type' => 'Page.Type',
'entries' => 'entries',
}
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::AssetPage
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
AssetPage from the namespace https://adwords.google.com/api/adwords/cm/v201809.
Contains a filtered and paged subset of Assets as returned by {@link AssetService#get}.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * entries
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/AssetPage.pm | Perl | apache-2.0 | 1,869 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Variation::Utils::BaseVepTabixPlugin
=head1 SYNOPSIS
package FunkyPlugin;
use base qw(Bio::EnsEMBL::Variation::Utils::BaseVepTabixPlugin);
sub new {
}
sub run {
my ($self, $transcript_variation_allele) = @_;
my $results = ... # do analysis
return {
FUNKY_PLUGIN => $results
};
}
1;
=head1 DESCRIPTION
To make writing plugin modules for the VEP easier, get
your plugin to inherit from this class, override (at least)
the feature_types, get_header_info and run methods to behave
according to the documentation below, and then run the VEP
with your plugin using the --plugin <module name> command
line option.
=cut
package Bio::EnsEMBL::Variation::Utils::BaseVepTabixPlugin;
use Bio::EnsEMBL::Variation::Utils::BaseVepPlugin;
use Bio::EnsEMBL::Variation::Utils::VariationEffect qw(overlap);
use Digest::MD5 qw(md5_hex);
use base qw(Bio::EnsEMBL::Variation::Utils::BaseVepPlugin);
use strict;
use warnings;
my $DEFAULT_EXPAND_LEFT = 0;
my $DEFAULT_EXPAND_RIGHT = 1e6;
my $DEFAULT_CACHE_SIZE = 1;
my $DEBUG = 0;
# Package-level flags recording which tabix Perl binding is available;
# _get_data_uncached() dispatches on them at runtime.
our ($CAN_USE_HTS_PM, $CAN_USE_TABIX_PM);
BEGIN {
# Probe the two supported bindings at compile time. Preference order is
# Bio::DB::HTS::Tabix, then the older Tabix module, then the command-line
# tabix binary as a last resort.
if (eval q{ require Bio::DB::HTS::Tabix; 1 }) {
$CAN_USE_HTS_PM = 1;
}
if (eval q{ require Tabix; 1 }) {
$CAN_USE_TABIX_PM = 1;
}
unless($CAN_USE_TABIX_PM || $CAN_USE_HTS_PM) {
# test tabix
# With no Perl binding available, the tabix binary must be on PATH.
die "ERROR: tabix does not seem to be in your path\n" unless `which tabix 2>&1` =~ /tabix$/;
}
}
# Constructor: no plugin-specific setup; delegate entirely to the base
# VEP plugin class.
sub new {
    my ($class, @args) = @_;
    return $class->SUPER::new(@args);
}

# Copy recognised tuning parameters (expand_left, expand_right, cache_size)
# from the user-supplied plugin parameters onto this object via their
# accessor methods. Unrecognised parameters are ignored here.
sub get_user_params {
    my $self = shift;

    my $params = $self->params_to_hash;

    for my $name (qw(expand_left expand_right cache_size)) {
        $self->$name($params->{$name}) if exists $params->{$name};
    }
}
# Lazily resolve and validate the list of tabix-indexed data files.
#
# Files come either from explicit "fileN" plugin parameters or, failing
# that, from any positional parameter that ends in .gz or exists on disk.
# Every file is validated via check_file() before being cached.
sub files {
    my $self = shift;

    unless (exists $self->{_files}) {
        my @found;
        my $params = $self->params_to_hash;

        # Explicit fileN parameters take precedence.
        push @found, $params->{$_} for grep { /^file\d+/ } keys %$params;

        # Fallback: positional params that look like data files.
        unless (@found) {
            @found = grep { /\.gz$/ || -e $_ } @{ $self->params };
        }

        $self->check_file($_) for @found;
        $self->{_files} = \@found;
    }

    return $self->{_files};
}

# Validate a file and append it to the plugin's file list.
sub add_file {
    my ($self, $file) = @_;

    $self->check_file($file);
    push @{ $self->{_files} }, $file;
}

# Die unless the given data file (and its .tbi index) is usable.
# Remote locations (http://, ftp:// - anything containing "tp://") cannot
# be stat'ed, so only local paths are checked for existence.
sub check_file {
    my ($self, $file) = @_;

    die("ERROR: No file specified\n") unless $file;

    unless ($file =~ /tp\:\/\//) {
        die "ERROR: Data file $file not found\n" unless -e $file;
        die "ERROR: Tabix index file $file\.tbi not found - perhaps you need to create it first?\n" unless -e $file . '.tbi';
    }

    return 1;
}
# Get/set how far region queries are expanded to the left of the requested
# coordinates (bp). Falls back to the module default on first read.
sub expand_left {
    my $self = shift;

    $self->{_expand_left} = shift if @_;
    unless (exists $self->{_expand_left}) {
        $self->{_expand_left} = $DEFAULT_EXPAND_LEFT;
    }

    return $self->{_expand_left};
}

# Get/set the rightward query expansion (bp); defaults on first read.
sub expand_right {
    my $self = shift;

    $self->{_expand_right} = shift if @_;
    unless (exists $self->{_expand_right}) {
        $self->{_expand_right} = $DEFAULT_EXPAND_RIGHT;
    }

    return $self->{_expand_right};
}

# Get/set how many cached regions/results are retained per chromosome;
# defaults on first read.
sub cache_size {
    my $self = shift;

    $self->{_cache_size} = shift if @_;
    unless (exists $self->{_cache_size}) {
        $self->{_cache_size} = $DEFAULT_CACHE_SIZE;
    }

    return $self->{_cache_size};
}
# Fetch (and cache) data records overlapping chromosome $c from $s to $e.
#
# Two cache levels are consulted before touching tabix:
#   1) an exact results cache keyed on "chr_start_end";
#   2) a region cache of previously fetched (expanded) windows, used only
#      when left/right expansion is enabled.
# Any sub-ranges not covered by cached regions are fetched uncached,
# optionally expanded, cached, and merged. Returns an arrayref of parsed
# records, de-duplicated by their identify_data() digest when more than
# one region contributed.
sub get_data {
my ($self, $c, $s, $e) = @_;
die("ERROR: No chromosome specified\n") unless $c;
die("ERROR: No start specified\n") unless $s;
die("ERROR: No end specified\n") unless $e;
# we use two levels of caching
# 1) results cache for specific coords
# 2) region cache to reduce multiple lookups for sequential "close" coords
my $pos_string = join("_", $c, $s, $e);
my $cache = $self->cache;
# check results cache first
if(exists($cache->{results}) && exists($cache->{results}->{$pos_string})) {
print STDERR "Using results cache\n" if $DEBUG;
return $cache->{results}->{$pos_string};
}
# now check the region cache
my (@result, @missing_regions, $hit_cache);
my $regions_used = 0;
# we only use the region cache if we are allowed to expand left or right
my ($expand_left, $expand_right) = ($self->expand_left, $self->expand_right);
my $use_data_cache = ($expand_left || $expand_right) ? 1 : 0;
if($use_data_cache && $cache->{$c}) {
my $regions = $cache->{$c}->{regions};
# iterate through them backwards as most likely to hit last one pushed on to array first
for(my $i=(scalar @$regions - 1); $i>=0; $i--) {
my $region = $regions->[$i];
if(overlap($s, $e, $region->[0], $region->[1])) {
print STDERR "Using data cache\n" if $DEBUG;
# flag that we've hit the cache and store how many regions we've used
$hit_cache = 1;
# for partial overlaps we store the bits we don't have in @missing_regions
if($s < $region->[0]) {
push @missing_regions, [$s, $region->[0] - 1];
}
if($e > $region->[1]) {
push @missing_regions, [$region->[1] + 1, $e];
}
my $filtered = $self->_filter_by_pos($cache->{$c}->{data}->[$i], $s, $e);
$regions_used++ if scalar @$filtered;
push @result, @$filtered;
}
}
}
# if we hit the cache, we can assume that our original start-end has been covered
# with any missing pieces added to @missing_regions
# otherwise we didn't hit the cache, so we add the original start-end
push @missing_regions, [$s, $e] unless $hit_cache;
foreach my $region(@missing_regions) {
my ($r_s, $r_e) = @$region;
# expand? (widen the fetched window so nearby future queries hit the cache)
$r_s -= $expand_left;
$r_e += $expand_right;
my $tmp_data = $self->_get_data_uncached($c, $r_s, $r_e);
# cache the data
$self->_add_data_to_cache($c, $r_s, $r_e, $tmp_data) if $use_data_cache;
# we don't need to filter it unless we're using the cache
push @result, $use_data_cache ? @{$self->_filter_by_pos($tmp_data, $s, $e)} : @$tmp_data;
$regions_used++;
}
# now unique it, but only if we queried more than one region
# (records are [digest, parsed] pairs; digest is the dedup key)
my %seen = ();
my @uniq_result = ();
if($regions_used > 1) {
foreach my $d(@result) {
push @uniq_result, $d->[1] unless $seen{$d->[0]};
$seen{$d->[0]} = 1;
}
}
else {
@uniq_result = map {$_->[1]} @result;
}
$self->_add_result_to_cache($pos_string, \@uniq_result);
return \@uniq_result;
}
# Return (creating on first use) the per-plugin tabix cache hashref.
sub cache {
    my $self = shift;

    $self->{_tabix_cache} ||= {};
    return $self->{_tabix_cache};
}

# Default record parser: pass the raw tabix line through unchanged.
# Subclasses override this to build structured records.
sub parse_data {
    my ($self, $line) = @_;
    return $line;
}

# Default record identity: the MD5 digest of the raw line, used to
# de-duplicate records fetched from overlapping regions.
sub identify_data {
    my $self = shift;
    my ($line) = @_;
    return md5_hex($line);
}
# Dispatch an uncached region fetch to the best available tabix backend:
# Bio::DB::HTS::Tabix, then the Tabix module, then the command-line binary.
sub _get_data_uncached {
    my $self = shift;

    return $self->_get_data_hts(@_) if $CAN_USE_HTS_PM;
    return $self->_get_data_pm(@_)  if $CAN_USE_TABIX_PM;
    return $self->_get_data_cl(@_);
}
# Record a fetched region (and its data) in the per-chromosome region
# cache, evicting the oldest entry once cache_size is exceeded.
sub _add_data_to_cache {
    my ($self, $chr, $start, $end, $data) = @_;

    my $chr_cache = $self->cache->{$chr} ||= {};
    push @{ $chr_cache->{regions} }, [$start, $end];
    push @{ $chr_cache->{data} },    $data;

    # regions and data are parallel arrays; trim both together (FIFO).
    if (scalar @{ $chr_cache->{regions} } > $self->cache_size) {
        shift @{ $chr_cache->{regions} };
        shift @{ $chr_cache->{data} };
    }
}

# Store a finished result set under its "chr_start_end" key, evicting the
# oldest cached result once cache_size is exceeded.
sub _add_result_to_cache {
    my ($self, $pos_string, $result) = @_;

    my $cache = $self->cache;
    $cache->{results}->{$pos_string} = $result;
    push @{ $cache->{results_order} }, $pos_string;

    # results_order tracks insertion order so eviction is FIFO.
    if (scalar @{ $cache->{results_order} } > $self->cache_size) {
        my $evicted = shift @{ $cache->{results_order} };
        delete $cache->{results}->{$evicted};
    }
}
# Return the subset of [digest, record] pairs from $list that overlap the
# interval $s..$e, using the plugin's get_start/get_end accessors.
sub _filter_by_pos {
my ($self, $list, $s, $e) = @_;
my @result;
foreach my $data(@$list) {
my $start = $self->get_start($data->[1]);
push @result, $data if overlap($start, $self->get_end($data->[1]), $s, $e);
# NOTE(review): early exit assumes $list is sorted by start position
# (true for tabix output) -- confirm for any subclass-supplied data.
last if $start > $e;
}
return \@result;
}
# Fetch records for chr:$s-$e from every data file using the
# Bio::DB::HTS::Tabix binding. Returns an arrayref of [digest, record]
# pairs (records for which parse_data returned false are dropped).
sub _get_data_hts {
my ($self, $c, $s, $e) = @_;
my @data;
foreach my $file(@{$self->files}) {
my $hts_obj = $self->_hts_obj($file);
# Cache this file's valid sequence names for chr-name translation.
my $valids = $self->{_valids}->{$file} ||= $hts_obj->seqnames;
my $iter = $hts_obj->query(
sprintf(
"%s:%i-%i",
$self->_get_source_chr_name($c, $file, $valids),
$s, $e
)
);
# query() returns undef when the region/sequence is absent from the index.
next unless $iter;
while(my $line = $iter->next) {
my $parsed = $self->parse_data($line);
push @data, [$self->identify_data($line), $parsed] if $parsed;
}
}
return \@data;
}
# Fetch records for chr:$s-$e from every data file using the older Tabix
# module. Returns an arrayref of [digest, record] pairs.
sub _get_data_pm {
my ($self, $c, $s, $e) = @_;
my @data;
foreach my $file(@{$self->files}) {
my $tabix_obj = $self->_tabix_obj($file);
# Cache this file's valid sequence names for chr-name translation.
my $valids = $self->{_valids}->{$file} ||= [$tabix_obj->getnames];
my $iter = $tabix_obj->query($self->_get_source_chr_name($c, $file, $valids), $s, $e);
# NOTE(review): $iter->{_} presumably checks the iterator's internal
# handle for an empty/invalid query -- Tabix-module specific; confirm.
next unless $iter && $iter->{_};
while(my $line = $tabix_obj->read($iter)) {
my $parsed = $self->parse_data($line);
push @data, [$self->identify_data($line), $parsed] if $parsed;
}
}
return \@data;
}
# Fetch records for chr:$s-$e from every data file by running the
# command-line tabix binary. Returns an arrayref of [digest, record] pairs.
#
# Fixed: the original used a 2-arg pipe open and backticks with the file
# name interpolated into a shell command, which breaks on paths containing
# spaces or shell metacharacters (and is a potential injection hazard).
# Both invocations now use the list form of open('-|'), which bypasses the
# shell entirely, plus lexical filehandles.
sub _get_data_cl {
    my ($self, $c, $s, $e) = @_;

    my @data;

    foreach my $file (@{ $self->files }) {

        # Cache this file's valid sequence names ("tabix -l") per file.
        my $valids = $self->{_valids}->{$file} ||= do {
            my @names;
            if (open(my $list_fh, '-|', 'tabix', '-l', $file)) {
                chomp(@names = <$list_fh>);
                close $list_fh;
            }
            \@names;
        };

        # Query the region; on open failure fall through to the next file,
        # matching the original's best-effort behaviour.
        open(my $tabix_fh, '-|', 'tabix', '-f', $file,
            sprintf('%s:%i-%i', $self->_get_source_chr_name($c, $file, $valids), $s, $e))
            or next;

        while (my $line = <$tabix_fh>) {
            chomp $line;
            $line =~ s/\r$//g;
            my $parsed = $self->parse_data($line);
            push @data, [$self->identify_data($line), $parsed] if $parsed;
        }

        close $tabix_fh;
    }

    return \@data;
}
# Lazily create and cache one Bio::DB::HTS::Tabix handle per data file.
sub _hts_obj {
    my ($self, $file) = @_;

    $self->{_tabix_obj}->{$file} ||= Bio::DB::HTS::Tabix->new(filename => $file);
    return $self->{_tabix_obj}->{$file};
}

# Lazily create and cache one Tabix-module handle per data file.
sub _tabix_obj {
    my ($self, $file) = @_;

    $self->{_tabix_obj}->{$file} ||= Tabix->new(-data => $file);
    return $self->{_tabix_obj}->{$file};
}
# Translate a chromosome name into the name used by a given data file.
#
# If $chr is not among the file's valid sequence names ($valids), a
# leading "chr" prefix is added or stripped when the alternative form is
# valid; otherwise the name is returned unchanged. Results are memoised
# per ($set, $chr) so the lookup only happens once per file.
sub _get_source_chr_name {
    my ($self, $chr, $set, $valids) = @_;

    $set    ||= 'default';
    $valids ||= [];

    my $name_map = $self->{_chr_name_map}->{$set} ||= {};

    unless (exists $name_map->{$chr}) {
        my $mapped = $chr;

        # Populate the valid-name list lazily from the plugin if not supplied.
        @$valids = @{ $self->can('valid_chromosomes') ? $self->valid_chromosomes : [] }
            unless @$valids;
        my %is_valid = map { $_ => 1 } @$valids;

        # If the requested name is unknown, try toggling a leading "chr".
        unless ($is_valid{$chr}) {
            if ($chr =~ /^chr/i) {
                (my $stripped = $chr) =~ s/^chr//i;
                $mapped = $stripped if $is_valid{$stripped};
            }
            elsif ($is_valid{'chr' . $chr}) {
                $mapped = 'chr' . $chr;
            }
        }

        $name_map->{$chr} = $mapped;
    }

    return $name_map->{$chr};
}
# Serialisation hook: tabix/HTS handles hold open file descriptors and
# cannot be frozen, so drop them before the plugin object is serialised.
# They are recreated lazily on next use via _hts_obj/_tabix_obj.
sub FREEZE {
    my ($self) = @_;
    delete $self->{_tabix_obj};
}
1;
| willmclaren/ensembl-variation | modules/Bio/EnsEMBL/Variation/Utils/BaseVepTabixPlugin.pm | Perl | apache-2.0 | 11,496 |
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <ensembl-dev@ebi.ac.uk>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=head1 NAME
Bio::EnsEMBL::Funcgen::Importer
=head1 SYNOPSIS
my $imp = Bio::EnsEMBL::Funcgen::Importer->new(%params);
$imp->register_experiment();
=head1 DESCRIPTION
B<This program> is the main class coordinating import of tiling array design and experimental data.
It utilises several underlying parser classes specific to array vendor or import file type.
=cut
################################################################################
package Bio::EnsEMBL::Funcgen::Importer;
use Bio::EnsEMBL::Funcgen::Utils::EFGUtils qw(get_date open_file run_system_cmd);
use Bio::EnsEMBL::Utils::Exception qw( throw deprecate );
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use Bio::EnsEMBL::Funcgen::Experiment;
#use Bio::EnsEMBL::Funcgen::Utils::Helper;#remove?
use Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Registry;
use File::Path;
use strict;
use vars qw(@ISA);
################################################################################
=head2 new
Description : Constructor method
Arg [1] : hash containing optional attributes:
-name Name of Experiment(dir)
-format of array e.g. Tiled(default)
-vendor name of array vendor
-description of the experiment
-pass DB password
-host DB host
-user DB user
-port DB port
-registry_host Host to load registry from
-registry_port Port for registry host
-registry_user User for registry host
-registry_pass Password for registry user
-ssh Flag to set connection over ssh via forwarded port to localhost (default = 0); remove?
-group name of experimental/research group
-location of experimental/research group
-contact e/mail address of primary contact for experimental group
-species
-assembly Genome assembly version i.e. 36 for NCBI36
-recover Recovery flag (default = 0)
-data_dir Root data directory (default = $ENV{'EFG_DATA'})
-output_dir review these dirs ???????
-input_dir ?????????
-import_dir ???????
-norm_dir ??????
-fasta dump FASTA flag (default =0)
-array_set Flag to treat all chip designs as part of same array (default = 0)
-array_name Name for array set
-array_file Path of array file to import for sanger ENCODE array
-result_set_name Name to give the raw and normalised result sets (default uses experiment and analysis name)
-norm_method Normalisation method (Nimblegen default = VSN_GLOG);
-dbname Override for autogeneration of funcgen dbname
-reg_config path to local registry config file (default = ~/ensembl.init || undef)
-design_type MGED term (default = binding_site_identification) get from meta/MAGE?
-farm Flag to submit jobs to farm e.g. normalisation jobs
-batch_job Flag to signify that this Importer is running as a prepared batch/farm job
-prepared Flag to signify result files have been previously imported in prepare mode
and file names will differ to those recorded in InputSubset
#-use_defaults This changes some mandatory parameters to optional, instead using either DEFAULT or the input file name for the following options -name, -input_set, -feature_type, -cell_type etc ???
-verbose
ReturnType : Bio::EnsEMBL::Funcgen::Importer
Example : my $Exp = Bio::EnsEMBL::Funcgen::Importer->new(%params);
Exceptions : throws if mandatory params are not set or DB connect fails
Caller : General
Status : Medium - potential for %params names to change, remove %attrdata?
=cut
################################################################################
sub new{
  my ($caller) = shift;

  my $reg = "Bio::EnsEMBL::Registry";
  my $class = ref($caller) || $caller;

  # Named-parameter unpacking; see the POD above for per-option descriptions.
  my ($name, $format, $vendor, $group, $location, $contact, $species,
      $array_name, $array_set, $array_file, $data_dir, $result_files,
      $ftype_name, $ctype_name, $exp_date, $desc, $user, $host, $port,
      $pass, $dbname, $db, $assm_version, $design_type, $output_dir, $input_dir,
      $batch_job, $farm, $prepared, $ssh, $fasta, $recover, $reg_config,
      $norm_method, $old_dvd_format, $feature_analysis, $reg_db, $parser_type,
      $ucsc_coords, $verbose, $fset_desc, $release, $reg_host, $reg_port, $reg_user, $reg_pass)
    = rearrange(['NAME', 'FORMAT', 'VENDOR', 'GROUP', 'LOCATION', 'CONTACT', 'SPECIES',
                 'ARRAY_NAME', 'ARRAY_SET', 'ARRAY_FILE', 'DATA_DIR', 'RESULT_FILES',
                 'FEATURE_TYPE_NAME', 'CELL_TYPE_NAME', 'EXPERIMENT_DATE', 'DESCRIPTION',
                 'USER', 'HOST', 'PORT', 'PASS', 'DBNAME', 'DB', 'ASSEMBLY', 'DESIGN_TYPE',
                 'OUTPUT_DIR', 'INPUT_DIR', #to allow override of defaults
                 'BATCH_JOB', 'FARM', 'PREPARED', 'SSH', 'DUMP_FASTA', 'RECOVER', 'REG_CONFIG',
                 'NORM_METHOD', 'OLD_DVD_FORMAT', 'FEATURE_ANALYSIS', 'REGISTRY_DB', 'PARSER',
                 'UCSC_COORDS', 'VERBOSE', 'FEATURE_SET_DESCRIPTION', 'RELEASE', 'REGISTRY_HOST',
                 'REGISTRY_PORT', 'REGISTRY_USER', 'REGISTRY_PASS'], @_);

  #### Define parent parser class based on vendor
  throw("Mandatory argument -vendor not defined") if ! defined $vendor;

  #This will override the default Vendor Parser type
  #Evals simply protect from messy errors if parser type not found
  my $parser_error;
  my $vendor_parser = ucfirst(lc($vendor));

  #WARNING evaling these parsers to enable pluggability hides errors in parser
  #use a perl -MBio::EnsEMBL::Funcgen::Parsers:ParserType to debug
  #get rid of all this case guessing and force correct parser name usage?

  #WARNING
  #Dynamic setting of ISA in this way reports the resultant object as Importer, when
  #some throws/methods are actually in other base/custom Parsers
  #This can seem a little counterintuitive, but allows plugability
  #With out the need for separate control scripts

  #Change this to be set and required/detected in the parse_and_import.pl script
  #Then we can have Importer.pm as the base class and get rid of this.
  #as well as set_config methods?

  eval {require "Bio/EnsEMBL/Funcgen/Parsers/${vendor_parser}.pm";};

  if($@){
    #Don't warn/throw yet as we might have a standard parser format
    $parser_error .= "There is no valid parser for the vendor your have specified:\t".$vendor.
      "\nMaybe this is a typo or maybe you want to specify a default import format using the -parser option\n".$@;
  }

  if(defined $parser_type){
    #try normal case first
    eval {require "Bio/EnsEMBL/Funcgen/Parsers/${parser_type}.pm";};

    if($@){
      $parser_type = ucfirst(lc($parser_type));

      #Now eval the new parser
      eval {require "Bio/EnsEMBL/Funcgen/Parsers/${parser_type}.pm";};

      if($@){
        #Might be no default
        my $txt = "There is no valid parser for the -parser format your have specified:\t".$parser_type."\n";

        if(! $parser_error){
          $txt .= "Maybe this is a typo or maybe you want run with the default $vendor_parser parser\n";
        }
        throw($txt.$@);
      }

      #warn about over riding vendor parser here
      if(! $parser_error){
        #Can't log this as we haven't blessed the Helper yet
        warn("WARNING\t::\tYou are over-riding the default ".$vendor." parser with -parser ".$parser_type);
      }
    }
  }
  else{
    throw($parser_error) if $parser_error;
    $parser_type = $vendor_parser;
  }

  #we should now really set parser_type as an attrtibute?
  unshift @ISA, 'Bio::EnsEMBL::Funcgen::Parsers::'.$parser_type;
  #change this to be called explicitly from the load script?

  #### Create object from parent class
  my $self = $class->SUPER::new(@_);

  #### Set vars and test minimum mandatory params for any import type
  $self->{'name'} = $name || throw('Mandatory param -name not met');#This is not mandatory for array design import
  $self->{'user'} = $user || $ENV{'EFG_WRITE_USER'};
  $self->vendor(uc($vendor)); #already tested
  $self->{'format'} = uc($format) || 'TILED'; #remove default?
  $self->group($group) if $group;
  $self->location($location) if $location;
  $self->contact($contact) if $contact;
  $species || throw('Mandatory param -species not met');
  $self->array_name($array_name) if $array_name;
  $self->array_set($array_set) if $array_set;
  $self->array_file($array_file) if $array_file;
  $self->{'data_dir'} = $data_dir || $ENV{'EFG_DATA'};
  $self->result_files($result_files)if $result_files;
  $self->experiment_date($exp_date) if $exp_date;
  $self->description($desc) if $desc;#experiment
  $self->feature_set_description($fset_desc) if $fset_desc;
  $assm_version || throw('Mandatory param -assembly not met');
  $self->{'design_type'} = $design_type || 'binding_site_identification'; #remove default?
  $self->{'output_dir'} = $output_dir if $output_dir; #config default override
  $self->{'input_dir'} = $input_dir if $input_dir; #config default override
  $self->farm($farm) if $farm;
  $self->batch_job($batch_job);
  $self->prepared($prepared);
  $self->{'ssh'} = $ssh || 0;
  $self->{'_dump_fasta'} = $fasta || 0;
  $self->{'recover'} = $recover || 0;
  #check for ~/.ensembl_init to mirror general EnsEMBL behaviour
  $self->{'reg_config'} = $reg_config || ((-f "$ENV{'HOME'}/.ensembl_init") ? "$ENV{'HOME'}/.ensembl_init" : undef);
  #$self->{'write_mage'} = $write_mage || 0;
  #$self->{'no_mage'} = $no_mage || 0;
  #$self->{'input_set_name'} = $eset_name || $name; #Move this to InputSet?
  $self->{'old_dvd_format'} = $old_dvd_format || 0;
  $self->{'ucsc_coords'} = $ucsc_coords || 0;
  $self->{'verbose'} = $verbose || 0;
  $self->{'release'} = $release;

  if($reg_host && $self->{'reg_config'}){
    #FIX(review): the original interpolated ${reg_user}@${reg_host} inside a
    #double-quoted string; "@${reg_host}" is an array deref of a string and
    #dies under 'use strict' when this warn fires.  Built by concatenation
    #instead (same output), matching the log() call further down.
    warn "You have specified registry parameters and a config file:\t".$self->{'reg_config'}.
      "\nOver-riding config file with specified paramters:\t".$reg_user.'@'.$reg_host.':'.$reg_port;
  }

  #Will a general norm method be applicable for all imports?
  #Already casued problems with Bed imports... remove?
  #Could set NORM_METHOD in Parser!!
  #warn "Need to fully implement norm_method is validate_mage, remove ENV NORM_METHOD?";
  $self->{'norm_method'} = $norm_method;# || $ENV{'NORM_METHOD'};

  #if ($self->vendor ne 'NIMBLEGEN'){
  # $self->{'no_mage'} = 1;
  # warn "Hardcoding no_mage for non-NIMBLEGEN imports";
  # }
  # if($self->{'no_mage'} && $self->{'write_mage'}){
  # throw('-no_mage and -write_mage options are mutually exclusive, please select just one');
  # }

  #### Set up DBs and load and reconfig registry

  ### Load Registry
  #Can we load the registry using the assembly version, then just redefine the efg DB?
  #We have problems here if we try and load on a dev version, where no dev DBs are available on ensembldb
  #Get the latest API version for the assembly we want to use
  #Then load the registry from that version
  #Then we can remove some of the dnadb setting code below?
  #This may cause problems with API schema mismatches
  #Can we just test whether the current default dnadb contains the assembly?
  #Problem with this is that this will not have any other data e.g. genes etc
  #which may be required for some parsers

  #How does the registry pick up the schema version??
  #We should really load the registry first given the dnadb assembly version
  #Then reset the eFG DB as appropriate

  #FIX(review): this previously tested $self->{'_reg_config'}, which is never
  #set anywhere in this constructor (the attribute is stored as 'reg_config'
  #above), so the load_all() branch below was unreachable and a user-supplied
  #-reg_config file was silently ignored.
  if ($reg_host || ! defined $self->{'reg_config'}) {
    #defaults to current ensembl DBs
    $reg_host ||= 'ensembldb.ensembl.org';
    $reg_user ||= 'anonymous';

    #Default to the most recent port for ensdb
    if(! $reg_port && $reg_host eq 'ensdb-archive'){
      $reg_port = 5304;
    }

    #This will try and load the dev DBs if we are using v49 schema or API?
    #Need to be mindful about this when developing
    #we need to tip all this on it's head and load the reg from the dnadb version!!!!!!!
    my $version_text= ($self->{'release'}) ? 'version '.$self->{'release'} : 'current version';
    $self->log("Loading $version_text registry from $reg_user".'@'.$reg_host);

    #Note this defaults API version, hence running with head code
    #And not specifying a release version will find not head version
    #DBs on ensembldb, resulting in an exception from reset_DBAdaptor below
    $reg->load_registry_from_db(
      -host => $reg_host,
      -user => $reg_user,
      -port => $reg_port,
      -pass => $reg_pass,
      #-host => "ens-staging",
      #-user => 'ensro',
      -db_version => $self->{'release'},#51
      -verbose => $self->verbose,
    );

    throw('Not sensible to set the import DB as the default eFG DB from '.$reg_host.', please define db params') if ((! $dbname) && (! $db));
  }
  else{
    $self->log("Loading registry from:\t".$self->{'reg_config'});
    $reg->load_all($self->{'reg_config'}, 1);
  }

  #Store the registry connection params (including any defaults applied above)
  #so that the registry_host/user/port/pass accessors return what was used;
  #previously these attributes were never set and the accessors returned undef.
  $self->{'reg_host'} = $reg_host;
  $self->{'reg_port'} = $reg_port;
  $self->{'reg_user'} = $reg_user;
  $self->{'reg_pass'} = $reg_pass;

  #Validate species
  my $alias = $reg->get_alias($species) || throw("Could not find valid species alias for $species\nYou might want to clean up:\t".$self->get_dir('output'));
  $self->species($alias);
  $self->{'param_species'} = $species;#Only used for dir generation

  #SET UP DBs
  if($db){
    #db will have been defined before reg loaded, so will be present in reg
    if(! (ref($db) && $db->isa('Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor'))){
      $self->throw('-db must be a valid Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor');
    }
  }
  else{ #define eFG DB from params or registry
    if($reg_db){#load eFG DB from reg
      if($dbname){
        throw("You cannot specify DB params($dbname) and load from the registry at the same time.");
      }
      $self->log('WARNING: Loading eFG DB from Registry');
      $db = $reg->get_DBAdaptor($self->species(), 'funcgen');
      throw("Unable to retrieve ".$self->species." funcgen DB from the registry") if ! $db;
    }
    else{#resets the eFG DB in the custom or generic registry
      $dbname || throw('Must provide a -dbname if not using default custom registry config');
      #$user || throw('Must provide a -user parameter');#make this default to EFG_WRITE_USER?
      $pass || throw('Must provide a -pass parameter');

      #remove this and throw?
      if(! defined $host){
        $self->log('WARNING: Defaulting to localhost');
        $host = 'localhost';
      }

      $port ||= 3306;
      my $host_ip = '127.0.0.1';#is this valid for all localhosts?

      if ($self->{'ssh'}) {
        $host = `host localhost`; #mac specific? nslookup localhost wont work on server/non-PC
        #will this always be the same?

        if (! (exists $ENV{'EFG_HOST_IP'})) {
          warn "Environment variable EFG_HOST_IP not set for ssh mode, defaulting to $host_ip for $host";
        } else {
          $host_ip = $ENV{'EFG_HOST_IP'};
        }

        if ($self->host() ne 'localhost') {
          warn "Overriding host ".$self->host()." for ssh connection via localhost($host_ip)";
        }
      }

      #data version is only used if we don't want to define the dbname
      #This should never be guessed so don't need data_version here
      #$dbname ||= $self->species()."_funcgen_".$self->data_version();

      #Remove block below when we can
      my $dbhost = ($self->{'ssh'}) ? $host_ip : $host;

      #This isn't set yet!?
      #When we try to load, say 49, when we only have 48 on ensembldb
      #This fails because there is not DB set for v49, as it is not on ensembl DB
      #In this case we need to load from the previous version
      #Trap this and suggest using the -schema_version/release option
      #Can we autodetect this and reload the registry?
      #We want to reload the registry anyway with the right version corresponding to the dnadb
      #We could either test for the db in the regsitry or just pass the class.

      $db = $reg->reset_DBAdaptor($self->species(), 'funcgen', $dbname, $dbhost, $port, $self->user, $pass,
                                  #'Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor',
                                  {
                                   -dnadb_host => $reg_host,
                                   -dnadb_port => $reg_port,
                                   -dnadb_assembly => $assm_version,
                                   -dnadb_user => $reg_user,
                                   -dnadb_pass => $reg_pass,
                                  });

      #ConfigRegistry will try and set this
      #This will fail if there is already one in the registry as it will try
      #and defined a new unique species so as not to overwrite the original
      #e.g. homo_sapiens1

      #This is why it was orignally written backwards as we can't easily dynamically redefine
      #an adaptor in the registry without ConfigRegistry trying to change the name
      #the very act of creating a new db to redefine the registry with causes ConfigRegistry
      #to try and register it with a unique species name

      #Delete the old funcgen DB from the registry first
      #$reg->remove_DBAdaptor($self->species, 'funcgen');

      #ConfigRegistry will automatically configure this new db

      #$db = Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor->new(
      # -user => $user,
      # -host => ($self->{'ssh'}) ? $host_ip : $host,
      # -port => $port,
      # -pass => $pass,
      # #we need to pass dbname else we can use non-standard dbs
      # -dbname => $dbname,
      # -species => $self->species(),
      # -group => 'funcgen',
      # );

      #if we get a species like homo_sapiens1 here
      #This is because ConfigRegistry is try to make the dbname different between the
      #one already present and the one you're trying to add
    }
  }

  ### VALIDATE DNADB
  #This is now done in DBAdaptor

  #We can change this to just use the assembly version
  #we could even have the wordy assmelby version from the meta table
  #do the standard ensembl subs
  #s/[A-Za-z]//g;
  #s/\.//g;
  #And then validate?
  #Just stick to number version for now.

  #Now we need to set the dnadb_host params to avoid ensembldb defaults
  #This should check the registry first
  #Then load from the registry db?
  #If we have a multi host registry config file this isn't going to work!

  #Is this required anymore as the DBAdaptor handles this?
  #Not if we pass a db with an incorrect dnadb attached.

  #if($db->_get_schema_build($db->dnadb()) !~ /_[0-9]+_${assm_version}[a-z]*$/){
  # my $warning = "WARNING: dnadb does not match assembly_version $assm_version. Using ensembldb.enembl.org to define the dnadb";
  # $warning .= ' rather than the reg_config' if (defined $self->{'_reg_config'});

  #We need to account for reg_config DBs which may have custom info in
  #So try reg_config host first, then try ensembldb with warning
  #Could have a reg_config only flag for core dbs
  #Need to implement more params in set_dnadb_by_assembly_version
  # $self->log($warning);
  # $db->set_dnadb_by_assembly_version($assm_version);
  # }

  #Test connections
  $self->db($db);
  $db->dbc->db_handle;
  $db->dnadb->dbc->db_handle;
  #Set re/disconnect options
  $db->dbc->disconnect_when_inactive(1);
  $db->dnadb->dbc->disconnect_when_inactive(1);

  ### Check analyses/feature_type/cell_type
  if($feature_analysis){
    my $fanal = $self->db->get_AnalysisAdaptor->fetch_by_logic_name($feature_analysis);
    throw("The Feature Analysis $feature_analysis does not exist in the database") if(!$fanal);
    $self->feature_analysis($fanal);
  }

  if($ctype_name){
    my $ctype = $self->db->get_CellTypeAdaptor->fetch_by_name($ctype_name);
    throw("The CellType $ctype_name does not exist in the database") if(!$ctype);
    $self->cell_type($ctype);
  }

  if ($ftype_name) {
    my $ftype = $self->db->get_FeatureTypeAdaptor->fetch_by_name($ftype_name);
    throw("The FeatureType $ftype_name does not exist in the database") if(!$ftype);
    $self->feature_type($ftype);
  }

  #Set config here instead?
  #So we can check all mandatory params
  #Set vendor specific attr dependent vars

  #Generic input dir
  $self->{'input_dir'} ||= $self->get_dir("data").'/input/'.$self->{'param_species'}.'/'.$self->vendor().'/'.$self->name();

  if(! -d $self->get_dir('input')){
    if(@{$self->result_files}){
      #This is really InputSet specific
      #Could go in init_experiment_import
      $self->log("Processing files:\n\t\t".join("\n\t\t",@{$self->result_files}));
    }
    else{
      throw('input_dir is not defined or does not exist ('.$self->get_dir('input').')');
    }
  }

  #Parser specific config
  $self->set_config();

  $self->debug(2, "Importer class instance created.");
  $self->debug_hash(3, \$self);

  return ($self);
}
=head2 registry_host
Example : my $reg_host = $imp->registry_host;
Description: Accessor for registry host attribute
Returntype : string e.g. ensembldb.ensembl.org
Exceptions : None
Caller : general
Status : at risk
=cut
# Read-only accessor for the registry host attribute.
sub registry_host{
  my $self = shift;
  return $self->{'reg_host'};
}
=head2 registry_user
Example : my $reg_user = $imp->registry_user;
Description: Accessor for registry user attribute
Returntype : string e.g. anonymous
Exceptions : None
Caller : general
Status : at risk
=cut
# Read-only accessor for the registry user attribute.
sub registry_user{
  my $self = shift;
  return $self->{'reg_user'};
}
=head2 registry_port
Example : my $reg_port = $imp->registry_port;
Description: Accessor for registry port attribute
Returntype : string e.g. 3306
Exceptions : None
Caller : general
Status : at risk
=cut
# Read-only accessor for the registry port attribute.
sub registry_port{
  my $self = shift;
  return $self->{'reg_port'};
}
=head2 registry_pass
Example : my $reg_pass = $imp->registry_pass;
Description: Accessor for registry pass attribute
Returntype : string i.e. the registry user's password
Exceptions : None
Caller : general
Status : at risk
=cut
# Read-only accessor for the registry password attribute.
sub registry_pass{
  my $self = shift;
  return $self->{'reg_pass'};
}
#init method kept separate from new due to differing madatory check and set up
=head2 init_array_import
Example : $self->init_import();
Description: Initialises import by creating working directories
and by storing the Experiment
Returntype : none
Exceptions : warns and throws depending on recover and Experiment status
Caller : general
Status : at risk - merge with register_array_design
=cut
# Initialise an array-design import: only the cache and fasta output
# directories are needed here (no Experiment is stored, unlike
# init_experiment_import).
# Array/Chip generation notes: vendors differ in where the design_id lives
# (e.g. DesignNotes.txt vs SampleKey) -- see the parser classes.
sub init_array_import{
  my $self = shift;
  $self->create_output_dirs('caches', 'fastas');
}
=head2 init_experiment_import
Example : $self->init_import();
Description: Initialises import by creating working directories
and by storing the Experiment
Returntype : none
Exceptions : warns and throws depending on recover and Experiment status
Caller : general
Status : at risk - merge with register experiment
=cut
sub init_experiment_import{
# Initialise an experiment import:
# 1. check mandatory attributes (group, data_dir; name is checked in new)
# 2. create the working directory tree and check result_files
# 3. resolve the normalisation analysis (if any) from the DB
# 4. register the experimental group
# 5. fetch or store the Experiment record, honouring the -recover flag
# Throws on missing mandatory args, unknown norm analysis, or a pre-existing
# Experiment when -recover was not set.
my ($self) = shift;
#Change this to take config mandatory params?
#No specific to exp import
#Name is used in set_config anyway
#Currently we only have array and experiment import, both of which should have names
#Make mandatory?
foreach my $tmp ("group", "data_dir") {#name now generically mandatory
throw("Mandatory arg $tmp not been defined") if (! defined $self->{$tmp});
}
#Should we separate path on group here too, so we can have a dev/test group?
#Create output dirs
#This should be moved to the Parser to avoid generating directories which are needed for different imports
$self->create_output_dirs('raw', 'norm', 'caches', 'fastas');
throw("No result_files defined.") if (! defined $self->result_files());
#Log input files
#if (@{$self->result_files()}) {
# $self->log("Found result files arguments:\n\t".join("\n\t", @{$self->result_files()}));
#}
#This is done in new
#check for cell||feature and warn if no met file supplied?
# Resolve the normalisation analysis by logic name; absence of a norm method
# is allowed (warned), an unknown one is fatal.
if($self->norm_method){
my $norm_anal = $self->db->get_AnalysisAdaptor->fetch_by_logic_name($self->norm_method);
#should we list the valid analyses?
throw($self->norm_method.' is not a valid analysis') if ! $norm_anal;
$self->norm_analysis($norm_anal);
}else{
$self->log('WARNING: No normalisation analysis specified');
}
#warn "Need to check env vars here or in Parser or just after set_config?";
#Need generic method for checking ENV vars in Helper
#check for ENV vars?
#R_LIBS
#R_PATH if ! farm
#R_FARM_PATH
$self->validate_group();#import experimental_group
#Get experiment
my $exp_adaptor = $self->db->get_ExperimentAdaptor();
my $exp = $exp_adaptor->fetch_by_name($self->name()); #, $self->group());
# process_experiment_config is only present in some parser subclasses
# (e.g. MAGE) -- see comment below; hence the can() guard.
$self->process_experiment_config if $self->can('process_experiment_config');#Parsers::MAGE::process_experiment_config
#This is only used for the first test below.
#my $xml = $exp_adaptor->fetch_mage_xml_by_experiment_name($self->name());# if $self->{'write_xml'};
#DO NOT CHANGE THIS LOGIC!
#write mage if we specify or we don't have a the final xml or the template
#recovery is turned on to stop exiting when previously stored chips are found from the 'write_mage' run.
#This does mean that if you import without running the write_mage step
#you could potentially be overwriting someone elses experiment info
#No way of getting around this, need to make warning obvious, add to end of log!!!
#We always want to write and update xml and ResultSets if we're running the 2nd stage of the import
#Why would we ever want to skip the validate process?
#Leave for now as this is working as we want it
#But propose to remove skip functionality
#if( ! $self->{'no_mage'}){
# if($self->{'write_mage'} || !( -f $self->get_config('tab2mage_file') || $xml)){
# $self->{'write_mage'} = 1;
# $self->backup_file($self->get_config('tab2mage_file'));
# }
# #elsif($xml && (! $self->{'update_xml'})){#Changed this so we always update
# #elsif(! $self->{'update_xml'}){
#
# #Here, we need to always update_xml
# #If we are doing the 2nd stage
# #Currently this is skipping as we haven't explicitly set it
# #To remove this...
# #what we need to do is check that we don't test for update_xml,
# # i.e. assuming that we're running the second stage of the import.
# # Therefore we need a boolean to set whether it is the first stage..else update_xml implicit
# # write mage is explicit flag
# # Or if we have not tab2mage file?
# # we can then override this explicitly with update_xml?
# # WE're never likely edit the xml directly, so we always want to validate and update
# # so write mage flag become update_experiment? No this is no obvious behaviour
# # We need to warn about removing the write_mage flag after we have updated it
# # Otherwise we will never get to 2nd stage
#
# #No mage is still valid as we may want to jus import and experiment
# #Before receiving correct meta data
# #When we can then rerun the import with -write_mage to update the resultsets
#
# elsif( -f $self->get_config('tab2mage_file')){#Run Tab2Mage
#
# $self->backup_file($self->get_config('mage_xml_file'));
# my $cmd = 'tab2mage.pl -e '.$self->get_config('tab2mage_file').' -k -t '.$self->get_dir('output').
# ' -c -d '.$self->get_dir('results');
#
# $self->log('Reading tab2mage file');
# my $t2m_exit_code = run_system_cmd($cmd, 1);#no exit flag due to non-zero exit codes
# warn "tab2mage exit code is $t2m_exit_code";
#
# if(! ($t2m_exit_code > -1) && ($t2m_exit_code <255)){
# throw("tab2mage failed. Please check and correct:\t".$self->get_config('tab2mage_file')."\n...and try again");
# }
#
# $self->{'recover'} = 1;
# }
# }
#Recovery now set so deal with experiment
# Three cases: reuse stored Experiment (recover), refuse to clobber
# (no recover, exists), or create and store a new one.
if ($self->recovery() && ($exp)) {
$self->log("Using previously stored Experiment:\t".$exp->name);
} elsif ((! $self->recovery()) && $exp) {
throw("Your experiment name is already registered in the database, please choose a different \"name\", this will require renaming you input directory, or specify -recover if you are working with a failed/partial import.");
#can we skip this and store, and then check in register experiment if it is already stored then throw if not recovery
} else { # (recover && exp) || (recover && ! exp)
$exp = Bio::EnsEMBL::Funcgen::Experiment->new(
-GROUP => $self->group(),
-NAME => $self->name(),
-DATE => $self->experiment_date(),
-PRIMARY_DESIGN_TYPE => $self->design_type(),
-DESCRIPTION => $self->description(),
-ADAPTOR => $self->db->get_ExperimentAdaptor(),
);
($exp) = @{$exp_adaptor->store($exp)};
}
$self->experiment($exp);
#remove and add specific report, this is catchig some Root stuff
#$self->log("Initiated efg import with following parameters:\n".Data::Dumper::Dumper(\$self));
return;
}
=head2 validate_group
Example : $self->validate_group();
Description: Validates groups details
Returntype : none
Exceptions : throws if insufficient info defined to store new Group and is not already present
Caller : general
Status : Medium - check location and contact i.e. group name clash?
=cut
# Look up the group in the DB; if absent, register it (requires location
# and contact) or throw.
sub validate_group{
  my $self = shift;

  my $group_ref = $self->db->fetch_group_details($self->group());
  return if $group_ref;

  if(! ($self->location() && $self->contact())){
    throw("Group ".$self->group()." does not exist, please specify a location and contact to register the group");
  }

  $self->db->import_group($self->group(), $self->location, $self->contact());
  return;
}
=head2 create_output_dirs
Example : $self->create_output_dirs();
Description: Does what it says on the tin, creates dirs in
the root output dir foreach @dirnames, also set paths in self
Arg [1] : mandatory - list of dir names
Returntype : none
Exceptions : none
Caller : general
Status : Medium - add throw?
=cut
=pod (see POD above) creates each named directory under the appropriate root
and records its path in $self->{"${name}_dir"} if not already set.
=cut

sub create_output_dirs{
  my ($self, @dirnames) = @_;

  #output dir created in control script
  #avoids errors when logs generated first
  foreach my $name (@dirnames) {

    # 'caches' and 'fastas' live under $EFG_DATA (caches are per-dbname);
    # everything else goes under the experiment output dir.
    if($name eq 'caches'){
      $self->{"${name}_dir"} = $ENV{'EFG_DATA'}."/${name}/".$self->db->dbc->dbname() if(! defined $self->{"${name}_dir"});
    }
    elsif($name eq 'fastas'){
      $self->{"${name}_dir"} = $ENV{'EFG_DATA'}."/${name}/" if(! defined $self->{"${name}_dir"});
    }
    else{
      $self->{"${name}_dir"} = $self->get_dir('output')."/${name}" if(! defined $self->{"${name}_dir"});
    }

    if(! (-d $self->get_dir($name) || (-l $self->get_dir($name)))){
      $self->log("Creating directory:\t".$self->get_dir($name));
      #This did not throw with mkdir!!
      #FIX(review): was "mkpath $dir || throw(...)"; '||' binds to the
      #argument (mkpath($dir || throw)), so the throw was unreachable and
      #mkpath failures went unreported.  Low-precedence 'or' fixes this.
      mkpath($self->get_dir($name)) or throw('Failed to create directory: '. $self->get_dir($name));
      chmod 0744, $self->get_dir($name);
    }
  }

  return;
}
=head2 vendor
Example : $imp->vendor("NimbleGen");
Description: Getter/Setter for array vendor
Arg [1] : optional - vendor name
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the array vendor; values are normalised to upper case.
sub vendor{
  my $self = shift;

  if(@_){
    my $new_vendor = shift;
    $self->{'vendor'} = uc($new_vendor);
  }

  return $self->{'vendor'};
}
=head2 feature_type
Example : $imp->feature_type($ftype);
Description: Getter/Setter for Experiment FeatureType
Arg [1] : optional - Bio::EnsEMBL::Funcgen::FeatureType
Returntype : Bio::EnsEMBL::Funcgen::FeatureType
Exceptions : Throws if arg is not valid or stored
Caller : general
Status : at risk
=cut
# Getter/setter for the Experiment FeatureType; the setter insists on a
# stored (dbID'd) Bio::EnsEMBL::Funcgen::FeatureType.
sub feature_type{
  my $self = shift;

  if(@_){
    my $new_ftype = shift;

    #do we need this as we're checking in new?
    unless($new_ftype->isa('Bio::EnsEMBL::Funcgen::FeatureType') && $new_ftype->dbID()){
      throw("Must pass a valid stored Bio::EnsEMBL::Funcgen::FeatureType");
    }

    $self->{'feature_type'} = $new_ftype;
  }

  return $self->{'feature_type'};
}
=head2 feature_analysis
Example : $imp->feature_analysis($fanal);
Description: Getter/Setter for Analysis used for creating the imported Features
Arg [1] : optional - Bio::EnsEMBL::Analysis
Returntype : Bio::EnsEMBL::Analysis
Exceptions : Throws if arg is not valid or stored
Caller : general
Status : at risk
=cut
# Getter/setter for the feature Analysis; the setter insists on a stored
# (dbID'd) Bio::EnsEMBL::Analysis.
sub feature_analysis{
  my $self = shift;

  if(@_){
    my $new_anal = shift;

    #do we need this as we're checking in new?
    unless(ref($new_anal) && $new_anal->isa('Bio::EnsEMBL::Analysis') && $new_anal->dbID()){
      throw("Must pass a valid stored Bio::EnsEMBL::Analysis");
    }

    $self->{'feature_analysis'} = $new_anal;
  }

  return $self->{'feature_analysis'};
}
=head2 norm_analysis
Example : $imp->norm_analysis($anal);
Description: Getter/Setter for the normalisation analysis
Arg [1] : optional - Bio::EnsEMBL::Analysis
Returntype : Bio::EnsEMBL::Analysis
Exceptions : Throws if arg is not valid or stored
Caller : general
Status : at risk
=cut
# Getter/setter for the normalisation Analysis; the setter insists on a
# stored (dbID'd) Bio::EnsEMBL::Analysis.
sub norm_analysis{
  my $self = shift;

  if(@_){
    my $new_anal = shift;

    #do we need this as we're checking in new?
    unless(ref($new_anal) && $new_anal->isa('Bio::EnsEMBL::Analysis') && $new_anal->dbID()){
      throw("Must pass a valid stored Bio::EnsEMBL::Analysis");
    }

    $self->{'norm_analysis'} = $new_anal;
  }

  return $self->{'norm_analysis'};
}
=head2 cell_type
Example : $imp->cell_type($ctype);
Description: Getter/Setter for Experiment CellType
Arg [1] : optional - Bio::EnsEMBL::Funcgen::CellType
Returntype : Bio::EnsEMBL::Funcgen::CellType
Exceptions : Throws if arg is not valid or stored
Caller : general
Status : at risk
=cut
# Getter/setter for the Experiment CellType; the setter insists on a
# stored (dbID'd) Bio::EnsEMBL::Funcgen::CellType.
sub cell_type{
  my $self = shift;

  if(@_){
    my $new_ctype = shift;

    #do we need this as we're checking in new?
    unless($new_ctype->isa('Bio::EnsEMBL::Funcgen::CellType') && $new_ctype->dbID()){
      throw("Must pass a valid stored Bio::EnsEMBL::Funcgen::CellType");
    }

    $self->{'cell_type'} = $new_ctype;
  }

  return $self->{'cell_type'};
}
=head2 ucsc_coords
Example : $start += 1 if $self->ucsc_coords;
Description: Getter for UCSC coordinate usage flag
Returntype : boolean
Exceptions : none
Caller : general
Status : at risk
=cut
# Read-only accessor for the UCSC (0-based start) coordinate flag.
sub ucsc_coords{
  my ($self) = @_;
  return $self->{'ucsc_coords'};
}
=head2 array_file
Example : my $array_file = $imp->array_file();
Description: Getter/Setter for sanger/design array file
Arg [1] : optional - path to adf or gff array definition/mapping file
Returntype : string
Exceptions : none
Caller : general
Status : at risk
=cut
# Getter/setter for the array definition/mapping file path (adf or gff).
sub array_file{
  my $self = shift;

  if(@_){
    $self->{'array_file'} = shift;
  }

  return $self->{'array_file'};
}
=head2 array_name
Example : my $array_name = $imp->array_name();
Description: Getter/Setter for array name
Arg [1] : optional string - name of array
Returntype : string
Exceptions : none
Caller : general
Status : at risk
=cut
# Getter/setter for the array name.
sub array_name{
  my $self = shift;

  if(@_){
    $self->{'array_name'} = shift;
  }

  return $self->{'array_name'};
}
=head2 array_set
Example : $imp->array_set(1);
Description: Getter/Setter for array set flag
Arg [1] : optional boolean - treat all array chips as the same array
Returntype : boolean
Exceptions : none
Caller : general
Status : at risk
=cut
# Getter/setter for the array-set flag (treat all chips as one array).
sub array_set{
  my $self = shift;

  if(@_){
    $self->{'array_set'} = shift;
  }

  return $self->{'array_set'};
}
=head2 add_Array
Arg [1] : Bio::EnsEMBL::Funcgen::Array
Example : $self->add_Array($array);
Description: Setter for array elements
Returntype : none
Exceptions : throws if passed non Array or if more than one Array set
Caller : Importer
Status : At risk - Implement multiple arrays? Move to Experiment?
=cut
=pod (see POD above) Setter for array elements; throws on non-Array args or
more than one Array in total.
=cut

sub add_Array{
  my $self = shift;

  #FIX(review): previously $_[0]->isa was called before checking @_, so a
  #no-arg call died with "Can't call method isa on an undefined value", and
  #only the first of several args was type-checked.  Validate every arg.
  throw("Must supply a Bio::EnsEMBL::Funcgen::Array") if ! @_;

  #do we need to check if stored?
  foreach my $array (@_) {
    if (! (ref($array) && $array->isa('Bio::EnsEMBL::Funcgen::Array'))) {
      throw("Must supply a Bio::EnsEMBL::Funcgen::Array");
    }
  }

  push @{$self->{'arrays'}}, @_;

  throw("Does not yet support multiple array imports") if(scalar (@{$self->{'arrays'}}) > 1);
  #need to alter read_probe data at the very least

  return;
}
=head2 arrays
Example : foreach my $array(@{$imp->arrays}){ ...do an array of things ...};
Description: Getter for the arrays attribute
Returntype : ARRAYREF
Exceptions : none
Caller : general
Status : at risk
=cut
# Lazy getter for the arrays listref; on first access falls back to
# fetching all Arrays for this Experiment from the database.
sub arrays{
  my $self = shift;

  unless (defined $self->{'arrays'}) {
    $self->{'arrays'} =
      $self->db->get_ArrayAdaptor->fetch_all_by_Experiment($self->experiment());
  }

  return $self->{'arrays'};
}
=head2 location
Example : $imp->location("Hinxton");
Description: Getter/Setter for group location
Arg [1] : optional - location
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the group location string.
sub location{
  my $self = shift;
  if (@_) {
    $self->{'location'} = shift;
  }
  return $self->{'location'};
}
=head2 contact
Example : my $contact = $imp->contact();
Description: Getter/Setter for the group contact
Arg [1] : optional - contact name/email/address
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the group contact (name/email/address).
sub contact{
  my $self = shift;
  if (@_) {
    $self->{'contact'} = shift;
  }
  return $self->{'contact'};
}
=head2 name
Example : $imp->name('Experiment1');
Description: Getter/Setter for the experiment name
Arg [1] : optional - experiment name
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the experiment name.
sub name{
  my $self = shift;
  if (@_) {
    $self->{'name'} = shift;
  }
  return $self->{'name'};
}
=head2 result_files
Example : $imp->result_files(\@files);
Description: Getter/Setter for the result file paths
Arg [1] : Listref of file paths
Returntype : Listref
Exceptions : none
Caller : general
Status : At risk
=cut
# Getter/setter for the listref of result file paths.
sub result_files{
  my $self = shift;
  if (@_) {
    $self->{'result_files'} = shift;
  }
  return $self->{'result_files'};
}
=head2 verbose
Example : $imp->verbose(1);
Description: Getter/Setter for the verbose flag
Arg [1] : optional - 0 or 1
Returntype : int
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the verbose flag (0 or 1).
sub verbose{
  my $self = shift;
  if (@_) {
    $self->{'verbose'} = shift;
  }
  return $self->{'verbose'};
}
=head2 experiment_date
Example : $imp->experiment_date('2006-11-02');
Description: Getter/Setter for the experiment date
Arg [1] : optional - date string in yyyy-mm-dd
Returntype : string
Exceptions : none
Caller : general
Status : At risk
=cut
# Getter/setter for the experiment date (YYYY-MM-DD).
#
# Fixes in this version:
#  - The validation pattern was /[0-9]{4}-[0-9]{2}[0-9]{2}/ - missing the
#    hyphen between month and day, and unanchored, so malformed dates such
#    as '2006-1102x' passed validation. Now anchored \A...\z with both
#    hyphens required.
#  - Typo in the error message ("to fe in" -> "to be in").
#  - Statement terminator in the nimblegen default branch was a comma
#    instead of a semicolon.
# When called with no argument and no date set, nimblegen imports default
# the date from the chip file via get_date().
sub experiment_date{
  my ($self) = shift;

  if (@_) {
    my $date = shift;

    if ($date !~ /\A[0-9]{4}-[0-9]{2}-[0-9]{2}\z/o) {
      throw('Parameter -experiment_date needs to be in the format: YYYY-MM-DD');
    }

    $self->{'experiment_date'} = $date;
  }
  elsif ($self->vendor() eq "nimblegen" && ! defined $self->{'experiment_date'}) {
    $self->{'experiment_date'} = &get_date("date", $self->get_config("chip_file"));
  }

  return $self->{'experiment_date'};
}
=head2 group
Example : my $exp_group = $imp->group();
Description: Getter/Setter for the group name
Arg [1] : optional - group name
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the group name.
sub group{
  my $self = shift;
  if (@_) {
    $self->{'group'} = shift;
  }
  return $self->{'group'};
}
=head2 dbname
Example : my $dbname = $imp->dbname();
Description: Getter for the database name
Arg [1] : none
Returntype : string
Exceptions : none
Caller : general
Status : At risk - to be removed, use db->dbc->dbname
=cut
# Deprecated getter for the database name - use $imp->db->dbc->dbname.
sub dbname{
  my ($self) = @_;
  deprecate('Use alternative method: $imp->db->dbname');
  return $self->db->dbc->dbname;
}
=head2 recovery
Example : if($imp->recovery()){ ....do recovery code...}
Description: Getter/Setter for the recovery flag
Arg [1] : optional - 0 or 1
Returntype : boolean
Exceptions : none
Caller : self
Status : Medium - Most recovery now dynamic using status table
=cut
# Getter/setter for the recovery flag (note the internal key is 'recover').
sub recovery{
  my $self = shift;
  if (@_) {
    $self->{'recover'} = shift;
  }
  return $self->{'recover'};
}
=head2 description
Example : $imp->description("Human chrX H3 Lys 9 methlyation");
Description: Getter/Setter for the experiment element
Arg [1] : optional - experiment description
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the experiment description.
sub description{
  my $self = shift;
  $self->{'description'} = shift if @_;
  return $self->{'description'};
}
=head2 feature_set_description
Example : $imp->feature_set_description("ExperimentalSet description");
Description: Getter/Setter for the FeatureSet description for an
InputSet import e.g. preprocessed GFF/Bed data
Arg [1] : optional - string feature set description
Returntype : string
Exceptions : none
Caller : general
Status : At risk
=cut
# Getter/setter for the FeatureSet description used by InputSet imports.
sub feature_set_description{
  my $self = shift;
  if (@_) {
    $self->{'feature_set_description'} = shift;
  }
  return $self->{'feature_set_description'};
}
=head2 format
Example : $imp->format("Tiled");
Description: Getter/Setter for the array format
Arg [1] : optional - array format
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the array format (e.g. 'Tiled').
sub format{
  my $self = shift;
  if (@_) {
    $self->{'format'} = shift;
  }
  return $self->{'format'};
}
=head2 experiment
Example : my $exp = $imp->experiment();
Description: Getter/Setter for the Experiment element
Arg [1] : optional - Bio::EnsEMBL::Funcgen::Experiment
Returntype : Bio::EnsEMBL::Funcgen::Experiment
Exceptions : throws if arg is not an Experiment
Caller : general
Status : Stable
=cut
# Getter/setter for the Experiment; throws unless passed a
# Bio::EnsEMBL::Funcgen::Experiment.
sub experiment{
  my $self = shift;

  if (@_) {
    unless ($_[0]->isa('Bio::EnsEMBL::Funcgen::Experiment')) {
      throw("Must pass a Bio::EnsEMBL::Funcgen::Experiment object");
    }
    $self->{'experiment'} = shift;
  }

  return $self->{'experiment'};
}
=head2 db
Example : $imp->db($funcgen_db);
Description: Getter/Setter for the db element
Arg [1] : optional - Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor
Returntype : Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor
Exceptions : throws if arg is not an DBAdaptor
Caller : general
Status : Stable
=cut
# Getter/setter for the funcgen DBAdaptor; throws if passed a defined
# value that is not a Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor.
sub db{
  my $self = shift;

  if (defined $_[0]) {
    if ($_[0]->isa("Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor")) {
      $self->{'db'} = shift;
    }
    else {
      throw("Need to pass a valid Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor");
    }
  }

  return $self->{'db'};
}
=head2 pass
Example : $imp->pass("password");
Description: Getter/Setter for the db password
Arg [1] : optional - db password
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the DB password.
sub pass{
  my $self = shift;
  if (@_) {
    $self->{'pass'} = shift;
  }
  return $self->{'pass'};
}
=head2 host
Example : $imp->host("hostname");
Description: Getter/Setter for the db hostname
Arg [1] : optional - db hostname
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the DB hostname.
sub host{
  my $self = shift;
  if (@_) {
    $self->{'host'} = shift;
  }
  return $self->{'host'};
}
=head2 port
Example : $imp->port(3306);
Description: Getter/Setter for the db port number
Arg [1] : optional - db port number
Returntype : int
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the DB port number.
sub port{
  my $self = shift;
  if (@_) {
    $self->{'port'} = shift;
  }
  return $self->{'port'};
}
=head2 user
Example : $imp->user("user_name");
Description: Getter/Setter for the db user name
Arg [1] : optional - db user name
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Getter/setter for the DB user name.
sub user{
  my $self = shift;
  if (@_) {
    $self->{'user'} = shift;
  }
  return $self->{'user'};
}
=head2 dump_fasta
Example : if($self->dump_fasta()){...do fasta dump...}
Description: Getter/Setter for the dump_fasta flag
Arg [1] : optional - 0 or 1
Returntype : boolean
Exceptions : none
Caller : self
Status : Stable
=cut
# Getter/setter for the fasta-dump flag (internal key '_dump_fasta').
sub dump_fasta{
  my $self = shift;
  if (@_) {
    $self->{'_dump_fasta'} = shift;
  }
  return $self->{'_dump_fasta'};
}
=head2 species
Example : $imp->species("homo_sapiens");
Description: Getter/Setter for species
Arg [1] : optional - species name(alias?)
Returntype : string
Exceptions : none ? throw if no alias found?
Caller : general
Status : Medium - may move reg alias look up to this method
=cut
# Getter/setter for the species name.
sub species{
  my $self = shift;

  #should we do reg alias look up here?
  #Will we ever want to redefine species? Change to just getter?
  if (@_) {
    $self->{'species'} = shift;
  }

  return $self->{'species'};
}
=head2 get_dir
Example : $imp->get_dir("import");
Description: Retrieves full path for given directory
Arg [1] : mandatory - dir name
Returntype : string
Exceptions : none
Caller : general
Status : at risk - move to Helper?
=cut
# Resolves a directory alias (e.g. 'import') to its full path via the
# generic get_data accessor, which stores paths under '<name>_dir'.
sub get_dir{
  my ($self, $dir_name) = @_;
  return $self->get_data($dir_name.'_dir');
}
=head2 norm_method
Example : my $norm_method = $imp->norm_method()
Description: Getter/Setter for normalisation method
Arg [1] : mandatory - method name
Returntype : string
Exceptions : none ? throw if no analysis with logic name
Caller : general
Status : At risk - restrict to logic_name and validate against DB, allow multiple
=cut
#Move to Nimblegen?
#Do we ever want to normalise other data?
# Getter/setter for the normalisation method logic name; when unset,
# lazily defaults from the 'norm_method' config entry.
sub norm_method{
  my $self = shift;

  if (@_) {
    $self->{'norm_method'} = shift;
  }
  else {
    $self->{'norm_method'} = $self->get_config('norm_method')
      if ! defined $self->{'norm_method'};
  }

  return $self->{'norm_method'};
}
=head2 get_config
Arg [1] : mandatory - name of the data element to retrieve from the config hash
Example : %dye_freqs = %{$imp->get_config('dye_freqs')};
Description: returns data from the definitions hash
Returntype : various
Exceptions : none
Caller : Importer
Status : at risk - replace with direct calls in the inherited Defs class?
=cut
# Fetches a named entry from the config hash via the generic get_data
# accessor.
sub get_config{
  my ($self, $config_name) = @_;
  return $self->get_data('config', $config_name); #will this cause undefs?
}
=head2 register_experiment
Example : $imp->register_experiment()
Description: General control method, performs all data import and normalisations
Arg [1] : optional - dnadb DBAdaptor
Returntype : none
Exceptions : throws if arg is not Bio::EnsEMBL::DBSQL::DBAdaptor
Caller : general
Status : Medium
=cut
#Need to split this method?
#Pre farm process
#Define and store all sets,
#pre process input file once if required
#How are we going to be able to tell wether this has been done successfully?
#runner will catch error, therefore safe to assume that file is complete if farm job
#unless we are not running with farm
#farm specific processes
#actually parse and store data
#Can we separate these for different import types?
# Top-level import driver: optionally attaches a dnadb, initialises the
# experiment import, runs vendor-specific config validation, reads probe
# and result data, then applies the configured normalisation method.
# Order matters throughout; do not reorder the calls below.
sub register_experiment{
my ($self) = shift;
#Need to check for dnadb passed with adaptor to contructor
if (@_) {
# Arg[1], when supplied, must be a core DBAdaptor to use as the dnadb.
# NOTE(review): $_[0]->isa is called before any defined check, so an
# explicit undef argument would die here rather than throw - confirm
# callers never pass undef.
if ( ! $_[0]->isa("Bio::EnsEMBL::DBSQL::DBAdaptor")) {
throw("You need to pass a valid dnadb adaptor to register the experiment");
}
$self->db->dnadb($_[0]);
} elsif ( ! $self->db) {
throw("You need to pass/set a DBAdaptor with a DNADB attached of the relevant data version");
}
#This could still be the default core db for the current version
#warn here if not passed DB?
#These should be vendor independent, only the read methods should need specific order?
$self->init_experiment_import();
#can we just have init here instead?
#This could do with a rewrite to move some things to the parsers
#$self::SUPER->register_experiment
# Only vendors/parsers that implement this hook get config validation.
$self->write_validate_experiment_config if $self->can('write_validate_experiment_config');
#This is too array specific!
#Can we have an array of import_methods in the config?
#foreach my $method(@{$self->get_config{'import_methods'}}){
#$self->method;
#}
#We're already doing this in read_data for each of these data_types
#Need to be able to run this separately, so we can normalise previously imported sets with different methods
#should be able t do this without raw data files e.g. retrieve info from DB
#Is this implemented?
# read_data dispatches to the read_*_data methods listed in config for
# each data type.
$self->read_data("probe");
$self->read_data("results");
# Normalisation is skipped entirely when no norm method is configured.
my $norm_method = $self->norm_method();
if (defined $norm_method) {
warn "norm method is $norm_method";
$self->R_norm($norm_method);
#change this to $self->$norm_method
#so we can have non-R_norm normalisation
}
return;
}
#Move array specific ones to Nimblegen.pm?
#Also used by ArrayDesign and Sanger.pm
#So need to create Array.pm baseclass, which is a Helper.
=head2 store_set_probes_features
Arg [1] : mandatory - array chip id
Arg [2] : optional - Bio::EnsEMBL::Funcgen::ProbeSet
Arg [3] : mandatory - hashref of keys probe id, values are
hash of probe/features with values
Bio::EnsEMBL::Funcgen::Probe/Features for a given
probe set if defined.
Example : $self->store_set_probes_features($ac->dbID(), $ops, \%pfs);
Description: Stores probe set, probes and probe features
Returntype : none
Exceptions : none
Caller : self
Status : Medium
=cut
# Stores a ProbeSet (if given), then every Probe and its ProbeFeatures
# from the supplied hash, finally emptying the caller's hash to free
# memory.
# NOTE(review): the POD above documents the args as (ac_id, ProbeSet,
# hashref) and the example passes ($ac->dbID(), $ops, \%pfs), but this
# sub unpacks ($ac_id, $pf_hash, $ops) - verify callers against the
# actual unpack order below.
sub store_set_probes_features{
my ($self, $ac_id, $pf_hash, $ops) = @_;
### Deal with ProbeSets
if ($ops) {
# ProbeSet size is the number of distinct probes in the hash.
$ops->size(scalar(keys %$pf_hash));
($ops) = $self->db->get_ProbeSetAdaptor->store($ops);
}
#If we're going to validate fully, we need to check for probes in this probeset on this array chip
#Update size if we have any new probes
#Overkill? Only do on recover? Do not read if array chip is IMPORTED
#This does not make any attempt to validate probes/set vs previously stored data
for my $probe_id (keys %$pf_hash) {
#set probeset in probe and store
#the process corresponding feature
my $probe = $pf_hash->{$probe_id}->{'probe'};
$probe->probeset($ops) if $ops;
# Adaptor store methods return listrefs; keep the (re-)stored object.
($probe) = @{$self->db->get_ProbeAdaptor->store($probe)};
#Can't use get_all_Arrays here as we can't guarantee this will only ever be the array we've generated
#Might dynamically load array if non-present
#This is allowing multiple dbIDs per probe??? Is this wrong?
#$self->cache_probe_info($probe->get_probename(), $probe->dbID());###########Remove as we're now importing all then resolving
foreach my $feature (@{$pf_hash->{$probe_id}->{'features'}}) {
# Attach the stored probe so the feature references a valid dbID.
$feature->probe($probe);
($feature) = @{$self->db->get_ProbeFeatureAdaptor->store($feature)};
}
}
# undef on the lexical $ops does NOT affect the caller's variable;
# undef %{$pf_hash} DOES empty the caller's hash (it is a reference).
undef $ops; #Will this persist in the caller?
undef %{$pf_hash};
return;
}
=head2 cache_slice
Arg [0] : string - region_name e.g. X
Arg [1] : optional - coordinate system name e.g. supercontig, defaults to chromosome
Example : my $slice = $self->cache_slice(12);
Description: Caches or retrieves from cache a given slice
Returntype : Bio::EnsEMBL::Slice
Exceptions : throws if no region name specified
Caller : self
Status : At risk
=cut
# Caches (or retrieves from cache) the Slice for a region name, keeping
# two caches: 'seen_slice_cache' records every region attempted (so
# failed fetches are not retried) and 'slice_cache' holds the slices
# passing the seq_region_names filter. Optionally bumps the
# total_features counter for InputSet imports.
sub cache_slice{
my ($self, $region_name, $cs_name, $total_count) = @_;
throw("Need to define a region_name to cache a slice from") if ! $region_name;
$self->{'slice_cache'} ||= {};
# Strip a leading UCSC-style 'chr' prefix. NOTE(review): the
# substitution is unanchored, so it removes the first 'chr' occurrence
# anywhere in the name - confirm region names can never legitimately
# contain 'chr' internally.
$region_name =~ s/chr//;
# Map mitochondrial 'M' to the Ensembl name 'MT'.
$region_name = "MT" if $region_name eq "M";
if (! exists ${$self->{'seen_slice_cache'}}{$region_name}) {
my $slice = $self->slice_adaptor->fetch_by_region($cs_name, $region_name);
#Set seen cache so we don't try this again
$self->{seen_slice_cache}{$region_name} = $slice;
if(! $slice){
warn("-- Could not generate a slice for ${cs_name}:$region_name\n");
}
else{
my $sr_name = $slice->seq_region_name; #In case we passed a slice name
if(@{$self->{seq_region_names}}){
# NOTE(review): this bare return exits before slice_cache is set,
# so filtered-out regions are 'seen' but never enter slice_cache.
return if ! grep(/^${sr_name}$/, @{$self->{seq_region_names}}); #not on required slice
}
}
$self->{'slice_cache'}->{$region_name} = $slice;
}
if($total_count && exists ${$self->{'seen_slice_cache'}}{$region_name}){
#This is an InputSet specific method
$self->count('total_features') if $self->can('count');
}
#Only return if exists to avoid creating hash key
return (exists $self->{'slice_cache'}->{$region_name}) ? $self->{'slice_cache'}->{$region_name} : undef;
}
=head2 slice_cache
Example : my @seen_slices = values(%{$self->slice_cache});;
Description: Returns the slice cache i.e. all the Slices seen in the data filtered
by the defined slices. This method can be used to run only the appropriate
slice jobs after a prepare stage.
Returntype : Hashref of seq_region name Bio::EnsEMBL::Slice pairs
Exceptions : None
Caller : self
Status : At risk
=cut
# Getter for the hashref of seq_region name => Slice pairs seen so far.
sub slice_cache{
  my ($self) = @_;
  return $self->{'slice_cache'};
}
=head2 cache_probe_info
Arg [0] : mandatory - probe name
Arg [1] : mandatory - probe dbID
Arg [2] : optional int - x coord of probe on array
Arg [3] : optional int - y coord of probe on array
Example : $self->cache_probe_info("Probe1", $probe->dbID());
Or for result files which do not have X & Y, we need to cache
X & Y from the design files: $self->cache_probe_info('Probe2', $probe->dbID(), $x, $y);
Description: Setter for probe cache values
Returntype : none
Exceptions : throws if cache conflict encountered
Caller : self
Status : At risk - merge with following?
=cut
# DEPRECATED - probe info is now resolved against the DB and read through
# a tied-file cache, so this method throws unconditionally. The code
# below the first throw is retained for reference only and is
# unreachable.
sub cache_probe_info{
  my ($self, $pname, $pid, $x, $y) = @_;

  throw('Deprecated, too memory expensive, now resolving DB duplicates and using Tied File cache');

  throw("Must provide a probe name and id") if (! defined $pname || ! defined $pid);

  # Historically cached [$pid] or [$pid, $x, $y] keyed on probe name.
  $self->{'_probe_cache'}->{$pname} = (defined $x && defined $y) ? [$pid, $x, $y] : [$pid];

  return;
}
=head2 get_probe_id_by_name_Array
Arg [1] : mandatory - probe name
Example : $pid = $self->get_probe_id_by_name($pname);
Description: Getter for probe cache values
Returntype : int
Exceptions : none
Caller : self
Status : At risk - merge with previous, move to importer?
=cut
# Looks up a probe dbID by probe name via the per-array cache file.
# The cache file is name-sorted, so lookups are expected to arrive in
# the same order; the last matched line is memoised in 'current_line'
# and the file handle is only advanced (never rewound).
sub get_probe_id_by_name_Array{
my ($self, $name, $array) = @_;
#this is only ever called for fully imported ArrayChips, as will be deleted if recovering
$self->resolve_probe_data() if(! exists $self->{'_probe_cache'}{$array->name()});
#we want to cycle through the given cache starting from the last position or 0.
#we don't want to have to test for the size of the cache each time as this will be quite expensive
#so we should store sizes separately along with current position
my ($pid, $line);
#check current line
# First try the memoised line from the previous lookup (handles
# repeated queries for the same probe name without touching the file).
if($line = $self->{'_probe_cache'}{$array->name()}{'current_line'}){
if($line =~ /^\Q${name}\E\t/){
$pid = (split/\t/o, $line)[1];
}
}
# Otherwise scan forward through the tab-separated cache file
# (columns: name, probe_id) until the name matches.
if(! $pid){
while($line = $self->{'_probe_cache'}{$array->name()}{'handle'}->getline()){
if($line =~ /^\Q${name}\E\t/){
$pid = (split/\t/o, $line)[1];
$self->{'_probe_cache'}{$array->name()}{'current_line'} = $line;
last;
}
}
}
#do not remove this
# Reaching EOF without a match is fatal: the cache is stale, the
# results are unsorted, or the probe name is anomalous.
if(! $pid){
throw("Did not find probe name ($name) in cache, cache may need rebuilding, results may need sorting, or do you have an anomolaous probe?")
}else{
# The id may be the last field on the line, so strip the newline.
chomp $pid;
}
return $pid;
}
=head2 get_probe_cache_by_Array
Arg[1] : Bio::EnsEMBL::Funcgen::Array
Arg[2] : boolean - from db flag, only to be used by Importer->resolve_probe_data !
Example : $self->get_probe_cache_by_Array();
Description: Gets the probe info cache which is an array tied to a file
Returntype : Boolean - True if cache has been generated and set successfully
Exceptions : none
Caller : general
Status : At risk
=cut
#from db flag should only be used by importer
#this is because there is no guarantee that it will be resolved unless
#called by resolve_probe_data
#which then renames the file and resets the handle
#can we clean this up and protect/hide this functionality?
#can we check the cache file name in the get methods and throw if it contains unresolved?
#make this private?
# Opens (and optionally regenerates from the DB) the probe cache file
# for an Array, storing an open handle and line memo under
# $self->{'_probe_cache'}{<array name>}. Returns 1 on success, 0 if the
# cache file could not be found/created.
sub get_probe_cache_by_Array{
my ($self, $array, $from_db) = @_;
# NOTE(review): $array->name() is called for the log message BEFORE the
# validity check below, so an invalid/undef $array dies here with an
# unhelpful error rather than the intended throw - confirm and reorder.
my $msg = "Getting probe cache for ".$array->name();
$msg .= " from DB" if $from_db;
$self->log($msg);#, 1);
if(! ($array && $array->isa('Bio::EnsEMBL::Funcgen::Array') && $array->dbID())){
throw('Must provide a valid stored Bio::EnsEMBL::Funcgen::Array object');
}
my $set = 0;
my $cache_file = $self->get_dir('caches').'/'.$array->name().'.probe_cache';
### Generate and resolve fresh cache from DB
if($from_db){
$cache_file .= '.unresolved';#This will be renamed by the caller if it is resolved
if(exists $self->{'_probe_cache'}{$array->name()}){
$self->log('Rebuilding probe_cache from DB for '.$array->name(), 1);
#untie @{$self->{'_probe_cache'}{$array->name()}{'entries'}};
#close($self->{'_probe_cache'}{$array->name()}{'handle'});#do we need to do this?
delete $self->{'_probe_cache'}{$array->name()};#implicitly closes
$self->log('Deleted old cache', 1);
}else{
$self->log('Building probe_cache from DB for '.$array->name(), 1);
}
#Move this to ProbeAdaptor?
#This is where we'd set the unique key for a vendor and resolves duplicates based on the key
# Dumps name/probe_id pairs sorted by name - the sort order is relied
# upon by get_probe_id_by_name_Array's forward-only scan.
# NOTE(review): the shell command is built by string interpolation;
# array names and connect strings are assumed shell-safe - verify.
my $cmd = 'SELECT name, probe_id from probe WHERE array_chip_id IN ('.join(',', @{$array->get_array_chip_ids()}).') ORDER by name, probe_id';
$cmd = 'mysql '.$self->db->connect_string()." -e \"$cmd\" >".$cache_file;
run_system_cmd($cmd);
}
### Set cache
if(-f $cache_file){
$self->log('MD5 check here?',1);
$self->{'_probe_cache'}{$array->name()}{'current_line'} = undef;
$self->{'_probe_cache'}{$array->name()}{'handle'} = open_file($cache_file);
#can we do a select count instead? and do this instead of the MD5?
#$cmd = "wc -l $cache_file";
#my $size = `$cmd`;
$set = 1;
}
else{
warn 'Failed to get probe cache for array:'.$array->name();
}
return $set;
}
#should reorganise these emthods to split reading the array data, and the actual data
#currently:
#meta reads array and chip data
#probe reads probe_set, probes, which should definitely be in array, probe_feature? and results
#native data format may not map to these methods directly, so may need to call previous method if required data not defined
=head2 read_data
Example : $self->read_data("probe")
Description: Calls each method in data_type array from config hash
Arg [1] : mandatory - data type
Returntype : none
Exceptions : none
Caller : self
Status : At risk
=cut
# Dispatches to each read_<name>_data method listed in the config entry
# "<data_type>_data" (e.g. read_data('probe') may call read_probe_data).
# Replaces the previous map-in-void-context with an explicit loop, since
# the map was used purely for side effects.
sub read_data{
  my ($self, $data_type) = @_;

  foreach my $data_name (@{$self->get_config("${data_type}_data")}) {
    my $method = "read_${data_name}_data";
    $self->$method();
  }

  return;
}
=head2 design_type
Example : $self->design_type("binding_site_identification")
Description: Getter/Setter for experimental design type
Arg [1] : optional - design type
Returntype : string
Exceptions : none
Caller : general
Status : At risk
=cut
# Getter/setter for the experimental design type (e.g.
# 'binding_site_identification'). The POD documents this as a
# getter/setter but the previous implementation silently ignored any
# argument; the setter path is now implemented. Getter-only callers are
# unaffected.
sub design_type{
  my $self = shift;
  $self->{'design_type'} = shift if @_;
  return $self->{'design_type'};
}
=head2 get_chr_seq_region_id
Example : $seq_region_id = $self->get_seq_region_id('X');
Description: Fetches the seq_region_id for the given chromosome name
Arg [1] : mandatory - chromosome name
Arg [2] : optional - start value
Arg [3] : optional - end value
Returntype : int
Exceptions : none
Caller : self
Status : At risk
=cut
#convinience wrapper method
#could we use the seq region cache instead?
#this seems like a lot of overhead for getting the id
# Convenience wrapper: fetches the chromosome slice for $chr (optionally
# bounded by $start/$end to disambiguate scaffold assemblies) and
# returns its seq_region_id.
sub get_chr_seq_region_id{
  my ($self, $chr, $start, $end) = @_;
  #what about strand info?
  #we could pass the slice back to the slice adaptor for this, to avoid dbid problems betwen DBs
  ###would need to implement other cs's here
  my $slice = $self->slice_adaptor->fetch_by_region("chromosome", $chr, $start, $end);
  return $slice->get_seq_region_id();
}
#convinience method
# Lazily instantiates and caches the SliceAdaptor from the attached db.
sub slice_adaptor{
  my $self = shift;

  $self->{'slice_adaptor'} = $self->db->get_SliceAdaptor()
    unless defined $self->{'slice_adaptor'};

  return $self->{'slice_adaptor'};
}
=head2 vsn_norm
Example : $self->vsn_norm();
Description: Convinience/Wrapper method for vsn R normalisation
Returntype : none
Exceptions : none
Caller : general
Status : At risk
=cut
#Have Norm class or contain methods in importer?
#Need to have analysis set up script for all standard analyses.
# Convenience wrapper - runs the VSN_GLOG R normalisation.
sub vsn_norm{
  my ($self) = @_;
  return $self->R_norm("VSN_GLOG");
}
=head2 farm
Arg [1] : Boolean
Example : $importer->farm(1);
Description: Flag to turn farm submission on
Returntype : Boolean
Exceptions : Throws if argument not a boolean
Caller : general
Status : At risk
=cut
# Getter/setter flag for farm (batch) submission; the argument must be
# exactly 0 or 1.
sub farm{
  my ($self, $farm) = @_;

  $self->{'farm'} ||= undef; #define farm

  if (defined $farm) {
    unless ($farm == 1 || $farm == 0) {
      throw("Argument to farm must be a boolean 1 or 0");
    }
    $self->{'farm'} = $farm;
  }

  return $self->{'farm'};
}
=head2 batch_job
Arg [1] : Boolean
Example : $importer->batch_job(1);
Description: Flag to turn on batch_job status
Returntype : Boolean
Exceptions : Throws if argument not a boolean
Caller : general
Status : At risk
=cut
# Getter/setter flag marking this process as a farm batch job; the
# argument must be exactly 0 or 1.
sub batch_job{
  my ($self, $batch_job) = @_;

  if (defined $batch_job) {
    unless ($batch_job == 1 || $batch_job == 0) {
      throw("Argument to batch_job must be a boolean 1 or 0");
    }
    $self->{'batch_job'} = $batch_job;
  }

  return $self->{'batch_job'};
}
=head2 prepared
Arg [1] : Boolean
Example : $importer->prepared(1);
Description: Flag to turn on prepared file status
This signifies that the files have been previously imported
using prepare mode and may not match the InputSubset names
Returntype : Boolean
Exceptions : None
Caller : general
Status : At risk
=cut
# Getter/setter flag - input files were produced by a prepare-mode run
# and may not match the InputSubset names.
sub prepared{
  my ($self, $prepared) = @_;
  $self->{'prepared'} = $prepared if defined $prepared;
  return $self->{'prepared'};
}
=head2 R_norm
Example : $self->R_norm(@logic_names);
Description: Performs R normalisations for given logic names
Returntype : none
Exceptions : Throws if R exits with error code or if data not not valid for analysis
Caller : general
Status : At risk
=cut
sub R_norm{
my ($self, @logic_names) = @_;
#This currently normalises a single two colour chip at a time
#rather than normalising across a set of chip
#also does in sets of analyses
#good for keeping data separate, but not efficient in terms of querying
#convert to use one script which only queries for each echip once, then does each anal
my $aa = $self->db->get_AnalysisAdaptor();
my $rset_adaptor = $self->db->get_ResultSetAdaptor();
my $ra_id = $aa->fetch_by_logic_name("RawValue")->dbID();
my %r_config = (
"VSN_GLOG" => {( libs => ['vsn'],
#norm_method => 'vsn',
)},
"T.Biweight" => {(
libs => ['affy'],
#norm_method => 'tukey.biweight',
)},
);
foreach my $logic_name (@logic_names) {
#throw("Not yet implemented TukeyBiweight") if $logic_name eq "Tukey_Biweight";
#this has already been chcecked and set as the norm_analysis
#need to resolve this multi method approach
my $norm_anal = $aa->fetch_by_logic_name($logic_name);
#This only creates an RSet for the IMPORT set
#So if we re-run with a different analysis
#tab2mage will have already been validated
#So RSet generation will be skipped
#We need to recreate the each non-import RSet for this norm analysis
#This also means the RSets are being created before the data has been imported
#This avoids having to parse tab2mage each time but means we have an uncertain status of these Rsets
my $rset = $self->get_import_ResultSet($norm_anal, 'experimental_chip');
my @chips = ();
if (! $rset) {
$self->log("All ExperimentalChips already have status:\t${logic_name}");
} else { #Got data to normalise and import
my @dbids;
my $R_file = $self->get_dir("norm")."/${logic_name}.R";
my $job_name = $self->experiment->name()."_${logic_name}";
my $resultfile = $self->get_dir("norm")."/result.${logic_name}.txt";
my $outfile = $self->get_dir("norm")."/${logic_name}.out";
#How do we get farm job output i.e. run time memusage
#from interactive job?
#This assumes R_PATH
my $errfile = $self->get_dir("norm")."/${logic_name}.err";
#Let's build this better so we capture the farm output aswell as the job output.
my $cmdline = "$ENV{'R_PATH'} --no-save < $R_file";# >$errfile 2>&1";
#-K option waits for job to complete
my $bsub = "bsub -K -J $job_name ".$ENV{'R_BSUB_OPTIONS'}.
" -e $errfile -o $outfile $ENV{'R_FARM_PATH'} CMD BATCH $R_file";
#Can we separate the out and err for commandline?
my $r_cmd = (! $self->farm()) ? "$cmdline >$outfile 2>&1" : $bsub;
$self->backup_file($resultfile); #Need to do this as we're appending in the loop
#setup qurey
#warn "Need to add host and port here";
#Set up DB, defaults and libs for each logic name
my $query = "options(scipen=20);library(RMySQL);library(Ringo);";
#scipen is to prevent probe_ids being converted to exponents
#Ringo is for default QC
#foreach my $ln(@logic_names){
foreach my $lib (@{$r_config{$logic_name}{'libs'}}) {
$query .= "library($lib);";
}
#}
$query .= "con<-dbConnect(dbDriver(\"MySQL\"), host=\"".$self->db->dbc->host()."\", port=".$self->db->dbc->port().", dbname=\"".$self->db->dbc->dbname()."\", user=\"".$self->db->dbc->username()."\"";
#should use read only pass here as we are printing this to file
$query .= ", pass=\"".$self->db->dbc->password."\")\n";
#Build queries for each chip
foreach my $echip (@{$self->experiment->get_ExperimentalChips()}) {
#should implement logic name here?
#can't as we need seperate ResultSet for each
if ($echip->has_status($logic_name)) {
$self->log("ExperimentalChip ".$echip->unique_id()." already has status:\t$logic_name");
} else {
#warn "Need to roll back here if recovery, as norm import may fail halfway through";
push @chips, $echip;
my $cc_id = $rset->get_chip_channel_id($echip->dbID());
#if ($self->recovery()){
# $self->log('Rolling back results for ExperimentalChip('.$echip->dbID().") $logic_name");
# $self->db->rollback_results($cc_id) if $self->recovery();
# }
$self->log("Building $logic_name R cmd for ".$echip->unique_id());
@dbids = ();
foreach my $chan (@{$echip->get_Channels()}) {
if ($chan->type() eq "EXPERIMENTAL") {
push @dbids, $chan->dbID();
} else {
unshift @dbids, $chan->dbID();
}
}
throw("vsn does not accomodate more than 2 channels") if (scalar(@dbids > 2) && $logic_name eq "VSN_GLOG");
#should do some of this with maps?
#HARDCODED metric ID for raw data as one
#Need to get total and experimental here and set db_id accordingly
#can probably do this directly into one df
$query .= "c1<-dbGetQuery(con, 'select r.probe_id as PROBE_ID, r.score as CONTROL_score, r.X, r.Y from result r, chip_channel c, result_set rs where c.table_name=\"channel\" and c.table_id=${dbids[0]} and c.result_set_id=rs.result_set_id and rs.analysis_id=${ra_id} and c.chip_channel_id=r.chip_channel_id')\n";
$query .= "c2<-dbGetQuery(con, 'select r.probe_id as PROBE_ID, r.score as EXPERIMENTAL_score, r.X, r.Y from result r, chip_channel c, result_set rs where c.table_name=\"channel\" and c.table_id=${dbids[1]} and c.result_set_id=rs.result_set_id and rs.analysis_id=${ra_id} and c.chip_channel_id=r.chip_channel_id')\n";
#Can we define some of the basic structures here and reuse in the QC and each norm method?
#Is this going to eat up memory?
#can we strip out and separate the data from c1 and c2 into RGList and
#individual vector for probe_ids, then rm c1 and c2 to free up memory
#create RGList object
$query .= "R<-as.matrix(c1['CONTROL_score'])\nG<-as.matrix(c2['EXPERIMENTAL_score'])\n";
$query .= "genes<-cbind(c1['PROBE_ID'], c1['X'], c1['Y'])\n";
$query .= "testRG<-new('RGList', list(R=R, G=G, genes=genes))\n";
#QC plots here before doing norm
#open pdf device
$query .= "pdf('".$self->get_dir('norm').'/'.$echip->unique_id."_QC.pdf', paper='a4', height = 15, width = 9)\n";
#set format
$query .= "par(mfrow = c(2,2), font.lab = 2)\n";
#Channel densisties
#These need limma or Ringo
$query .= "plotDensities(testRG)\n";
#MvA Plot
$query .= 'meanLogA<-((log(testRG$R, base=exp(2)) + log(testRG$G, base=exp(2)))/2)'."\n";
$query .= 'logIntRatioM<-(log(testRG$R, base=exp(2)) - log(testRG$G, base=exp(2)))'."\n";
$query .= "yMin<-min(logIntRatioM)\n";
$query .= "yMax<-max(logIntRatioM)\n";
#Need to validate yMax here
#If is is Inf then we need to sort the vector and track back until we find the high real number
#count number of Infs and note on MvA plot
$query .= "infCount<-0\n";
$query .= "if( yMax == Inf){; sortedM<-sort(logIntRatioM); lengthM<-length(logIntRatioM); indexM<-lengthM\n"
."while (yMax == Inf){; indexM<-(indexM-1); yMax<-sortedM[indexM];}; infCount<-(lengthM-indexM);}\n";
#
$query .= "if(infCount == 0){\n";
$query .= 'plot(meanLogA, logIntRatioM, xlab="A - Average Log Ratio",ylab="M - Log Ratio",pch=".",ylim=c(yMin,yMax), main="'.$echip->unique_id.'")'."\n";
$query .= "} else {\n";
$query .= 'plot(meanLogA, logIntRatioM, xlab="A - Average Log Ratio",ylab="M - Log Ratio",pch=".",ylim=c(yMin,yMax), main="'.$echip->unique_id.'", sub=paste(infCount, " Inf values not plotted"));'."}\n";
#$query .= 'plot(log(testRG$R*testRG$G, base=exp(2))/2, log(testRG$R/testRG$G, base=exp(2)),xlab="A",ylab="M",pch=".",ylim=c(-3,3), main="'.$echip->unique_id."\")\n";
#Plate plots
$query .= 'image(testRG, 1, channel = "green", mycols = c("black", "green4", "springgreen"))'."\n";
$query .= 'image(testRG, 1, channel = "red", mycols = c("black", "green4", "springgreen"))'."\n";
$query .= "dev.off()\n";
#Finished QC pdf printing
#The simple preprocess step of Ringo is actually vsn, so we can nest these in line
### Build Analyses cmds ###
if($logic_name eq 'T.Biweight'){
#log2 ratios
$query .= 'lr_df<-cbind((log(c2["EXPERIMENTAL_score"], base=exp(2)) - log(c1["CONTROL_score"], base=exp(2))))'."\n";
#Adjust using tukey.biweight weighted average
#inherits first col name
$query .= 'norm_df<-(lr_df["EXPERIMENTAL_score"]-tukey.biweight(as.matrix(lr_df)))'."\n";
$query .= 'formatted_df<-cbind(rep.int(0, length(c1["PROBE_ID"])), c1["PROBE_ID"], sprintf("%.3f", norm_df[,1]), rep.int('.$cc_id.', length(c1["PROBE_ID"])), c1["X"], c1["Y"])'."\n";
}
elsif($logic_name eq 'VSN_GLOG'){
#could do this directly
$query .= "raw_df<-cbind(c1[\"CONTROL_score\"], c2[\"EXPERIMENTAL_score\"])\n";
#variance stabilise
$query .= "norm_df<-vsn(raw_df)\n";
#do some more calcs here and print report?
#fold change exponentiate? See VSN docs
#should do someplot's of raw and glog and save here?
#set log func and params
#$query .= "par(mfrow = c(1, 2)); log.na = function(x) log(ifelse(x > 0, x, NA));";
#plot
#$query .= "plot(exprs(glog_df), main = \"vsn\", pch = \".\");".
# "plot(log.na(exprs(raw_df)), main = \"raw\", pch = \".\");";
#FAILS ON RAW PLOT!!
#par(mfrow = c(1, 2))
#> meanSdPlot(nkid, ranks = TRUE)
#> meanSdPlot(nkid, ranks = FALSE)
#Now create table structure with glog values(diffs)
#3 sig dec places on scores(doesn't work?!)
$query .= 'formatted_df<-cbind(rep.int(0, length(c1["PROBE_ID"])), c1["PROBE_ID"], sprintf("%.3f", (exprs(norm_df[,2]) - exprs(norm_df[,1]))), rep.int('.$cc_id.', length(c1["PROBE_ID"])), c1["X"], c1["Y"])'."\n";
}
#load back into DB
#c3results<-cbind(rep("", length(c3["probe_id"])), c3["probe_id"], c3["c3_score"], rep(1, length(c3["probe_id"])), rep(1, length(c3["probe_id"])))
#may want to use safe.write here
#dbWriteTable(con, "result", c3results, append=TRUE)
#dbWriteTable returns true but does not load any data into table!!!
$query .= "write.table(formatted_df, file=\"${resultfile}\", sep=\"\\t\", col.names=FALSE, row.names=FALSE, quote=FALSE, append=TRUE)\n";
#tidy up here??
}
}
$query .= "q();";
open(RFILE, ">$R_file") || throw("Cannot open $R_file for writing");
print RFILE $query;
close(RFILE);
my $submit_text = "Submitting $logic_name job";
$submit_text .= ' to farm' if $self->farm;
$self->log("${submit_text}:\t".localtime());
run_system_cmd($r_cmd);
$self->log("Finished $logic_name job:\t".localtime());
$self->log('See '.$self->get_dir('norm').' for ExperimentalChip QC files');
#Now load file and update status
#Import directly here to avoid having to reparse all results if we crash!!!!
$self->log("Importing:\t$resultfile");
$self->db->load_table_data("result", $resultfile);
$self->log("Finishing importing:\t$resultfile");
foreach my $echip(@chips){
$echip->adaptor->store_status($logic_name, $echip);
}
#Recreate all non-import RSets for analysis if not already present
#
my $rset_a = $self->db->get_ResultSetAdaptor();
my %seen_rsets;
foreach my $anal_rset(@{$rset_a->fetch_all_by_Experiment($self->experiment)}){
next if($anal_rset->name =~ /_IMPORT$/o);
next if(exists $seen_rsets{$anal_rset->name});
next if $anal_rset->analysis->logic_name eq $norm_anal->logic_name;
$seen_rsets{$rset->name} = 1;
$anal_rset->analysis($norm_anal);
$anal_rset->{'dbID'} = undef;
$anal_rset->{'adaptor'} = undef;
#add the chip_channel_ids from the new anal IMPORT set
foreach my $table_id(@{$anal_rset->table_ids}){
$anal_rset->{'table_id_hash'}{$table_id} = $rset->get_chip_channel_id($table_id);
}
$self->log('Adding new ResultSet '.$anal_rset->name.' with analysis '.$norm_anal->logic_name);
$rset_a->store($anal_rset);
}
}
}
return;
}
#can we sub this? args: table_name, logic_name
#also use result_set_name
#would also clean all data for result set if recovery
#return would be result_set
#Can we extend this to incorporate InputSet parser define_sets?
# get_import_ResultSet
#
# Arg[1]   : Bio::EnsEMBL::Analysis - must be stored (i.e. have a dbID)
# Arg[2]   : string - table name, 'experimental_chip' or 'channel'
# Returns  : Bio::EnsEMBL::Funcgen::ResultSet, or undef when every
#            chip/channel already has the relevant import status
#            (i.e. there is no new data to import)
# Purpose  : Fetches or creates the '<experiment-name>_IMPORT' ResultSet
#            for the given analysis, registers any chips/channels which
#            do not yet carry the import status, and in recovery mode
#            rolls back partial results for those entries.
sub get_import_ResultSet{
my ($self, $anal, $table_name) = @_;
# Refuse anything but a stored Analysis object
if (!($anal && $anal->isa("Bio::EnsEMBL::Analysis") && $anal->dbID())) {
throw("Must provide a valid stored Bio::EnsEMBL::Analysis");
}
$self->log("Getting import $table_name ResultSet for analysis:\t".$anal->logic_name());
my ($rset, @new_chip_channels);
my $result_adaptor = $self->db->get_ResultSetAdaptor();
my $logic_name = $anal->logic_name;
# RawValue imports are tracked via the generic IMPORTED status; every
# other analysis uses its own logic_name as the status string
my $status = ($logic_name eq "RawValue") ? "IMPORTED" : $logic_name;
if(($logic_name) eq 'RawValue' && ($table_name eq 'experimental_chip')){
throw("Cannot have an ExperimentalChip ResultSet with a RawValue analysis, either specify 'channel' or another analysis");
}
#Build IMPORT Set for $table_name
foreach my $echip (@{$self->experiment->get_ExperimentalChips()}) {
#clean chip import and generate rset
if($table_name eq 'experimental_chip'){
if ($echip->has_status($status)) { #this translates to each channel having the IMPORTED_RawValue status
$self->log("ExperimentalChip(".$echip->unique_id().") already has status:\t".$status);
}
else {
$self->log("Found ExperimentalChip(".$echip->unique_id().") without status $status");
push @new_chip_channels, $echip;
}
}else{#channel
foreach my $chan(@{$echip->get_Channels()}){
if ($chan->has_status($status)) { #this translates to each channel having the IMPORTED_RawValue status
$self->log("Channel(".$echip->unique_id()."_".$self->get_config('dye_freqs')->{$chan->dye()}.") already has status:\t".$status);
}
else {
$self->log("Found Channel(".$echip->unique_id()."_".$self->get_config('dye_freqs')->{$chan->dye()}.") without status $status");
push @new_chip_channels, $chan;
}
}
}
# Only resolve the ResultSet once (first time we find unimported data).
# $rset stays undef when nothing new was found, so the fetch/create
# below is skipped entirely in that case.
if (( ! $rset) && @new_chip_channels) {
my(@tmp) = @{$result_adaptor->fetch_all_by_name_Analysis($self->name()."_IMPORT", $anal)};
if(scalar(@tmp) > 1){
throw('Found more than one IMPORT ResultSet for '.$self->name().'_IMPORT with analysis '.$logic_name);
}
$rset = shift @tmp;
#do we need to throw here if not recovery?
#what if we want the import result set elsewhere during the first import?
#if ($self->recovery()) {
#fetch by anal and experiment_id
#Need to change this to result_set.name!
# warn("add chip set handling here");
#my @tmp = @{$result_adaptor->fetch_all_by_Experiment_Analysis($self->experiment(), $anal)};
#throw("Found more than one ResultSet for Experiment:\t".$self->experiment->name()."\tAnalysis:\t".$anal->logic_name().')' if (scalar(@tmp) >1);
#$rset = $tmp[0];
#warn "fetching rset with ".$self->name()."_IMPORT ". $anal->logic_name;
#$rset = $result_adaptor->fetch_by_name_Analysis($self->name()."_IMPORT", $anal);
warn("Warning: Could not find recovery ResultSet for analysis ".$logic_name) if ! $rset;
#}
# Nothing stored yet - create and store a fresh IMPORT ResultSet
if (! $rset) {
$self->log("Generating new ResultSet for analysis ".$logic_name);
$rset = Bio::EnsEMBL::Funcgen::ResultSet->new
(
-analysis => $anal,
-table_name => $table_name,
-name => $self->name()."_IMPORT",
-feature_type => $self->feature_type(),
-cell_type => $self->cell_type(),
);
#These types should be set to NULL during the MAGE-XML validation if we have more than one type in an experiment
($rset) = @{$result_adaptor->store($rset)};
}
}
}
#do we need this here as we're rolling back in the read methods?
#we only want to roll back those chips/channels which have not been registered
# NOTE(review): $rset is only defined when @new_chip_channels is
# non-empty, and this loop iterates @new_chip_channels, so the
# $rset->contains() call below cannot be reached with an undef $rset.
if ($self->recovery()) {
my $ec_adaptor = $self->db->get_ExperimentalChipAdaptor();
foreach my $cc(@new_chip_channels){
#only roll back if already part of import set
#Not previously registered if not
if($rset->contains($cc) && $rset->get_chip_channel_id($cc->dbID())){
if($table_name eq 'channel'){
my $chan_name = $ec_adaptor->fetch_by_dbID($cc->experimental_chip_id())->unique_id()."_".
$self->get_config('dye_freqs')->{$cc->dye()};
$self->log("Rolling back results for $table_name:\t".$chan_name);
}else{
$self->log("Rolling back results for $table_name:\t".$cc->unique_id);
}
$self->rollback_results([$rset->get_chip_channel_id($cc->dbID())]);
}
}
}
#check whether it is present in the ResultSet and add if not
if ($rset) {
#ids will already be present if not rset i.e. already imported
foreach my $cc(@new_chip_channels){
$rset->add_table_id($cc->dbID()) if(! $rset->contains($cc));
}
}
if ($rset) {
$result_adaptor->store_chip_channels($rset);
} else {
$self->log("All ExperimentalChips have status:\t$status");
}
#this only returns a result set if there is new data to import
return $rset;
}
=head2 resolve_probe_data
Example : $self->resolve_probe_data();
Description: Resolves DB probe duplicates and builds local probe cache
Returntype : none
Exceptions : ????
Caller : general
Status : At risk
=cut
# Resolves DB probe duplicates and (re)builds the local probe cache for
# each array. An array needs resolving when any of its ArrayChips lacks
# the RESOLVED status, or when no cache file exists yet. Duplicate probe
# ids (same probe name, different dbIDs) are merged via tidy_duplicates.
sub resolve_probe_data{
my $self = shift;
$self->log("Resolving probe data", 1);
warn "Probe cache resolution needs to accomodate probesets too!";
foreach my $array(@{$self->arrays()}){
my $resolve = 0;
if($self->get_probe_cache_by_Array($array)){#cache already generated
#check if we have any new unresolved array chips to add to the cache
foreach my $achip(@{$array->get_ArrayChips()}){
if($achip->has_status('RESOLVED')){
$self->log("ArrayChip has RESOLVED status:\t".$achip->design_id());#, 1);
next;
}else{
$self->log("Found un-RESOLVED ArrayChip:\t".$achip->design_id());
$resolve = 1;
last;
}
}
}else{#no cache file
$resolve = 1;
$self->log('No probe cache found for array '.$array->name());
}
if($resolve){
$self->log('Resolving array duplicates('.$array->name().') and rebuilding probe cache.', 1);
$self->get_probe_cache_by_Array($array, 1);#get from DB
#we need to make sure we mark cache as unresolved, so we don't use it by mistake.
my ($line, $name, $pid, @pids);
#my $index = 0;
my $tmp_name = '';
my $tmp_id = '';
# Scan the (name-sorted) cache line by line, collecting the distinct
# probe ids seen for each probe name; whenever the name changes, hand
# any duplicates for the previous name to tidy_duplicates.
#miss the header
while ($line = $self->{'_probe_cache'}{$array->name}{'handle'}->getline()){
($name, $pid) = split/\t/o, $line;
if($name eq $tmp_name){
if($pid != $tmp_id){
push @pids, $pid;
#should reset to pid here if we have x y data else undef
#ignore this and force result to have x y
}
#can't do this anymore unless we figure out how to move the line pointer
#would still need to sed the file anyway, better to regen from DB?
#undef $self->{'_probe_cache'}{$array->name}{'entries'}->[$i];#delete true or to be resolved duplicate
}
elsif($name ne $tmp_name){#new probe
$self->tidy_duplicates(\@pids) if(scalar(@pids) > 1);
$tmp_name = $name;
$tmp_id = $pid;
@pids = ($pid);
#$index = $i + 1;
}
}
# Flush the trailing group left over after the loop ends
$self->tidy_duplicates(\@pids) if(scalar(@pids) > 1);
#rename resolved cache and reset cache handle
my $cmd = 'mv '.$self->get_dir('caches').'/'.$array->name().'.probe_cache.unresolved '.
$self->get_dir('caches').'/'.$array->name().'.probe_cache';
run_system_cmd($cmd);
$self->get_probe_cache_by_Array($array); #This sets the caches
#warn "Only generate MD5 here, as this is guaranteed to be correct";
foreach my $achip(@{$array->get_ArrayChips()}){
if(! $achip->has_status('RESOLVED')){
$self->log("Updating ArrayChip to RESOLVED status:\t".$achip->design_id());
$achip->adaptor->store_status('RESOLVED', $achip);
}
}
$self->log('Finished building probe cache for '.$array->name(), 1);
}
}
$self->log('Finished resolving probe data', 1);
return;
}
# tidy_duplicates
#
# Arg[1]   : arrayref of duplicate probe dbIDs; the FIRST id is the one
#            that is kept, all others are merged into it
# Purpose  : Groups the ProbeFeatures of all duplicate probes by their
#            seq_region:start position. For each position, features not
#            belonging to the kept probe are deleted - except that when
#            the kept probe has no feature at that position, one of the
#            duplicates' features is reassigned to it instead.
sub tidy_duplicates{
my ($self, $pids) = @_;
my $pfa = $self->db->get_ProbeFeatureAdaptor();
my ($feature, %features);
# Bucket every feature of every duplicate probe by genomic position
foreach my $dup_id(@$pids){
foreach $feature(@{$pfa->fetch_all_by_Probe_id($dup_id)}){
#can we safely assume end will be same too?
push @{$features{$feature->seq_region_name().':'.$feature->start()}}, $feature;
}
}
my (@reassign_ids, @delete_ids);
foreach my $seq_start_key(keys %features){
# Assume we must reassign until we see the kept probe at this position
my $reassign_features = 1;
foreach $feature(@{$features{$seq_start_key}}){
if($feature->probe_id() == $pids->[0]){
$reassign_features = 0;
}else{
push @delete_ids, $feature->dbID();
}
}
#This assumes that we actually have at least one element to every seq_start_key array
# Kept probe had no feature here: rescue the most recently queued
# duplicate feature from deletion and reassign it instead.
if($reassign_features){
my $new_fid = pop @delete_ids;
push @reassign_ids, $new_fid;
}
}
#resolve features first so we don't get any orphaned features if we crash.
$pfa->reassign_features_to_probe(\@reassign_ids, $pids->[0]) if @reassign_ids;
$pfa->delete_features(\@delete_ids) if @delete_ids;
return;
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-functgenomics/modules/Bio/EnsEMBL/Funcgen/Importer.pm | Perl | apache-2.0 | 85,382 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::dlink::dgs3100::snmp::mode::components::fan;

use strict;
use warnings;

# Fan state codes as defined in the D-Link 'env_mib.mib'
my %map_states = (
    1 => 'normal',
    2 => 'warning',
    3 => 'critical',
    4 => 'shutdown',
    5 => 'notPresent',
    6 => 'notFunctioning',
);

# In MIB 'env_mib.mib'
my $mapping = {
    rlEnvMonFanStatusDescr => { oid => '.1.3.6.1.4.1.171.10.94.89.89.83.1.1.1.2' },
    rlEnvMonFanState => { oid => '.1.3.6.1.4.1.171.10.94.89.89.83.1.1.1.3', map => \%map_states },
};
my $oid_rlEnvMonFanStatusEntry = '.1.3.6.1.4.1.171.10.94.89.89.83.1.1.1';

# Register the fan status table in the batched SNMP request.
sub load {
    my ($self) = @_;

    push @{$self->{request}}, { oid => $oid_rlEnvMonFanStatusEntry };
}

# Walk the fan status table, apply the user's section/instance filters,
# count the fans and raise an alert for any fan whose state maps to a
# non-OK severity.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking fans");
    $self->{components}->{fan} = {name => 'fans', total => 0, skip => 0};
    return if ($self->check_filter(section => 'fan'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_rlEnvMonFanStatusEntry}})) {
        # Iterate on the description column only: one instance per fan
        next if ($oid !~ /^$mapping->{rlEnvMonFanStatusDescr}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_rlEnvMonFanStatusEntry}, instance => $instance);

        next if ($self->check_filter(section => 'fan', instance => $result->{rlEnvMonFanStatusDescr}));
        # Skip absent fans unless the user asked to treat absence as a problem
        next if ($result->{rlEnvMonFanState} eq 'notPresent' &&
                 $self->absent_problem(section => 'fan', instance => $result->{rlEnvMonFanStatusDescr}));

        $self->{components}->{fan}->{total}++;
        $self->{output}->output_add(long_msg => sprintf("fan '%s' status is %s.",
                                    $result->{rlEnvMonFanStatusDescr}, $result->{rlEnvMonFanState},
                                    ));
        my $exit = $self->get_severity(section => 'fan', value => $result->{rlEnvMonFanState});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            # Bug fix: this previously interpolated $result->{rbnFanDescr},
            # a key from the Redback plugin this code was derived from.
            # That key is never set by $mapping above, so the short message
            # always showed an empty fan name.
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("fan '%s' status is %s",
                                                             $result->{rlEnvMonFanStatusDescr}, $result->{rlEnvMonFanState}));
        }
    }
}

1;
package Paws::Greengrass::UpdateLoggerDefinition;
# Request class for the AWS Greengrass UpdateLoggerDefinition API call.
# The 'has' attributes are the call parameters; the class attributes
# below describe how the request is serialised and dispatched.
  use Moose;
  # Id of the logger definition to update; interpolated into the URI
  # (ParamInURI trait + the {LoggerDefinitionId} placeholder below).
  has LoggerDefinitionId => (is => 'ro', isa => 'Str', traits => ['ParamInURI'], uri_name => 'LoggerDefinitionId', required => 1);
  # Optional new name for the logger definition.
  has Name => (is => 'ro', isa => 'Str');
  use MooseX::ClassAttribute;
  # Wire-level metadata: API name, URI template, HTTP verb, response class.
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'UpdateLoggerDefinition');
  class_has _api_uri => (isa => 'Str', is => 'ro', default => '/greengrass/definition/loggers/{LoggerDefinitionId}');
  class_has _api_method => (isa => 'Str', is => 'ro', default => 'PUT');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Greengrass::UpdateLoggerDefinitionResponse');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Greengrass::UpdateLoggerDefinition - Arguments for method UpdateLoggerDefinition on Paws::Greengrass
=head1 DESCRIPTION
This class represents the parameters used for calling the method UpdateLoggerDefinition on the
AWS Greengrass service. Use the attributes of this class
as arguments to method UpdateLoggerDefinition.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to UpdateLoggerDefinition.
As an example:
$service_obj->UpdateLoggerDefinition(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> LoggerDefinitionId => Str
logger definition Id
=head2 Name => Str
name of the definition
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method UpdateLoggerDefinition in L<Paws::Greengrass>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Greengrass/UpdateLoggerDefinition.pm | Perl | apache-2.0 | 2,031 |
% Ciao Prolog module providing simple syntax checking of code and
% assertions. It exports no predicates of its own; user-facing
% documentation lives in the :- comment/2 directives below.
:- module(assrt_synchk,
[
],
[assertions]).
:- comment(title,"Simple syntax checking of code and assertions").
:- comment(module,"
@cindex{checking syntax of code}
@cindex{checking syntax of assertions}
This module defines some predicates which are useful for checking the
syntax of the code and assertions in a file, as well as imports and
exports. Full (semantic) assertion checking must be done with the
preprocessor.
This module can be used in three ways:
@begin{itemize}
@item From the top level, by loading this module and calling the
appropriate exported predicate.
@item From a shell, using the @tt{fileinfo} utility (see the @tt{etc}
directory).
@item From the CIAO emacs mode, by selecting the appropriate menu
option (or key binding).
@end{itemize}
").
%% ISO Compat
:- use_module(library(format)).
:- use_module(library(aggregates)).
%% CIAO libraries
:- use_module(library('compiler/c_itf')).
:- use_module(library('assertions/assrt_lib')).
| leuschel/ecce | www/CiaoDE/ciao/lib/assertions/assrt_synchk.pl | Perl | apache-2.0 | 994 |
use strict;
| Litres/FB3Editor | Backend/perl/fb3edit.pl | Perl | bsd-2-clause | 13 |
#!/usr/bin/perl -w
#
# handlertree.pl
# ~~~~~~~~~~~~~~
# A tool for post-processing the debug output generated by Asio-based programs
# to print the tree of handlers that resulted in some specified handler ids.
# Programs write this output to the standard error stream when compiled with
# the define `BOOST_ASIO_ENABLE_HANDLER_TRACKING'.
#
# Copyright (c) 2003-2020 Christopher M. Kohlhoff (chris at kohlhoff dot com)
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
use strict;
my %target_handlers = ();
my @cached_output = ();
my %outstanding_handlers = ();
my %running_handlers = ();
#-------------------------------------------------------------------------------
# Build the initial list of target handlers from the command line arguments.
# Seed the set of target handler ids from the command line arguments.
sub build_initial_target_handlers()
{
  $target_handlers{$_} = 1 for @ARGV;
}
#-------------------------------------------------------------------------------
# Parse the debugging output and cache the handler tracking lines.
# Read the program's debug trace from STDIN and cache every line that
# matches the '@asio|timestamp|action|description' tracking format.
sub parse_debug_output()
{
  while (defined(my $record = <STDIN>))
  {
    chomp $record;
    push @cached_output, $record
      if $record =~ /\@asio\|([^|]*)\|([^|]*)\|(.*)$/;
  }
}
#-------------------------------------------------------------------------------
# Iterate over the cached output in revese and build a hash of all target
# handlers' ancestors.
# Walk the cached trace from newest to oldest; whenever a tracked handler
# was created by some parent handler ("parent*child" record), the parent
# becomes tracked too. Reverse order guarantees a child is marked before
# its creation record is examined, so whole ancestor chains are collected.
sub build_target_handler_tree()
{
  for my $entry (reverse @cached_output)
  {
    next unless $entry =~ /\@asio\|([^|]*)\|([^|]*)\|(.*)$/;
    my $action = $2;

    # Handler creation: "parent*child".
    if ($action =~ /^([0-9]+)\*([0-9]+)$/)
    {
      $target_handlers{$1} = 1
        if $1 ne "0" and exists($target_handlers{$2});
    }
  }
}
#-------------------------------------------------------------------------------
# Print out all handler tracking records associated with the target handlers.
# Print, in original order, every cached tracking record that refers to a
# target handler (or to handler id 0, the "no handler" marker, when paired
# with a target). Each record is echoed verbatim, one per line.
sub print_target_handler_records()
{
  for my $line (@cached_output)
  {
    if ($line =~ /\@asio\|([^|]*)\|([^|]*)\|(.*)$/)
    {
      my $action = $2;

      # Handler location.
      if ($action =~ /^([0-9]+)\^([0-9]+)$/)
      {
        print("$line\n") if ($1 eq "0" or exists($target_handlers{$1})) and exists($target_handlers{$2});
      }
      # Handler creation.
      # Bug fix: this branch previously printed "$1, $2, $line\n" (a
      # debugging leftover), breaking the uniform record-per-line output
      # that every other branch produces. Echo the raw record instead.
      if ($action =~ /^([0-9]+)\*([0-9]+)$/)
      {
        print("$line\n") if ($1 eq "0" or exists($target_handlers{$1})) and exists($target_handlers{$2});
      }
      # Begin handler invocation.
      elsif ($action =~ /^>([0-9]+)$/)
      {
        print("$line\n") if (exists($target_handlers{$1}));
      }
      # End handler invocation.
      elsif ($action =~ /^<([0-9]+)$/)
      {
        print("$line\n") if (exists($target_handlers{$1}));
      }
      # Handler threw exception.
      elsif ($action =~ /^!([0-9]+)$/)
      {
        print("$line\n") if (exists($target_handlers{$1}));
      }
      # Handler was destroyed without being invoked.
      elsif ($action =~ /^~([0-9]+)$/)
      {
        print("$line\n") if (exists($target_handlers{$1}));
      }
      # Operation associated with a handler.
      elsif ($action =~ /^\.([0-9]+)$/)
      {
        print("$line\n") if (exists($target_handlers{$1}));
      }
    }
  }
}
#-------------------------------------------------------------------------------
# Main driver: seed the target set from @ARGV, read the tracking trace
# from STDIN, expand each target's ancestor chain via creation records,
# then print every record involving a target handler.
build_initial_target_handlers();
parse_debug_output();
build_target_handler_tree();
print_target_handler_records();
| stan-dev/math | lib/boost_1.75.0/libs/asio/tools/handlertree.pl | Perl | bsd-3-clause | 3,615 |
# Functions for reading and writing autoreply files
# read_autoreply(file, &simple)
# Fills in the autoreply parts of a simple alias structure from a file
# read_autoreply(file, &simple)
# Fills in the autoreply parts of a simple alias structure from a file.
# Recognised header lines populate the corresponding hash keys; every
# other line is accumulated into 'autotext', which is always set (to ""
# for an empty or unreadable file).
sub read_autoreply
{
my ($file, $simple) = @_;
my @lines;
# 3-arg open with a lexical handle: the old 2-arg open(FILE, $file)
# would honour mode characters embedded in the filename (e.g. a
# leading '>' would truncate the file). Open failure stays non-fatal,
# preserving the original behaviour of returning an empty structure.
if (open(my $FILE, "<", $file)) {
	while(my $line = <$FILE>) {
		if ($line =~ /^Reply-Tracking:\s*(.*)/) {
			$simple->{'replies'} = $1;
			}
		elsif ($line =~ /^Reply-Period:\s*(.*)/) {
			$simple->{'period'} = $1;
			}
		elsif ($line =~ /^No-Autoreply:\s*(.*)/) {
			$simple->{'no_autoreply'} = $1;
			}
		elsif ($line =~ /^No-Autoreply-Regexp:\s*(.*)/) {
			push(@{$simple->{'no_autoreply_regexp'}}, $1);
			}
		elsif ($line =~ /^Autoreply-File:\s*(.*)/) {
			push(@{$simple->{'autoreply_file'}}, $1);
			}
		elsif ($line =~ /^Autoreply-Start:\s*(\d+)/) {
			$simple->{'autoreply_start'} = $1;
			}
		elsif ($line =~ /^Autoreply-End:\s*(\d+)/) {
			$simple->{'autoreply_end'} = $1;
			}
		elsif ($line =~ /^From:\s*(.*)/) {
			$simple->{'from'} = $1;
			}
		elsif ($line =~ /^Charset:\s*(\S+)/) {
			$simple->{'charset'} = $1;
			}
		else {
			# Not a recognised header - part of the reply body
			push(@lines, $line);
			}
		}
	close($FILE);
	}
$simple->{'autotext'} = join("", @lines);
}
# write_autoreply(&file, &simple)
# Writes the autoreply parts of a simple alias structure to a file
# write_autoreply(&file, &simple)
# Serialises the autoreply parts of a simple alias structure to a file:
# each recognised header line first, then the verbatim autoreply text.
sub write_autoreply
{
local ($file, $simple) = @_;
&open_tempfile(AUTO, ">$file");
# Leading scalar headers, in the order read_autoreply emits them
foreach my $hdr ([ 'Reply-Tracking', 'replies' ],
		 [ 'Reply-Period', 'period' ],
		 [ 'No-Autoreply', 'no_autoreply' ]) {
	&print_tempfile(AUTO, $hdr->[0].": ".$simple->{$hdr->[1]}."\n")
		if ($simple->{$hdr->[1]});
	}
# Repeatable, list-valued headers
foreach my $r (@{$simple->{'no_autoreply_regexp'}}) {
	&print_tempfile(AUTO, "No-Autoreply-Regexp: $r\n");
	}
foreach my $f (@{$simple->{'autoreply_file'}}) {
	&print_tempfile(AUTO, "Autoreply-File: $f\n");
	}
# Trailing scalar headers
foreach my $hdr ([ 'Autoreply-Start', 'autoreply_start' ],
		 [ 'Autoreply-End', 'autoreply_end' ],
		 [ 'From', 'from' ],
		 [ 'Charset', 'charset' ]) {
	&print_tempfile(AUTO, $hdr->[0].": ".$simple->{$hdr->[1]}."\n")
		if ($simple->{$hdr->[1]});
	}
# The reply body goes last, unmodified
&print_tempfile(AUTO, $simple->{'autotext'});
&close_tempfile(AUTO);
}
1;
| xtso520ok/webmin | filter/autoreply-file-lib.pl | Perl | bsd-3-clause | 2,180 |
#!/usr/bin/perl -w
#
# This program makes use of the JSDoc module to make a JavaDoc equivalent
# for JavaScript. The template that is used is based on the JavaDoc
# doclet. This program only needs to be invoked with one or more
# JS OO sourcefiles as command-line args.
#
use strict;
use HTML::Template;
use File::Copy;
use File::Basename;
use File::Path;
use Getopt::Long;
use File::Find;
use lib dirname($0);
use JSDoc;
use JSDoc::XML;
use JSDoc::XMI;
use JavaScript::Syntax::HTML qw(to_html);
use constant LOCATION => dirname($0) . '/';
use constant MAIN_TMPL => "main.tmpl";
use constant ALLCLASSES_TMPL => 'allclasses-frame.tmpl';
use constant ALLCLASSES_NOFRAME_TMPL => 'allclasses-noframe.tmpl';
use constant OVERVIEW_FRAME_TMPL => 'overview-frame.tmpl';
use constant TREE_TMPL => 'overview-tree.tmpl';
use constant OVERVIEW_TMPL => 'overview-summary.tmpl';
use constant INDEX_TMPL => 'index.tmpl';
use constant DEFAULT_DEST_DIR => 'js_docs_out/';
use constant STYLESHEET => 'stylesheet.css';
use constant HELP_TMPL => 'help-doc.tmpl';
use constant INDEX_ALL_TMPL => 'index-all.tmpl';
use constant CONSTANTS_TMPL => 'constant-values.tmpl';
use vars qw/ $CLASSES $DEFAULT_CLASSNAME @CLASSNAMES @INDEX %TMPL_CACHE
%CLASS_ATTRS_MAP %METHOD_ATTRS_MAP %FILE_ATTRS_MAP %OPTIONS
@FILENAMES %FILE_OVERVIEWS $TIME $CURRENT_CLASS /;
#
# Begin main execution
#
&parse_cmdline;
&initialize_param_maps;
$TIME = localtime();
# Optional per-project configuration, evaluated as Perl code
do '.jsdoc_config';
warn "Error parsing config file: $@\n" if $@;
my @sources;
# Make sure the output directory exists before any work is done
mkdir($OPTIONS{OUTPUT})
or die "Can't create output directory $OPTIONS{OUTPUT}: $!\n"
unless (-e $OPTIONS{OUTPUT} && -d $OPTIONS{OUTPUT});
# Show usage and bail out when asked for help or given nothing to parse
if (@ARGV < 1 || $OPTIONS{HELP} || !(@sources = &load_sources())){
warn "No sourcefiles supplied\n" if !$OPTIONS{HELP};
&show_usage();
exit(1);
}
# Parse the code tree
&configure_parser(
GLOBALS_NAME => $OPTIONS{GLOBALS_NAME},
NO_LEXICAL_PRIVATES => $OPTIONS{NO_LEXICAL_PRIVATES});
$CLASSES = &parse_code_tree(@sources);
# Per-file overviews travel in the class hash under a reserved key;
# pull them out so only real classes remain in $CLASSES
%FILE_OVERVIEWS = %{delete $CLASSES->{__FILES__}};
&map_implementation_classes();
die "Nothing to document, exiting\n" unless keys %{$CLASSES};
# Dispatch on the requested output format
if ($OPTIONS{FORMAT} eq 'html'){
&output_html;
} elsif ($OPTIONS{FORMAT} eq 'xml') {
&output_xml;
} elsif ($OPTIONS{FORMAT} eq 'xmi'){
&output_xmi;
} else {
die "Unknown data format '$OPTIONS{FORMAT}'\n";
}
&_log('Completed generating documentation');
#
# End main execution
#
# Propagates methods from @implements'd interfaces into their implementing
# classes (and, recursively, into subclasses that don't override them),
# and records on each interface which classes implement it
# (constructor_vars->{implemented_by}).
sub map_implementation_classes {
my (%implementations, %implementors);
# Build both directions of the implements relation:
# class -> [interfaces], interface -> [classes]
for my $class (keys %$CLASSES){
my $implements = $CLASSES->{$class}->{constructor_vars}->{implements};
if ($implements){
for my $iname (map {strip($_)} @$implements){
push @{$implementations{$class}}, $iname;
push @{$implementors{$iname}}, $class;
}
}
}
# NOTE(review): these nested named subs are package-level in Perl (they
# are not closures over this sub's lexicals); they only touch the
# package global $CLASSES, so the nesting is safe here.
# Recursively push an inherited method down to every subclass of
# $subclasses_of that doesn't define it itself.
sub add_method_to_subclasses {
my ($cname, $method, $subclasses_of) = @_;
$subclasses_of ||= $cname;
for my $subclass (keys %$CLASSES){
if (($CLASSES->{$subclass}->{extends} || '') eq $subclasses_of){
if (not has_method($subclass, $method)){
my $inherits_hash = $CLASSES->{$subclass}->{inherits};
push @{$inherits_hash->{$cname}->{instance_methods}},
$method;
add_method_to_subclasses($cname, $method, $subclass);
}
}
}
}
# Look up an instance method record by class and mapped name, or undef.
sub get_instance_method {
my ($classname, $methodname) = @_;
my $class = $CLASSES->{$classname} or return undef;
for my $method (@{$class->{instance_methods}}){
if ($method->{mapped_name} eq $methodname){
return $method;
}
}
return undef;
}
# Copy the methods to the implementor
for my $class (keys %implementations){
for my $interface (map {$CLASSES->{$_}} @{$implementations{$class}}){
# Gather the interface's own methods plus those it inherits
my @interface_methods = @{$interface->{instance_methods} || []};
for my $superclass (keys %{$interface->{inherits} || {}}){
push @interface_methods,
map { get_instance_method($superclass, $_) }
@{$interface->{inherits}->{$superclass}->
{instance_methods}};
}
for my $interface_method (@interface_methods){
if (not has_method($class, $interface_method->{mapped_name})){
push @{$CLASSES->{$class}->{instance_methods}},
$interface_method;
add_method_to_subclasses($class,
$interface_method->{mapped_name});
}
}
}
}
# Put a flag on interfaces to say who they're implemented by
for my $interface (keys %implementors){
my $interface_class = $CLASSES->{$interface};
push @{$interface_class->{constructor_vars}->{implemented_by}}, $_
for @{$implementors{$interface}};
}
}
#
# Output a single template
#
# Render one HTML::Template file into the output directory.
#
#   $tmplname - template filename, relative to $OPTIONS{TEMPLATEDIR}
#   $outname  - output filename, relative to $OPTIONS{OUTPUT}
#   $params   - hashref of template parameters
#   $relaxed  - when true, unknown parameters are tolerated
sub output_template {
    my ($tmplname, $outname, $params, $relaxed) = @_;
    # Normalise the template dir (strip one trailing slash) and build path
    $OPTIONS{TEMPLATEDIR} =~ s/(\S+)\/$/$1/;
    $tmplname = "$OPTIONS{TEMPLATEDIR}/$tmplname";
    die "Template file '$tmplname' not found" unless -e $tmplname;
    # Parsed templates are cached; this is a significant speedup
    $TMPL_CACHE{$tmplname} ||= HTML::Template->new(
        die_on_bad_params => !$relaxed,
        filename          => $tmplname);
    my $tmpl = $TMPL_CACHE{$tmplname};
    $tmpl->param($params);
    &print_file($OPTIONS{OUTPUT} . $outname, $tmpl->output);
}
#
# Output data to a file
#
# Write $data to $fname, creating or truncating the file.
# Dies with a useful message on open or close failure (close is checked
# too, since buffered write errors only surface there).
sub print_file {
    my ($fname, $data) = @_;
    # 3-arg open with a lexical handle: the old 2-arg open(FILE, ">$fname")
    # used a bareword handle and would silently switch to append mode for
    # a filename beginning with '>'.
    open my $fh, '>', $fname
        or die "Couldn't open '$fname' to write: $!\n";
    print $fh $data;
    close $fh
        or die "Couldn't close '$fname': $!\n";
}
#
# Output HTML documentation in the output directory
#
#
# Output HTML documentation in the output directory: compute per-class
# file paths, then emit the class pages, index, auxiliary pages and the
# class hierarchy tree, in that order.
#
sub output_html {
&set_file_paths();
&output_class_templates();
&output_index_template();
&output_aux_templates();
&output_tree_template();
}
#
# Output XMI in the output directory
#
# Serialise the parsed class data as XMI into <output>/jsdoc.xmi.
sub output_xmi {
    &print_file("$OPTIONS{OUTPUT}/jsdoc.xmi",
        JSDoc::XMI->new(LOCATION)->output($CLASSES));
}
#
# Output XML in the output directory
#
# Serialise the parsed class data as XML into <output>/jsdoc.xml.
sub output_xml {
    &print_file("$OPTIONS{OUTPUT}/jsdoc.xml",
        JSDoc::XML->new(LOCATION)->output($CLASSES));
}
# For every parsed class, derive its output file path (dots in the class
# name become directory separators), record it on the class, create the
# target directory, and flag "package" classes: those that only group
# inner classes and have no methods or fields of their own.
sub set_file_paths {
    for my $classname (keys %$CLASSES){
        my $class = $CLASSES->{$classname};
        # Foo.Bar.Baz -> Foo/Bar/Baz.html
        (my $filepath = $classname) =~ s/\./\//g;
        $filepath .= '.html';
        my $dirpath = dirname($filepath);
        $class->{filepath} = $filepath;
        $class->{dirpath}  = $dirpath;
        my $fullpath = $OPTIONS{OUTPUT} . $dirpath;
        mkpath($fullpath) unless -e $fullpath;
        my $has_members = @{$class->{instance_methods} || []}
                       || @{$class->{instance_fields}  || []};
        $class->{ispackage} = @{$class->{inner_classes}} && !$has_members;
    }
}
#
# Gather information for each class and output its template
#
sub output_class_templates {
    # Build the sorted global @CLASSNAMES list (including classes that are
    # only referenced via 'extends' and were never defined), record the
    # known source files in @FILENAMES, then render one MAIN_TMPL page per
    # documented, linkable class.
    # Note the class name for later, including classes that aren't defined
    # but are used for inheritance
    my %seen;
    @CLASSNAMES = sort { lc $a->{classname} cmp lc $b->{classname}}
        grep { !$seen{$_->{classname}}++ }
        (map {classname => $_,
              classlink => link_to_class($_) } ,
         grep { not defined $CLASSES->{$_}->{constructor_vars}->{private}
                or $OPTIONS{PRIVATE} }
         keys %$CLASSES),
        (map { classname => $_ }, grep { !defined($$CLASSES{$_}) }
         map { $_->{extends} } grep { defined($_->{extends}) }
         values %$CLASSES);
    die "Nothing to document, exiting\n" unless @CLASSNAMES;
    @FILENAMES = map {filename => $_, mangledfilename => mangle($_)},
        sort {lc($a) cmp lc($b)} grep {length $_} keys %FILE_OVERVIEWS;
    for (my $i = 0; $i < @CLASSNAMES; $i++){
        my $classname = $CLASSNAMES[$i]->{classname};
        $CURRENT_CLASS = $classname;
        next unless $$CLASSES{$classname};
        # Template Parameters
        my ($class, $subclasses, $class_summary, $constructor_params,
            $next_class, $prev_class, $constructor_attrs,
            $constructor_detail, $constructor_args);
        $class = $$CLASSES{$classname};
        next unless is_class_linkable($classname);
        &add_to_index($class, $classname);
        # Set up the constructor and class information
        &resolve_synonyms($class->{constructor_vars});
        &format_vars($class->{constructor_vars});
        ($constructor_params, $constructor_args) =
            &fetch_args($class->{constructor_vars},
                $class->{constructor_args});
        $constructor_attrs =
            &format_method_attributes($class->{constructor_vars});
        $constructor_detail =
            &resolve_inner_links($class->{constructor_detail});
        $class_summary = &format_class_attributes($class->{constructor_vars});
        $class_summary = &resolve_inner_links($class_summary);
        # Navbar information
        $next_class = $i + 1 < @CLASSNAMES
            ? link_to_class($CLASSNAMES[$i + 1]->{classname}, $classname)
            : undef;
        $prev_class = $i > 0
            ? link_to_class($CLASSNAMES[$i - 1]->{classname}, $classname)
            : undef;
        my $superclass = $class->{extends} || '';
        if ($superclass && $$CLASSES{$superclass}){
            $superclass = make_link($superclass, $classname)
                unless (!$OPTIONS{PRIVATE}
                    && $$CLASSES{$superclass}->{constructor_vars}->{private});
        }
        # Link back to the defining file's overview page, if any.
        # NOTE(review): this hash key was corrupted to '->(unknown)' in the
        # source; restored as {filename}, matching how source files are
        # keyed throughout this script (see @FILENAMES above).
        my $file_overview = $class->{constructor_vars}->{filename}
            ? sprintf('overview-summary-%s.html',
                mangle($class->{constructor_vars}->{filename}))
            : '';
        my @inner_classes = map {
            classname => $_->{class_name},
            classlink => link_to_class($_->{class_name}, $classname)
        }, @{$class->{inner_classes} || []};
        &output_template(MAIN_TMPL, $class->{filepath}, {
            path_to_base        => path_to_base($classname),
            next_class          => $next_class,
            prev_class          => $prev_class,
            file_overview       => $file_overview,
            superclass          => $superclass,
            constructor_args    => $constructor_args,
            constructor_params  => $constructor_params,
            constructor_attrs   => $constructor_attrs,
            constructor_returns => $class->{constructor_vars}->{returns}[0],
            class_summary       => $class_summary,
            class_attribs       => $class->{constructor_vars}->{private} ?
                '&lt;private&gt;' : '',
            constructor_detail  => $constructor_detail,
            constructor_summary => &get_summary($constructor_detail),
            classname           => $classname,
            hierarchy_meta      => get_class_hierarchy_metadata($classname),
            class_tree          => &build_class_tree($classname, $CLASSES),
            fields              => &map_fields($class),
            methods             => &map_methods($class),
            method_inheritance  => &map_method_inheritance($class),
            field_inheritance   => &map_field_inheritance($class),
            inner_classes       => \@inner_classes,
            project_name        => $OPTIONS{PROJECT_NAME},
            page_footer         => $OPTIONS{PAGE_FOOTER},
            ctime               => $TIME
        }, 1);
    }
}
#
# Handle cleaning up / resolving inner links in FILE_OVERVIEWS
#
sub process_file_overviews {
    # Resolve {@link} directives inside every stored file overview.
    # The raw source text is stashed aside first so link resolution never
    # touches it, and restored afterwards.
    # NOTE(review): the map key was corrupted to '->(unknown)'; restored as
    # {filename}, matching the @FILENAMES entries built in
    # output_class_templates.
    for my $filename (map { $_->{filename} } @FILENAMES){
        my $overview = $FILE_OVERVIEWS{$filename};
        my $src = $overview->{src};
        $overview->{src} = '';
        format_vars($overview);
        # NOTE(review): resolve_inner_links() expects a string but is given
        # the overview hashref here; since a ref never matches the {@link}
        # pattern it is returned unchanged. Kept as-is to preserve behavior.
        $overview =
            resolve_inner_links($FILE_OVERVIEWS{$filename});
        $overview->{src} = $src;
        $FILE_OVERVIEWS{$filename} = $overview;
    }
}
#
# Output all the non-class template files
#
sub output_aux_templates(){
    # Emit every non-class page: validate/copy the --logo, resolve file
    # overviews, pick the default class for the index frame, write the
    # frames/index/help/overview/constants pages, then copy the stylesheet.
    unless ($OPTIONS{LOGO} and -f $OPTIONS{LOGO} and -r $OPTIONS{LOGO}){
        # A logo was requested but is unreadable: warn once and drop it.
        $OPTIONS{LOGO} and warn "Can't read $OPTIONS{LOGO}";
        $OPTIONS{LOGO} = '';
    }
    $OPTIONS{LOGO} and copy $OPTIONS{LOGO}, $OPTIONS{OUTPUT};
    &process_file_overviews;
    # The first linkable class becomes the initial page of the class frame.
    $DEFAULT_CLASSNAME =
        (grep {
            is_class_linkable($_->{classname}) } @CLASSNAMES)[0]->{classname};
    my $summary = &get_overall_summary;
    &output_classes_frames_templates;
    &output_multiple_files_templates
        if (@FILENAMES > 1 || $OPTIONS{PACKAGENAMING});
    &output_index_and_help_templates($summary);
    &output_overview_summaries($summary);
    &output_const_summary();
    # The stylesheet ships with the templates; a custom --template-dir
    # must provide its own copy or we abort.
    my $stylesheet = LOCATION . STYLESHEET;
    if ($OPTIONS{TEMPLATEDIR} ne LOCATION){
        $stylesheet = $OPTIONS{TEMPLATEDIR} . '/' . STYLESHEET;
        die "Stylesheet '$stylesheet' not found" unless -e $stylesheet;
    }
    copy ($stylesheet, $OPTIONS{OUTPUT} . STYLESHEET);
}
sub get_overall_summary {
    # Pick the project-wide summary text: an explicit --project-summary
    # file wins; otherwise, when exactly one source file was documented,
    # its @fileoverview (plus formatted file attributes) is reused.
    # Returns undef when neither source is available.
    my $summary;
    if ($OPTIONS{PROJECT_SUMMARY}){
        # Lexical handle + three-arg open (the old bareword two-arg open
        # would have treated leading mode characters in the path specially).
        if (-f $OPTIONS{PROJECT_SUMMARY} and
            open my $sum_fh, '<', $OPTIONS{PROJECT_SUMMARY}){
            local $/ = undef;   # slurp the whole file
            $summary = <$sum_fh>;
            close $sum_fh;
        } else {
            warn "Can't open $OPTIONS{PROJECT_SUMMARY}";
        }
    } elsif (@FILENAMES == 1) {
        # If we only have one file and it has an overview, use that overview
        # NOTE(review): hash key restored from corrupted '->(unknown)'.
        my $filename = $FILENAMES[0]->{filename};
        if ($FILE_OVERVIEWS{$filename}->{fileoverview}){
            $summary = $FILE_OVERVIEWS{$filename}->{fileoverview}[0];
            $summary .= "<BR/><BR/>";
            while (my ($name, $val) = each %{$FILE_OVERVIEWS{$filename}}){
                $summary .= &{$FILE_ATTRS_MAP{$name}}($val)
                    if $FILE_ATTRS_MAP{$name};
            }
        }
    }
    $summary;
}
#
# Output the main (default) page and the help template
#
sub output_index_and_help_templates {
    # Write index.html (the frameset entry page) and help-doc.html.
    # $summary is accepted for interface compatibility but not used here.
    my ($summary) = @_;
    # Package-style classes matter only when --package-naming is on; the
    # count decides whether the index uses the multi-file frame layout.
    my $package_count = grep { $_->{ispackage} } values %$CLASSES;
    my $default_target = @FILENAMES > 1
        ? 'overview-summary.html'
        : link_to_class($DEFAULT_CLASSNAME);
    # Output the main index template
    output_template(INDEX_TMPL, 'index.html', {
        DEFAULT_CLASSNAME => $default_target,
        multifile => (@FILENAMES > 1
            || ($package_count && $OPTIONS{PACKAGENAMING}))});
    # Output the help document template
    output_template(HELP_TMPL, 'help-doc.html', {
        page_footer  => $OPTIONS{PAGE_FOOTER},
        ctime        => $TIME,
        project_name => $OPTIONS{PROJECT_NAME} });
}
#
# Output the frames listing all the classes
#
sub output_classes_frames_templates {
    # Build the "All Classes" navigation listings, with and without frame
    # targets. Under --package-naming, package pseudo-classes are omitted.
    my @linkable = grep { is_class_linkable($_->{classname}) } @CLASSNAMES;
    @linkable = grep { !($CLASSES->{$_->{classname}}->{ispackage}) } @linkable
        if $OPTIONS{PACKAGENAMING};
    my %frame_params = (
        filename   => 'All Classes',
        fname_link => '<a href="overview-summary.html" ' .
                      'target="classFrame">All Classes</a>',
        CLASSNAMES => \@linkable );
    # With a single file and no packages, the frame also carries the
    # project banner.
    if (@FILENAMES < 2 && !$OPTIONS{PACKAGENAMING}){
        $frame_params{project_name} = $OPTIONS{PROJECT_NAME};
        $frame_params{logo}         = basename($OPTIONS{LOGO});
    }
    output_template(ALLCLASSES_TMPL, 'allclasses-frame.html', \%frame_params);
    output_template(ALLCLASSES_NOFRAME_TMPL, 'allclasses-noframe.html', {
        CLASSNAMES   => \@linkable,
        project_name => $OPTIONS{PROJECT_NAME},
        logo         => basename($OPTIONS{LOGO}) });
}
#
# Output the overview summary templates
#
sub output_overview_summaries {
    # Emit the global overview-summary.html and then one per-file summary
    # page listing that file's classes and its GLOBALS-level methods.
    my ($summary) = @_;
    my @overviews = map {
        name => $_,
        link => &mangle("overview-summary-$_.html"),
        overview =>
            get_summary(
                $FILE_OVERVIEWS{$_}{fileoverview}[0] || ' ')
    }, sort {lc($a) cmp lc($b)} keys(%FILE_OVERVIEWS);
    &output_template(OVERVIEW_TMPL, 'overview-summary.html', {
        generic         => 1,
        project_name    => $OPTIONS{PROJECT_NAME},
        project_title   => $OPTIONS{PROJECT_NAME},
        page_footer     => $OPTIONS{PAGE_FOOTER},
        ctime           => $TIME,
        project_summary => $summary,
        is_file_summary => 0,
        overviews       => \@overviews });
    for my $filename (keys %FILE_OVERVIEWS){
        # Classes defined in this particular file.
        # NOTE(review): hash key restored from corrupted '->(unknown)';
        # classes record their source file under constructor_vars->{filename}.
        my @classes = grep {
            ($$CLASSES{$_}->{constructor_vars}->{filename} || '') eq $filename
        } keys %$CLASSES;
        my @class_overviews = sort { lc($a->{name}) cmp lc($b->{name}) }
            map {
                name     => $_,
                link     => link_to_class($_),
                overview => get_summary(
                    $CLASSES->{$_}->{constructor_vars}->{class}[0] || ' ')
            }, grep { !$CLASSES->{$_}->{constructor_vars}->{private}
                      || $OPTIONS{PRIVATE} } @classes;
        # Work on a shallow copy so deleting 'src' doesn't mutate the
        # global overview record until we store it back.
        my %overview = %{$FILE_OVERVIEWS{$filename}};
        my $src = delete $overview{src};
        my $summary = $overview{fileoverview}[0] ||
            "No overview generated for '$filename'";
        $summary .= "<BR/><BR/>";
        while (my ($name, $val) = each %overview){
            $summary .= &{$FILE_ATTRS_MAP{$name}}($val)
                if $FILE_ATTRS_MAP{$name};
        }
        # Methods that live under the artificial GLOBALS class but were
        # defined in this file.
        my @methods =
            map {
                is_private       => $_->{is_private},
                method_summary   => $_->{method_summary},
                is_class_method  => $_->{is_class_method},
                method_anchor    =>
                    sprintf('%s%s', $_->{is_class_method} ? '!s!' : '',
                        $_->{method_name}),
                method_arguments => $_->{method_arguments},
                method_name      => $_->{method_name},
                type             => $_->{type},
                file_link        => $OPTIONS{GLOBALS_NAME} . ".html"
            }, @{&map_methods($$CLASSES{$OPTIONS{GLOBALS_NAME}}, $filename)};
        $filename = mangle($filename);
        &output_template(OVERVIEW_TMPL, "overview-summary-$filename.html", {
            generic         => 0,
            sourcecode      => $OPTIONS{NO_SRC} ? '' : &to_html($src),
            project_name    => $OPTIONS{PROJECT_NAME},
            project_title   => $filename,
            page_footer     => $OPTIONS{PAGE_FOOTER},
            ctime           => $TIME,
            project_summary => $summary,
            is_file_summary => 1,
            methods         => \@methods,
            overviews       => \@class_overviews });
    }
}
#
# Output a summary page about the 'static constant' field values for all
# classes
#
sub output_const_summary {
    # Build constant-values.html, listing every "static constant": a final
    # class field whose value is a numeric or quoted-string literal.
    # The page is only written when at least one such field exists.
    my @static_params;
    for my $classname (sort { uc($a) cmp uc($b) } keys %$CLASSES){
        my $class = $CLASSES->{$classname};
        # Final class fields with a literal value (number, or a string
        # wrapped in matching quotes).
        my @statics = grep { $_->{field_value} =~ /^(?:\d+)|(?:(['"]).*\1)$/}
            grep { $_->{field_vars}->{final}} @{$class->{class_fields}};
        if (@statics){
            push @static_params, {
                classname => $classname,
                static_values => [map {
                    name => $_->{field_name},
                    value => $_->{field_value},
                    classname => $classname}, @statics] };
        }
    }
    &output_template(CONSTANTS_TMPL, 'constant-values.html', {
        project_name => $OPTIONS{PROJECT_NAME},
        page_footer => $OPTIONS{PAGE_FOOTER},
        ctime => $TIME,
        classnames => [map {name => $_->{classname}}, @static_params],
        static_finals => \@static_params
        }
    ) if @static_params;
}
#
# Method to handle outputting file overview template if
# more than one sourcefile is being processed
#
sub output_multiple_files_templates {
    # Emit overview-frame.html plus one per-file (or, with --package-naming,
    # per-package) class listing. Only called when documentation spans
    # multiple source files or packages.
    # NOTE(review): every '{filename}' key below was restored from
    # corrupted '->(unknown)' text; @FILENAMES entries carry
    # {filename}/{mangledfilename} (see output_class_templates).
    my %params = (
        logo         => basename($OPTIONS{LOGO}),
        project_name => $OPTIONS{PROJECT_NAME});
    if ($OPTIONS{PACKAGENAMING}){
        my @packagenames = sort { $a->{filename} cmp $b->{filename} }
            map {
                filelink => "overview-$_->{classname}.html",
                filename => $_->{classname} }, grep { $_->{ispackage} }
            values %$CLASSES;
        $params{filenames} = \@packagenames;
        $params{heading} = "Packages";
    } else {
        my @filenames = map {
            filelink => "overview-$_->{mangledfilename}.html",
            filename => $_->{filename} }, @FILENAMES;
        $params{filenames} = \@filenames;
        $params{heading} = "Files";
    }
    &output_template(OVERVIEW_FRAME_TMPL, 'overview-frame.html', \%params);
    if (!$OPTIONS{PACKAGENAMING}){
        for my $fname (map { $_->{filename} } @FILENAMES){
            my @classes = grep {
                ($$CLASSES{$_}->{constructor_vars}->{filename} || '')
                    eq $fname
            } keys %$CLASSES;
            # methods under GLOBAL (detached) class
            my @methods;
            for (my $i = 0; $i < @CLASSNAMES; $i++){
                if($CLASSNAMES[$i]->{classname} eq $OPTIONS{GLOBALS_NAME}){
                    my $class = $$CLASSES{$CLASSNAMES[$i]->{classname}};
                    for my $method (
                        sort {lc $a->{mapped_name}
                              cmp lc $b->{mapped_name} }
                        @{$class->{class_methods}}){
                        if(defined($fname)
                            && $fname eq $method->{vars}->{filename}){
                            $method->{filename} = $fname;
                            push(@methods, $method);
                        }
                    }
                    last;
                }
            }
            &output_template(ALLCLASSES_TMPL,
                sprintf('overview-%s.html', $fname), {
                    filename => $fname,
                    fname_link => $FILE_OVERVIEWS{$fname}
                        ? sprintf(
                            '<a href="overview-summary-%s.html"
                            target="classFrame">%s</a>',
                            mangle($fname), $fname)
                        : $fname,
                    CLASSNAMES => [map {
                        classname => $_,
                        classlink => link_to_class($_)
                        },
                        grep { !$$CLASSES{$_}->{constructor_vars}->{private}
                               || $OPTIONS{PRIVATE} } sort @classes] });
        }
    }
    if ($OPTIONS{PACKAGENAMING}){
        my @packages = grep { $_->{ispackage} } values %$CLASSES;
        for my $package (@packages){
            my $name = $package->{classname};
            my %params = (
                filename => $name,
                fname_link => sprintf(
                    '<a href="%s" target="classFrame">%s</a>',
                    link_to_class($name), $name));
            my @inner_classes = sort {
                $a->{classname} cmp $b->{classname}} map {
                    classname => $_,
                    classlink => link_to_class($_) },
                map { $_->{class_name} } @{$package->{inner_classes}};
            $params{CLASSNAMES} = \@inner_classes;
            output_template(ALLCLASSES_TMPL, "overview-$name.html", \%params);
        }
    }
}
#
# Mangle a file path so that it can be used as a filename
#
sub mangle {
    # Flatten a file path into a usable file name by replacing every '/'
    # and '\' with '_'. The original copy is left untouched.
    # FIX: the previous version executed 'die if $input ne $_' after the
    # translation, i.e. it aborted whenever the path actually contained a
    # separator -- precisely the case mangle() exists for (nested file
    # naming mangles paths like 'a/b.js' for overview page names).
    my ($input) = @_;
    (my $mangled = $input) =~ tr{/\\}{_};
    return $mangled;
}
#
# Build the tree representation of the inheritance
# PARAM: Name of the class
#
sub build_class_tree {
    # Render the ASCII-art inheritance chain (Object down to $classname)
    # shown at the top of a class page; the page's own class is bolded.
    my $classname = shift;
    my $class = $$CLASSES{$classname};
    my $tree = "";
    my @family;
    push @family, $classname;
    # Walk up the 'extends' chain, turning each ancestor into a link when
    # a page for it exists (and it is visible).
    while ($class->{extends} and $class->{extends} ne ""){
        my $base = $class->{extends};
        if ($$CLASSES{$base} and is_class_linkable($base)){
            $base = sprintf("<a href='%s'>$base</a>",
                link_to_class($base, $classname))
                unless (!$OPTIONS{PRIVATE}
                    && $$CLASSES{$base}->{constructor_vars}->{private});
        } elsif ($class->{constructor_vars}->{base}){
            # '@base Name url': only the url is used for the link target;
            # the captured name is intentionally unused.
            if (my ($bcname, $url) =
                $class->{constructor_vars}->{base}->[0]
                    =~ /^(\S+)\s(\S.*)$/){
                $base = "<a href='$url'>$base</a>";
            }
        }
        push @family, $base;
        $class = $$CLASSES{$class->{extends}};
    }
    push @family, "Object";
    # Emit root-first, indenting six more columns per generation.
    my $indent = 3;
    $tree = (pop @family) . "\n";
    my $name;   # FIX: was 'my $name = $_;' -- copying the global $_ was
                # useless, the value is overwritten before every use
    while ($name = pop (@family)){
        my $instr = " " x $indent;
        $tree .= sprintf "%s|\n%s+--%s%s%s\n", $instr, $instr,
            $name eq $classname ? "<b>" : "", $name,
            $name eq $classname ? "</b>" : "";
        $indent += 6;
    }
    $tree;
}
#
# Shown if no commandline args are given
#
# Print command-line usage; shown for --help and when no sources are given.
sub show_usage(){
    print qq{Usage: jsdoc [OPTIONS] <js sourcefiles and/or directories>+
-h | --help Show this message and exit
-r | --recursive Recurse through given directories
-p | --private Show private methods and fields
-d | --directory Specify output directory (defaults to js_docs_out)
-q | --quiet Suppress normal output
--page-footer Specify (html) footer string that will be added to
all docs
--project-name Specify project name for that will be added to docs
--logo Specify a path to a logo to be used in the docs
--project-summary Specify a path to a text file that contains an
overview summary of the project
--no-sources Don't include the source code view
--extensions Provide a comma-separated list of file extensions
to be considered as JavaScript source files
--nested-file-naming Use package-style naming (i.e. keep directory
names in the file path). This is useful if you
have multiple files with the same name, but in
different directories. This option is only useful
if --recursive is also used.
--globals-name Specify a 'class name' under which all unattached
methods will be classified. The defaults to GLOBALS
--format Set the output format. The options are html, xml
and xmi, defaulting to html. The others are currently
alpha software.
--template-dir Provide another directory containing HTML templates
--no-lexical-privates Ignore "private" variables and functions that are
lexically defined within constructors
--package-naming Use packages, with a package being defined as a class
that only contains nested classes and possibly
static members.
\n};
}
#
# Take all the command line args as filenames and add them to @SOURCESFILES
#
sub load_sources(){
    # Expand @ARGV (files and/or directories) into a flat list of
    # (relative-name, \$source-text) pairs for every readable file whose
    # extension matches --extensions. Directories are searched with
    # File::Find, recursively only under --recursive.
    my (@filenames, @sources);
    my $ext_re = join '|', split /\s*,\s*/, $OPTIONS{EXTENSIONS};
    for my $arg (@ARGV){
        if (-d $arg) {
            # Make sure the directory path ends in '/' so the prefix test
            # and the relative-name substr() below line up.
            $arg =~ s/(.*[^\/])$/$1\//;
            find( {
                wanted => sub {
                    push @filenames, {
                        name => $_,
                        relname => $OPTIONS{NESTEDFILENAMING}
                            ? substr($_, length($arg))
                            : (fileparse($_))[0]
                    } if ((-f and -r and /.+\.$ext_re$/oi) &&
                        (/^\Q$arg\E[^\/]+$/ || $OPTIONS{RECURSIVE}))
                },
                no_chdir => 1 }, $arg);
        } elsif (-f $arg){
            my $relname = (fileparse($arg))[0];
            push @filenames, { name => $arg, relname => $relname };
        }
    }
    for my $file (@filenames){
        &_log(sprintf 'Loading sources from %s', $file->{name});
        # Lexical handle (was a reused bareword SRC); unreadable files are
        # skipped with a warning rather than aborting the whole run.
        open my $src_fh, '<', $file->{name}
            or (warn sprintf("Can't open %s, skipping: $!\n", $file->{name})
                and next);
        my $content = do { local $/; <$src_fh> };   # slurp entire file
        close $src_fh;
        push @sources, $file->{relname}, \$content;
    }
    @sources;
}
#
# Once all sources have been parsed, finds all subclasses
# of $classname
#
sub find_subclasses {
    # Return a ref to the list of classes whose 'extends' names
    # $classname directly (immediate subclasses only).
    # Cleanup: dropped the unused $subclassname temporary and replaced the
    # manual push loop with an equivalent grep.
    my ($classname) = @_;
    my @subclasses = grep {
        my $parent = $$CLASSES{$_}->{extends};
        $parent && $parent eq $classname;
    } keys %$CLASSES;
    \@subclasses;
}
#
# Make a summary of a description, cutting it off either at the first
# double newline or the first period followed by whitespace.
# PARAM: $description
#
sub get_summary {
    # First "sentence" of a description: everything up to (and including)
    # the first '.', '?' or '!' that is followed by whitespace, or the
    # first blank line. Falls back to the whole text when no boundary is
    # found, and to '' for empty/false input.
    my ($description) = @_;
    return "" unless $description;
    if (my ($lead) = $description =~ /^(.*?(?:[?!.](?=\s)|\n\n)).*$/gs){
        return $lead;
    }
    return $description;
}
#
# Set up all the instance and class methods for one template
# PARAM: A reference to a class
# PARAM: Optional filename, only maps methods for that file (used for GLOBAL)
#
sub map_methods{
    # Collect template parameters for every visible instance and class
    # method of $class. With $fname given, only methods defined in that
    # source file are included (used for the GLOBALS pseudo-class pages).
    my ($class, $fname) = @_;
    my @methods;
    for my $mtype (qw(instance_methods class_methods)){
        next unless $class->{$mtype};
        for my $method (
            sort {lc $a->{mapped_name} cmp lc $b->{mapped_name} }
            @{$class->{$mtype}}){
            # NOTE(review): '{filename}' restored from corrupted
            # '->(unknown)' text; doc vars record the defining file there.
            next if $fname && $fname ne $method->{vars}->{filename};
            &resolve_synonyms($method->{vars});
            next if (!$OPTIONS{PRIVATE} && $method->{vars}->{private});
            $method->{vars}->{returns}[0] =
                $method->{vars}->{returns}[0] || $method->{vars}->{return};
            my ($args, $arglist) =
                &fetch_args($method->{vars}, $method->{argument_list});
            $args = [map { &format_vars($_); $_ } @{$args}];
            &format_vars($method->{vars});
            my $desc = &resolve_inner_links($method->{description});
            my $type = &map_return_type($method);
            my $ret = $method->{vars}->{returns}[0];
            my $attrs = &format_method_attributes($method->{vars});
            push @methods, {
                method_description => $desc,
                method_summary     => &get_summary($desc),
                method_name        => $method->{mapped_name},
                method_arguments   => $arglist,
                method_params      => $args,
                method_returns     => $ret,
                is_class_method    => $mtype eq 'class_methods',
                is_private         => defined($method->{vars}->{private}),
                attributes         => $attrs,
                type               => $type };
        }
    }
    return \@methods;
}
#
# Map a function return type
#
sub map_return_type {
    # Render a method's return type: an explicit @type wins; otherwise a
    # leading '{Type url}' block is extracted from the @returns text.
    # Defaults to 'Object'. Documented classes (or explicit urls) become
    # HTML links.
    my ($method) = @_;
    #return 'Object' unless $method->{vars}->{type}[0];
    my $name = 'Object';
    my $link = '';
    if (defined($method->{vars}->{type})){
        $name = $method->{vars}->{type}[0];
    } elsif (defined($method->{vars}->{returns}[0])){
        # The s/// deliberately strips the '{Type url}' prefix out of the
        # stored @returns string so it isn't rendered twice.
        if ($method->{vars}->{returns}[0] =~ s/\s*\{(\S+)(?:\s+([^}]+))?\}//){
            $name = $1;
            # NOTE(review): $2 (and hence $link) may be undef when no url
            # was given; the '|| $link' test below treats that as absent.
            $link = $2;
        }
        # Cache the extracted type for later template use.
        $method->{vars}->{type} = [$name];
    }
    $name =~ s/^\s*(\S.*?)\s*$/$1/;
    if ($$CLASSES{$name} || $link){
        $link ||= link_to_class($name, $CURRENT_CLASS);
        return qq|<a href="$link">$name</a>|;
    }
    $name;
}
#
# Set up all the instance and class methods for one template
# PARAM: A reference to a class
#
sub map_fields {
    # Collect template parameters for every visible instance and class
    # field of $class, sorted case-insensitively by name.
    my $class = shift;
    my @fields;
    # Set up the instance fields
    for my $type (qw(instance_fields class_fields)){
        next unless $class->{$type};
        for (sort {lc $a->{field_name} cmp lc $b->{field_name} }
            @{$class->{$type}}){
            &resolve_synonyms($_->{field_vars});
            next if (!$OPTIONS{PRIVATE} && $_->{field_vars}->{private});
            my $description = &resolve_inner_links($_->{field_description});
            # Final fields with a literal numeric/string value get a link
            # to the constant-values page (keyed by class name).
            my $const_link = ($_->{field_vars}->{final} &&
                ($_->{field_value} =~ /^\-?\d+(\.\d+)?$/
                    || $_->{field_value} =~ /^(["']).*\1$/))
                ? $class->{classname} : '';
            push @fields, {
                field_name => $_->{field_name},
                field_description => $description,
                field_summary => &get_summary($description),
                is_final => defined($_->{field_vars}->{final}),
                is_private => defined($_->{field_vars}->{private}),
                is_class_field => $type eq 'class_fields',
                type => &map_field_type($_),
                const_link => $const_link};
        }
    }
    \@fields;
}
#
# Map a field type
#
sub map_field_type {
    # Resolve a field's declared @type into a display string; documented
    # classes become links, and a missing type falls back to 'Object'.
    my ($field) = @_;
    my $typename = $field->{field_vars}->{type}[0]
        or return 'Object';
    $typename =~ s/^\s*(\S.*?)\s*$/$1/;   # trim surrounding whitespace
    if ($$CLASSES{$typename}){
        my $href = link_to_class($typename, $CURRENT_CLASS);
        return qq{<a href="$href">$typename</a>};
    }
    return $typename;
}
#
# Map all the inherited methods to a template parameter
# PARAM: A reference to a class
#
sub map_method_inheritance {
    # For each (visible) ancestor of $class, build the "Methods inherited
    # from X" row: the superclass link plus a comma-separated list of
    # anchors to the inherited instance methods.
    my $class = shift;
    my @method_inheritance;
    # Set up the inherited methods
    if ($class->{inherits}){
        my $superclassname = $class->{extends};
        my $superclass = $$CLASSES{$superclassname};
        # Walk the whole 'extends' chain upwards.
        while ($superclass){
            if (!$superclass->{constructor_vars}->{private}
                || $OPTIONS{PRIVATE}){
                my $methods =
                    $class->{inherits}->{$superclassname}->{instance_methods};
                push @method_inheritance, {
                    superclass_name => $superclassname,
                    superclass_link => link_to_class(
                        $superclassname,
                        $class->{classname}),
                    inherited_methods => join(', ',
                        map(sprintf("<a href='%s#%s'>%s</a>",
                            link_to_class(
                                $superclassname, $class->{classname}),
                            $_, $_),
                            &filter_private_methods(
                                $methods, $superclassname)))}
                    if ($methods and @$methods);
            }
            $superclassname = $superclass->{extends};
            $superclass = $superclassname ? $$CLASSES{$superclassname} : undef;
        }
    }
    \@method_inheritance;
}
#
# Map all the inherited fields to a template parameter
# PARAM: A reference to a class
#
sub map_field_inheritance {
    # For each (visible) ancestor of $class, build the "Fields inherited
    # from X" row -- mirrors map_method_inheritance but for instance fields.
    my $class = shift;
    my @field_inheritance;
    # Set up the inherited fields
    if ($class->{inherits}){
        my $superclassname = $class->{extends};
        my $superclass = $$CLASSES{$superclassname};
        # Walk the whole 'extends' chain upwards.
        while ($superclass){
            if (!$superclass->{constructor_vars}->{private}
                || $OPTIONS{PRIVATE}){
                my $fields =
                    $class->{inherits}->{$superclassname}->{instance_fields};
                push @field_inheritance, {
                    superclass_name => $superclassname,
                    superclass_link => link_to_class(
                        $superclassname,
                        $class->{classname}),
                    inherited_fields => join(', ',
                        map(sprintf("<a href='%s#%s'>%s</a>",
                            link_to_class(
                                $superclassname, $class->{classname}),
                            $_, $_),
                            &filter_private_fields($fields, $superclassname)))}
                    if ($fields and @$fields);
            }
            $superclassname = $superclass->{extends};
            $superclass = $superclassname ? $$CLASSES{$superclassname} : undef;
        }
    }
    \@field_inheritance;
}
#
# Filter out private inherited methods
#
sub filter_private_methods {
    # Given inherited method names and their defining superclass, keep
    # only those the superclass actually declares and that are visible
    # (public, or --private was given).
    my ($method_names, $superclassname) = @_;
    my @visible;
    for my $name (@$method_names){
        for my $declared (@{$$CLASSES{$superclassname}->{instance_methods}}){
            next unless $name eq $declared->{mapped_name};
            push @visible, $name
                if !$declared->{vars}->{private} || $OPTIONS{PRIVATE};
        }
    }
    @visible;
}
#
# Filter out private inherited fields
#
sub filter_private_fields {
    # Given inherited field names and their defining superclass, keep
    # only those the superclass actually declares and that are visible
    # (public, or --private was given).
    my ($field_names, $superclassname) = @_;
    my @visible;
    for my $name (@$field_names){
        for my $declared (@{$$CLASSES{$superclassname}->{instance_fields}}){
            next unless $name eq $declared->{field_name};
            push @visible, $name
                if !$declared->{field_vars}->{private}
                    || $OPTIONS{PRIVATE};
        }
    }
    @visible;
}
#
# Builds up the listing of subclasses, implemented interfaces, and
# who implements this interface
#
sub get_class_hierarchy_metadata {
    # Build the title/data rows shown in a class page header: direct
    # subclasses, implemented interfaces, and known implementing classes.
    # Sections with no entries are omitted.
    my ($classname) = @_;
    my $class = $CLASSES->{$classname};
    # Turn a list(ref) of class names into one comma-separated link string.
    my $linkify = sub {
        join ', ', map { make_link($_, $classname) } @{$_[0]};
    };
    my @meta;
    my $subclasses = $linkify->(&find_subclasses($classname));
    push @meta, { title => 'Direct Known Subclasses',
                  data  => $subclasses }
        if $subclasses;
    my $interfaces =
        $linkify->($class->{constructor_vars}->{implements} || []);
    push @meta, { title => 'All Implemented Interfaces',
                  data  => $interfaces }
        if $interfaces;
    my $implementors =
        $linkify->($class->{constructor_vars}->{implemented_by} || []);
    push @meta, { title => 'All Known Implementing Classes',
                  data  => $implementors }
        if $implementors;
    return \@meta;
}
#
# Adds a class's information to the global INDEX list
#
sub add_to_index {
    # Append this class's entries to the global @INDEX used by
    # index-all.html: the class itself, its constructor (when it takes
    # arguments), and every visible method and field.
    my ($class, $classname) = @_;
    push @INDEX, {
        name => $classname,
        class => $classname,
        classlink => link_to_class($classname),
        type => '', linkname => ''
    };
    if (!$class->{constructor_args}){
        # Normalize to '' so later string interpolation never sees undef.
        $class->{constructor_args} = '';
    } else {
        push @INDEX, {
            name => "$classname$class->{constructor_args}",
            class => $classname,
            classlink => link_to_class($classname),
            type => 'Constructor in ',
            linkname => 'constructor_detail' };
    }
    # Visible methods: private ones are skipped unless --private is on.
    for my $mtype (qw(class_methods instance_methods)){
        my $type = sprintf('%s method in ',
            $mtype eq 'class_methods' ? 'Class' : 'Instance');
        push @INDEX, {
            name => "$_->{mapped_name}$_->{argument_list}",
            class => $classname,
            classlink => link_to_class($classname),
            type => $type,
            linkname => $_->{mapped_name}}
            for grep {
                not($_->{vars}->{private} and not $OPTIONS{PRIVATE})
            } @{$class->{$mtype}};
    }
    # Visible fields, same visibility rule as methods.
    for my $ftype (qw(class_fields instance_fields)){
        my $type = sprintf('%s field in ',
            $ftype eq 'class_fields' ? 'Class' : 'Instance');
        push @INDEX, {
            name => $_->{field_name},
            class => $classname,
            classlink => link_to_class($classname),
            type => $type,
            linkname => $_->{field_name}}
            for grep {
                not($_->{field_vars}->{private} and not $OPTIONS{PRIVATE})
            } @{$class->{$ftype}};
    }
}
#
# Outputs the index page
#
sub output_index_template {
    # Render index-all.html: all @INDEX entries sorted case-insensitively
    # and grouped by the first letter of the entry name.
    @INDEX = sort {lc $a->{name} cmp lc $b->{name}} @INDEX;
    my %letters;
    for my $item (@INDEX){
        my $letter = uc(substr($item->{name}, 0, 1));
        # Entries with an empty name (e.g. bare class rows) are grouped
        # under the class's first letter instead.
        $letter = uc(substr($item->{class}, 0, 1)) if $letter eq '';
        push @{$letters{$letter}}, $item;
    }
    my $letter_list = [map {letter_name => $_},
        sort {lc $a cmp lc $b} keys %letters];
    &output_template(INDEX_ALL_TMPL, 'index-all.html', {
        letters => $letter_list,
        project_name => $OPTIONS{PROJECT_NAME},
        page_footer => $OPTIONS{PAGE_FOOTER},
        ctime => $TIME,
        index_list => [map {
            letter => $_->{letter_name},
            value => $letters{$_->{letter_name}}
        }, @{$letter_list}] });
}
sub is_class_linkable {
    # A class can be linked to only when it was actually parsed and has
    # an output page path assigned; returns that path, or 0.
    my ($cname) = @_;
    return 0 unless defined $$CLASSES{$cname};
    return $$CLASSES{$cname}->{filepath} || 0;
}
#
# Recursively builds up the overview tree
#
sub build_tree {
    # Recursively render the nested <UL> overview tree of all classes
    # descending from $parentclassname; '' means the implicit Object root,
    # which also wraps the result in the outermost list.
    my $parentclassname = shift || '';
    my $ret = "";
    for my $cname (map {$_->{classname}} @CLASSNAMES) {
        next if $cname eq $OPTIONS{GLOBALS_NAME};
        my $class = $$CLASSES{$cname};
        my $parent = $class->{extends} || '-';
        if ((!$parentclassname && $parent eq '-')
            or ($parent eq $parentclassname)) {
            $ret .= is_class_linkable($cname) ? sprintf(qq{
<LI TYPE="circle"><B>%s</B></LI> },
                make_link($cname))
                : qq{
<LI TYPE="circle">
$cname</LI>
};
            # FIX: was "my $childrentree .= ..." -- appending to a freshly
            # declared lexical is just an obfuscated assignment (and warns
            # about an uninitialized value under 'use warnings').
            my $childrentree = &build_tree($cname);
            $ret = "$ret$childrentree" if $childrentree;
        }
    }
    $ret = "<UL>$ret</UL>" if $ret;
    if ($parentclassname eq ''){
        $ret = qq{<UL><LI TYPE="circle">Object</LI>$ret</UL>};
    }
    $ret;
}
#
# Outputs the overview tree
#
sub output_tree_template {
    # Render overview-tree.html, the page showing the full class
    # inheritance tree.
    my $tree_markup = build_tree();
    output_template(TREE_TMPL, 'overview-tree.html', {
        classtrees   => $tree_markup,
        project_name => $OPTIONS{PROJECT_NAME},
        page_footer  => $OPTIONS{PAGE_FOOTER},
        ctime        => $TIME }, 1);
}
#
# Formats additional non-standard attributes for methods according to user
# configuration
#
sub format_method_attributes {
    # Run every recognized method-level @attribute through its configured
    # formatter in %METHOD_ATTRS_MAP and concatenate the HTML fragments;
    # unknown attributes are silently skipped.
    my ($attrs) = @_;
    my $html = '';
    for my $name (keys %{$attrs}) {
        my $formatter = $METHOD_ATTRS_MAP{$name} or next;
        $html .= $formatter->($attrs->{$name});
    }
    $html;
}
#
# Formats additional non-standard attributes for classes according to user
# configuration
#
sub format_class_attributes {
    # Format class-level attributes: the @class description first, then
    # every recognized attribute via %CLASS_ATTRS_MAP. Returns the HTML
    # fragment ('' when nothing applies).
    my ($attrs) = @_;
    # FIX: start defined -- previously $attributes could be undef and the
    # '.=' below triggered uninitialized-value warnings (and the sub could
    # return undef instead of an empty string).
    my $attributes = '';
    if ($attrs->{class} && @{ $attrs->{class} }){
        $attributes = sprintf('<BR/>%s<BR/>', $attrs->{class}[0] || '');
    }
    while (my ($name, $val) = each %{$attrs}) {
        $attributes .= &{$CLASS_ATTRS_MAP{$name}}($val)
            if $CLASS_ATTRS_MAP{$name};
    }
    $attributes;
}
#
# Parses the command line options
#
sub parse_cmdline {
    # Set %OPTIONS defaults, parse the command line with Getopt::Long,
    # and normalize the output directory to end in '/'.
    $OPTIONS{OUTPUT} = DEFAULT_DEST_DIR;
    $OPTIONS{PROJECT_NAME} = '';
    $OPTIONS{COPYRIGHT} = '';
    $OPTIONS{PROJECT_SUMMARY} = '';
    $OPTIONS{LOGO} = '';
    $OPTIONS{GLOBALS_NAME} = 'GLOBALS';
    $OPTIONS{FORMAT} = 'html';
    $OPTIONS{EXTENSIONS} = 'js';
    $OPTIONS{TEMPLATEDIR} = LOCATION;
    GetOptions(
        'private|p' => \$OPTIONS{PRIVATE},
        'directory|d=s' => \$OPTIONS{OUTPUT},
        'help|h' => \$OPTIONS{HELP},
        'recursive|r' => \$OPTIONS{RECURSIVE},
        'page-footer=s' => \$OPTIONS{PAGE_FOOTER},
        'project-name=s' => \$OPTIONS{PROJECT_NAME},
        'project-summary=s' => \$OPTIONS{PROJECT_SUMMARY},
        'logo=s' => \$OPTIONS{LOGO},
        'globals-name=s' => \$OPTIONS{GLOBALS_NAME},
        'quiet|q' => \$OPTIONS{QUIET},
        'no-sources' => \$OPTIONS{NO_SRC},
        'nested-file-naming' => \$OPTIONS{NESTEDFILENAMING},
        'format=s' => \$OPTIONS{FORMAT},
        'extensions=s' => \$OPTIONS{EXTENSIONS},
        'no-lexical-privates' => \$OPTIONS{NO_LEXICAL_PRIVATES},
        'template-dir=s' => \$OPTIONS{TEMPLATEDIR},
        'package-naming' => \$OPTIONS{PACKAGENAMING});
    # Guarantee a trailing slash so paths can be concatenated directly.
    $OPTIONS{OUTPUT} =~ s/([^\/])$/$1\//;
}
#
# Resolves links for {@link } items
#
sub resolve_inner_links {
    # Expand every {@link ...} directive in $doc into an HTML anchor via
    # format_link(); undef/empty input is passed through unchanged.
    my ($doc) = @_;
    return $doc unless $doc;
    $doc =~ s/\{\@link\s+([^}]+)\}/format_link($1)/eg;
    return $doc;
}
sub link_to_class {
    # Relative URL from $fromclass's page (or the documentation root when
    # $fromclass is omitted) to $toclass's page.
    my ($toclass, $fromclass) = @_;
    $fromclass ||= '';
    $toclass = strip($toclass);
    $fromclass = strip($fromclass);
    # Swap class names for their output file paths set by set_file_paths().
    $toclass = $CLASSES->{$toclass}->{filepath};
    $fromclass = $CLASSES->{$fromclass}->{filepath} if $fromclass;
    # One '../' per directory level of the referring page.
    # NOTE(review): if $fromclass has no {filepath}, tr/// operates on
    # undef and warns -- presumably callers only pass linkable classes.
    my $fromslashes = ($fromclass =~ tr/\///);
    my $link = ('../' x $fromslashes);
    $link .= $toclass if $toclass;
    return $link;
}
sub make_link {
    # Return an HTML anchor to $toclass (relative to $fromclass's page)
    # when a page exists for it; otherwise just the bare class name.
    my ($toclass, $fromclass) = @_;
    return $toclass unless is_class_linkable($toclass);
    my $href = link_to_class($toclass, $fromclass);
    return qq{<a href='$href'>$toclass</a>};
}
sub path_to_base {
    # Relative prefix ('../' per directory level) leading from a class's
    # page back up to the documentation root.
    my ($fromclass) = @_;
    my $filepath = $CLASSES->{$fromclass}->{filepath};
    my $depth = ($filepath =~ tr/\///);   # count directory separators
    return '../' x $depth;
}
#
# Formats a {@link } item
#
sub format_link {
    # Turn the body of a {@link ...} directive into an HTML anchor.
    # Accepted forms: 'Class', 'Class#member', '#member' (current class),
    # each optionally followed by a label, or 'http(s)://url label'.
    # Unresolvable links are returned verbatim with a warning.
    my ($link) = shift;
    die unless $CURRENT_CLASS;
    $link = strip($link);
    # Drop any markup that leaked into the directive body.
    $link =~ s/<[^>]*>//g;
    my ($class, $method, $label, $url);
    my $class_re = qr/\w+(?:\.\w+)*/;
    unless ((($class, $method, $label) =
            $link =~ /^($class_re)?#($class_re)\s*(.*)$/)
        or (($class, $label) = $link =~ /^($class_re)(?:\s+(.*))?$/)){
        # Not a class/member reference; maybe an external URL with label.
        if (($url, $label) = $link =~ /^(https?:\/\/\S+)\s+(.*?)\s*$/){
            return "<a href='$url'>$label</a>";
        } else {
            return $link;
        }
    }
    if ($class){
        unless ($$CLASSES{$class}){
            warn "\@link can't find reference $class\n";
            return $link;
        }
    }
    my $classlink = ($class && defined($$CLASSES{$class}))
        ? link_to_class($class, $CURRENT_CLASS) : '';
    if (!$method){
        # Plain class reference.
        $label = $class unless $label;
        qq{<a href="$classlink#">$label</a>};
    } else {
        # Member reference: figure out whether it's a method or a field,
        # and whether it is static ('!s!' anchor prefix convention).
        my $clss = $CLASSES->{$class || $CURRENT_CLASS};
        my @methods = (@{$clss->{instance_methods}},
            @{$clss->{class_methods}});
        my @fields = (@{$clss->{instance_fields}}, @{$clss->{class_fields}});
        my @statics = (@{$clss->{class_methods}}, @{$clss->{class_fields}});
        my $ismethod = grep { $_->{mapped_name} eq $method } @methods;
        # NOTE(review): 'my' with a statement modifier ('unless') has
        # undefined behavior in Perl; kept byte-identical, but flagged.
        my $isfield = grep { $_->{field_name} eq $method } @fields
            unless $ismethod;
        my $isstatic = grep {
            ($_->{field_name} || $_->{mapped_name}) eq $method } @statics;
        if ($class){
            $label = "$class.$method" . ($ismethod ? '()' : '') unless $label;
            if ($ismethod or $isfield){
                $method = ($isstatic ? "!s!" : "") . $method;
                return qq{<a href="$classlink#$method">$label</a>};
            } else {
                warn "\@link can't find reference $method in $class\n";
                return $link;
            }
        } else {
            $label = $method . ($ismethod ? "()" : "") unless $label;
            $method = ($isstatic ? "!s!" : "") . $method;
            return qq{<a href="#$method">$label</a>};
        }
    }
}
#
# Initializes the customizable maps for @attributes
#
sub initialize_param_maps {
    # Populate the attribute-formatter dispatch tables: each entry maps a
    # doc tag (@author, @see, ...) to a closure producing an HTML fragment.
    # Method and file maps mostly share the class-level formatters.
    %CLASS_ATTRS_MAP = (
        author =>
            sub {
                '<B>Author:</B> ' .
                join(', ', @{$_[0]}) . "<BR/>"
            },
        deprecated =>
            sub {
                '<B>Deprecated</B> <I>' . ($_[0] ? $_[0]->[0] : '') .
                "</I><BR/><BR/>";
            },
        see =>
            sub {
                '<B>See:</B><UL>- ' .
                join('<BR/>- ', map {&format_link($_)} @{$_[0]}) . "</UL>"
            },
        version =>
            sub {
                '<B>Version: </B>' .
                join(', ', @{$_[0]}) . '<BR/><BR/>'
            },
        requires =>
            sub {
                '<B>Requires:</B><UL>- ' .
                join('<BR/>- ', map {&format_link($_)} @{$_[0]}) . "</UL>"
            },
        filename =>
            sub {
                sprintf '<I>Defined in %s</I><BR/><BR/>',
                    sprintf("<a href='%soverview-summary-%s.html'>%s</a>",
                        path_to_base($CURRENT_CLASS), mangle($_[0]), $_[0]);
            },
        overviewfile =>
            sub {
                # Inline the contents of an external overview file.
                # FIX: was 'my ($content, $fh) = "";' (which left $fh as a
                # plain undef slot) and a two-arg open -- a file name with
                # leading mode characters would have been misparsed.
                my $content = '';
                my $fname = $_[0][0] or return '';
                open my $fh, '<', $fname or do {
                    warn "Can't open overview file '$fname' : $!\n";
                    return '';
                };
                { local $/ = undef; $content .= <$fh> }
                close $fh or warn "Couldn't close overview file '$fname'\n";
                # Crude manner to strip out extra HTML
                $content =~ s/<body>(.*)<\/body>/$1/si;
                "$content<br/>";
            }
    );
    %METHOD_ATTRS_MAP = (
        throws =>
            sub {
                "<B>Throws:</B><UL>- " .
                join("<BR>- ", @{$_[0]}) . "</UL>"
            },
    );
    # @exception is an alias for @throws; most class formatters also apply
    # to methods and to whole-file overviews.
    $METHOD_ATTRS_MAP{exception} = $METHOD_ATTRS_MAP{throws};
    $METHOD_ATTRS_MAP{$_} = $CLASS_ATTRS_MAP{$_} for qw(author version
        deprecated see requires);
    $FILE_ATTRS_MAP{$_} = $CLASS_ATTRS_MAP{$_} for qw(author version
        see requires);
}
#
# Parse the @param or @argument values into name/value pairs and
# return the list of them
#
sub fetch_args {
    # Match @param/@argument doc entries against the literal argument list
    # of a constructor/method. Returns (\@args, $arg_list) where @args is
    # a list of {varname, vardescrip} hashes and $arg_list is the original
    # signature with '<Type>' annotations spliced in front of typed
    # parameters. Documented params absent from the signature are appended.
    my ($vars, $arg_list) = @_;
    return ([], $arg_list) unless $vars and $arg_list;
    my (@args, %used);
    # Split the text between the parentheses into parameter names
    # (the lookbehind keeps '$'-prefixed names intact).
    for my $arg (split /\W+(?<!\$)/, ($arg_list =~ /\(([^)]*)/)[0]){
        for (@{$vars->{param}}){
            my ($type, $link, $name, $value) =
                /(?:
                    \{\s*
                    (\S+)            # type name
                    (?:\s+(\S+)\s*)? # optional link
                    \})?
                    \s*
                    (\$?\w+)         # parameter name
                    (.*)             # description
                /x;
            next unless $name eq $arg;
            $used{$name} = 1;
            $type ||= '';
            if ($$CLASSES{$type} || $link){
                # Known class (or explicit link): render the type linked.
                $link ||= $type;
                if ($CLASSES->{$link}){
                    $type = sprintf('<a href="%s">%s</a>',
                        link_to_class($link, $CURRENT_CLASS), $type);
                } else {
                    $type = sprintf('<a href="%s">%s</a>',
                        "$link.html", $type);
                }
            }
            # Annotate the signature with '<Type>' before the parameter,
            # unless it was already annotated (guarded by the lookbehind).
            my $type_regex = qr{\Q$arg\E\b};
            $arg_list =~ s/(?<!gt; )($type_regex)/<$type> $1/
                if $type;
            push @args, { varname => $name, vardescrip => $value};
        }
    }
    # Documented parameters that never appeared in the signature.
    for (@{$vars->{param}}){
        my ($type, $link, $name, $value)
            = /(?:\{\s*(\S+)(?:\s+(\S+)\s*)?\})?\s*(\$?\w+)(.*)/;
        next if $used{$name};
        push @args, { varname => $name, vardescrip => $value };
    }
    return (\@args, $arg_list);
}
sub resolve_synonyms {
    # Fold doc-tag synonyms into their canonical keys, in place:
    # @argument -> @param, @return -> @returns, @const -> @final.
    # Note the asymmetry: for 'returns' the synonym ('return') wins when
    # both exist, whereas 'param' and 'final' prefer the canonical key.
    my ($tags) = @_;
    $tags->{param}   = $tags->{param}  || $tags->{argument};
    $tags->{returns} = $tags->{return} || $tags->{returns};
    $tags->{final}   = $tags->{final}  || $tags->{const};
}
#
# Log a message to STDOUT if the --quiet switch is not used
#
sub _log {
    # Emit a progress message on STDOUT unless --quiet was given.
    my ($message) = @_;
    print "$message\n" unless $OPTIONS{QUIET};
}
#
# Takes a vars hash and resolves {@link}s within it
#
sub format_vars {
    # Resolve {@link} directives in every value of a doc-vars hash, in
    # place; array-ref values are processed element by element.
    my ($vars) = @_;
    for my $key (keys %$vars){
        my $val = $vars->{$key};
        if (ref($val) eq 'ARRAY'){
            # $_ aliases each element, so this rewrites the array in place.
            $_ = &resolve_inner_links($_) for @$val;
        } else {
            $vars->{$key} = &resolve_inner_links($val);
        }
    }
}
#
# Util methods
#
sub strip {
    # Trim leading and trailing whitespace from a single-line value.
    # False inputs (undef, '', '0') are returned untouched.
    my ($text) = @_;
    return $text unless $text;
    $text =~ s/^\s*(.*?)\s*$/$1/;
    return $text;
}
sub has_method {
    # True (1) when class $classname defines an instance method named
    # $mname, false (0) otherwise.
    my ($classname, $mname) = @_;
    my $class = $CLASSES->{$classname};
    for my $candidate (@{$class->{instance_methods}}){
        return 1 if $candidate->{mapped_name} eq $mname;
    }
    return 0;
}
| dermidgen/sjl | lib/jsdoc/jsdoc.pl | Perl | bsd-3-clause | 54,465 |
# please insert nothing before this line: -*- mode: cperl; cperl-indent-level: 4; cperl-continued-statement-offset: 4; indent-tabs-mode: nil -*-
package TestCommon::Utils;
use strict;
use warnings FATAL => 'all';
use APR::Brigade ();
use APR::Bucket ();
use Apache2::Filter ();
use Apache2::Connection ();
use Apache2::Const -compile => qw(MODE_READBYTES);
use APR::Const -compile => qw(SUCCESS BLOCK_READ);
use constant IOBUFSIZE => 8192;
# perl 5.6.x only triggers taint protection on strings which are at
# least one char long
sub is_tainted {
    # Returns true if any argument is tainted (under perl -T).
    # The inner string eval compiles '#' joined with a zero-length
    # substring of each defined argument: the substrings carry the taint
    # flag but no characters, so if anything is tainted the string eval
    # dies, the block eval never reaches the trailing 1 and returns
    # undef, and the leading ! turns that into a true result.
    return ! eval {
        eval join '', '#',
            map defined() ? substr($_, 0, 0) : (), @_;
        1;
    };
}
# to enable debug start with: (or simply run with -trace=debug)
# t/TEST -trace=debug -start
# Read the full request body of $r (an Apache2 request object) using
# bucket-brigade manipulation and return it as a single string.
# An optional second argument enables per-bucket trace output via warn().
sub read_post {
    my $r = shift;
    my $debug = shift || 0;
    my $bb = APR::Brigade->new($r->pool,
                               $r->connection->bucket_alloc);
    my $data = '';
    my $seen_eos = 0;
    my $count = 0;
    do {
        # Pull up to IOBUFSIZE bytes of body through the input filter
        # chain; BLOCK_READ waits until data (or EOS) is available.
        $r->input_filters->get_brigade($bb, Apache2::Const::MODE_READBYTES,
                                       APR::Const::BLOCK_READ, IOBUFSIZE);
        $count++;
        warn "read_post: bb $count\n" if $debug;
        while (!$bb->is_empty) {
            my $b = $bb->first;
            if ($b->is_eos) {
                # End-of-stream bucket: the whole body has been read.
                warn "read_post: EOS bucket:\n" if $debug;
                $seen_eos++;
                last;
            }
            if ($b->read(my $buf)) {
                warn "read_post: DATA bucket: [$buf]\n" if $debug;
                $data .= $buf;
            }
            # Remove the consumed bucket so the brigade drains.
            $b->delete;
        }
    } while (!$seen_eos);
    $bb->destroy;
    return $data;
}
1;
__END__
=head1 NAME
TestCommon::Utils - Common Test Utils
=head1 Synopsis
use TestCommon::Utils;
# test whether some SV is tainted
$b->read(my $data);
ok TestCommon::Utils::is_tainted($data);
my $data = TestCommon::Utils::read_post($r);
=head1 Description
Various handy testing utils
=head1 API
=head2 is_tainted
is_tainted(@data);
returns I<TRUE> if at least one element in C<@data> is tainted,
I<FALSE> otherwise.
=head2 read_post
my $data = TestCommon::Utils::read_post($r);
my $data = TestCommon::Utils::read_post($r, $debug);
reads the posted data using bucket brigades manipulation.
To enable debug pass a true argument C<$debug>
=cut
| dreamhost/dpkg-ndn-perl-mod-perl | t/lib/TestCommon/Utils.pm | Perl | apache-2.0 | 2,391 |
#!/usr/bin/perl
use strict;
use warnings;

$ENV{'PATH'} = '/bin:/usr/bin:/usr/local/bin';
#####################################
# make a circular plasmid image file
#
# Usage: make_cirdna_plasmid.pl <work_dir> <date_id> <cirdna_file>
#####################################
my $location = $ARGV[0];
chdir($location) or die "Cannot chdir to $location: $!";
my $dateID = $ARGV[1];
my $file = $ARGV[2]; # cirdna.txt

# List-form system avoids passing the user-supplied filename through a
# shell (the old interpolated command strings were injection-prone).
system('cp', $file, 'cirdna.cirp') == 0 or die "cp $file failed";

# Read the plasmid length directly from the cirdna input (second field
# of the first line containing "End") instead of shelling out to
# grep|awk with an interpolated filename.
my $plasmid_size = 0;
open my $in, '<', $file or die "Cannot open $file: $!";
while (my $line = <$in>) {
    if ($line =~ /End/) {
        my @fields = split ' ', $line;
        $plasmid_size = $fields[1];
        last;
    }
}
close $in;
$plasmid_size = sprintf '%.1f', $plasmid_size / 1000;
$plasmid_size = $plasmid_size . " kb";
print "$plasmid_size\n";

# $plasmid_size is generated above (a formatted number plus " kb"), so
# interpolating it into this fixed command string is safe.
system("cirdna -infile cirdna.cirp -ruler N -blocktype Outline -posticks In -posblocks Out -graphout cps -gapsize 1000 -blockheight 2.0 -originangle 270 -gtitle \"$plasmid_size\" -textheight 1.5 -textlength 1.5 -tickheight 1.5 -postext 1.0 -intercolour 7");
system("ps2pdf -dPDFSETTINGS=/prepress -dEPSCrop -r100 cirdna.ps");
# Remove intermediates in-process instead of spawning rm.
unlink 'cirdna.cirp', 'cirdna.ps';
my $pdf = substr($file, 0, -4) . ".pdf";
my $png = substr($file, 0, -4) . ".png";
rename 'cirdna.pdf', $pdf or die "rename cirdna.pdf -> $pdf failed: $!";
# List-form again: $png/$pdf are derived from the user-supplied name.
system('gs', '-dQUIET', '-dNOPAUSE', '-dBATCH', '-sDEVICE=pngalpha',
       "-sOutputFile=$png", '-r100', $pdf);
| LivingComputingProject/cello | resources/scripts/make_cirdna_plasmid.pl | Perl | bsd-2-clause | 1,100 |
# Namespace stub: loading this module pulls in the admin event handler.
package TownCrier::Handler::Admin;
use TownCrier::Handler::Admin::Event;
1;
| robn/towncrier | lib/TownCrier/Handler/Admin.pm | Perl | mit | 78 |
#/usr/bin/perl
use strict;
use warnings;
use lib './libs/lib64/perl5/site_perl/5.8.8/x86_64-linux-thread-multi';
use Digest::SHA1 qw(sha1 sha1_hex sha1_base64);
# Build a <doc> XML string describing every algorithm listed in the
# caption map file (<tempdir>/CaptionMaps/map.txt).  For each
# <:algorithm:> record it emits caption, reference text, synopsis
# (read from <tempdir>/Synopses/<algoid>.txt), paper id, page number
# and the SHA1 checksum of the source PDF.
sub getXMLText
{
    my $tempDirPath = $_[0];
    my $pdfFilePath = $_[1];
    my $captionMapsDir = $tempDirPath."/CaptionMaps/";
    my $synDirPath = $tempDirPath."/Synopses/";
    my $capmapsPath = $captionMapsDir."map.txt";
    # Bug fixes: 3-arg open with a lexical handle, and the error message
    # used to name the wrong file ('data.txt').
    open(my $capmapsFile, '<', $capmapsPath) or
        die("Error: cannot open file '$capmapsPath'\n");
    my $text = "<doc>\n";
    my $line;
    my $synFile;
    #temp parameters for each field
    my $algoid = "";
    my $caption = "";
    my $reftext = "";
    my $synopsis = "";
    my $paperid = "";
    my $pagenum = "";
    my $checksum = "";
    #get metadata about the document
    $checksum = getSha1Checksum($pdfFilePath);
    while( $line = <$capmapsFile> )
    {
        chomp($line);
        $line = trim($line);
        next if($line eq "");
        if($line eq "<:algorithm:>")
        {   # start of a record: reset all per-algorithm fields
            $algoid = "";
            $caption = "";
            $reftext = "";
            $synopsis = "";
            $paperid = "";
            $pagenum = "";
        }
        elsif($line =~ m/^(\<:info:\>)/i)
        {   ($paperid, $pagenum, $algoid, $caption) = ($line =~ m/<:info:>(.*)<###>(.*)<###>(.*)<###>(.*)/);
        }
        elsif($line eq "<:/algorithm:>")
        #get synopsis and write a complete algorithm element to the output XML file
        {
            $synFile = $synDirPath.$algoid.".txt";
            $synopsis = trim(readFile($synFile));
            #replace XML escape characters
            $caption = replaceXMLEscapeChars($caption);
            $reftext = replaceXMLEscapeChars($reftext);
            $synopsis = replaceXMLEscapeChars($synopsis);
            $text .= "\t<algorithm id=\"".$algoid."\">\n";
            $text .= "\t\t<caption>".$caption."</caption>\n";
            $text .= "\t\t<reftext>".$reftext."</reftext>\n";
            $text .= "\t\t<synopsis>".$synopsis."</synopsis>\n";
            $text .= "\t\t<paperid>".$paperid."</paperid>\n";
            $text .= "\t\t<pagenum>".$pagenum."</pagenum>\n";
            $text .= "\t\t<checksum>".$checksum."</checksum>\n";
            $text .= "\t</algorithm>\n\n";
        }
        else #accumulate ref text
        {   $reftext .= $line." ";
        }
    }
    $text .= "</doc>\n";
    # Bug fix: the old code executed "close capmapsFile;", closing an
    # unrelated bareword glob rather than the lexical handle.
    close $capmapsFile;
    return $text;
}
# Generate the algorithm XML file for one PDF.  Returns 1 on success
# and 0 when the document contains no algorithms (no file is written).
sub genXMLFile
{   my $tempDirPath = $_[0];
    my $pdfFilePath = $_[1];
    my $outputXMLFilePath = $_[2];
    my $text = getXMLText($tempDirPath, $pdfFilePath);
    if($text eq "<doc>\n</doc>\n")
    {   return 0; #no algorithm
    }
    # 3-arg open with a lexical handle (was a 2-arg bareword open, which
    # would misparse an output path containing mode characters).
    open(my $outp, '>', $outputXMLFilePath) or die("Cannot open file '$outputXMLFilePath' for writing\n");
    print $outp $text;
    close $outp;
    return 1;
}
# Return the SHA1 hex digest of the given file's contents.
sub getSha1Checksum
{
    my $filename = $_[0];
    # 3-arg open: the old 2-arg form would misinterpret filenames that
    # begin with mode characters such as '>' or '|'.
    open(my $fh, '<', $filename) or die("$0: open $filename: $!");
    # PDFs are binary; disable CRLF translation so digests are stable
    # across platforms.
    binmode($fh);
    my $sha1 = Digest::SHA1->new;
    $sha1->addfile($fh);
    close $fh;
    return $sha1->hexdigest;
}
# Strip leading and trailing whitespace.
# Bug fix: the previous single regex s/^\s*(.*)\s*$/$1/ used a greedy
# (.*) capture, so the trailing \s* always matched the empty string and
# trailing whitespace was never removed.
sub trim{
    my $string = $_[0];
    $string =~ s/^\s+//;
    $string =~ s/\s+$//;
    return $string;
}
# Escape the five XML special characters so field text can be embedded
# in the generated XML.  Bug fix: the replacement strings had been
# corrupted to the bare characters themselves (an HTML-unescaping
# artifact), turning every substitution into a no-op; restore the
# proper entities.  '&' must be escaped first.
sub replaceXMLEscapeChars
{   my $str = $_[0];
    $str =~ s/&/&amp;/g;
    $str =~ s/</&lt;/g;
    $str =~ s/>/&gt;/g;
    $str =~ s/"/&quot;/g;
    $str =~ s/'/&apos;/g;
    return $str;
}
#read the entire file into a string
# Slurp the entire file into a string and return it.
sub readFile
{   # 3-arg open with a lexical handle (was a 2-arg bareword open).
    open(my $fh, '<', $_[0]) or die "Error: no file found. $_[0]\n";
    my $output = do {local $/; <$fh> };
    close $fh;
    return $output;
}
1;
| SeerLabs/PDFMEF | resources/algextract/perl/AlgoXmlGenerator.pl | Perl | apache-2.0 | 3,140 |
#-----------------------------------------------------------
# ntusernetwork.pl
# Plugin for Registry Ripper,
# Network key parser
#
#-----------------------------------------------------------
package ntusernetwork;
use strict;

# Plugin metadata consumed by the RegRipper framework.
my %config = (hive          => "NTUSER\.DAT",
              hasShortDescr => 1,
              hasDescr      => 0,
              hasRefs       => 0,
              osmask        => 22,
              version       => 20110601);

# Standard RegRipper plugin accessors.
sub getConfig{return %config}
sub getShortDescr {
    return "Returns contents of user's Network subkeys";
}
sub getDescr{}
sub getRefs {}
sub getHive {return $config{hive};}
sub getVersion {return $config{version};}
my $VERSION = getVersion();
# Plugin entry point: parse the supplied NTUSER.DAT hive and report the
# contents of the user's Network key (one subkey per mapped drive).
sub pluginmain {
    my $class = shift;
    my $ntuser = shift;   # path to the NTUSER.DAT hive file
    ::logMsg("Launching ntusernetwork v.".$VERSION);
    ::rptMsg("ntusernetwork v.".$VERSION); # banner
    ::rptMsg("(".$config{hive}.") ".getShortDescr()."\n"); # banner
    my $reg = Parse::Win32Registry->new($ntuser);
    my $root_key = $reg->get_root_key;
    # Mapped-network-drive information lives under the Network key.
    my $key_path = 'Network';
    my $key;
    if ($key = $root_key->get_subkey($key_path)) {
        ::rptMsg($key_path);
        ::rptMsg("");
        my @subkeys = $key->get_list_of_subkeys();
        if (scalar @subkeys > 0) {
            foreach my $s (@subkeys) {
                ::rptMsg($key_path."\\".$s->get_name());
                ::rptMsg("LastWrite time: ".gmtime($s->get_timestamp()));
                my @vals = $s->get_list_of_values();
                if (scalar @vals > 0) {
                    foreach my $v (@vals) {
                        ::rptMsg(sprintf "  %-15s %-25s",$v->get_name(),$v->get_data());
                    }
                    ::rptMsg("");
                }
            }
        }
        else {
            ::rptMsg($key_path." key has no subkeys.");
        }
    }
    else {
        ::rptMsg($key_path." key not found.");
    }
}
1;
| dgrove727/autopsy | thirdparty/rr-full/plugins/ntusernetwork.pl | Perl | apache-2.0 | 1,679 |
#!/usr/bin/perl -wT
use strict;

# Test fixture: serve intrinsic-ratio.svg after a deliberate 2-second
# delay, with cache-busting headers.

# flush the buffers after each print
select(STDOUT);
$| = 1;

print "Content-Type: image/svg+xml\r\n";
print "Expires: Thu, 01 Dec 2003 16:00:00 GMT\r\n";
print "Cache-Control: no-store, no-cache, must-revalidate\r\n";
print "Pragma: no-cache\r\n";
print "\r\n";

sleep 2;

# 3-arg open with a lexical handle; the old unchecked bareword 2-arg
# open silently served an empty body if the file was missing.
open my $svg, '<', 'intrinsic-ratio.svg' or die "cannot open intrinsic-ratio.svg: $!";
while (my $chunk = <$svg>) {
    print $chunk;
}
close $svg;
#!/opt/ActivePerl-5.22/bin/perl
use 5.022;

# Demonstration of the Prototype design pattern using Moose.

{
    # Role defining the prototype interface: every prototype must be
    # able to Clone itself and greet via Hello.
    package Prototype;
    use Moose::Role;
    requires 'Clone';
    requires 'Hello'
}
{
    package ConcretePrototype1;
    use Moose;
    use Clone 'clone';
    # NOTE(review): the imported clone() is never used; Clone() below
    # returns a brand-new instance rather than a copy — confirm intent.
    with 'Prototype';
    use namespace::autoclean;
    sub Clone {
        my $self = shift;
        $self->new();
    }
    sub Hello {
        say 'Hello!';
    }
    __PACKAGE__->meta->make_immutable;
}
{
    package ConcretePrototype2;
    use Moose;
    use Clone 'clone';
    with 'Prototype';
    use namespace::autoclean;
    sub Clone {
        my $self = shift;
        $self->new();
    }
    sub Hello {
        say 'Bonjourno!';
    }
    __PACKAGE__->meta->make_immutable;
}

# Driver: "clone" each prototype (invoked here as a class method, so
# Clone() constructs a fresh instance) and greet.
my $cp2 = ConcretePrototype2->Clone();
my $cp1 = ConcretePrototype1->Clone();
$cp2->Hello();
$cp1->Hello();
| jmcveigh/perl-design-patterns | CreationalPatterns/PrototypePattern.pl | Perl | mit | 844 |
# add to the previous solution (translated from Dutch:
# "voeg toe in vorige oplossing")
foreach $nsheet (in $book->{Worksheets}){
    # UsedRange spans the worksheet cells that contain data; report its
    # column and row counts per sheet.
    $range=$nsheet->{UsedRange};
    printf "\n\t%-30s heeft %3d kolommen en %3d rijen\n",$nsheet->{name},$range->{columns}->{count},$range->{rows}->{count} ;
}
| VDBBjorn/Besturingssystemen-III | Labo/reeks2/Reeks2_03.pl | Perl | mit | 234 |
/* Part of SWI-Prolog
Author: Matt Lilley
E-mail: matt.s.lilley@gmail.com
WWW: http://www.swi-prolog.org
Copyright (c) 2014, Mike Elston, Matt Lilley
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* PostgreSQL is a trademark of the PostgreSQL Global Development Group.
Microsoft, SQL Server, and Windows are either registered trademarks or
trademarks of Microsoft Corporation in the United States and/or other
countries. SQLite is a registered trademark of Hipp, Wyrick & Company,
Inc in the United States. All other trademarks or registered trademarks
are the property of their respective owners.
*/
:-module(sql_keywords,
        [reserved_sql_keyword/1]).

%!  reserved_sql_keyword(?Keyword:atom) is nondet.
%
%   True when Keyword is a reserved word of SQL and must therefore be
%   quoted when used as an identifier.  The list below covers the
%   SQL-92 reserved words plus a few vendor extensions.

reserved_sql_keyword(absolute).
reserved_sql_keyword(add).
reserved_sql_keyword(all).
reserved_sql_keyword(allocate).
reserved_sql_keyword(alter).
reserved_sql_keyword(and).
reserved_sql_keyword(any).
reserved_sql_keyword(are).
reserved_sql_keyword(as).
reserved_sql_keyword(asc).
reserved_sql_keyword(assertion).
reserved_sql_keyword(authorization).
reserved_sql_keyword(avg).
reserved_sql_keyword(begin).
reserved_sql_keyword(between).
reserved_sql_keyword(bit).
reserved_sql_keyword(bit_length).
reserved_sql_keyword(both).
reserved_sql_keyword(by).
reserved_sql_keyword(cascade).
reserved_sql_keyword(cascaded).
reserved_sql_keyword(case).
reserved_sql_keyword(cast).
reserved_sql_keyword(catalog).
reserved_sql_keyword(char).
reserved_sql_keyword(character).
reserved_sql_keyword(character_length).
reserved_sql_keyword(char_length).
reserved_sql_keyword(check).
reserved_sql_keyword(close).
reserved_sql_keyword(coalesce).
reserved_sql_keyword(collate).
reserved_sql_keyword(collation).
reserved_sql_keyword(column).
reserved_sql_keyword(commit).
reserved_sql_keyword(connect).
reserved_sql_keyword(connection).
reserved_sql_keyword(constraint).
reserved_sql_keyword(constraints).
reserved_sql_keyword(continue).
reserved_sql_keyword(convert).
reserved_sql_keyword(corresponding).
reserved_sql_keyword(create).
reserved_sql_keyword(cross).
reserved_sql_keyword(current).
reserved_sql_keyword(current_date).
reserved_sql_keyword(current_time).
reserved_sql_keyword(current_timestamp).
reserved_sql_keyword(current_user).
reserved_sql_keyword(cursor).
%reserved_sql_keyword(date).
reserved_sql_keyword(day).
reserved_sql_keyword(deallocate).
reserved_sql_keyword(dec).
reserved_sql_keyword(decimal).
reserved_sql_keyword(declare).
reserved_sql_keyword(default).
reserved_sql_keyword(deferrable).
reserved_sql_keyword(deferred).
reserved_sql_keyword(delete).
reserved_sql_keyword(desc).
reserved_sql_keyword(describe).
reserved_sql_keyword(descriptor).
reserved_sql_keyword(diagnostics).
reserved_sql_keyword(disconnect).
reserved_sql_keyword(distinct).
reserved_sql_keyword(domain).
reserved_sql_keyword(double).
reserved_sql_keyword(drop).
reserved_sql_keyword(else).
reserved_sql_keyword(end).
reserved_sql_keyword(exec).
reserved_sql_keyword(escape).
reserved_sql_keyword(except).
reserved_sql_keyword(execute).
reserved_sql_keyword(exists).
reserved_sql_keyword(external).
reserved_sql_keyword(extract).
reserved_sql_keyword(false).
reserved_sql_keyword(fetch).
reserved_sql_keyword(first).
reserved_sql_keyword(for).
reserved_sql_keyword(foreign).
reserved_sql_keyword(found).
reserved_sql_keyword(from).
reserved_sql_keyword(full).
reserved_sql_keyword(get).
reserved_sql_keyword(global).
reserved_sql_keyword(go).
reserved_sql_keyword(goto).
reserved_sql_keyword(grant).
reserved_sql_keyword(group).
reserved_sql_keyword(having).
reserved_sql_keyword(hour).
reserved_sql_keyword(identity).
reserved_sql_keyword(immediate).
reserved_sql_keyword(in).
reserved_sql_keyword(indicator).
reserved_sql_keyword(initially).
reserved_sql_keyword(inner).
reserved_sql_keyword(input).
reserved_sql_keyword(insensitive).
reserved_sql_keyword(insert).
reserved_sql_keyword(int).
reserved_sql_keyword(integer).
reserved_sql_keyword(intersect).
reserved_sql_keyword(interval).
reserved_sql_keyword(into).
reserved_sql_keyword(is).
reserved_sql_keyword(isolation).
reserved_sql_keyword(join).
reserved_sql_keyword(key).
reserved_sql_keyword(language).
reserved_sql_keyword(last).
reserved_sql_keyword(leading).
reserved_sql_keyword(left).
reserved_sql_keyword(level).
reserved_sql_keyword(like).
reserved_sql_keyword(local).
reserved_sql_keyword(lower).
reserved_sql_keyword(match).
reserved_sql_keyword(max).
reserved_sql_keyword(min).
reserved_sql_keyword(minute).
reserved_sql_keyword(module).
reserved_sql_keyword(names).
reserved_sql_keyword(national).
reserved_sql_keyword(natural).
reserved_sql_keyword(nchar).
reserved_sql_keyword(next).
reserved_sql_keyword(no).
reserved_sql_keyword(not).
reserved_sql_keyword(null).
reserved_sql_keyword(nullif).
reserved_sql_keyword(numeric).
reserved_sql_keyword(octet_length).
reserved_sql_keyword(of).
reserved_sql_keyword(on).
reserved_sql_keyword(only).
reserved_sql_keyword(open).
reserved_sql_keyword(option).
reserved_sql_keyword(or).
reserved_sql_keyword(order).
reserved_sql_keyword(outer).
reserved_sql_keyword(output).
reserved_sql_keyword(overlaps).
reserved_sql_keyword(pad).
reserved_sql_keyword(partial).
reserved_sql_keyword(position).
reserved_sql_keyword(precision).
reserved_sql_keyword(prepare).
reserved_sql_keyword(preserve).
reserved_sql_keyword(primary).
reserved_sql_keyword(prior).
reserved_sql_keyword(privileges).
reserved_sql_keyword(procedure).
reserved_sql_keyword(public).
reserved_sql_keyword(read).
reserved_sql_keyword(real).
reserved_sql_keyword(references).
reserved_sql_keyword(relative).
reserved_sql_keyword(restrict).
reserved_sql_keyword(revoke).
reserved_sql_keyword(right).
reserved_sql_keyword(rollback).
reserved_sql_keyword(schema).
reserved_sql_keyword(scroll).
reserved_sql_keyword(second).
reserved_sql_keyword(section).
reserved_sql_keyword(select).
reserved_sql_keyword(session).
reserved_sql_keyword(session_user).
reserved_sql_keyword(set).
reserved_sql_keyword(size).
reserved_sql_keyword(smallint).
reserved_sql_keyword(some).
reserved_sql_keyword(space).
reserved_sql_keyword(sql).
reserved_sql_keyword(sqlcode).
reserved_sql_keyword(sqlerror).
reserved_sql_keyword(sqlstate).
reserved_sql_keyword(substring).
reserved_sql_keyword(sum).
reserved_sql_keyword(system_user).
reserved_sql_keyword(table).
reserved_sql_keyword(temporary).
reserved_sql_keyword(then).
reserved_sql_keyword(time).
reserved_sql_keyword(timestamp).
reserved_sql_keyword(timezone_hour).
reserved_sql_keyword(timezone_minute).
reserved_sql_keyword(to).
reserved_sql_keyword(trailing).
reserved_sql_keyword(transaction).
reserved_sql_keyword(translate).
reserved_sql_keyword(translation).
reserved_sql_keyword(trim).
reserved_sql_keyword(true).
reserved_sql_keyword(union).
reserved_sql_keyword(unique).
reserved_sql_keyword(unknown).
reserved_sql_keyword(update).
reserved_sql_keyword(upper).
reserved_sql_keyword(usage).
reserved_sql_keyword(user).
reserved_sql_keyword(using).
reserved_sql_keyword(values).
reserved_sql_keyword(varchar).
reserved_sql_keyword(varying).
reserved_sql_keyword(view).
reserved_sql_keyword(when).
reserved_sql_keyword(whenever).
reserved_sql_keyword(where).
reserved_sql_keyword(with).
reserved_sql_keyword(work).
reserved_sql_keyword(write).
reserved_sql_keyword(zone).
reserved_sql_keyword(isnull).
reserved_sql_keyword(ltrim).
reserved_sql_keyword(replace).
reserved_sql_keyword(dateadd).
reserved_sql_keyword(datediff).
reserved_sql_keyword(charindex).
reserved_sql_keyword(datepart).
reserved_sql_keyword(abs).
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/lib/swipl/library/cql/sql_keywords.pl | Perl | mit | 8,858 |
% Adapted from LP/SGST06/times.pl
% terminating
%query: times(i,i,o).

% times(X,Y,Z): Z = X*Y over Peano numerals (0, s(0), s(s(0)), ...).
times(X,Y,Z) :- mult(X,Y,0,Z).

% mult(X,Y,W,Z): Z = X*Y + W.  W counts down one unit per s/1 peeled
% off Z, and is refilled with Y each time X is decremented.
mult(0,Y,0,Z) :- !, eq(Z,0).
mult(s(X),Y,0,Z) :- !, mult(X,Y,Y,Z).
mult(X,Y,W,s(Z)) :- p(W,P), mult(X,Y,P,Z).

% p(X,P): P is the predecessor of X (the predecessor of 0 is 0).
p(0,0).
p(s(X),X).

eq(X,X).
| ComputationWithBoundedResources/ara-inference | doc/tpdb_trs/Logic_Programming_with_Cut/Stroeder_09/times1.pl | Perl | mit | 257 |
#
# Just a few database utils. Not OO. Only thing that has state is a database
# handle. Please never use this for anything other than this thrown together
# project.
#
package YtrmvcLite::Database;
use strict;
use warnings;
use parent 'Exporter';
use YtrmvcLite::Strings;
our @EXPORT = (
qw(table_exists get_autoincrement init_table db_just_a_test)
);
# Package-scoped database handle; callers are expected to set
# $YtrmvcLite::Database::dbh before using any of these helpers.
our $dbh;

$YtrmvcLite::Strings::strings->{no_db_handle_ref} = 'No database handle provided.';

# Smoke-test helper: returns the error string registered above.
# string() is presumably exported by YtrmvcLite::Strings — TODO confirm.
sub db_just_a_test {
    return string('no_db_handle_ref');
}
# True (1) if SELECTing a row from $table succeeds, 0 otherwise.
# NOTE(review): $table is interpolated directly into the SQL text —
# only call this with trusted, programmer-supplied table names.
sub table_exists {
    my ($table) = @_;
    my ($sth);
    return 0 unless($sth = $dbh->prepare("SELECT * FROM " . $table . " LIMIT 1;"));
    return 0 unless($sth->execute());
    return 1;
}
# Return the driver-specific column definition for an auto-incrementing
# integer primary key, chosen from the DBI data-source string.
# Returns nothing (undef/empty list) for unrecognised drivers.
# Bug fix: removed a leftover debug "print $dbsource" that polluted
# STDOUT on every call.
sub get_autoincrement {
    my ($dbsource) = @_;
    return 'INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT' if($dbsource =~ /^DBI:mysql:/i);
    return 'INTEGER PRIMARY KEY' if($dbsource =~ /^DBI:SQLite:/i);
    return 'INTEGER PRIMARY KEY' if($dbsource =~ /^DBI:SQLite2:/i);
    return;
}
# Create $table in the database behind the package-global $dbh.
# $columns is an arrayref of hashrefs with keys: name, and optionally
# type and auto_increment.  $dbsource is the DBI DSN, used to pick
# driver-specific column types.
# NOTE(review): backtick identifier quoting is MySQL/SQLite syntax;
# confirm before using with other drivers.
sub init_table {
    my ($table, $columns, $dbsource) = @_;
    my ($sth, @column_arr);
    foreach my $column (@{$columns}) {
        # auto_increment columns get the driver's PK definition; other
        # columns go through the immediately-invoked anonymous sub,
        # which maps the logical type (notably 'ip') to a driver type
        # and defaults to TEXT.
        my $column_str = "`$$column{name}` " . ($$column{auto_increment} ?
        get_autoincrement($dbsource) : sub {
            if($$column{type}) {
                if($$column{type} eq 'ip') {
                    return 'TEXT' if $dbsource =~ /^DBI:SQLite/i;
                    return 'VARBINARY(16)' if $dbsource =~ /^DBI:MySQL/i;
                    # bytea(16) might be better if it actually works...
                    # No idea if this can detect an IP in binary either.
                    return 'inet' if $dbsource =~ /^DBI:Pg/i;
                    return 'TEXT';
                }
                return $$column{type}
            }
            return 'TEXT';
        }->());
        push @column_arr, $column_str;
    }
    $sth = $dbh->prepare("CREATE TABLE $table (" . join(',', @column_arr) . ")")
        or die $dbh->errstr;
    $sth->execute() or die $dbh->errstr;
}
1;
| marlencrabapple/YtrmvcLite | Database.pm | Perl | mit | 1,923 |
use Win32::OLE 'in';
use Win32::OLE::Const 'Microsoft WMI Scripting ';

# Recursively print the WMI class hierarchy of a namespace as an
# indented tree, starting from $ClassName ("" = all root classes).
my $ComputerName = ".";
my $NameSpace = "root/cimv2";
#my $NameSpace = "root/msapps12";
my $ClassName = "";
#my $ClassName = "__EventGenerator"; # to test quickly
# Start level so the first printed classes end up at indent 0 / -1.
my $Level=($ClassName ? -1 : -2);

my $Locator=Win32::OLE->new("WbemScripting.SWbemLocator");
my $WbemServices = $Locator->ConnectServer($ComputerName, $NameSpace);

GetSubClasses($ClassName,$Level);

sub GetSubClasses {
    my ($ClassName,$Level) = @_;
    $Level++;
    print "\n","\t" x $Level , $ClassName if $ClassName;
    # wbemQueryFlagShallow: immediate subclasses only.
    my $Instances = $WbemServices->SubClassesOf($ClassName, wbemQueryFlagShallow);
    # Recurse into each subclass, sorted case-insensitively by name.
    GetSubClasses($_,$Level) foreach sort {uc($a) cmp uc($b)} map {$_->{Path_}->{RelPath}} in $Instances;
}
| VDBBjorn/Besturingssystemen-III | Labo/reeks4/Reeks4_24.pl | Perl | mit | 783 |
package Artemis::Item;

use strict;
use warnings;

use Carp qw(confess cluck);
use Class::Method::Modifiers;
use Role::Tiny::With;

with qw(
  Artemis::Role::Domain
  Artemis::Role::Model
);

# Column names backing this model (consumed by the Model role —
# presumably; confirm against Artemis::Role::Model).
sub columns { qw(id type subtype name cost) }

# Item cost as a display string; defaults (and caches) to '0 gp'.
sub cost {
  my $self = shift;
  $self->{'cost'} ||= '0 gp';
}

1;
__END__
=head1 NAME
Artemis::Item - Base class for items
=cut
| jgoodman/Artemis | lib/Artemis/Item.pm | Perl | mit | 380 |
package Util;
=head1 DESCRIPTION
This package includes various utility subroutines.
=head1 SYNOPSIS
=head1 AUTHOR
Xuning Wang
=cut
use strict;
use Carp qw(carp croak);
use Excel::Writer::XLSX;
=head2 tab2xlsx
Usage : Util::tab2xlsx($tabfile, $outfile)
Function: Generate Excel file from tab-separated file.
Returns : An excel file
Args : infile, outfile
=cut
# Convert a tab-separated file into an Excel .xlsx workbook with a
# single worksheet, one cell per tab-delimited field.
sub tab2xlsx {
    my ( $tabfile, $outfile ) = @_;
    croak "Must have tabfile and outfile.\n" if ( !$tabfile or !$outfile );
    # 3-arg open with explicit read mode (was a 2-arg open, which would
    # misparse filenames beginning with mode characters).
    open( my $in, '<', $tabfile ) or croak "Cannot open $tabfile\n";
    my $workbook  = Excel::Writer::XLSX->new($outfile);
    my $worksheet = $workbook->add_worksheet();
    my $row = 0;
    my $col = 0;
    while ( my $line = <$in> ) {
        chomp $line;
        foreach my $e ( split( /\t/, $line ) ) {
            $worksheet->write( $row, $col++, $e );
        }
        $row++;
        $col = 0;
    }
    close $in;
}
=head2 tabcat
Usage : Util::tabcat(args)
Function: Concatenate tab-delimited files with header
Returns :
Args : infile_aref, outfile, hasHeader
infile_aref is a array ref of infiles
hasHeader: 0 if the tab file has no header row, 1-has header row.
=cut
# Concatenate tab-delimited files into $outfile.  When $hasHeader is
# true, the header row of every file after the first is dropped.
# Rewritten in pure Perl: the old qx(cat ...)/qx(tail ...) calls
# interpolated file names into a shell command line (quoting/injection
# hazard) and silently ignored failures.
sub tabcat {
    my ( $infile_aref, $outfile, $hasHeader ) = @_;
    croak "Must have outfile and infile_aref.\n"
      if ( !$outfile or !$infile_aref );
    my @infiles = @{$infile_aref};
    open my $out, '>', $outfile or croak "Cannot open $outfile\n";
    my $is_first = 1;
    foreach my $file (@infiles) {
        next unless $file and -f $file;
        open my $in, '<', $file or croak "Cannot open $file\n";
        # Skip the header row of non-first files.
        if ( $hasHeader and !$is_first ) {
            scalar <$in>;
        }
        print {$out} $_ while <$in>;
        close $in;
        $is_first = 0;
    }
    close $out;
}
=head2 run
Usage : Util::run($cmd, $fail_msg, $verbose, fail_flag_file, warn_on_error)
Function: run a command
Returns : command status
 Args    : cmd, fail_msg, verbose, fail_flag_file, warn_on_error
warn_on_error: 1-warn, 0-die.
fail_flag_file: if provided, the file will be created.
=cut
# Execute a shell command.  On failure: optionally create a flag file,
# then either warn (warn_on_error true) or die with $fail_msg.
sub run {
    my ( $run_cmd, $fail_msg, $verbose, $fail_flag_file, $warn_on_error ) = @_;
    print STDERR "$run_cmd\n" if $verbose;
    if ( system($run_cmd) != 0 ) {
        # Leave a marker file so callers can detect the failure later.
        qx(touch $fail_flag_file) if $fail_flag_file;
        $warn_on_error ? warn "$fail_msg\n" : die "$fail_msg\n";
    }
}
=head2 getJobName
Usage : Util::getJobName($prefix, $suffix)
Function: Construct a SGE job name based on prefix and suffix
Returns : a job name
Args : prefix and suffix
=cut
# Construct an SGE job name: <prefix><current minute, 2 digits>.<suffix>.
# Uses localtime instead of shelling out to `date +%M` (same zero-padded
# minute-of-hour, no subprocess).
sub getJobName {
    my ( $prefix, $suffix ) = @_;
    croak "No prefix or suffix\n" if !$prefix or !$suffix;
    my $minute = sprintf '%02d', (localtime)[1];
    return join( "", $prefix, $minute, ".", $suffix );
}
=head2 getJobCount
Usage : Util::getJobCount($jobname_href)
Function: Obtain the number of jobs still on SGE queue
Returns : a number
Args : a hash reference of jobname
=cut
# Count how many of the jobs named in the given hashref (job name =>
# true) still appear in the SGE `qstat` listing.
sub getJobCount {
    my $jobs = shift;
    my $result = `qstat`;
    my @lines = split( /\n/, $result );
    return 0 if !@lines;
    my $n = 0;
    shift @lines;    # remove header line
    shift @lines;    # remove ------ line
    foreach my $line (@lines) {
        next if !$line;
        $line =~ s/^\s+//g;
        # qstat columns: job-ID  prior  name  user  state ...
        my ( $jobid, $prior, $name, $user, $state ) = split( /\s+/, $line );
        next if !$jobs->{$name};
        $n++;
    }
    return $n;
}
=head2 refGeneCoord
Usage : Util::refGeneCoord(refGeneFile, refGeneID)
Function: Return coordinates from UCSC refGene table for a given refseq ID
refseq ID examples: NM_001005738. It's the 2nd column in the file.
Returns : an array: (name, chr, strand, txStart, txEnd, cdsStart, cdsEnd,
exonStarts, exonEnds)
Args : refGene file, refGene ID
=cut
# Look up a refSeq ID (case-insensitively, column 2) in a UCSC refGene
# table file and return (name, chr, strand, txStart, txEnd, cdsStart,
# cdsEnd, exonStarts, exonEnds), or the empty list if not found.
# Fixes: 3-arg open with explicit read mode, and the stray '!' after
# the newline in the original error message.
sub refGeneCoord {
    my ( $refGene_file, $refseq_id ) = @_;
    open( my $inf, '<', $refGene_file ) or croak "Could not open $refGene_file!\n";
    while ( my $line = <$inf> ) {
        chomp $line;
        my @a = split( /\t/, $line );
        next unless defined $a[1];
        if ( uc( $a[1] ) eq uc($refseq_id) ) {
            close $inf;
            return ( $a[1], $a[2], $a[3], $a[4], $a[5], $a[6], $a[7], $a[9], $a[10] );
        }
    }
    close $inf;
    return ();
}
1;
| pinetree1/crispr-dav | Modules/Util.pm | Perl | mit | 4,250 |
#!/usr/bin/perl -w
use strict;

# Print the product of two fixed factors (6 * 7 = 42).
my $factor0 = 6;
my $factor1 = 7;
print $factor0 * $factor1, "\n";
| oringal/ass1_cs2041 | 1/answer4.pl | Perl | mit | 80 |
#!/usr/bin/env perl
=head1 NAME
AddTissueSampleCvterm
=head1 SYNOPSIS
mx-runAddTissueSampleCvterm [options] -H hostname -D dbname -u username [-F]
this is a subclass of L<CXGN::Metadata::Dbpatch>
see the perldoc of parent class for more details.
=head1 DESCRIPTION
This patch adds stock)type cvterm for tissue sample.
This subclass uses L<Moose>. The parent class uses L<MooseX::Runnable>
=head1 AUTHOR
Alex Ogbonna<aco46@cornell.edu>
=head1 COPYRIGHT & LICENSE
Copyright 2010 Boyce Thompson Institute for Plant Research
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
package AddTissueSampleCvterm;

use Moose;
use Bio::Chado::Schema;
use Try::Tiny;
extends 'CXGN::Metadata::Dbpatch';

# Human-readable patch description shown by the dbpatch framework.
# (The <<'' heredoc is terminated by the first empty line.)
has '+description' => ( default => <<'' );
This patch adds stock)type cvterm for tissue sample.

# No prerequisite patches.
has '+prereq' => (
    default => sub {
        [],
    },
  );

# Apply the patch: create the 'tissue_sample' cvterm under the
# 'stock_type' cv (create_with is idempotent for existing terms).
sub patch {
    my $self=shift;
    print STDOUT "Executing the patch:\n " . $self->name . ".\n\nDescription:\n ". $self->description . ".\n\nExecuted by:\n " . $self->username . " .";
    print STDOUT "\nChecking if this db_patch was executed before or if previous db_patches have been executed.\n";
    print STDOUT "\nExecuting the SQL commands.\n";
    my $schema = Bio::Chado::Schema->connect( sub { $self->dbh->clone } );
    print STDERR "INSERTING CV TERMS...\n";
    my $terms = {
        'stock_type' => [
            'tissue_sample',
        ]
    };
    foreach my $t (keys %$terms){
        foreach (@{$terms->{$t}}){
            $schema->resultset("Cv::Cvterm")->create_with({
                name => $_,
                cv => $t
            });
        }
    }
    print "You're done!\n";
}

####
1; #
####
| solgenomics/sgn | db/00090/AddTissueSampleCvterm.pm | Perl | mit | 1,703 |
=pod
=head1 NAME
openssl-sess_id,
sess_id - SSL/TLS session handling utility
=head1 SYNOPSIS
B<openssl> B<sess_id>
[B<-help>]
[B<-inform PEM|DER>]
[B<-outform PEM|DER|NSS>]
[B<-in filename>]
[B<-out filename>]
[B<-text>]
[B<-noout>]
[B<-context ID>]
=head1 DESCRIPTION
The B<sess_id> process the encoded version of the SSL session structure
and optionally prints out SSL session details (for example the SSL session
master key) in human readable format. Since this is a diagnostic tool that
needs some knowledge of the SSL protocol to use properly, most users will
not need to use it.
=head1 OPTIONS
=over 4
=item B<-help>
Print out a usage message.
=item B<-inform DER|PEM>
This specifies the input format. The B<DER> option uses an ASN1 DER encoded
format containing session details. The precise format can vary from one version
to the next. The B<PEM> form is the default format: it consists of the B<DER>
format base64 encoded with additional header and footer lines.
=item B<-outform DER|PEM|NSS>
This specifies the output format. The B<PEM> and B<DER> options have the same meaning
and default as the B<-inform> option. The B<NSS> option outputs the session id and
the master key in NSS keylog format.
=item B<-in filename>
This specifies the input filename to read session information from or standard
input by default.
=item B<-out filename>
This specifies the output filename to write session information to or standard
output if this option is not specified.
=item B<-text>
Prints out the various public or private key components in
plain text in addition to the encoded version.
=item B<-cert>
If a certificate is present in the session it will be output using this option,
if the B<-text> option is also present then it will be printed out in text form.
=item B<-noout>
This option prevents output of the encoded version of the session.
=item B<-context ID>
This option can set the session id so the output session information uses the
supplied ID. The ID can be any string of characters. This option won't normally
be used.
=back
=head1 OUTPUT
Typical output:
SSL-Session:
Protocol : TLSv1
Cipher : 0016
Session-ID: 871E62626C554CE95488823752CBD5F3673A3EF3DCE9C67BD916C809914B40ED
Session-ID-ctx: 01000000
Master-Key: A7CEFC571974BE02CAC305269DC59F76EA9F0B180CB6642697A68251F2D2BB57E51DBBB4C7885573192AE9AEE220FACD
Key-Arg : None
Start Time: 948459261
Timeout : 300 (sec)
Verify return code 0 (ok)
These are described below in more detail.
=over 4
=item B<Protocol>
This is the protocol in use TLSv1.3, TLSv1.2, TLSv1.1, TLSv1 or SSLv3.
=item B<Cipher>
The cipher used this is the actual raw SSL or TLS cipher code, see the SSL
or TLS specifications for more information.
=item B<Session-ID>
The SSL session ID in hex format.
=item B<Session-ID-ctx>
The session ID context in hex format.
=item B<Master-Key>
This is the SSL session master key.
=item B<Start Time>
This is the session start time represented as an integer in standard
Unix format.
=item B<Timeout>
The timeout in seconds.
=item B<Verify return code>
This is the return code when an SSL client certificate is verified.
=back
=head1 NOTES
The PEM encoded session format uses the header and footer lines:
-----BEGIN SSL SESSION PARAMETERS-----
-----END SSL SESSION PARAMETERS-----
Since the SSL session output contains the master key it is
possible to read the contents of an encrypted session using this
information. Therefore appropriate security precautions should be taken if
the information is being output by a "real" application. This is however
strongly discouraged and should only be used for debugging purposes.
=head1 BUGS
The cipher and start time should be printed out in human readable form.
=head1 SEE ALSO
L<ciphers(1)>, L<s_server(1)>
=head1 COPYRIGHT
Copyright 2000-2020 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| pmq20/ruby-compiler | vendor/openssl/doc/man1/sess_id.pod | Perl | mit | 4,179 |
% SWI-Prolog pack metadata for the logicmoo_nars package.
name(logicmoo_nars).
version('2.0.3').
title('Narsese: Like OpenNARS in Prolog').
keywords([nars,ai,agi,chatbot,interpeters]).
author( 'Douglas Miles', 'http://www.linkedin.com/in/logicmoo' ).
packager( 'logicmoo/LogicMoo', 'https://github.com/logicmoo/' ).
maintainer( 'logicmoo', 'https://github.com/logicmoo/' ).
home( 'https://github.com/logicmoo/logicmoo_nars' ).
download( 'https://github.com/logicmoo/logicmoo_nars/release/*.zip' ).
requires(logicmoo_utils).
autoload(false).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nars/pack.pl | Perl | mit | 483 |
/* Part of SWI-Prolog
Author: Jan Wielemaker
E-mail: J.Wielemaker@vu.nl
WWW: http://www.swi-prolog.org
Copyright (c) 2016, VU University Amsterdam
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(hash_stream,
          [ open_hash_stream/3, % +OrgStream, -HashStream, +Options
            stream_hash/2 % +HashStream, -Hash
          ]).
% Load the C implementation of the hash filter stream.
:- use_foreign_library(foreign(hashstream)).
% Declare the option terms accepted by open_hash_stream/3 (argument 3)
% for IDE/check support.
:- predicate_options(open_hash_stream/3, 3,
                     [ close_parent(boolean),
                       algorithm(oneof([md5,sha1,sha224,sha256,sha384,sha512]))
                     ]).
/** <module> Maintain a hash on a stream
This library defines a filter stream that maintains a hash of the data
that passes through the stream. It can be used to compute the hash of
input data while it is being processed. This is notably interesting if
data is processed from a socket as it avoids the need for collecting the
data first in a temporary file.
A typical processing sequence is illustrated below, where process/2
somehow processed the data and save_result/3 records the result as
obtained from `URL` with content digest `SHA256` its `Result`.
```
...,
http_open(URL, In0, []),
open_hash_stream(In0, In, [algorithm(sha256)]),
process(In, Result),
stream_hash(In, SHA256),
close(In),
save_result(URL, SHA256, Result)
```
This library can also be used to compute the hash for the content of a
file. The advantage is that this code doesn't rely on external tools. It
is considerably faster for short files, but considerably slower on large
files because Prolog I/O is based on character streams rather than
blocks.
```
file_hash(Algorithm, File, Hash) :-
setup_call_cleanup(
open(File, read, In0, [type(binary)]),
setup_call_cleanup(
open_hash_stream(In0, In,
[ algorithm(Algorithm),
close_parent(false)
]),
( setup_call_cleanup(
open_null_stream(Null),
copy_stream_data(In, Null),
close(Null)),
stream_hash(In, Hash)
),
close(In)),
close(In0)).
```
@see In addition to this hash library, SWI-Prolog provides
library(md5), library(sha) and hash functions through
library(crypto), part of the `ssl` package.
*/
%! open_hash_stream(+OrgStream, -HashStream, +Options) is det.
%
% Open a filter stream on OrgStream that maintains a hash. The
% hash can be retrieved at any time using stream_hash/2. Provided
% options:
%
% - algorithm(+Algorithm)
% One of `md5`, `sha1`, `sha224`, `sha256`, `sha384` or
% `sha512`. Default is `sha1`.
% - close_parent(+Bool)
% If `true` (default), closing the filter stream also closes the
% original (parent) stream.
%! stream_hash(+HashStream, -Digest:atom) is det.
%
% Unify Digest with a hash for the bytes send to or read from
% HashStream. Note that the hash is computed on the stream
% buffers. If the stream is an output stream, it is first flushed
% and the Digest represents the hash at the current location. If
% the stream is an input stream the Digest represents the hash of
% the processed input including the already buffered data.
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/lib/swipl/library/hash_stream.pl | Perl | mit | 4,787 |
package GAL::List::Numeric;
use strict;
use vars qw($VERSION);
$VERSION = 0.2.0;
use base qw(GAL::List);
use Statistics::Descriptive;
=head1 NAME
GAL::List::Numeric - Provide functions for lists of numbers
=head1 VERSION
This document describes GAL::List::Numeric version 0.2.0
=head1 SYNOPSIS
use GAL::List::Numeric;
@rand_list = map {int(rand(1000)) + 1} (1 .. 1000);
$list_numeric = GAL::List::Numeric->new(list => \@rand_list);
$stats = $list_numeric->stats;
$mean = $list_numeric->stats->mean;
$bins = $list_numeric->bins(\@bins);
$bins = $list_numeric->bin_range($min, $max, $step);
$fd = $list_numeric->fd($bin_value);
$cfd = $list_numeric->cfd;
$rfd = $list_numeric->relative_fd;
$rcfd = $list_numeric->relative_cfd;
# The following methods are provided by <Statistics::Descriptive>, please see
# documentation for that package.
$list_numeric->stats->count();
$list_numeric->stats->mean();
$list_numeric->stats->sum();
$list_numeric->stats->variance();
$list_numeric->stats->standard_deviation();
$list_numeric->stats->min();
$list_numeric->stats->mindex();
$list_numeric->stats->max();
$list_numeric->stats->maxdex();
$list_numeric->stats->sample_range();
$list_numeric->stats->median();
$list_numeric->stats->harmonic_mean();
$list_numeric->stats->geometric_mean();
$list_numeric->stats->mode();
$list_numeric->stats->trimmed_mean($ltrim, $utrim);
$list_numeric->stats->frequency_distribution($partitions);
$list_numeric->stats->frequency_distribution(\@bins);
$list_numeric->stats->frequency_distribution();
$list_numeric->stats->least_squares_fit();
=head1 DESCRIPTION
GAL::List::Numeric provides a collection of functions for lists
of numbers. It uses Statistics::Descriptive to provide basic
descriptive statistics. In addition it provides frequency
distributions - as well as relative and cumulative frequency
distributions - of binned values.
=head1 METHODS
=cut
#-----------------------------------------------------------------------------
# Constructor
#-----------------------------------------------------------------------------
=head2 new
Title : new
Usage : GAL::List::Numeric->new()
Function: Creates a GAL::List::Numeric object;
Returns : A GAL::List::Numeric object
Args : list => \@list
=cut
sub new {
    # Construct a GAL::List::Numeric; all argument handling is delegated
    # to the GAL::List superclass constructor.
    my ($class, @args) = @_;
    return $class->SUPER::new(@args);
}
sub _initialize_args {
    # Internal: validate and apply constructor arguments after the
    # superclass has parsed them.  Called by GAL::Base machinery.
    my ($self, @args) = @_;
    ######################################################################
    # This block of code handles class attributes.  Use the
    # @valid_attributes below to define the valid attributes for
    # this class.  You must have identically named get/set methods
    # for each attribute.  Leave the rest of this block alone!
    ######################################################################
    my $args = $self->SUPER::_initialize_args(@args);
    # Set valid class attributes here (this subclass adds none of its own).
    my @valid_attributes = qw();
    $self->set_attributes($args, @valid_attributes);
    ######################################################################
}
#-----------------------------------------------------------------------------
# Attributes
#-----------------------------------------------------------------------------
# =head2 attribute
#
# Title : attribute
# Usage : $a = $self->attribute()
# Function: Get/Set the value of attribute.
# Returns : The value of attribute.
# Args : A value to set attribute to.
#
# =cut
#
# sub attribute {
# my ($self, $attribute) = @_;
# $self->{attribute} = $attribute if $attribute;
# return $self->{attribute};
# }
#-----------------------------------------------------------------------------
# Methods
#-----------------------------------------------------------------------------
=head2 stats
Title : stats
Usage : $stats = $self->stats()
Function: Provides access to the Statistics::Descriptive object
Returns : A Statistics::Descriptive object
Args : None
=cut
sub stats {
    # Lazily build (and cache) the Statistics::Descriptive::Full object
    # loaded with the values of this list.
    my $self = shift;
    $self->{stats} ||= do {
        my $calc = Statistics::Descriptive::Full->new();
        $calc->add_data(@{ $self->{list} });
        $calc;
    };
    return $self->{stats};
}
#-----------------------------------------------------------------------------
=head2 count
Title : count
Usage : $a = $self->count()
Function: Returns the count of the list of elements
Returns :
Args :
=cut
sub count {
    # Delegate to Statistics::Descriptive: number of elements.
    my $self = shift;
    return $self->stats->count();
}
#-----------------------------------------------------------------------------
=head2 mean
Title : mean
Usage : $a = $self->mean()
Function: Returns the mean of the list of elements
Returns :
Args :
=cut
sub mean {
    # Delegate to Statistics::Descriptive: arithmetic mean.
    my $self = shift;
    return $self->stats->mean;
}
#-----------------------------------------------------------------------------
=head2 sum
Title : sum
Usage : $a = $self->sum()
Function: Returns the sum of the list of elements
Returns :
Args :
=cut
sub sum {
    # Delegate to Statistics::Descriptive: sum of all elements.
    my $self = shift;
    return $self->stats->sum;
}
#-----------------------------------------------------------------------------
=head2 variance
Title : variance
Usage : $a = $self->variance()
Function: Returns the variance of the list of elements
Returns :
Args :
=cut
sub variance {
    # Delegate to Statistics::Descriptive: sample variance.
    my $self = shift;
    return $self->stats->variance();
}
#-----------------------------------------------------------------------------
=head2 standard_deviation
Title : standard_deviation
Usage : $a = $self->standard_deviation()
Function: Returns the standard_deviation of the list of elements
Returns :
Args :
=cut
sub standard_deviation {
    # Delegate to Statistics::Descriptive: sample standard deviation.
    my $self = shift;
    return $self->stats->standard_deviation();
}
#-----------------------------------------------------------------------------
=head2 min
Title : min
Usage : $a = $self->min()
Function: Returns the min of the list of elements
Returns :
Args :
=cut
sub min {
    # Delegate to Statistics::Descriptive: smallest value.
    my $self = shift;
    return $self->stats->min();
}
#-----------------------------------------------------------------------------
=head2 mindex
Title : mindex
Usage : $a = $self->mindex()
Function: Returns the mindex of the list of elements
Returns :
Args :
=cut
sub mindex {
    # Delegate to Statistics::Descriptive: index of the smallest value.
    my $self = shift;
    return $self->stats->mindex();
}
#-----------------------------------------------------------------------------
=head2 max
Title : max
Usage : $a = $self->max()
Function: Returns the max of the list of elements
Returns :
Args :
=cut
sub max {
    # Delegate to Statistics::Descriptive: largest value.
    my $self = shift;
    return $self->stats->max();
}
#-----------------------------------------------------------------------------
=head2 maxdex
Title : maxdex
Usage : $a = $self->maxdex()
Function: Returns the maxdex of the list of elements
Returns :
Args :
=cut
sub maxdex {
    # Delegate to Statistics::Descriptive: index of the largest value.
    my $self = shift;
    return $self->stats->maxdex();
}
#-----------------------------------------------------------------------------
=head2 sample_range
Title : sample_range
Usage : $a = $self->sample_range()
Function: Returns the sample_range of the list of elements
Returns :
Args :
=cut
sub sample_range {
    # Delegate to Statistics::Descriptive: max minus min.
    my $self = shift;
    return $self->stats->sample_range();
}
#-----------------------------------------------------------------------------
=head2 median
Title : median
Usage : $a = $self->median()
Function: Returns the median of the list of elements
Returns :
Args :
=cut
sub median {
    # Delegate to Statistics::Descriptive: median value.
    my $self = shift;
    return $self->stats->median();
}
#-----------------------------------------------------------------------------
=head2 percentile
Title : percentile
Usage : $a = $self->percentile(10)
Function: Returns the percentile of the list of elements
Returns :
Args : The percentile to return
=cut
sub percentile {
    # Delegate to Statistics::Descriptive: value at the given percentile.
    my ($self, $pct) = @_;
    return $self->stats->percentile($pct);
}
#-----------------------------------------------------------------------------
=head2 harmonic_mean
Title : harmonic_mean
Usage : $a = $self->harmonic_mean()
Function: Returns the harmonic_mean of the list of elements
Returns :
Args :
=cut
sub harmonic_mean {
    # Delegate to Statistics::Descriptive: harmonic mean.
    my $self = shift;
    return $self->stats->harmonic_mean();
}
#-----------------------------------------------------------------------------
=head2 geometric_mean
Title : geometric_mean
Usage : $a = $self->geometric_mean()
Function: Returns the geometric_mean of the list of elements
Returns :
Args :
=cut
sub geometric_mean {
    # Delegate to Statistics::Descriptive: geometric mean.
    my $self = shift;
    return $self->stats->geometric_mean();
}
#-----------------------------------------------------------------------------
=head2 mode
Title : mode
Usage : $a = $self->mode()
Function: Returns the mode of the list of elements
Returns :
Args :
=cut
sub mode {
    # Delegate to Statistics::Descriptive: most frequent value.
    my $self = shift;
    return $self->stats->mode();
}
#-----------------------------------------------------------------------------
=head2 trimmed_mean
Title : trimmed_mean
Usage : $a = $self->trimmed_mean()
Function: Returns the trimmed_mean of the list of elements
Returns :
Args :
=cut
sub trimmed_mean {
    # Delegate to Statistics::Descriptive; passes through the lower/upper
    # trim fractions unchanged.
    my ($self, @trim) = @_;
    return $self->stats->trimmed_mean(@trim);
}
#-----------------------------------------------------------------------------
=head2 frequency_distribution
Title : frequency_distribution
Usage : $a = $self->frequency_distribution()
Function: Returns the frequency_distribution of the list of elements
Returns :
Args :
=cut
sub frequency_distribution {
    # Delegate to Statistics::Descriptive; the argument may be a partition
    # count or a reference to a list of bin boundaries.
    my ($self, $partitions) = @_;
    return $self->stats->frequency_distribution($partitions);
}
#-----------------------------------------------------------------------------
=head2 least_squares_fit
Title : least_squares_fit
Usage : $a = $self->least_squares_fit()
Function: Returns the least_squares_fit of the list of elements
Returns :
Args :
=cut
sub least_squares_fit {
    # Delegate to Statistics::Descriptive: linear least-squares fit.
    my ($self, @args) = @_;
    return $self->stats->least_squares_fit(@args);
}
#-----------------------------------------------------------------------------
=head2 histogram
Title : histogram
Usage : $a = $self->histogram()
Function:
Returns :
Args :
=cut
sub histogram {
    # Placeholder: histogram rendering has not been written yet; throws
    # unconditionally.  (Error message spelling fixed.)
    my $self = shift;
    $self->throw('developer_error', 'Method histogram not implemented yet');
}
#-----------------------------------------------------------------------------
=head2 bins
Title : bins
Usage : @bins = $self->bins($bin_count)
@bins = $self->bins($min, $max, $step)
@bins = $self->bins(\@bins)
Function: Get/Set bin values
Returns : A list or reference of the bin values.
Args : A reference to a list of bin values.
=cut
sub bins {
    # Get/Set the bin boundaries used by fd/cfd.  Accepts:
    #   bins(\@bins)             - explicit list of upper bin boundaries
    #   bins($bin_count)         - divide [min, max] into $bin_count bins
    #   bins($min, $max, $step)  - explicit range (any may be undef)
    # Returns the bin list (list or arrayref depending on context).
    my ($self, $min, $max, $step) = @_;
    if (ref $min eq 'ARRAY') {
        $self->{bins} = $min;
    }
    else {
        if ($min && ! defined $max && ! defined $step) {
            # A single numeric argument is a bin COUNT, not a minimum.
            # BUG FIX: the original kept the count in $min (the
            # "unless defined" guard could never fire), so the first bin
            # boundary started at the count instead of the data minimum.
            my $bin_count = $min;
            $min  = $self->min;
            $max  = $self->max;
            $step = ($max - $min) / $bin_count;
        }
        # bin_range applies the same defaulting/stepping logic the
        # original duplicated inline, and stores the result in
        # $self->{bins}.
        $self->bin_range($min, $max, $step);
    }
    return wantarray ? @{$self->{bins}} : $self->{bins};
}
#-----------------------------------------------------------------------------
=head2 bin_range
Title : bin_range
Usage : @bins = $self->bin_range($min, $max, $step)
Function: Get
Returns :
Args :
=cut
sub bin_range {
    # Build evenly spaced bin boundaries over [$min, $max] with width
    # $step; any undef argument defaults from the data (min, max, or a
    # tenth of the range).  Stores and returns the boundary list.
    my ($self, $min, $max, $step) = @_;
    $min  = $self->min               unless defined $min;
    $max  = $self->max               unless defined $max;
    $step = ($max - $min) / 10       unless defined $step;
    # Round the step to one significant digit for friendlier boundaries.
    $step = sprintf "%.1g", $step;
    my @bins;
    my $edge = $min + $step;
    while ($edge <= $max + $step) {
        push @bins, $edge;
        $edge += $step;
    }
    $self->{bins} = \@bins;
    return wantarray ? @bins : \@bins;
}
#-----------------------------------------------------------------------------
=head2 fd
Title : fd
Usage : $a = $self->fd()
Function:
Returns :
Args :
=cut
sub fd {
    # Frequency distribution of the list values over the current bins.
    # $bin_value may be: an arrayref of bin boundaries, a hashref with
    # min/max/step keys, or an integer bin count.  Returns a hash (or
    # hashref) mapping each bin's upper boundary to its count.
    #
    # BUG FIXES vs. original:
    #  - cache test used the key 'frequencey_distribution' while results
    #    were stored under 'fd', so the cache never hit;
    #  - the ARRAY case filled a lexical @bins that was immediately
    #    shadowed, so caller-supplied bins were silently ignored (now
    #    registered via $self->bins);
    #  - HASH case used the deprecated/incorrect deref
    #    @{$bin_value}->{qw(...)} instead of the hash slice
    #    @{$bin_value}{qw(min max step)}.
    my ($self, $bin_value) = @_;
    my $min = $self->min;
    my $max = $self->max;
    # Recompute when nothing is cached or new binning is requested.
    if (! $self->{fd} || $bin_value) {
        if (ref $bin_value eq 'ARRAY') {
            $self->bins($bin_value);
        }
        elsif (ref $bin_value eq 'HASH') {
            $self->bin_range(@{$bin_value}{qw(min max step)});
        }
        elsif ($bin_value && $bin_value == int($bin_value)) {
            my $step = ($max - $min) / $bin_value;
            $self->bin_range(undef, undef, $step);
        }
        my @bins = $self->bins;
        my %fd;
        # Binary search each datum into the bin whose boundary interval
        # (lower, upper] contains it; float_le/float_gt (from GAL::List)
        # give tolerant float comparisons.
      DATUM:
        for my $datum ($self->list) {
            my $min_bin = 0;
            my $max_bin = scalar @bins - 1;
            my $bindex  = int($max_bin / 2);
            while (1) {
                # Datum falls outside all bins: skip it.
                next DATUM if ($bindex < $min_bin ||
                               $bindex > $max_bin);
                my $upper = $bins[$bindex];
                my $lower = $bindex - 1 >= 0 ? $bins[$bindex - 1] : $min - 1;
                if ($self->float_le($datum, $upper)) {
                    $max_bin = $bindex;
                    if ($self->float_gt($datum, $lower)) {
                        $fd{$bins[$bindex]}++;
                        next DATUM;
                    }
                    else {
                        # Too small for this bin: search the lower half.
                        $bindex -= (int(($bindex - $min_bin) / 2) || 1);
                    }
                }
                else {
                    # Too large for this bin: search the upper half.
                    $min_bin = $bindex;
                    $bindex += (int(($max_bin - $bindex) / 2) || 1);
                }
            }
        }
        # Make sure empty bins are reported as zero, not missing.
        $fd{$_} ||= 0 for @bins;
        $self->{fd} = \%fd;
    }
    return wantarray ? %{$self->{fd}} : $self->{fd};
}
#-----------------------------------------------------------------------------
=head2 cfd
Title : cfd
Usage : $a = $self->cfd()
Function:
Returns :
Args :
=cut
sub cfd {
    # Cumulative frequency distribution: like fd(), but each bin's count
    # includes all lower bins.
    my $self = shift;
    my %cfd = $self->fd;
    my $running_total = 0;
    for my $bin (sort { $a <=> $b } keys %cfd) {
        $running_total += $cfd{$bin};
        $cfd{$bin} = $running_total;
    }
    return wantarray ? %cfd : \%cfd;
}
#-----------------------------------------------------------------------------
=head2 relative_fd
Title : relative_fd
Usage : $a = $self->relative_fd()
Function:
Returns :
Args :
=cut
sub relative_fd {
    # Relative frequency distribution: fd() counts divided by the total
    # number of elements, so the values sum to 1.
    my $self  = shift;
    my %rfd   = $self->fd;
    my $count = $self->count;
    $rfd{$_} /= $count for keys %rfd;
    return wantarray ? %rfd : \%rfd;
}
#-----------------------------------------------------------------------------
=head2 relative_cfd
Title : relative_cfd
Usage : $a = $self->relative_cfd()
Function:
Returns :
Args :
=cut
sub relative_cfd {
    # Relative cumulative frequency distribution: cfd() values divided by
    # the total number of elements (the last bin approaches 1).
    my $self  = shift;
    my %rcfd  = $self->cfd;
    my $count = $self->count;
    $rcfd{$_} /= $count for keys %rcfd;
    return wantarray ? %rcfd : \%rcfd;
}
#-----------------------------------------------------------------------------
=head2 method
Title : method
Usage : $a = $self->method()
Function:
Returns :
Args :
=cut
sub method {
    # NOTE(review): this is the boilerplate template for adding new
    # methods, intentionally unimplemented; the (misspelled) message is
    # preserved byte-for-byte since it is runtime-visible text.
    my $self = shift;
    $self->throw('developer_error', 'Metohd not implimented yet');
}
#-----------------------------------------------------------------------------
=head1 DIAGNOSTICS
=over
=item C<< Method histogram not implemented yet >>
There is a place holder for the histogram method, but the method has
not been written yet.
=back
=head1 CONFIGURATION AND ENVIRONMENT
<GAL::List::Numeric> requires no configuration files or environment variables.
=head1 DEPENDENCIES
<GAL::List>
<Statistics::Descriptive>
=head1 INCOMPATIBILITIES
None reported.
=head1 BUGS AND LIMITATIONS
No bugs have been reported.
Please report any bugs or feature requests to:
barry.moore@genetics.utah.edu
=head1 AUTHOR
Barry Moore <barry.moore@genetics.utah.edu>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2010-2014, Barry Moore <barry.moore@genetics.utah.edu>. All rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself (See LICENSE).
=head1 DISCLAIMER OF WARRANTY
BECAUSE THIS SOFTWARE IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE SOFTWARE, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE SOFTWARE "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE SOFTWARE IS WITH
YOU. SHOULD THE SOFTWARE PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
NECESSARY SERVICING, REPAIR, OR CORRECTION.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE SOFTWARE AS PERMITTED BY THE ABOVE LICENCE, BE
LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL,
OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE
THE SOFTWARE (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE SOFTWARE TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
=cut
1;
| 4ureliek/TEanalysis | Lib/GAL/List/Numeric.pm | Perl | mit | 17,045 |
#!perl
use strict;
use warnings;
use constant DAT_DIR => 'csv/';
use constant OUT_DIR => 'idx/';
use Text::CSV;
my $csv = Text::CSV->new();

# Collect every .csv file in the data directory.
my @files;
{
    opendir my $dh, DAT_DIR or die "can't open: ", DAT_DIR, ":$!";
    @files = map { DAT_DIR . $_ } grep { /\.csv$/ } readdir $dh;
    closedir $dh;
}

# Generate word frequency lists: one overall, and one per source
# (twitter / news / blogs), keyed off the source file's name.
my %words;
my %twitter;
my %blogs;
my %news;
for my $file (@files) {
    open my $in, '<', $file or die "can't open: $file: $!";
    while (my $row = $csv->getline($in)) {
        for my $word (@$row) {
            $words{$word}++;
            if    ($file =~ /twitter/) { $twitter{$word}++ }
            elsif ($file =~ /news/)    { $news{$word}++ }
            else                       { $blogs{$word}++ }
        }
    }
    close $in;
    warn "finished: $file\n";
}

# Write each word list, sorted, one word per line.  (The original used
# bareword global filehandles and repeated this block four times.)
my %outputs = (
    'wordlist.txt'         => \%words,
    'blogs_wordlist.txt'   => \%blogs,
    'twitter_wordlist.txt' => \%twitter,
    'news_wordlist.txt'    => \%news,
);
for my $name (sort keys %outputs) {
    open my $out, '>', OUT_DIR . $name
        or die "can't generate $name: $!";
    print {$out} map { "$_\n" } sort keys %{ $outputs{$name} };
    close $out or die "can't write $name: $!";
}
| yxes/Capstone | data/perl/index.pl | Perl | mit | 1,256 |
# PerlModule
use strict;
use Perl::Module;
use Error::Programatic;
use Error::Logical;
use Data::UUID;
use Data::Hub qw($Hub);
use Data::Hub::Util qw(:all);
use Data::OrderedHash;
our $UG = Data::UUID->new();
our $TYPE_AUTO = 'auto';
our $TYPE_CATEGORY = 'category';
our $TYPE_WEBPAGE = 'webpage';
sub ACCESS {
    # Access gate: true only for a logged-in member of the 'admins' group.
    my $user = $$Hub{'/sys/user'} or return;
    return unless $user->is_member('admins');
    return 1;
}
sub get_sitemap {
    # Return the sitemap data node, creating a fresh one at the configured
    # (or default) address when none exists yet.
    my $addr = $Hub->get('/sys/conf/ext/sitemap/addr') || '/web/data/sitemap.hf';
    return $Hub->get($addr) || new_sitemap(addr => $addr);
}
sub get_sitemap_addr {
    # Address (Hub path) of the sitemap data node.
    my $sitemap = get_sitemap();
    return $sitemap->get_addr;
}
sub new_sitemap {
    # Create a new sitemap node at $params{addr} and register its address
    # in the runtime and persisted configuration.  Throws if a sitemap
    # already exists unless the 'force' option is given.
    my ($opts, %params) = my_opts(\@_);
    # BUG FIX: original had a bare `die` (no message) and ended the $name
    # assignment with a comma instead of a semicolon (comma-operator typo).
    my $addr = $params{'addr'} or die "new_sitemap: missing 'addr' parameter";
    my $name = $params{'name'} || 'Website';
    my $type = 'category';
    my $uuid = $UG->to_string($UG->create());
    my $data = Data::OrderedHash->new(
        '.name' => $name,
        '.type' => $type,
        '.uuid' => $uuid,
    );
    throw Error::Logical 'Sitemap already exists'
        if $$Hub{$addr} && !$$opts{'force'};
    $Hub->get('/sys/log')->warn('Creating sitemap: ' . $addr);
    my $sitemap = $Hub->set($addr, $data)->save();
    # Record the sitemap address in both the live config and on disk.
    $Hub->set('/sys/conf/ext/sitemap/addr', $addr);
    $Hub->get('/sys/cfgldr')->write_value('ext/sitemap/addr', $addr);
    $Hub->get('/sys/cfgldr')->refresh();
    return $sitemap;
}
sub new_category {
    # Create a category: register it in the sitemap index and vivify the
    # backing directory.  Returns client commands to refresh both nodes.
    my $args = {@_};
    my $info = _create_sitemap_entry($TYPE_CATEGORY, $args);
    my $dir = $Hub->vivify($$info{'target_addr'});
    $dir->save;
    return {
        commands => [
            ['fetch', $$info{'index_addr'}],
            ['fetch', $dir->get_addr],
        ],
    };
}
sub update_category {
    # Rename and/or move an existing category.  Updates the sitemap entry's
    # display name, and when the pathname changed, moves the backing
    # directory, re-keys the entry under its parent, and rewrites the
    # '.addr' of the entry and every descendant entry.  Returns client
    # 'fetch' commands for every node whose address changed.
    my $params = {@_};
    my $sitemap = get_sitemap();
    my @commands = (['fetch', $sitemap->get_addr]);
    my $current_addr = $$params{'addr'};
    my $current_pathname = addr_name($current_addr);
    my $current_entry = _get_sitemap_entry($current_addr);
    # Rename: always apply the (possibly unchanged) display name.
    my $new_name = $$params{'name'};
    $$current_entry{'.name'} = $new_name;
    # Move: only when the filesystem node name changed.
    my $new_pathname = $$params{'pathname'};
    if ($current_pathname ne $new_pathname) {
        my $parent_addr = addr_parent($current_addr);
        my $parent_entry = _get_sitemap_entry($parent_addr);
        my $new_entry_addr = "$parent_addr/$new_pathname";
        my $new_target_addr = _build_target_address($new_entry_addr);
        my $parent_target_addr = _build_target_address($parent_addr);
        # Sanity checks before touching the filesystem.
        my $parent_dir = $$Hub{$parent_target_addr} or die "Missing parent dir";
        my $current_dir = $$parent_dir{$current_pathname} or die "Current target missing";
        my $new_dir = $$parent_dir{$new_pathname} and die "New target exists";
        dir_move($current_dir->get_path, $parent_dir->get_path . '/' . $new_pathname);
        # Re-key the entry under its parent and update its address.
        $$parent_entry{$new_pathname} = $current_entry;
        delete $$parent_entry{$current_pathname};
        $$current_entry{'.addr'} = $new_target_addr;
        # Rewrite the '.addr' of every descendant entry to reflect the new
        # base path, and queue fetches for both the old and new addresses.
        $current_entry->walk(sub {
            my ($key, $entry, $depth, $addr, $struct) = @_;
            return unless isa($entry, 'HASH');
            my $target_addr = _build_target_address("$new_entry_addr/$addr");
            my $old_addr = $$entry{'.addr'};
            $$entry{'.addr'} = $target_addr;
            #warn "walk-update: $old_addr = $target_addr\n";
            push @commands, ['fetch', $old_addr];
            push @commands, ['fetch', $target_addr];
        });
    }
    $sitemap->save();
    return {
        commands => [@commands]
    };
}
sub new_entry {
    # Add a sitemap entry for an EXISTING resource; its type (category vs
    # webpage) is detected from the target by _create_sitemap_entry.
    my $args = {@_};
    my $info = _create_sitemap_entry($TYPE_AUTO, $args);
    return {
        commands => [
            ['fetch', $$info{'index_addr'}],
            ['fetch', $$info{'target_addr'}],
        ],
    };
}
sub new_webpage {
    # Create a webpage from a skeleton: register the sitemap entry, copy
    # the skeleton file to the target location (unless one already exists),
    # and seed its page title/heading from the entry name.  Returns client
    # 'fetch' commands for the new index entry and the target file.
    my $params = {@_};
    # Skeleton config dictates how to construct the target webpage
    my $skel = $$params{'skel'};
    # Reject un-normalized skeleton addresses (path traversal guard).
    addr_normalize($skel) eq $skel or throw Error::IllegalParam('skel');
    my $skel_config = $$Hub{"$skel/config.hf"}
        or throw Error::Programatic("Missing $skel/config.hf");
    # Find the skeleton file named by the config's 'file' key.
    my $skel_file_addr = addr_normalize($skel . '/' . $$skel_config{'file'});
    my $skel_file = $$Hub{$skel_file_addr}
        or throw Error::Programatic('Cannot find skel file');
    # Create sitemap entry
    my $info = _create_sitemap_entry($TYPE_WEBPAGE, $params);
    my $entry = $$info{'entry'};
    # Copy skel file into the target directory, keyed by pathname.
    my $dir = $$Hub{$$info{'target_dir'}};
    my $pathname = $$info{'pathname'};
    my $target_file = $$dir{$pathname};
    unless ($target_file) {
        # Assigning the skel file into the dir hash clones it at the
        # target address (Data::Hub semantics).
        $target_file = $dir->{$pathname} = $skel_file;
        # Auto-populate page data elements from named title
        if (my $page = $$target_file{'page'}) {
            $$page{'title'} = $$entry{'.name'};
            $$page{'heading'} = $$entry{'.name'};
        }
        $target_file->save();
    }
    my $target_addr = $target_file->get_addr;
    return {
        commands => [
            ['fetch', $$info{'index_addr'}],
            ['fetch', $target_addr],
        ]
    };
}
sub remove {
    # Remove a sitemap entry; when 'remove-data' is set, also remove the
    # backing resource the entry points at.
    my $args = {@_};
    my $index_addr = $$args{'addr'};
    my @commands = (['remove', $index_addr]);
    if ($$args{'remove-data'}) {
        my $entry = _get_sitemap_entry($index_addr);
        my $target_addr = $$entry{'.addr'};
        push @commands, ['remove', $target_addr] if $target_addr;
    }
    return {commands => \@commands};
}
sub _get_sitemap_entry {
    # Fetch the sitemap entry at $addr, lazily filling in its '.addr'
    # (target address) when missing.  The address must be inside the
    # sitemap subtree.
    my $addr = shift;
    my $sitemap = get_sitemap();
    throw Error::IllegalParam 'addr' . " $addr"
        unless index($addr, $sitemap->get_addr) == 0;
    my $entry = $$Hub{$addr};
    $$entry{'.addr'} ||= _build_target_address($addr);
    return $entry;
}
sub _create_sitemap_entry {
    # Internal: validate parameters, create a new entry under the parent
    # sitemap node, persist the sitemap, and return a summary hash with
    # the entry, its index address, and its target (resource) address.
    my $type = shift;
    my $params = shift;
    # Sitemap
    my $sitemap = get_sitemap();
    # Friendly name
    my $name = $$params{'name'} or throw Error::IllegalParam 'Invalid name';
    # Filesystem node name
    my $pathname = $$params{'pathname'};
    # Parent entry: must be a normalized address inside the sitemap tree.
    my $paddr = addr_normalize($$params{'paddr'}) or throw Error::MissingParam 'paddr';
    index($paddr, $sitemap->get_addr) == 0 or throw Error::IllegalParam 'paddr';
    addr_normalize($paddr) eq $paddr or throw Error::IllegalParam 'paddr';
    my $parent = $$Hub{$paddr};
    $$parent{$pathname} and throw Error::IllegalParam 'Sitemap entry exists';
    # Target properties: where the backing resource lives (or will live).
    my $target_dir = _build_target_address($paddr);
    my $target_addr = $target_dir . '/' . $pathname;
    # Detect type: a directory target becomes a category, a file a webpage.
    if ($type eq $TYPE_AUTO) {
        my $target = $Hub->get($target_addr)
            or throw Error::Logical 'Resource does not exist';
        $type = isa($target, FS('Directory'))
            ? $TYPE_CATEGORY
            : $TYPE_WEBPAGE;
    }
    # Entry within the index (sitemap.hf)
    my $entry = new Data::OrderedHash(
        '.addr' => $target_addr,
        '.name' => $name,
        '.type' => $type,
        '.uuid' => $UG->to_string($UG->create()),
    );
    # Create the new sitemap entry and persist the whole sitemap.
    $parent->{$pathname} = $entry;
    $sitemap->save();
    return {
        entry => $entry,
        index_addr => "$paddr/$pathname",
        # An empty target_dir means the site root.
        target_dir => $target_dir || '/',
        target_addr => $target_addr,
        pathname => $pathname,
    };
}
sub _build_target_address {
    # Internal: translate a sitemap-index address into the address of the
    # backing resource by stripping the sitemap root prefix and rebuilding
    # the remaining segments as an absolute address.
    my $addr = shift;
    my $sitemap_root = get_sitemap()->get_addr;
    my $result = Data::Hub::Address->new();
    # Walk upward from $addr to the sitemap root, collecting segments.
    while ($addr && $addr ne $sitemap_root) {
        $result->unshift(addr_name($addr));
        $addr = addr_parent($addr);
    }
    # Leading empty segment makes the resulting address absolute.
    $result->unshift('');
    return $result->to_string;
}
1;
| ryangies/lsn-lime | src/share/web/ext/sitemap/module.pm | Perl | mit | 7,379 |
#!/usr/bin/perl
## this script reads an -m 8 blast file and prints the requested field (0,1,2,...)
## usage: perl m8_seqid.pl <input blast file> <field number>
## splits input file using \t (whitespace: tab only)
use strict;
use warnings;

# Validate arguments up front instead of dying with a confusing
# "cannot open" message on a missing filename.
die "usage: perl m8_tab.pl <input blast file> <field number>\n"
    unless @ARGV == 2;
my ($file, $field) = @ARGV;

# Lexical filehandle + three-arg open (original used a bareword handle).
open my $in, '<', $file or die "cannot open file $file\n";
while (my $line = <$in>) {
    chomp $line;
    my @info = split /\t/, $line;
    # Print the requested tab-delimited column, one value per line.
    print $info[$field], "\n";
}
close $in;
| cuttlefishh/papers | cyanophage-light-dark-transcriptomics/code/m8_tab.pl | Perl | mit | 432 |
# PODNAME: Pod::WikiDoc::Cookbook
# ABSTRACT: Examples of Pod::WikiDoc usage
our $VERSION = '0.20'; # VERSION
__END__
=pod
=head1 NAME
Pod::WikiDoc::Cookbook - Examples of Pod::WikiDoc usage
=head1 VERSION
version 0.20
=head1 DESCRIPTION
This file contains some examples of ways to use L<Pod::WikiDoc>
or to integrate L<Pod::WikiDoc> with other tools.
=head1 BASIC RECIPES
=head2 Editor settings for working with Pod::WikiDoc
I<(Seeking equivalent settings for other editors or alternatives for vim.)>
=head3 Vim
In vim, use the C<<< comments >>> and C<<< formatoptions >>> settings in C<<< .vimrc >>> to
have vim automatically insert the wikidoc comment leader when pressing
return from a wikidoc comment line. For example, the following lines in a
C<<< .vimrc >>> file will activate this option whenever a perl-ish file is loaded.
autocmd BufNewFile,BufRead *.p? set comments=b:###
autocmd BufNewFile,BufRead *.p? set formatoptions+=r
=head2 Subclassing Module::Build to automatically generate Pod from wikidoc
With a little extra work in the Build.PL file, Pod::WikiDoc
can work easily with L<Module::Build> to extract wikidoc into .pod
files automatically during the distribution process
The Build.PL file below subclasses L<Module::Build> with three functions:
=over
=item *
ACTION_wikidoc -- adds a new C<<< Build wikidoc >>> action that extracts Pod
and wikidoc from all .pm files in the C<<< lib >>> directory and adds them to
the MANIFEST
=item *
ACTION_testpod -- adds a dependency on the C<<< wikidoc >>> action to regenerate
.pod files before testing them
=item *
ACTION_distdir -- adds a dependency on the C<<< wikidoc >>> action to regenerate
.pod files before bundling up a distribution
=back
As an extra feature, ACTION_wikidoc also sets a VERSION keyword that can
be used to insert the current version number into the generated Pod.
= VERSION
This documentation refers to version %%VERSION%%.
By making wikidoc extraction part of the C<<< distdir >>> action, users installing the
distribution will receive it with .pod files already created, and will not need
to have L<Pod::WikiDoc> installed themselves.
# Build.PL
use Module::Build;
my $class = Module::Build->subclass(
class => "Module::Build::WikiDoc",
code => <<'SUBCLASS' );
sub ACTION_wikidoc {
my $self = shift;
eval "use Pod::WikiDoc";
if ( $@ eq '' ) {
my $parser = Pod::WikiDoc->new( {
    comment_blocks => 1,
    keywords => { VERSION => $self->dist_version },
});
for my $src ( keys %{ $self->find_pm_files() } ) {
(my $tgt = $src) =~ s{\.pm$}{.pod};
$parser->filter( {
input => $src,
output => $tgt,
});
print "Creating $tgt\n";
$tgt =~ s{\\}{/}g; # for win32
$self->_add_to_manifest( 'MANIFEST', $tgt );
}
}
else {
warn "Pod::WikiDoc not available. Skipping wikidoc.\n";
}
}
sub ACTION_testpod {
my $self = shift;
$self->depends_on('wikidoc');
$self->SUPER::ACTION_testpod;
}
sub ACTION_distdir {
my $self = shift;
$self->depends_on('wikidoc');
$self->SUPER::ACTION_distdir;
}
SUBCLASS
$class->new(
# regular Module::Build options
)->create_build_script;
=head2 Including extra pure-wikidoc files in a distribution
To add extra documentation files to a distribution, create them as .pm
files and let Pod::WikiDoc convert them as normal. To prevent the .pm
files from being indexed (e.g. by search.cpan.org), list them as "no_index"
in the META.yml file of the distribution.
Example of a simple .pm documentation file:
package Some::Module::About;
use strict; # make CPANTS happy
1;
__END__
=begin wikidoc
Your wikidoc goes here.
=end wikidoc
Adding "no_index" to META.yml via Build.PL (requires Module::Build
0.28):
my $builder = $class->new(
# regular Module::Build options
meta_add => {
no_index => {
file => [ qw{
lib/Some/Module/About.pm
} ]
}
},
);
=head1 AUTHOR
David A Golden <dagolden@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2012 by David A Golden.
This is free software, licensed under:
The Apache License, Version 2.0, January 2004
=cut
| gitpan/Pod-WikiDoc | lib/Pod/WikiDoc/Cookbook.pod | Perl | apache-2.0 | 4,775 |
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# This file has been modified by NativeTouch.com
# Contact information: hello@nativetouch.com
# Modifications copyright (C) 2017 <Native Touch>
package AwsSignatureV4;
use strict;
use warnings;
# Package URI::Escape is implemented in Perl.
use URI::Escape qw(uri_escape_utf8);
# Package Digest::SHA is implemnted in C for performance.
use Digest::SHA qw(sha256_hex hmac_sha256 hmac_sha256_hex);
# For using PurePerl implementation of SHA functions
# use Digest::SHA::PurePerl qw(sha256_hex hmac_sha256 hmac_sha256_hex);
# RFC3986 safe/unsafe characters
our $SAFE_CHARACTERS = 'A-Za-z0-9\-\._~';
our $UNSAFE_CHARACTERS = '^'.$SAFE_CHARACTERS;
# Name of the signature encoding algorithm.
our $ALGORITHM_NAME = 'AWS4-HMAC-SHA256';
#
# Creates a new signing context object for signing an arbitrary request.
#
# Signing context object is a hash of all request data needed to create
# a valid AWS Signature V4. After the signing takes place, the context object
# gets populated with intermediate signing artifacts and the actual signature.
#
# Input:
#
# $opts - reference to hash that contains control options for the request
# url => endpoint of the service to call, e.g. https://monitoring.us-west-2.amazonaws.com/
# (the URL can contain path but it should not include query string, i.e. args after ?)
# aws-region => explicitly specifies AWS region (if not specified, region is extracted
# from endpoint URL; if region is not part of URL, 'us-east-1' is used by default)
# aws-service => explicitly specifies AWS service name (this is necessary when service
# name is not part of the endpoint URL, e.g. mail/ses, but usually it is)
# aws-access-key-id => Access Key Id of AWS credentials
# aws-secret-key => Secret Key of AWS credentials
# aws-security-token => Security Token in case of STS call
#
sub new
{
    my ( $class, $opts ) = @_;
    # Store the caller-supplied options hashref and start with an empty
    # payload; the payload is filled in later by the signing helpers.
    my $self = {
        'opts'    => $opts,
        'payload' => '',
    };
    return bless $self, $class;
}
#
# Creates a new signing context object for signing AWS/Query request.
#
# AWS/Query request can be signed for either HTTP GET method or POST method.
# The recommended method is POST as it skips sorting of query string keys
# and therefore performs faster.
#
# Input:
#
# $params - reference to the hash that contains all (name, value) pairs of AWS/Query request
# (do not url-encode this data, it will be done as a part of signing and creating payload)
#
# $opts - see definition of 'new' constructor
#
sub new_aws_query
{
    my ( $class, $params, $opts ) = @_;
    # AWS/Query requests are form-encoded; payload starts empty and is
    # produced later by create_query_string() for POST requests.
    my $self = {
        'params'       => $params,
        'opts'         => $opts,
        'content-type' => 'application/x-www-form-urlencoded; charset=utf-8',
        'payload'      => '',
    };
    return bless $self, $class;
}
#
# Creates a new signing context object for signing RPC/JSON request.
# It only makes sense to sign JSON request for HTTP POST method.
#
# Input:
#
# $payload - input data in RPC/JSON format
# $opts - see definition of 'new' constructor
#
sub new_rpc_json
{
    my ( $class, $payload, $opts ) = @_;
    # RPC/JSON requests carry the caller-supplied JSON body verbatim.
    my $self = {
        'payload'      => $payload,
        'opts'         => $opts,
        'content-type' => 'application/json; charset=utf-8',
    };
    return bless $self, $class;
}
#
# Creates a new signing context object for signing AWS/JSON request.
# It only makes sense to sign JSON request for HTTP POST method.
#
# Input:
#
# $operation - operation name to invoke
# $payload - input data in AWS/JSON format
# $opts - see definition of 'new' constructor
#
sub new_aws_json
{
    my ( $class, $operation, $payload, $opts ) = @_;
    my $self = bless {
        'payload'      => $payload,
        'opts'         => $opts,
        'content-type' => 'application/x-amz-json-1.0',
    }, $class;
    # The operation name travels in the X-Amz-Target header; make sure the
    # extra-headers hash exists in the options before writing into it.
    if ( not exists $opts->{'extra-headers'} ) {
        $opts->{'extra-headers'} = {};
    }
    my $extra_headers = $opts->{'extra-headers'};
    $extra_headers->{'X-Amz-Target'} = $operation;
    return $self;
}
#
# Signs the generic HTTP request.
#
# Input: (all arguments optional and if specified override what is currently set)
#
# $method - HTTP method
# $ctype - content-type of the body
# $payload - request body data
#
sub sign_http_request
{
    my ( $self, $method, $ctype, $payload ) = @_;
    # Any argument actually supplied overrides the stored value.
    $self->{'http-method'}  = $method  if $method;
    $self->{'content-type'} = $ctype   if $ctype;
    $self->{'payload'}      = $payload if $payload;
    my $opts = $self->{opts};
    # For generic requests the signature is always delivered via the
    # Authorization header, and the request URL is the bare endpoint.
    $opts->{'create-authz-header'} = 1;
    $self->{'request-url'} = $opts->{'url'};
    return 0 unless $self->sign();
    $self->create_authz_header();
    return 1;
}
#
# Signs request for HTTP POST.
#
sub sign_http_post
{
    # Convenience wrapper: sign the stored request data for HTTP POST.
    my ($self) = @_;
    return $self->sign_http_request('POST');
}
#
# Signs request for HTTP PUT.
#
sub sign_http_put
{
    # Convenience wrapper: sign the stored request data for HTTP PUT.
    my ($self) = @_;
    return $self->sign_http_request('PUT');
}
#
# Signs request for HTTP GET.
#
sub sign_http_get
{
my $self = shift;
my $opts = $self->{opts};
$self->{'http-method'} = 'GET';
# sign() validates the options and computes the signature; on failure the
# reason is available via error().
if (!$self->sign()) {
return 0;
}
my $postfix = "";
my $query_string = $self->{'query-string'};
if ($query_string) {
$postfix = '?'.$query_string;
}
# 'request-url' carries the query string but NOT the signature; it is meant
# for callers that send the signature in the Authorization header instead.
$self->{'request-url'} = $opts->{'url'}.$postfix;
# 'signed-url' additionally appends the signature as the X-Amz-Signature
# query parameter, producing a self-contained URL usable in a browser.
$postfix .= ($query_string ? '&' : '?');
$self->{'signed-url'} = $opts->{'url'}.$postfix.'X-Amz-Signature='.$self->{'signature'};
if ($opts->{'create-authz-header'}) {
$self->create_authz_header();
}
return 1;
}
#
# Prepares and signs the request data.
#
sub sign
{
    my ($self) = @_;
    # Validate inputs and derive fqdn/region/service/scope first; bail out
    # early (caller inspects error()) when anything essential is missing.
    return 0 if !$self->initialize();
    $self->create_basic_headers();
    $self->create_query_string();
    $self->create_signature();
    return 1;
}
#
# Returns reference to a hash containing all required HTTP headers.
# In case of HTTP POST and PUT methods it will also include the
# Authorization header that carries the signature itself.
#
sub headers
{
    # Accessor: hashref of all HTTP headers populated during signing.
    my ($self) = @_;
    return $self->{'headers'};
}
#
# In case of AWS/Query request and HTTP POST or PUT method, returns
# url-encoded query string to be used as a body of HTTP POST request.
#
sub payload
{
    # Accessor: the (possibly url-encoded) request body for POST/PUT.
    my ($self) = @_;
    return $self->{'payload'};
}
#
# Returns complete signed URL to be used in HTTP GET request 'as is'.
# You can place this value into Web browser location bar and make a call.
#
sub signed_url
{
    # Accessor: fully self-contained signed URL for HTTP GET requests.
    my ($self) = @_;
    return $self->{'signed-url'};
}
#
# Returns URL to be used in HTTP GET request for the case,
# when the signature is passed via Authorization HTTP header.
#
# You can not use this URL with the Web browser since it does
# not contain the signature.
#
sub request_url
{
    # Accessor: GET URL without the signature (signature goes in the
    # Authorization header instead).
    my ($self) = @_;
    return $self->{'request-url'};
}
#
# Returns an error message if any.
#
sub error
{
    # Accessor: last error message recorded by initialize()/sign(), if any.
    my ($self) = @_;
    return $self->{'error'};
}
#
# Returns both timestamp and daystamp in the format required for SigV4.
#
sub get_timestamp_daystamp
{
    # Input:  $time - epoch seconds to format (interpreted as UTC).
    # Output: ('YYYYMMDDThhmmssZ', 'YYYYMMDD') as required by SigV4.
    #
    # BUG FIX: the original called gmtime(time) and therefore silently
    # ignored its $time argument, always formatting "now". Formatting the
    # supplied epoch keeps normal callers identical (initialize() passes
    # time()) while making the function deterministic and testable.
    my $time = shift;
    my ( $sec, $min, $hour, $mday, $mon, $year ) = gmtime($time);
    my $timestamp = sprintf( "%04d%02d%02dT%02d%02d%02dZ",
        $year + 1900, $mon + 1, $mday, $hour, $min, $sec );
    my $daystamp = substr( $timestamp, 0, 8 );
    return ( $timestamp, $daystamp );
}
#
# Applies regex to get FQDN from URL.
#
sub extract_fqdn_from_url
{
    # Strip the scheme and anything from the first '/', ':' or '?' onwards,
    # leaving just the host name. A non-matching input is returned as-is
    # (same behavior as the original substitution).
    my ($url) = @_;
    ( my $fqdn = $url ) =~ s!^https?://([^/:?]*).*$!$1!;
    return $fqdn;
}
#
# Applies regex to get service name from the FQDN.
#
sub extract_service_from_fqdn
{
    # The AWS service name is conventionally the first DNS label of the
    # endpoint host (e.g. 'monitoring' in monitoring.us-west-2.amazonaws.com).
    my ($fqdn) = @_;
    ( my $service = $fqdn ) =~ s!^([^\.]+)\..*$!$1!;
    return $service;
}
#
# Applies regex to get region from the FQDN.
#
sub extract_region_from_fqdn
{
    # The region is conventionally the second DNS label when it looks like
    # an AWS region code (e.g. 'us-west-2'); otherwise default to us-east-1.
    my ($fqdn) = @_;
    my @parts = split( /\./, $fqdn );
    # BUG FIX: guard against hosts with fewer than two labels - matching
    # against an undefined $parts[1] emitted an "uninitialized value"
    # warning in the original.
    return $parts[1]
        if defined $parts[1] && $parts[1] =~ /\w{2}-\w+-\d+/;
    return 'us-east-1';
}
#
# Applies regex to get the path part of the URL.
#
sub extract_path_from_url
{
    # Strip scheme and authority, then drop any query string; what remains
    # is the request path ('/' when the URL has none).
    my ($url) = @_;
    ( my $path = $url ) =~ s!^https?://[^/]+([^\?]*).*$!$1!;
    $path = '/' if !$path;
    return $path;
}
#
# Populates essential HTTP headers required for SigV4.
#
# CanonicalHeaders =
# CanonicalHeadersEntry0 + CanonicalHeadersEntry1 + ... + CanonicalHeadersEntryN
# CanonicalHeadersEntry =
# LOWERCASE(HeaderName) + ':' + TRIM(HeaderValue) '\n'
#
# SignedHeaders =
# LOWERCASE(HeaderName0) + ';' + LOWERCASE(HeaderName1) + ... + LOWERCASE(HeaderNameN)
#
sub create_basic_headers
{
my $self = shift;
my $opts = $self->{opts};
my %headers = ();
# Host is always part of the signed headers in SigV4.
$headers{'Host'} = $self->{'fqdn'};
my $extra_date_specified = 0;
my $extra_headers = $opts->{'extra-headers'};
if ($extra_headers)
{
# Merge in caller-supplied headers; remember whether they already set a
# date so we don't add a conflicting X-Amz-Date below.
foreach my $extra_name ( keys %$extra_headers ) {
$headers{$extra_name} = $extra_headers->{$extra_name};
if (lc($extra_name) eq 'date' || lc($extra_name) eq 'x-amz-date') {
$extra_date_specified = 1;
}
}
}
if ($opts->{'aws-security-token'}) {
$headers{'X-Amz-Security-Token'} = $opts->{'aws-security-token'};
}
# For header-based auth the timestamp travels in X-Amz-Date; for presigned
# GET URLs it is added to the query string by create_query_string() instead.
if (!$extra_date_specified && $opts->{'create-authz-header'}) {
$headers{'X-Amz-Date'} = $self->{'timestamp'};
}
if ($self->{'http-method'} ne 'GET' && $self->{'content-type'}) {
$headers{'Content-Type'} = $self->{'content-type'};
}
my %lc_headers = ();
my $signed_headers = '';
my $canonical_headers = '';
# Canonicalize each header value per the SigV4 rules: lowercase name,
# trimmed value, internal runs of spaces collapsed (except inside quotes).
foreach my $header_name ( keys %headers ) {
my $header_value = $headers{$header_name};
# trim leading and trailing whitespaces, see
# http://perldoc.perl.org/perlfaq4.html#How-do-I-strip-blank-space-from-the-beginning%2fend-of-a-string%3f
$header_value =~ s/^\s+//;
$header_value =~ s/\s+$//;
# now convert sequential spaces to a single space, but do not remove
# extra spaces from any values that are inside quotation marks
my @parts = split /("[^"]*")/, $header_value;
foreach my $part (@parts) {
unless ($part =~ /^"/) {
$part =~ s/[ ]+/ /g;
}
}
$header_value = join '', @parts;
$lc_headers{lc($header_name)} = $header_value;
}
# Emit the header list in sorted order: 'h1;h2;...' for SignedHeaders and
# 'h1:v1\nh2:v2\n...' for CanonicalHeaders.
for my $lc_header (sort keys %lc_headers)
{
$signed_headers .= ';' if length($signed_headers) > 0;
$signed_headers .= $lc_header;
$canonical_headers .= $lc_header . ':' . $lc_headers{$lc_header} . "\n";
}
$self->{'signed-headers'} = $signed_headers;
$self->{'canonical-headers'} = $canonical_headers;
$self->{'headers'} = \%headers;
return 1;
}
#
# Validates input and populates essential pre-requisites.
#
sub initialize
{
my $self = shift;
my $opts = $self->{opts};
# Validate the minimum set of options; on any failure record the reason in
# $self->{'error'} and return 0 (callers check error()).
my $url = $opts->{'url'};
if (!$url) {
$self->{'error'} = 'Endpoint URL is not specified.';
return 0;
}
if (index($url, '?') != -1) {
$self->{'error'} = 'Endpoint URL cannot contain query string.';
return 0;
}
my $akid = $opts->{'aws-access-key-id'};
if (!$akid) {
$self->{'error'} = 'AWS Access Key Id is not specified.';
return 0;
}
if (!$opts->{'aws-secret-key'}) {
$self->{'error'} = 'AWS Secret Key is not specified.';
return 0;
}
# obtain FQDN from the endpoint url
my $fqdn = extract_fqdn_from_url($url);
if (!$fqdn) {
$self->{'error'} = 'Failed to extract FQDN from endpoint URL.';
return 0;
}
$self->{'fqdn'} = $fqdn;
# use pre-defined region if specified, otherwise grab it from url
my $region = $opts->{'aws-region'};
if (!$region) {
# if region is not part of url, the default region is returned
$region = extract_region_from_fqdn($fqdn);
}
$self->{'region'} = $region;
# use pre-defined service if specified, otherwise grab it from url
# this is specifically important when url does not include service name, e.g. ses/mail
my $service = $opts->{'aws-service'};
if (!$service) {
$service = extract_service_from_fqdn($fqdn);
if (!$service) {
$self->{'error'} = 'Failed to extract service name from endpoint URL.';
return 0;
}
}
$self->{'service'} = $service;
# obtain uri path part from the endpoint url
my $path = extract_path_from_url($url);
# SigV4 requires a normalized path (no '.' segments or double slashes).
if (index($path, '.') != -1 || index($path, '//') != -1) {
$self->{'error'} = 'Endpoint URL path must be normalized.';
return 0;
}
$self->{'http-path'} = $path;
# initialize time of the signature
# A caller-supplied 'timestamp' option (YYYYMMDDThhmmssZ) overrides the
# current time; useful for tests and pre-signed URLs.
my ($timestamp, $daystamp);
if ($opts->{'timestamp'}) {
$timestamp = $opts->{'timestamp'};
$daystamp = substr($timestamp, 0, 8);
}
else {
my $time = time();
$self->{'time'} = $time;
($timestamp, $daystamp) = get_timestamp_daystamp($time);
}
$self->{'timestamp'} = $timestamp;
$self->{'daystamp'} = $daystamp;
# initialize scope & credential
# Scope is 'date/region/service/aws4_request'; credential prefixes it with
# the access key id, as required by the SigV4 Authorization header.
my $scope = "$daystamp/$region/$service/aws4_request";
$self->{'scope'} = $scope;
my $credential = "$akid/$scope";
$self->{'credential'} = $credential;
return 1;
}
#
# Builds up AWS Query request as a chain of url-encoded name=value pairs separated by &.
#
# Note that SigV4 is payload-agnostic when it comes to POST request body so there is no
# need to sort arguments in the AWS Query string for the POST method.
#
sub create_query_string
{
my $self = shift;
my $opts = $self->{opts};
my $params = $self->{params};
# Only AWS/Query contexts carry params; JSON and generic requests keep
# their payload untouched and get an empty query string.
if (!$params) {
$self->{'query-string'} = '';
return 1;
}
my @args = ();
my @keys = ();
my $http_method = $self->{'http-method'};
if ($http_method eq 'GET')
{
# For presigned GET URLs (no Authorization header) the signing metadata
# itself is carried as query parameters.
if (!$opts->{'create-authz-header'})
{
$params->{'X-Amz-Date'} = $self->{'timestamp'};
$params->{'X-Amz-Algorithm'} = $ALGORITHM_NAME;
$params->{'X-Amz-Credential'} = $self->{'credential'};
$params->{'X-Amz-SignedHeaders'} = $self->{'signed-headers'};
}
if ($opts->{'aws-security-token'}) {
$params->{'X-Amz-Security-Token'} = $opts->{'aws-security-token'};
}
# GET query strings must be sorted for the canonical request.
@keys = sort keys %{$params};
}
else # POST
{
# SigV4 hashes the POST body as-is, so no sorting is needed here.
@keys = keys %{$params};
}
# RFC3986-encode each pair.
foreach my $key (@keys)
{
my $value = $params->{$key};
my ($ekey, $evalue) = (uri_escape_utf8($key, $UNSAFE_CHARACTERS),
uri_escape_utf8($value, $UNSAFE_CHARACTERS));
push @args, "$ekey=$evalue";
}
my $aws_query_string = join '&', @args;
# GET: pairs go in the URL, body stays empty. POST: the reverse.
if ($http_method eq 'GET')
{
$self->{'query-string'} = $aws_query_string;
$self->{'payload'} = '';
}
else # POST
{
$self->{'query-string'} = '';
$self->{'payload'} = $aws_query_string;
}
return 1;
}
#
# CanonicalRequest =
# Method + '\n' +
# CanonicalURI + '\n' +
# CanonicalQueryString + '\n' +
# CanonicalHeaders + '\n' +
# SignedHeaders + '\n' +
# HEX(Hash(Payload))
#
sub create_canonical_request
{
    my ($self) = @_;
    # CanonicalRequest per the SigV4 spec: method, path, query string,
    # canonical headers (already newline-terminated by
    # create_basic_headers), signed header list and the hex SHA-256 of the
    # payload, joined by newlines.
    my $canonical_request = join( "\n",
        $self->{'http-method'},
        $self->{'http-path'},
        $self->{'query-string'},
        $self->{'canonical-headers'},
        $self->{'signed-headers'},
        sha256_hex( $self->{'payload'} ) );
    $self->{'canonical-request'} = $canonical_request;
    return $canonical_request;
}
#
# StringToSign =
# Algorithm + '\n' +
# Timestamp + '\n' +
# Scope + '\n' +
# HEX(Hash(CanonicalRequest))
#
sub create_string_to_sign
{
    my ($self) = @_;
    # StringToSign = algorithm, timestamp, credential scope and the hex
    # SHA-256 of the canonical request, joined by newlines (SigV4 spec).
    my $string_to_sign = join( "\n",
        $ALGORITHM_NAME,
        $self->{'timestamp'},
        $self->{'scope'},
        sha256_hex( $self->create_canonical_request() ) );
    $self->{'string-to-sign'} = $string_to_sign;
    return $string_to_sign;
}
#
# Performs the actual signing of the request.
#
sub create_signature
{
    my ($self) = @_;
    my $secret = $self->{opts}{'aws-secret-key'};
    # Derive the signing key by chaining HMACs over date, region, service
    # and the fixed "aws4_request" terminator, per the SigV4 spec.
    my $k_date    = hmac_sha256( $self->{'daystamp'}, 'AWS4' . $secret );
    my $k_region  = hmac_sha256( $self->{'region'},   $k_date );
    my $k_service = hmac_sha256( $self->{'service'},  $k_region );
    my $k_signing = hmac_sha256( 'aws4_request',      $k_service );
    # Sign the string-to-sign with the derived key.
    my $signature = hmac_sha256_hex( $self->create_string_to_sign(), $k_signing );
    $self->{'signature'} = $signature;
    return $signature;
}
#
# Populates HTTP header that carries authentication data.
#
sub create_authz_header
{
    my ($self) = @_;
    # Assemble the SigV4 Authorization header from the credential scope,
    # the signed-header list and the computed signature.
    my $authorization = sprintf(
        '%s Credential=%s, SignedHeaders=%s, Signature=%s',
        $ALGORITHM_NAME,
        $self->{'credential'},
        $self->{'signed-headers'},
        $self->{'signature'},
    );
    my $headers = $self->{'headers'};
    $headers->{'Authorization'} = $authorization;
    return 1;
}
1;
| nativetouch/ansible-aws-metrics | files/usr/local/aws-scripts-mon/AwsSignatureV4.pm | Perl | apache-2.0 | 17,041 |
#!/usr/bin/perl
# Creates an md5 hash of a directory tree (or of a single file).
#
# The tree hash is the md5 of the concatenated per-file md5 digests,
# computed over the files in sorted path order so the result is stable
# across runs.
#
# Exit codes: 0 = success / hashes equal (with --compare),
#             1 = hashes differ, 2 = a requested file does not exist.

# Version of this script
my $version = "2.0.2.1";

use warnings;
use strict;

use LoxBerry::System;
use File::Find::Rule;
use Getopt::Long;
use Digest::MD5;

my $basepath = ".";
my $filename;
my $comparemd5;
my $verbose;

GetOptions (
    "path=s"    => \$basepath,   # directory tree to hash (default: cwd)
    "file=s"    => \$filename,   # hash a single file instead of a tree
    "verbose"   => \$verbose,    # diagnostic output on STDERR
    "compare=s" => \$comparemd5, # compare the result against this hash
);

print STDERR "Script Version: $version\n" if $verbose;

my @files_sorted;

if ( $filename ) {
    print STDERR "file parameter: $filename\n" if $verbose;
    if ( ! -e $filename ) {
        print STDERR "File $filename not found\n";
        exit(2);
    }
    push @files_sorted, $filename;
} else {
    print STDERR "path parameter: $basepath\n" if $verbose;
    my @files = File::Find::Rule->file()->in($basepath);
    print STDERR "Found " . scalar(@files) . " files\n" if $verbose;
    @files_sorted = sort @files;
}

# Digest every file individually, then digest the concatenation of the
# per-file digests to obtain the tree hash.
my @hashes;
foreach my $file ( @files_sorted ) {
    my $data = LoxBerry::System::read_file($file);
    my $digest = Digest::MD5::md5_hex($data);
    print STDERR $file . " - " . $digest . "\n" if ($verbose);
    push @hashes, $digest;
}

my $treemd5 = Digest::MD5::md5_hex(join '', @hashes);
print "$treemd5\n";

# BUG FIX: trim() was previously called unconditionally, emitting an
# "uninitialized value" warning whenever --compare was not given.
$comparemd5 = trim($comparemd5) if defined $comparemd5;
if( $comparemd5 ) {
    if( $treemd5 eq $comparemd5 ) {
        print STDERR "EQUAL: Checked $treemd5 is equal to $comparemd5\n";
        exit(0);
    } else {
        print STDERR "INVALID: Checked $treemd5 is NOT equal to $comparemd5\n";
        exit(1);
    }
}
#!C:/Trainings/Softwares/ActivePerl64/bin/perl.exe
# Static "Loans" page of the sample bank site, emitted via CGI.
#
# BUG FIX: the original print statements used double-quoted strings that
# themselves contained unescaped double quotes (e.g. print "<a href="...">"),
# which is a Perl syntax error. Single-quoted strings preserve the exact
# markup. A CGI Content-Type header is also emitted first, as required
# before any body output under a standard CGI server setup.
use strict;
use warnings;

print "Content-Type: text/html\n\n";
print '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">';
print '<html>';
print '<head>';
print '<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">';
print '<title>Loans Public Page</title>';
print '</head>';
print '<body>';
print '<h3>Loans Page</h3>';
print '<h5><img alt="" src="images/loans_image.jpg"></h5>';
print '<a href="bankhomepage.html">Click here to go back to homepage</a>';
print '</body>';
print '</html>';
package Bio::BioVeL::AsynchronousService::Mock;
use strict;
use warnings;
use Bio::BioVeL::AsynchronousService;
use base 'Bio::BioVeL::AsynchronousService';
=head1 NAME
Bio::BioVeL::AsynchronousService::Mock - example asynchronous service
=head1 DESCRIPTION
This dummy service runs the 'sleep' shell command for the provided number
of seconds, then returns with a simple text message.
=head1 METHODS
=over
=item new
The constructor defines a single object property: the number of seconds
the service should sleep for.
=cut
sub new {
    my $class = shift;
    # Declare the single service parameter ('seconds') and delegate the
    # rest of construction to the parent class.
    return $class->SUPER::new( 'parameters' => [ 'seconds' ], @_ );
}
=item launch
Runs the shell's C<sleep> command to demonstrate asynchronous operation.
Updates the status as needed to indicate success or failure.
=cut
sub launch {
    my $self = shift;
    # Sleep for the requested number of seconds (default 2). system()
    # returns non-zero on failure, which is recorded as an ERROR status
    # together with the child status $?; otherwise the service is DONE.
    my $rv = system( "sleep", ( $self->seconds || 2 ) );
    if ( $rv ) {
        $self->status( Bio::BioVeL::AsynchronousService::ERROR );
        $self->lasterr( $? );
    }
    else {
        $self->status( Bio::BioVeL::AsynchronousService::DONE );
    }
}
=item response_body
Returns a simple text string that specifies how long the process has slept for.
=cut
sub response_body {
    # Simple text response stating how long the service slept.
    my $self = shift;
    return "I slept for " . $self->seconds . " seconds";
}
=back
=cut
1; | naturalis/biovel-nbc | lib/Bio/BioVeL/AsynchronousService/Mock.pm | Perl | apache-2.0 | 1,189 |
#!/usr/bin/perl
# License: http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Renders a fundraising progress image (via GD::Simple) for the nickname
# given by the 'nick' CGI parameter, looked up in the tab-separated
# donaties.txt file.
use strict;
use warnings;
use CGI;
use CGI::Carp qw(fatalsToBrowser);
use GD::Simple;

my ( $in, $percent );
my ( $nickname, $doel, $donaties, $toezeggingen, $titel, $subtitel, $intro, $img);

my $query = CGI::new();
my $nick = $query->param("nick");
$nick = shift unless $nick;      # fall back to a command-line argument
$nick = "total" unless $nick;    # final default: the grand-total row

# donaties.txt columns: nickname, goal, donations, pledges, title,
# subtitle, intro text, image.
open( my $don_fh, '<', "donaties.txt" ) or die "Unable to open donaties.txt";
while (<$don_fh>) {
    chomp;
    ( $nickname, $doel, $donaties, $toezeggingen, $titel, $subtitel, $intro, $img ) = split /\t/, $_;
    if ( $nick eq $nickname ) {
        $in = $donaties + $toezeggingen;
        # Guard against a zero goal, which previously caused a
        # division-by-zero error.
        $percent = $doel ? int( ( ( $in / $doel ) * 100 ) + 0.5 ) : 0;
        last;
    }
}
close $don_fh;

$titel = "SBPad6" if $nick eq "total";
# BUG FIX: the original compared with '==', which numifies both sides to 0
# and therefore cleared the subtitle unconditionally; 'eq' is the intended
# string comparison ("blank" subtitle marker in the data file).
$subtitel = undef if defined $subtitel && $subtitel eq " ";
# Measured track altitudes (metres) at each step of the climb; the profile
# is drawn by interpolating between these points.
my @hoogtes = (
724,725,744,797,848,900,950,994,1031,1067,1108,1150,1190,1225,1266,1310,1354,1390,1437,1478,1512,1533,1565,1637,1682,1723,1764,1795,1814,1815
);
my $startat = 700;   # altitude mapped to the bottom of the drawing area
my $stopat = 1900;   # altitude mapped to the top of the drawing area
#die join "\n", GD::Simple->color_names;
print $query->header("image/png");

# Requested image size, clamped to sane minimums.
my $width = $query->param("width");
my $height = $query->param("height");
# Margins; the large left margin is the text column.
my ($mleft, $mright, $mtop, $mbottom) = (400,5,5,5);
$width += 0;
$height += 0;
$width = 600 unless $width;
$width = 600 if $width < 600;
$height = 120 if $height < 120;
$percent += 0;
#$percent = 101 if $percent > 100;
$percent = 0 if $percent < 0;

# O.K. lets do some math
# Drawing area
my $gx = $width - $mleft - $mright;
# BUG FIX: this previously read "$height-$mtop,$mbottom;" - the comma
# operator discarded $mbottom, so the bottom margin was never subtracted
# from the drawing height.
my $gy = $height - $mtop - $mbottom;
# Pixel per meter
my $mpp = ($stopat - $startat) / $gy;
# Stop at pixel
my $percentx = ($gx * $percent / 100) + $mleft;
$percentx = 8 if ( $percent > 0 and $percentx < 8 );
# Pixel per step
my $perstep = $gx / (@hoogtes - 1);
# Start drawing.
# NOTE(review): this "my $img" masks the $img field read from donaties.txt
# above (that field is otherwise unused); consider renaming one of them.
my $img = GD::Simple->new($width,$height);
$img->penSize(1,1);
$img->bgcolor('white');
$img->fgcolor('blue');
$img->bgcolor('red');
$img->fgcolor('red');
# Walk the altitude profile left to right, drawing one filled vertical
# column per horizontal pixel; columns left of $percentx are red (funded
# share), the rest blue.
my $x = $mleft;
my $current = shift @hoogtes;
$current -= $startat;
while ( @hoogtes ) {
if ( $x < $percentx ) {
$img->fgcolor('red');
} else {
$img->fgcolor('blue');
}
my $previous = $current;
$current = shift @hoogtes;
$current -= $startat;
my $y = $previous / $mpp;
# Vertical rise per horizontal pixel within this segment.
my $upperstep = ($current-$previous) / ($perstep -1 );
$img->moveTo($x,$height-$mbottom);
# NOTE(review): duplicate declaration - the "my $y" above already holds
# this value; this second "my $y" masks it (Perl warns "masks earlier
# declaration in same scope").
my $y = $previous/$mpp;
$img->lineTo($x,$height-($mtop+$y));
for my $tx ( $x+1..$x+$perstep-1 ) {
if ( $tx < $percentx ) {
$img->fgcolor('red');
} else {
$img->fgcolor('blue');
}
$y += ($upperstep/$mpp);
$img->moveTo($tx,$height-$mbottom);
$img->lineTo($tx,$height-($mbottom+$y));
}
$x += $perstep;
}
#$img->bgcolor('red');
# White bold caption centred on the graph: title, then amount raised.
$img->fgcolor('white');
$img->font('/usr/share/fonts/corefonts/georgiab.ttf');
$img->fontsize(9);
my ($stringx , $stringy) = $img->stringBounds("$titel");
$img->moveTo($mleft+($gx/2)-($stringx/4),($height-$mbottom-14));
$img->string("$titel");
# "\xE2\x82\xAC" is the UTF-8 euro sign.
# NOTE(review): "%)" is not a valid sprintf conversion ("%%)" was likely
# intended); Perl happens to pass the sequence through literally, which
# renders the desired "%", but emits a warning.
my $string = sprintf("\xE2\x82\xAC %d (%d%)", $in, $percent);
($stringx , $stringy) = $img->stringBounds("$string");
$img->moveTo($mleft+($gx/2)-($stringx/4),($height-$mbottom-2));
$img->string("$string");
# Left-hand text column: underlined heading (title - subtitle), then the
# word-wrapped intro text.
$img->font('/usr/share/fonts/corefonts/georgia.ttf');
$img->fgcolor('blue');
$string = "$titel";
$string .= " - $subtitel" if $subtitel;
$string =~ s/\s+$//;
($stringx , $stringy) = $img->stringBounds("$string");
$img->moveTo(0,($mtop+10));
$img->string("$string");
$img->moveTo(0,($mtop+11));
$img->lineTo(-5+$stringx,$mtop+11);
# Normalise the intro text: isolate literal "\n" tokens as hard line
# breaks, strip non-ASCII bytes and carriage returns.
$intro =~ s/\\n/ \\n /g;
$intro =~ s/[^\x00-\x7f]//g;
$intro =~ s/\r//g;
my @words = split / +/, $intro;
my $runy = $mtop+20;
# Greedy word-wrap into the left text column (up to $mleft-5 px wide).
# NOTE(review): $words[0] is read after @words may have been emptied,
# producing "uninitialized value" warnings on the final line.
while ( @words ) {
$string = "";
($stringx , $stringy) = $img->stringBounds(join(" ",$string,$words[0]));
while ($stringx <= $mleft-5 && @words && $words[0] ne "\\n") {
$string = join(" ",$string,shift @words);
($stringx , $stringy) = $img->stringBounds(join(" ",$string,$words[0]));
}
shift @words if $words[0] eq "\\n";
($stringx , $stringy) = $img->stringBounds("$string");
$runy+=$stringy-1;
$img->moveTo(0,$runy);
$img->string("$string");
}
print $img->png;
exit;
package ReseqTrack::Hive::PipeSeed::RunMergeSeed;
use strict;
use warnings;
use base ('ReseqTrack::Hive::PipeSeed::BasePipeSeed');
use ReseqTrack::Tools::Exception qw(throw);
use Exporter qw( import );
our @EXPORT_OK = qw( _check_exp_attribute );
# Builds the list of experiment seeds eligible for a merge: experiments that
# have at least one run-level collection of $run_collection_type and no
# existing experiment-level collection of $experiment_collection_type.
# Also decorates each seed's output hash with metadata-file path components
# and selected experiment/sample/study columns and attributes.
sub create_seed_params {
my ($self) = @_;
my $options = $self->options;
my $metadata_file = $options->{'metadata_file'};
throw('require metadata file') unless $metadata_file;
my $path_names_array = $options->{'path_names_array'} ? $options->{'path_names_array'} : undef;
my $metadata_hash = _get_metadata_hash( $metadata_file, 'EXPERIMENT_ID' );
my $experiment_collection_type = $options->{'experiment_collection_type'} ?
$options->{'experiment_collection_type'} : undef;
my $experiment_merge_type = $options->{'experiment_merge_type'} ?
$options->{'experiment_merge_type'} : undef;
my $run_collection_type = $options->{'run_collection_type'} ?
$options->{'run_collection_type'} : undef;
# Each 'output_*' option may be a scalar or an arrayref; normalize to arrayref.
my $output_experiment_columns = ref($options->{'output_experiment_columns'}) eq 'ARRAY' ? $options->{'output_experiment_columns'}
: defined $options->{'output_experiment_columns'} ? [$options->{'output_experiment_columns'}]
: [];
my $output_experiment_attributes = ref($options->{'output_experiment_attributes'}) eq 'ARRAY' ? $options->{'output_experiment_attributes'}
: defined $options->{'output_experiment_attributes'} ? [$options->{'output_experiment_attributes'}]
: [];
my $output_sample_columns = ref($options->{'output_sample_columns'}) eq 'ARRAY' ? $options->{'output_sample_columns'}
: defined $options->{'output_sample_columns'} ? [$options->{'output_sample_columns'}]
: [];
my $output_sample_attributes = ref($options->{'output_sample_attributes'}) eq 'ARRAY' ? $options->{'output_sample_attributes'}
: defined $options->{'output_sample_attributes'} ? [$options->{'output_sample_attributes'}]
: [];
my $output_study_columns = ref($options->{'output_study_columns'}) eq 'ARRAY' ? $options->{'output_study_columns'}
: defined $options->{'output_study_columns'} ? [$options->{'output_study_columns'}]
: [];
my $output_study_attributes = ref($options->{'output_study_attributes'}) eq 'ARRAY' ? $options->{'output_study_attributes'}
: defined $options->{'output_study_attributes'} ? [$options->{'output_study_attributes'}]
: [];
throw('this module will only accept pipelines that work on the experiment table')
if $self->table_name ne 'experiment';
throw('this module require both experiment_collection_type and run_collection_type')
unless ( $experiment_collection_type && $experiment_merge_type && $run_collection_type );
my $file_table = 'file';
my $db = $self->db();
my $ea = $db->get_ExperimentAdaptor;
my $ca = $db->get_CollectionAdaptor;
my $ra = $db->get_RunAdaptor;
my $sta = $db->get_StudyAdaptor;
my $sa = $db->get_SampleAdaptor;
# Parent class seeds every experiment; this loop then filters/decorates.
$self->SUPER::create_seed_params();
my @new_seed_params;
SEED:
foreach my $seed_params (@{$self->seed_params}) {
my ($experiment, $output_hash) = @$seed_params;
my $experiment_source_id = $experiment->experiment_source_id;
my $experiment_id = $experiment->dbID;
# Skip experiments without any run at all.
# NOTE(review): this $runs/$run pair is shadowed by later "my $runs"
# declarations inside the collection checks below; consider renaming.
my $runs = $ra->fetch_by_experiment_id( $experiment_id );
my $run = $$runs[0];
next SEED if !$run;
throw("$experiment_source_id not present in $metadata_file") unless exists ( $$metadata_hash{$experiment_source_id} );
# Copy the (sanitized) metadata path components into the output hash.
my $metadata_path_hash = _get_path_hash( $experiment_source_id, $metadata_hash, $path_names_array );
foreach my $path_name ( keys %{$metadata_path_hash} ){
my $path_value = $$metadata_path_hash{$path_name};
$output_hash->{$path_name} = $path_value;
}
if (scalar @$output_sample_columns || scalar @$output_sample_attributes) {
my $sample = $run->sample;
throw('did not get a sample for run with id '.$run->name) if !$sample;
foreach my $column_name (@$output_sample_columns) {
$output_hash->{$column_name} = &{$sa->column_mappings($sample)->{$column_name}}();
}
if (@$output_sample_attributes) {
my $sample_attributes = $sample->attributes;
ATTRIBUTE:
foreach my $attribute_name (@$output_sample_attributes) {
my ($attribute) = grep {$_->attribute_name eq $attribute_name} @$sample_attributes;
next ATTRIBUTE if !$attribute;
$output_hash->{$attribute_name} = $attribute->attribute_value;
}
}
}
foreach my $column_name (@$output_experiment_columns) {
$output_hash->{$column_name} = &{$ea->column_mappings($experiment)->{$column_name}}();
}
# Experiments with no EXPERIMENT_TYPE attribute are dropped here.
my $experiment_attributes = $experiment->attributes;
my $exp_type_check = _check_exp_attribute( $experiment_attributes, $output_hash, $output_experiment_attributes );
next SEED unless $exp_type_check;
if (scalar @$output_study_columns || scalar @$output_study_attributes) {
my $study = $sta->fetch_by_dbID($experiment->study_id);
throw('did not get a study with id '.$experiment->study_id) if !$study;
foreach my $column_name (@$output_study_columns) {
$output_hash->{$column_name} = &{$sta->column_mappings($study)->{$column_name}}();
}
if (@$output_study_attributes) {
my $study_attributes = $study->attributes;
ATTRIBUTE:
foreach my $attribute_name (@$output_study_attributes) {
my ($attribute) = grep {$_->attribute_name eq $attribute_name} @$study_attributes;
next ATTRIBUTE if !$attribute;
$output_hash->{$attribute_name} = $attribute->attribute_value;
}
}
}
my $seed_experiment = 0;
if( $ca->fetch_by_name_and_table_name( $experiment_source_id, $file_table ) ) { ## update an existing merge bam
my $experiment_collections = $ca->fetch_by_name_and_table_name( $experiment_source_id, $file_table );
my $experiment_exists = 0;
# NOTE(review): this compares the collection NAME against the
# $experiment_collection_type option; collections here were fetched by
# name == $experiment_source_id, so ->type was probably intended.
foreach my $experiment_collection ( @$experiment_collections ){
my $collection_name = $experiment_collection->name;
$experiment_exists++ if $collection_name eq $experiment_collection_type
}
next SEED if $experiment_exists; ## implement method for updating merge
my $runs = $ra->fetch_by_experiment_id( $experiment_id );
RUN:
foreach my $run ( @$runs ){
my $run_source_id = $run->run_source_id;
next RUN unless ( $ca->fetch_by_name_and_table_name( $run_source_id, $file_table ) ); ## no file exists for run
my $run_collections = $ca->fetch_by_name_and_table_name( $run_source_id, $file_table );
RUN_COLLECTION:
foreach my $run_collection ( @$run_collections ){
my $collection_type = $run_collection->type;
next RUN_COLLECTION unless $collection_type eq $run_collection_type; ## look for only specific collections
$seed_experiment ++; ## merger where existing merged bam not present
}
}
}
else { ## create a new merge bam
my $runs = $ra->fetch_by_experiment_id( $experiment_id );
RUN:
foreach my $run ( @$runs ){
my $run_source_id = $run->run_source_id;
next RUN unless ( $ca->fetch_by_name_and_table_name( $run_source_id, $file_table ) ); ## no file exists for run
my $run_collections = $ca->fetch_by_name_and_table_name( $run_source_id, $file_table );
RUN_COLLECTION:
foreach my $run_collection ( @$run_collections ){
my $collection_type = $run_collection->type;
next RUN_COLLECTION unless $collection_type eq $run_collection_type; ## look for only specific collections
$seed_experiment++;
}
}
}
push ( @new_seed_params, $seed_params ) if $seed_experiment; ## creating new seed param list based on criteria
}
$self->seed_params(\@new_seed_params); ## updating the seed param
}
=head1
=cut
sub _check_exp_attribute {
    my ( $experiment_attributes, $output_hash, $output_experiment_attributes ) = @_;

    # Pass by default; fail only when the experiment carries no
    # EXPERIMENT_TYPE attribute at all.
    my ($exp_type) =
        grep { $_->attribute_name eq 'EXPERIMENT_TYPE' } @$experiment_attributes;
    my $check_flag = $exp_type ? 1 : 0;

    # Copy each requested attribute value into the output hash, applying
    # the Blueprint file-name fixes to EXPERIMENT_TYPE.
    ATTRIBUTE:
    foreach my $attribute_name ( @$output_experiment_attributes ) {
        my ($attribute) =
            grep { $_->attribute_name eq $attribute_name } @$experiment_attributes;
        next ATTRIBUTE unless $attribute;

        my $value = $attribute->attribute_value;
        if ( $attribute_name eq 'EXPERIMENT_TYPE' ) {
            $value =~ s/Histone\s+//g;                    ## fix for blueprint ChIP file name
            $value =~ s/\//_/g;                           ## fix for H3k9/14ac style names
            $value =~ s/ChIP-Seq\s+//g;                   ## fix for blueprint ChIP file name
            $value =~ s/Chromatin\sAccessibility/Dnase/;  ## fix for blueprint Dnase file name
        }
        $output_hash->{$attribute_name} = $value;
    }
    return $check_flag;
}
=head1 _get_path_hash
Returns a metadata hash from metadat hash. Inputs are key metadata id, metadata hash and an array of parameters (optional).
=cut
# Returns a hash of path components for one metadata row. When
# $path_names_array is a non-empty arrayref, only those fields are returned
# (lower-case keys) with values sanitized for use in file paths; otherwise
# the whole (raw) metadata row is returned.
# NOTE(review): when $path_names_array is undef, "scalar @$path_names_array"
# dereferences undef and dies under strict refs - confirm callers always
# pass an arrayref, or guard with "ref ... eq 'ARRAY'".
sub _get_path_hash {
my ( $key_id, $metadata_hash, $path_names_array ) = @_;
my $path_hash;
throw("$key_id not found in metadata file") unless exists $$metadata_hash{ $key_id };
# Placeholder values for missing sample descriptions ('-' in the file).
$$metadata_hash{ $key_id }{SAMPLE_DESC_1} = "NO_TISSUE"
if ( $$metadata_hash{ $key_id }{SAMPLE_DESC_1} eq "-" );
$$metadata_hash{ $key_id }{SAMPLE_DESC_2} = "NO_SOURCE"
if ( $$metadata_hash{ $key_id }{SAMPLE_DESC_2} eq "-" );
$$metadata_hash{ $key_id }{SAMPLE_DESC_3} = "NO_CELL_TYPE"
if ( $$metadata_hash{ $key_id }{SAMPLE_DESC_3} eq "-" );
if ( scalar @$path_names_array >= 1 ){
my @uc_path_names_array = map{ uc($_) } @$path_names_array;
my $key_metadata_hash = $$metadata_hash{ $key_id };
foreach my $key( @uc_path_names_array ){
throw("$key in not present in metadata") unless exists $$key_metadata_hash{ $key };
}
# Sanitize values for file paths: replace awkward characters with '_',
# collapse runs of '_' and strip leading/trailing '_'. Note: these map
# blocks modify the @path_name_values elements in place via $_ aliasing.
my @path_name_values = @$key_metadata_hash{ @uc_path_names_array };
@path_name_values = map{ s/[\s=\/\\;,'"()]/_/g; $_; }@path_name_values;
@path_name_values = map{ s/_+/_/g; $_; }@path_name_values;
@path_name_values = map{ s/_$//g; $_; }@path_name_values;
@path_name_values = map{ s/^_//g; $_; }@path_name_values;
@$path_hash{ @$path_names_array } = @path_name_values;
}
else {
$path_hash = $$metadata_hash{ $key_id };
}
return $path_hash;
}
=head1 _get_metadata_hash
Returns metadata hash from an index file keyed by any selected field.
=cut
# Parse a tab-separated index file into a hash of row-hashes keyed by the
# value of the $key_string column. Lines starting with '#' are skipped;
# the first non-comment line is taken as the header. Header names are
# upper-cased before comparison, so $key_string should be upper case.
# Throws when the file cannot be opened or the key column is missing.
sub _get_metadata_hash {
  my ( $file, $key_string ) = @_;
  open my $fh, '<', $file or throw("Could not open $file: $!");   # BUG FIX: open was unchecked
  my @header;
  my %data;
  my $key_index = undef;
  while ( <$fh> ) {
    chomp;
    next if m/^#/;
    my @vals = split "\t", $_;
    if ( @header ) {
      # BUG FIX: the original tested "$key_index >= 0", which is true even
      # when the column was never found (undef numifies to 0), silently
      # falling back to column 0 instead of throwing.
      throw("$key_string not found in $file") unless defined $key_index;
      $data { $vals[$key_index] }{ $header[$_] } = $vals[$_] for 0..$#header;
    }
    else {
      @header = map { uc($_) } @vals;
      my @key_index_array = grep{ $header[$_] eq $key_string } 0..$#header;
      $key_index = $key_index_array[0];
    }
  }
  close( $fh );   # BUG FIX: was placed after "return" and never executed
  return \%data;
}
1;
| EMBL-EBI-GCA/bp_project | lib/ReseqTrack/Hive/PipeSeed/RunMergeSeed.pm | Perl | apache-2.0 | 11,890 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::netapp::restapi::mode::fcportstatus;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
my $instance_mode;
# Evaluate the user-supplied --critical-status / --warning-status perl
# expressions (already macro-expanded by change_macros) against the current
# result values. Returns 'critical', 'warning' or 'ok'. Reads the options
# through the package-level $instance_mode set in check_options, because
# this callback is not always invoked as a method of the mode object.
sub custom_status_threshold {
    my ($self, %options) = @_; 
    my $status = 'ok';
    my $message;
    eval {
        # Capture any warning/die raised by a malformed user expression so
        # a bad option value does not kill the plugin.
        local $SIG{__WARN__} = sub { $message = $_[0]; };
        local $SIG{__DIE__} = sub { $message = $_[0]; };
        # Critical takes precedence over warning; empty expressions skipped.
        if (defined($instance_mode->{option_results}->{critical_status}) && $instance_mode->{option_results}->{critical_status} ne '' &&
            eval "$instance_mode->{option_results}->{critical_status}") {
            $status = 'critical';
        } elsif (defined($instance_mode->{option_results}->{warning_status}) && $instance_mode->{option_results}->{warning_status} ne '' &&
            eval "$instance_mode->{option_results}->{warning_status}") {
            $status = 'warning';
        }
    };
    if (defined($message)) {
        $self->{output}->output_add(long_msg => 'filter status issue: ' . $message);
    }
    return $status;
}
# Format the human-readable status line for one FC port from the values
# previously copied into result_values by custom_status_calc.
sub custom_status_output {
    my ($self, %options) = @_;
    my $values = $self->{result_values};
    return sprintf(
        "Status is '%s', State is '%s' [adapter: %s] [switch port: %s] [fabric established: %s]",
        @{$values}{qw(status state adapter switch_port fabric_established)}
    );
}
# Copy the raw values for the current instance out of new_datas into
# result_values, for later use by the output and threshold callbacks.
# Always returns 0 (success).
sub custom_status_calc {
    my ($self, %options) = @_;
    my $data   = $options{new_datas};
    my $prefix = $self->{instance} . '_';
    $self->{result_values}->{display} = $data->{ $prefix . 'wwpn' };
    for my $field (qw(adapter status state switch_port)) {
        $self->{result_values}->{$field} = $data->{ $prefix . $field };
    }
    # Normalise the flag into the strings used in the output message.
    $self->{result_values}->{fabric_established} =
        $data->{ $prefix . 'fabric_established' } ? "true" : "false";
    return 0;
}
# Prefix each per-instance output line with the port's WWPN.
sub prefix_output {
    my ($self, %options) = @_;
    return sprintf("FC port '%s' ", $options{instance_value}->{wwpn});
}
# Declare the counter tree: a single 'fcports' group (type 1 = one entry
# per instance) with one 'status' pseudo-counter driven entirely by the
# custom callbacks below; no perfdata is emitted for it.
sub set_counters {
    my ($self, %options) = @_;
    $self->{maps_counters_type} = [
        { name => 'fcports', type => 1, cb_prefix_output => 'prefix_output', message_multiple => 'All FC ports status are ok' },
    ];
    $self->{maps_counters}->{fcports} = [
        { label => 'status', set => {
                key_values => [ { name => 'wwpn' }, { name => 'status' }, { name => 'state' }, { name => 'switch_port' }, { name => 'fabric_established' }, { name => 'adapter' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => $self->can('custom_status_threshold'),
            }
        },
    ];
}
# Constructor: registers the mode-specific options on top of the counter
# template. --filter-name filters ports by WWPN (regexp); the status
# thresholds are perl expressions using %{...} macros expanded later by
# change_macros().
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;
    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                "filter-name:s"         => { name => 'filter_name' },
                                "warning-status:s"      => { name => 'warning_status' },
                                "critical-status:s"     => { name => 'critical_status', default => '%{status} !~ /online/i || %{state} !~ /online/i' },
                                });
    return $self;
}
# Rewrite the %{xxx} macros in the status expressions into perl code that
# reads $self->{result_values}->{xxx}, so they can be string-eval'ed by
# custom_status_threshold().
sub change_macros {
    my ($self, %options) = @_;
    for my $label ('warning_status', 'critical_status') {
        next unless defined $self->{option_results}->{$label};
        $self->{option_results}->{$label} =~ s/%\{(.*?)\}/\$self->{result_values}->{$1}/g;
    }
}
# Validate options, then pre-compile the status macro expressions.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);
    # Store the object in the package-level variable so the threshold
    # callback can reach option_results without a method invocation.
    $instance_mode = $self;
    $self->change_macros();
}
# Fetch /fc-ports from the NetApp REST API and build the per-port table,
# applying --filter-name (regexp matched against the WWPN). Exits with an
# error when nothing matches.
sub manage_selection {
    my ($self, %options) = @_;
    my $result = $options{custom}->get(path => '/fc-ports');
    foreach my $fcport (@{$result}) {
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $fcport->{wwpn} !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping '" . $fcport->{wwpn} . "': no matching filter name.", debug => 1);
            next;
        }
        # Keyed by the API's unique port key; values feed custom_status_calc.
        $self->{fcports}->{$fcport->{key}} = {
            wwpn => $fcport->{wwpn},
            adapter => $fcport->{adapter},
            status => $fcport->{status},
            state => $fcport->{state},
            switch_port => $fcport->{switch_port},
            fabric_established => $fcport->{fabric_established},
        }
    }
    if (scalar(keys %{$self->{fcports}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No entry found.");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check NetApp FC ports status.
=over 8
=item B<--filter-name>
Filter FC ports by WWPN (can be a regexp).
=item B<--warning-status>
Set warning threshold for status (Default: '').
Can use special variables like: %{status}, %{state}
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} !~ /online/i || %{state} !~ /online/i').
Can use special variables like: %{status}, %{state}
=back
=cut
| wilfriedcomte/centreon-plugins | storage/netapp/restapi/mode/fcportstatus.pm | Perl | apache-2.0 | 6,374 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::Runnable::Pecan -
=head1 SYNOPSIS
my $runnable = new Bio::EnsEMBL::Analysis::Runnable::Pecan
(-workdir => $workdir,
-fasta_files => $fasta_files,
-tree_string => $tree_string,
-program => "/path/to/program");
$runnable->run;
my @output = @{$runnable->output};
=head1 DESCRIPTION
This module runs Pecan, a multiple aligner for large genomic sequences,
on a set of fasta files and a tree (Newick format).
The output (multiple alignment) is parsed and returned as a Bio::EnsEMBL::Compara::GenomicAlignBlock object.
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Runnable::Pecan;
use strict;
use warnings;
use Bio::EnsEMBL::Utils::Exception;
use Bio::EnsEMBL::Utils::Argument;
use Bio::EnsEMBL::Compara::GenomicAlign;
use Bio::EnsEMBL::Compara::GenomicAlignBlock;
use Bio::EnsEMBL::Analysis::Config::Compara;
use Bio::EnsEMBL::Analysis::Runnable;
our @ISA = qw(Bio::EnsEMBL::Analysis::Runnable);
my $java_exe = "/software/bin/java";
my $uname = `uname`;
$uname =~ s/[\r\n]+//;
my $default_exonerate = $EXONERATE;
my $default_jar_file = "pecan_v0.8.jar";
my $default_java_class = "bp.pecan.Pecan";
my $estimate_tree = "/software/ensembl/compara/pecan/EstimateTree.py";
=head2 new
Arg [1] : -workdir => "/path/to/working/directory"
Arg [2] : -fasta_files => "/path/to/fasta/file"
Arg [3] : -tree_string => "/path/to/tree/file" (optional)
Arg [4] : -parameters => "parameter" (optional)
Function : contruct a new Bio::EnsEMBL::Analysis::Runnable::Pecan
runnable
Returntype: Bio::EnsEMBL::Analysis::Runnable::Pecan
Exceptions: none
Example :
=cut
# Constructor. When no tree is supplied, runs EstimateTree.py on the fasta
# files to obtain one (and the reordered file list), then rewrites the
# tree's leaf numbers into dnafrag/start/end/strand tokens for storage in
# the meta table. Locates the Pecan JAR in a list of usual places.
sub new {
  my ($class,@args) = @_;
  my $self = $class->SUPER::new(@args);

  my ($workdir, $fasta_files, $tree_string, $parameters,
      $jar_file, $java_class, $exonerate) =
        rearrange(['WORKDIR', 'FASTA_FILES', 'TREE_STRING','PARAMETERS',
            'JAR_FILE', 'JAVA_CLASS', 'EXONERATE'], @args);
  chdir $self->workdir;
  $self->fasta_files($fasta_files) if (defined $fasta_files);
  if (defined $tree_string) {
    $self->tree_string($tree_string)
  } else {
    # Use EstimateTree.py program to get a tree from the sequences
    my $run_str = "python $estimate_tree " . join(" ", @$fasta_files);
    print "RUN $run_str\n";
    my @estimate = qx"$run_str";
    if (($estimate[0] !~ /^FINAL_TREE: \(.+\);/) or ($estimate[2] !~ /^ORDERED_SEQUENCES: (.+)/)) {
      throw "Error while running EstimateTree program for Pecan";
    }
    ($tree_string) = $estimate[0] =~ /^FINAL_TREE: (\(.+\);)/;
    $self->tree_string($tree_string);
    # EstimateTree also reorders the sequences; adopt its ordering.
    my ($files) = $estimate[2] =~ /^ORDERED_SEQUENCES: (.+)/;
    @$fasta_files = split(" ", $files);
    $self->fasta_files($fasta_files);
    ## Build newick tree which can be stored in the meta table
    foreach my $this_file (@$fasta_files) {
      my $header = qx"head -1 $this_file";
      # BUG FIX: the name part used "([^\.+])" — a character class matching
      # exactly ONE char that is not '.' or '+' — so multi-character seq
      # region names like "chr1" never matched and the captures stayed
      # undef. Use the same greedy "(.+)" as parse_results().
      my ($dnafrag_id, $name, $start, $end, $strand) = $header =~ /^>DnaFrag(\d+)\|(.+)\.(\d+)\-(\d+)\:(\-?1)/;
      $strand = 0 if ($strand != 1);
      # Replace the numeric leaf label with a dnafrag_start_end_strand token.
      $tree_string =~ s/(\W)\d+(\W)/$1${dnafrag_id}_${start}_${end}_${strand}$2/;
    }
    $self->{tree_to_save} = $tree_string;
  }
  $self->parameters($parameters) if (defined $parameters);
  # Fall back to the analysis' program, then to the default java binary.
  unless (defined $self->program) {
    if (defined($self->analysis) and defined($self->analysis->program)) {
      $self->program($self->analysis->program);
    } else {
      $self->program($java_exe);
    }
  }
  if (defined $jar_file) {
    $self->jar_file($jar_file);
  } else {
    $self->jar_file($default_jar_file);
  }
  if (defined $java_class) {
    $self->java_class($java_class);
  } else {
    $self->java_class($default_java_class);
  }
  if (defined $exonerate) {
    $self->exonerate($exonerate);
  } else {
    $self->exonerate($default_exonerate);
  }

  # Try to locate jar file in usual places...
  if (!-e $self->jar_file) {
    $default_jar_file = $self->jar_file;
    if (-e "/usr/local/pecan/$default_jar_file") {
      $self->jar_file("/usr/local/pecan/$default_jar_file");
    } elsif (-e "/usr/local/ensembl/pecan/$default_jar_file") {
      $self->jar_file("/usr/local/ensembl/pecan/$default_jar_file");
    } elsif (-e "/usr/local/ensembl/bin/$default_jar_file") {
      $self->jar_file("/usr/local/ensembl/bin/$default_jar_file");
    } elsif (-e "/usr/local/bin/pecan/$default_jar_file") {
      $self->jar_file("/usr/local/bin/pecan/$default_jar_file");
    } elsif (-e $ENV{HOME}."/pecan/$default_jar_file") {
      $self->jar_file($ENV{HOME}."/pecan/$default_jar_file");
    } elsif (-e $ENV{HOME}."/Downloads/$default_jar_file") {
      $self->jar_file($ENV{HOME}."/Downloads/$default_jar_file");
    } elsif (-e "/software/ensembl/compara/pecan/$default_jar_file") {
      $self->jar_file("/software/ensembl/compara/pecan/$default_jar_file");
    } else {
      throw("Cannot find Pecan JAR file!");
    }
  }
  return $self;
}
# Getter/setter for the arrayref of input fasta file paths.
sub fasta_files {
  my ($self, @args) = @_;
  $self->{'_fasta_files'} = $args[0] if @args;
  return $self->{'_fasta_files'};
}
# Getter/setter for the Newick tree string.
sub tree_string {
  my ($self, @args) = @_;
  $self->{'_tree_string'} = $args[0] if @args;
  return $self->{'_tree_string'};
}
# Getter/setter for extra JVM/Pecan command-line parameters.
sub parameters {
  my ($self, @args) = @_;
  $self->{'_parameters'} = $args[0] if @args;
  return $self->{'_parameters'};
}
# Getter/setter for the path to the Pecan JAR file.
sub jar_file {
  my ($self, @args) = @_;
  $self->{'_jar_file'} = $args[0] if @args;
  return $self->{'_jar_file'};
}
# Getter/setter for the Java main class to run.
sub java_class {
  my ($self, @args) = @_;
  $self->{'_java_class'} = $args[0] if @args;
  return $self->{'_java_class'};
}
# Getter/setter for the exonerate executable path passed to Pecan (-J).
sub exonerate {
  my ($self, @args) = @_;
  $self->{'_exonerate'} = $args[0] if @args;
  return $self->{'_exonerate'};
}
=head2 run_analysis
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Pecan
Arg [2] : string, program name
Function : create and open a commandline for the program trf
Returntype: none
Exceptions: throws if the program in not executable or if the results
file doesnt exist
Example :
=cut
# Run Pecan and parse its output. The $program argument is accepted for
# interface compatibility with the Runnable base class but is unused: the
# command is assembled inside run_pecan from the object's own settings.
sub run_analysis {
  my ($self, $program) = @_;
  $self->run_pecan;
  $self->parse_results;
  return 1;
}
# Assemble and execute the Pecan command line:
#   <java> [parameters] -cp <jar> <class> -F <fasta...> -J '<exonerate>'
#          [-E '<tree>'] -G pecan.mfa [options]
# Dies on the first line the command prints to stdout/stderr, which is
# treated as a java error.
# NOTE(review): any benign output on stdout would also trigger the die —
# this assumes Pecan is silent on success; confirm before relying on it.
sub run_pecan {
  my $self = shift;

  chdir $self->workdir;

  throw($self->program . " is not executable Pecan::run_analysis ")
    unless ($self->program && -x $self->program);

  my $command = $self->program;
  if ($self->parameters) {
    $command .= " " . $self->parameters;
  }
  $command .= " -cp ".$self->jar_file." ".$self->java_class;
  if (@{$self->fasta_files}) {
    $command .= " -F";
    foreach my $fasta_file (@{$self->fasta_files}) {
      $command .= " $fasta_file";
    }
  }
  #Remove -X option. Transitive anchoring is now switched off by default
  #$command .= " -J '" . $self->exonerate . "' -X";
  $command .= " -J '" . $self->exonerate . "'";
  if ($self->tree_string) {
    $command .= " -E '" . $self->tree_string . "'";
  }
  $command .= " -G pecan.mfa";
  if ($self->options) {
    $command .= " " . $self->options;
  }
  print "Running pecan: " . $command . "\n";
  # 2>&1 folds stderr into the pipe so java errors are caught below.
  open(PECAN, "$command 2>&1 |") || die "Failed: $!\n";
  my $java_error = <PECAN>;
  if ($java_error) {
    die ($java_error);
  }
  close PECAN;
  #    unless (system($command) == 0) {
  #        throw("pecan execution failed\n");
  #    }
}
=head2 parse_results
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Pecan
Function : parse the specifed file and produce RepeatFeatures
Returntype: nine
Exceptions: throws if fails to open or close the results file
Example :
=cut
# Parse the Pecan output (workdir/pecan.mfa) into one GenomicAlignBlock
# containing one GenomicAlign per FASTA record, and store it via output().
# FASTA headers must look like ">DnaFrag1234|X.10001-20000:-1".
sub parse_results{
  my ($self, $run_number) = @_;

  my $alignment_file = $self->workdir . "/pecan.mfa";
  my $this_genomic_align_block = Bio::EnsEMBL::Compara::GenomicAlignBlock->new();
  # BUG FIX: the original "open F, $alignment_file || throw(...)" never
  # threw — "||" bound to the (true) file name, not to open(). Use a
  # checked three-arg open with a lexical handle.
  open my $fh, '<', $alignment_file or throw("Could not open $alignment_file");
  my $seq = "";
  my $this_genomic_align;

  print "Reading $alignment_file...\n";
  while (<$fh>) {
    next if (/^\s*$/);
    chomp;
    ## FASTA headers are defined in the Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::Pecan
    ## module (or any other module you use to create this Pecan analysis job). Here is an example:
    ## >DnaFrag1234|X.10001-20000:-1
    ## This will correspond to chromosome X, which has dnafrag_id 1234 and the region goes from
    ## position 10001 to 20000 on the reverse strand.
    if (/^>/) {
      if (/^>DnaFrag(\d+)\|(.+)\.(\d+)\-(\d+)\:(\-?1)$/) {
        # Flush the previous record before starting a new one.
        if (defined($this_genomic_align) and $seq) {
          $this_genomic_align->aligned_sequence($seq);
          $this_genomic_align_block->add_GenomicAlign($this_genomic_align);
        }
        $this_genomic_align = Bio::EnsEMBL::Compara::GenomicAlign->new();
        $this_genomic_align->dnafrag_id($1);
        $this_genomic_align->dnafrag_start($3);
        $this_genomic_align->dnafrag_end($4);
        $this_genomic_align->dnafrag_strand($5);
        $seq = "";
      } else {
        throw("Error while parsing the FASTA header. It must start by \">DnaFrag#####\" where ##### is the dnafrag_id\n$_");
      }
    } else {
      $seq .= $_;
    }
  }
  close $fh;

  # Flush the last record; guard against an empty alignment file, which
  # previously crashed on an undefined $this_genomic_align.
  if (defined $this_genomic_align) {
    $this_genomic_align->aligned_sequence($seq);
    $this_genomic_align_block->add_GenomicAlign($this_genomic_align);
  }
  $self->output([$this_genomic_align_block]);
}
1;
| mn1/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Runnable/Pecan.pm | Perl | apache-2.0 | 10,273 |
#!/usr/bin/env perl
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <ensembl-dev@ebi.ac.uk>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=head1 NAME
create_input_ids.pl -- generates a list of input_ids from Encode regions for
eFG analysis pipeline
=head1 SYNOPSIS
create_input_ids.pl OPTIONS
=head1 OPTIONS
-host HOST database host
-port PORT database port
-user USER user name
-pass PASSWORD password
-dbname DBNAME database name
-slice select slice as input id type
-encode uses encode regions as input_ids (w/ -slice)
-toplevel uses all toplevel slices as input_ids (w/ -slice)
-array select array as input id type
-file uses files in given directory (-dir) as input_ids
-dir dir directory to read infiles from (w/ -file)
-exp_regex REGEX regular expression to select certain experiments
(default: fetch all available if omitted)
=head1 DESCRIPTION
This script generates a list of input_ids from Encode regions / files for
setting up the eFG analysis pipeline.
=cut
use strict;
use warnings;
use Data::Dumper;
use Getopt::Long;
# Script-level option variables, primed from the eFG environment so most
# can be omitted on the command line.
my ($pass,$port,$host,$user,$dbname,$species,$data_version,
    $exp_regex,$exp_suffix,$slice,$encode,$toplevel,$array,$file,$dir,
    $help,$man,$debug, $submit_type);
$host = $ENV{EFG_HOST};
$port = $ENV{EFG_PORT};
$user = $ENV{EFG_WRITE_USER};
$dbname = $ENV{EFG_DBNAME};
$species = $ENV{SPECIES};
$data_version = $ENV{DATA_VERSION};
# Default: match every experiment name.
$exp_regex = '.*';
GetOptions (
            'pass|p:s'         => \$pass,
            'port:i'           => \$port,
            'host|h=s'         => \$host,
            'user|u=s'         => \$user,
            'dbname|d=s'       => \$dbname,
            'species=s'        => \$species,
            'data_version=s'   => \$data_version,
            'exp_regex|e=s'    => \$exp_regex,
            'exp_suffix=s'     => \$exp_suffix,
            'help|?'           => \$help,
            'man|m'            => \$man,
            'debug'            => \$debug,
            'slice'            => \$slice,
            'encode'           => \$encode,
            'toplevel'         => \$toplevel,
            'array'            => \$array,
            'file'             => \$file,
            'dir=s'            => \$dir,
            );
### defaults ###
if (!$port) {
    $port = 3306;
    warn("No port specified, using default '$port'.")
}
if (!$species) {
    $species = 'homo_sapiens';
    warn("No species specified, using default '$species'.")
}
### check options ###
# NOTE(review): throw() is imported by a "use" statement further down the
# file; that is fine at runtime because "use" executes at compile time.
throw("Must specify mandatory database hostname (-host).\n") if ! defined $host;
throw("Must specify mandatory database username. (-user)\n") if ! defined $user;
throw("Must specify mandatory database name (-dbname).\n") if ! defined $dbname;
throw("Must specify mandatory password (-pass).\n") if ! defined $pass;
# Unbuffer STDOUT so progress output appears immediately.
$| = 1;
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning info stack_trace_dump);
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Funcgen::Utils::EFGUtils qw(open_file);
use Bio::EnsEMBL::Funcgen::Utils::Encode qw(get_encode_regions);
# Get eFG database adaptor
# Core (dna) database adaptor — supplies assembly and slice data.
my $dnadb = Bio::EnsEMBL::DBSQL::DBAdaptor->new
    (
     -host => $ENV{CORE_HOST},
     -user => $ENV{CORE_USER},
     -port => $ENV{CORE_PORT},
     #-host => 'ens-staging',
     #-user => $user,
     #-port => $port,
     -dbname => $ENV{CORE_DBNAME},
     -species => $species,
     #-pass => $pass,
     );
# eFG (funcgen) database adaptor, attached to the core db above.
my $db = Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor->new
    (
     -host   => $host,
     -user   => $user,
     -dbname => $dbname,
#     -species => $species,
     -pass   => $pass,
     -port   => $port,
     -dnadb  => $dnadb,
     );
# Pipeline database adaptor — the input_id_analysis table lives here.
my $pdb = Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor->new
    (
     -host   => $host,
     -user   => $user,
     -dbname => $ENV{'PDBNAME'},
     -species => $species,
     -pass   => $pass,
     -port   => $port,
     );
# Dispatch on the requested input_id type: -slice (with -encode or
# -toplevel), -array, or -file (with -dir). Each branch builds a list of
# input_ids and inserts them into input_id_analysis in the pipeline DB.
if ($slice) {
    $submit_type='Slice';
    # get analysis_id of submit_type
    my $analysis_id = &get_analysis_id($submit_type);
    # Get all experiments
    my $ea = $db->get_ExperimentAdaptor();
    my $exp = $ea->fetch_all();
    my @input_ids = ();
    if ($encode) {
        # Get Encode regions
        my $encode_regions = get_encode_regions($db->dnadb());
        #map { print Dumper $_->slice->name } @$encode_regions;
        # One input_id per (experiment, encode region) pair.
        foreach my $e (@$exp) {
            next if (defined $exp_regex && $e->name !~ m/$exp_regex/);
            map {
                push ( @input_ids, join(':', $e->name, $_->slice->coord_system->name,
                                        $_->slice->coord_system->version,
                                        $_->slice->seq_region_name, $_->start,
                                        $_->end, $_->strand) );
            } @$encode_regions;
        }
    } elsif ($toplevel) {
        # Get toplevel slices
        my $sa = $db->get_SliceAdaptor();
        my $tls = $sa->fetch_all('toplevel');
        # One input_id per (experiment, toplevel slice) pair.
        foreach my $e (@$exp) {
            next if (defined $exp_regex && $e->name !~ m/$exp_regex/);
            map {
                push ( @input_ids, join(':', $e->name, $_->name ) );
            } @$tls;
        }
    } else {
        throw("Need to specify slice type (either -toplevel or -encode)");
    }
    foreach my $input_id (@input_ids) {
        my $sql = "insert into input_id_analysis (input_id,analysis_id,input_id_type)".
            " values ('${input_id}',${analysis_id},'${submit_type}');";
        #warn($sql);
        eval {
            $pdb->dbc->do($sql);
        };
        throw("Couldn't store input_id '$input_id'. Most likely it has already been ".
              "stored. Drop your input_ids with CleanInputIds and rerun CreateInputIds.")
            if ($@);
    }
} elsif ($array) {
    $submit_type='Array';
    # get analysis_id of submit_type
    my $analysis_id = &get_analysis_id($submit_type);
    # Get all experiments
    my $ea = $db->get_ExperimentAdaptor();
    my $exp = $ea->fetch_all();
    my @input_ids = ();
    # One input_id per experiment, suffixed with the literal "ARRAY".
    foreach my $e (@$exp) {
        next if (defined $exp_regex && $e->name !~ m/$exp_regex/);
        #warn $e->name;
        push ( @input_ids, join(':', $e->name, "ARRAY" ) );
    }
    foreach my $input_id (@input_ids) {
        my $sql = "insert into input_id_analysis (input_id,analysis_id,input_id_type)".
            " values ('${input_id}',${analysis_id},'${submit_type}');";
        #warn($sql);
        eval {
            $pdb->dbc->do($sql);
        };
        throw("Couldn't store input_id '$input_id'. Most likely it has already been ".
              "stored. Drop your input_ids with CleanInputIds and rerun CreateInputIds.")
            if ($@);
    }
} elsif ($file) {
    $submit_type='File';
    # get cell and feature type adapter
    my $cta = $db->get_CellTypeAdaptor();
    my $fta = $db->get_FeatureTypeAdaptor();
    # get analysis_id of submit_type
    my $analysis_id = &get_analysis_id($submit_type);
    if (! $dir) {
        throw("Need to specify a input directory containing ".
              "files to be processed")
    }
    opendir(DIR, $dir)
        or throw("Can't open directory '$dir'");
    #$exp_regex='.*' unless ($exp_regex);
    # Skip dotfiles; keep only names matching the experiment regexp.
    my @files = grep { /^[^.]/ && /${exp_regex}/ } readdir DIR;
    closedir DIR;
    throw("No gzipped files found in input directory that match the regular expression '$exp_regex'")
        unless (@files);
    #print Dumper @files;
    unless (-d "$ENV{ANALYSIS_WORK_DIR}/infiles") {
        system("mkdir -p $ENV{ANALYSIS_WORK_DIR}/infiles");
    }
    foreach my $f (sort @files) {
        ### Check that files are gzipped
        throw("File is not compressed with gzip!") unless &is_gzip("$dir/$f");
        ### Check also that files are bed file format
        throw("File '$dir/$f' format is not bed format compliant!") unless &is_bed("$dir/$f");
        # Experiment name = "<cell>_<feature>" prefix of the file name.
        (my $experiment_name = $f) =~ s,(.*/)?([^/_]+_[^/_]+).*\.bed\.gz,$2,;
        $experiment_name .= '_'.$exp_suffix if ($exp_suffix);
        ### validate cell and feature type
        my ($cell_type, $feature_type) = split('_', $experiment_name);
        throw ("Cell type '$cell_type' doesn't exist in database! ".
               "Edit and rerun run_import_type.pl.")
            unless (defined $cta->fetch_by_name($cell_type));
        throw ("Feature type '$feature_type' doesn't exist in database! ".
               "Edit and rerun run_import_type.pl.")
            unless (defined $fta->fetch_by_name($feature_type));
        # write input_id to database
        my $input_id = sprintf "%s:%s", $experiment_name, $f;
        #warn($input_id);
        # need to generate links in a workdir infiles directory to know where the
        # files are that will be processed
        system("ln -s $dir/$f $ENV{ANALYSIS_WORK_DIR}/infiles/$input_id") == 0
            or throw("Can't link 'ln -s $dir/$f $ENV{ANALYSIS_WORK_DIR}/infiles/$input_id'");
        my $sql = "insert into input_id_analysis (input_id,analysis_id,input_id_type)".
            " values ('${input_id}',${analysis_id},'${submit_type}');";
        #warn($sql);
        eval {
            $pdb->dbc->do($sql);
        };
        throw("Couldn't store input_id '$input_id'. Most likely it has already been ".
              "stored. Drop your input_ids with CleanInputIds and rerun CreateInputIds.")
            if ($@);
    }
} else {
    throw("Need to specify type of input_ids to be specified ".
          "(either -slice or -file)");
}
# Return 1 when `file -L <path>` reports gzip compressed data, else 0.
sub is_gzip {
    my ($file) = @_;
    # List-form pipe open: arguments are passed directly to exec(), so the
    # file name is never interpolated through a shell (the original
    # "file -L $file |" form was shell-injectable).
    open(my $fh, '-|', 'file', '-L', $file)
        or throw("Can't execute command 'file' on '$file'");
    my $retval = <$fh>;
    #print $retval, "\n";
    close $fh;
    # defined-guard: an empty pipe read returns undef.
    return (defined $retval && $retval =~ m/gzip compressed data/) ? 1 : 0;
}
# Sanity-check the first data line of a gzipped file against BED format
# (>= 6 columns: CHROM START END NAME SCORE STRAND). Returns 1 when it
# passes, 0 (with a warning) otherwise.
sub is_bed {
    my ($file) = @_;
    # List-form pipe open avoids interpolating $file through a shell.
    # (The original also folded stderr into the stream with 2>&1, which
    # could feed a zcat error message into the column checks.)
    open(my $fh, '-|', 'zcat', $file)
        or throw("Can't execute command 'file' on '$file'");
    my @line = ();
    while (<$fh>) {
        chomp;
        @line = split("\t", $_);
        last;
    }
    close $fh;
    #print '<', join('><', @line),">\n";
    if (scalar @line < 6) {
        warn("Infile '$file' does not have 6 or more columns. We expect bed format: CHROM START END NAME SCORE STRAND.");
        return 0;
    #} elsif ($line[0] !~ m/^((chr)?[MTXYNT_\d]+)$/) {
    #    warn ("1st column must contain name of seq_region (e.g. chr1 or 1) in '$file'");
    #    return 0;
    #Commented this out for now due to HSCHR_RANDOM seqs
    #How does the webcode handle this?
    } elsif ($line[1] !~ m/^\d+$/ || $line[2] !~ m/^\d+$/) {
        # BUG FIX: was "&& $line[2] =~ m/^\d+$/", which only warned when
        # column 2 was non-numeric AND column 3 numeric — a non-numeric
        # end coordinate slipped through unreported.
        warn ("2nd and 3rd column must contain start and end respectively in '$file'");
        return 0;
    } elsif ($line[5] !~ m/^[+-]$/) {
        warn ("6th column must define strand (either '+' or '-') in '$file'");
        return 0;
    }
    return 1;
}
# Look up the analysis_id for logic_name "Submit<type>" in the pipeline
# database (file-level $pdb adaptor). Throws when no row exists.
# FIX: dropped the misleading empty "()" prototype — the sub takes one
# argument, and every call site uses "&get_analysis_id(...)" which
# bypasses prototypes anyway.
sub get_analysis_id {
    my ($type) = @_;
    # get analysis_id of submit_type
    my $sql = "select analysis_id from analysis where logic_name='Submit$type'";
    my $sth = $pdb->dbc->prepare($sql);
    $sth->execute;
    throw("No analysis_id stored for logic_name='Submit$type'")
        unless my $analysis_id = $sth->fetchrow;
    return $analysis_id;
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-functgenomics/scripts/pipeline/create_input_ids.pl | Perl | apache-2.0 | 11,778 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 DESCRIPTION
Module is used in protein function prediction pipeline for
annotating all possible amino acid substitutions in a translation
with dbNSFP (revel, meta_lr and mutation_assessor) scores and predictions.
=cut
use strict;
use warnings;
package Bio::EnsEMBL::Variation::Utils::DbNSFPProteinFunctionAnnotation;
use Bio::EnsEMBL::Variation::Utils::BaseProteinFunctionAnnotation;
our @ISA = ('Bio::EnsEMBL::Variation::Utils::BaseProteinFunctionAnnotation');
my $REVEL_CUTOFF = 0.5;
=head2 new
Example :
my $dbnsfp = Bio::EnsEMBL::Variation::Utils::DbNSFPProteinFunctionAnnotation->new(
-species => 'Homo_sapiens',
-annotation_file => 'dbNSFP3.5a_grch37.txt.gz',
-assembly => 'GRCh37',
-annotation_file_version => '3.5a',
-pipeline_mode => 0,
-debug_mode => 1,
);
Description: Constructor. Instantiates a new DbNSFPProteinFunctionAnnotation object.
Returntype : DbNSFPProteinFunctionAnnotation
Exceptions : throws on unsupported version
Caller : Bio::EnsEMBL::Variation::Pipeline::ProteinFunction::RunDbNSFP::run
Status : Stable
=cut
sub new {
  my $caller = shift;
  my $class  = ref($caller) || $caller;
  my $self   = $class->SUPER::new(@_);

  # Only these dbNSFP releases have tested column mappings (see
  # $column_names below).
  my @supported_versions = ('3.5a', '4.0a', '4.1a', '4.2a');
  my $version = $self->annotation_file_version;
  unless (grep { $_ eq $version } @supported_versions) {
    die "dbNSFP version $version is not supported.";
  }

  # The three analyses this annotator produces matrices for.
  $self->analysis([qw/dbnsfp_revel dbnsfp_meta_lr dbnsfp_mutation_assessor/]);

  return $self;
}
# Maps dbNSFP single-letter prediction flags to the terms stored in the
# protein function prediction matrices (REVEL has no flag — its prediction
# is derived from the score in add_predictions()).
my $predictions = {
  dbnsfp_meta_lr => {
    T => 'tolerated',
    D => 'damaging',
  },
  dbnsfp_mutation_assessor => {
    H => 'high',
    M => 'medium',
    L => 'low',
    N => 'neutral',
  }
};
# dbNSFP has assembly- and version-specific headers. If a new version is
# added it first needs to be tested, and the mappings below need updating.
# The column names are changed for readability and consistency.
#
# The 3.5a release names the MutationAssessor rankscore column
# 'MutationAssessor_score_rankscore'; the 4.x releases all use
# 'MutationAssessor_rankscore' and are otherwise identical, so the 4.x
# mapping is defined once and shared (read-only) between 4.0a/4.1a/4.2a.
my %common_columns = (
  chr      => '#chr',
  ref      => 'ref',
  refcodon => 'refcodon',
  alt      => 'alt',
  aaalt    => 'aaalt',
  aaref    => 'aaref',
  revel_score   => 'REVEL_score',
  meta_lr_score => 'MetaLR_score',
  meta_lr_pred  => 'MetaLR_pred',
  mutation_assessor_pred => 'MutationAssessor_pred',
);
# GRCh37 coordinates live in a lifted-over column; GRCh38 uses the native one.
my $assembly_specific_columns = {
  'GRCh37' => { pos => 'hg19_pos(1-based)' },
  'GRCh38' => { pos => 'pos(1-based)' },
};
my $v4_columns = {
  assembly_unspecific => {
    %common_columns,
    mutation_assessor_score => 'MutationAssessor_rankscore',
  },
  assembly_specific => $assembly_specific_columns,
};
my $column_names = {
  '3.5a' => {
    assembly_unspecific => {
      %common_columns,
      mutation_assessor_score => 'MutationAssessor_score_rankscore',
    },
    assembly_specific => $assembly_specific_columns,
  },
  map { $_ => $v4_columns } ('4.0a', '4.1a', '4.2a'),
};
# For each codon triplet covering the translation, fetch the overlapping
# dbNSFP rows via tabix and record scores for every alternate allele whose
# resulting amino acid matches the translation of the mutated codon.
sub load_predictions_for_triplets {
  my $self = shift;
  my $triplets = shift; 
  foreach my $entry (@$triplets) {
    my $aa = $entry->{aa};
    $self->amino_acids($aa);
    # Unknown residues cannot be annotated.
    next if $aa eq 'X';
    my @coords = @{$entry->{coords}};
    my $chrom = $entry->{chrom};
    my $triplet_seq = $entry->{triplet_seq};
    my $i = $entry->{aa_position};
    my $new_triplets = $entry->{new_triplets};
    foreach my $coord (@coords) {
      my $triplet_start = $coord->[0];
      my $triplet_end = $coord->[1];
      my $iter = $self->get_tabix_iterator($chrom, $triplet_start, $triplet_end);
      # No dbNSFP data indexed for this region.
      next if (!defined $iter);
      while (my $line = $iter->next) {
        my $data = $self->get_dbNSFP_row($line);
        my $chr = $data->{'chr'};
        my $pos = $data->{'pos'};
        my $ref = $data->{'ref'};
        my $refcodon = $data->{'refcodon'};
        my $alt = $data->{'alt'};
        my $aaalt = $data->{'aaalt'};
        my $aaref = $data->{'aaref'};
        # Skip non-substitution rows.
        next if ($alt eq $ref);
        # Base offset within the codon, orientation-aware.
        my $nucleotide_position = ($self->reverse) ? $triplet_end - $pos : $pos - $triplet_start;
        my $mutated_triplet = $new_triplets->{$triplet_seq}->{$nucleotide_position}->{$alt};
        my $mutated_aa = $self->codon_table->translate($mutated_triplet);
        # Only keep rows whose annotated alt amino acid agrees with ours.
        next if ($aaalt ne $mutated_aa);
        $self->add_predictions($data, $i, $mutated_aa);
      }
    }
  }
}
# Store the dbNSFP scores/predictions for one parsed row at amino acid
# position $i / substitution $mutated_aa.
# - REVEL: prediction derived from the score against $REVEL_CUTOFF.
# - MetaLR: prediction taken from the file's T/D flag.
# - MutationAssessor: 3.5a ships an H/M/L/N flag; in 4.x the prediction is
#   rebuilt from documented rankscore cutoffs.
# Missing values are encoded as '.' in dbNSFP and are skipped.
sub add_predictions {
  my ($self, $data, $i, $mutated_aa) = @_;
  if ($data->{revel_score} ne '.') {
    my $prediction = ($data->{revel_score} >= $REVEL_CUTOFF) ? 'likely disease causing' : 'likely benign';
    $self->add_prediction($i, $mutated_aa, 'dbnsfp_revel', $data->{revel_score}, $prediction);
  }
  if ($data->{meta_lr_score} ne '.') {
    my $prediction = $predictions->{dbnsfp_meta_lr}->{$data->{meta_lr_pred}};
    $self->add_prediction($i, $mutated_aa, 'dbnsfp_meta_lr', $data->{meta_lr_score}, $prediction);
  }
  if ($data->{mutation_assessor_score} ne '.') {
    my $prediction;
    if ($self->annotation_file_version eq '3.5a') {
      $prediction = $predictions->{dbnsfp_mutation_assessor}->{$data->{mutation_assessor_pred}};
    } elsif (grep {$_ eq $self->annotation_file_version} ('4.0a', '4.1a', '4.2a')) {
      # In 4.0a the prediction is not always provided and we need to assign it based on the score thresholds
      # The rankscore cutoffs between "H" and "M", "M" and "L", and "L" and "N", are 0.9307, 0.52043 and 0.19675,
      my $score = $data->{mutation_assessor_score};
      if ($score >= 0.9307) {
        $prediction = 'high';
      } elsif ($score >= 0.52043) {
        $prediction = 'medium'
      } elsif ($score >= 0.19675) {
        $prediction = 'low'
      } else {
        $prediction = 'neutral';
      }
    } else {
      die "dbNSFP version " . $self->annotation_file_version . " is not supported.";
    }
    $self->add_prediction($i, $mutated_aa, 'dbnsfp_mutation_assessor', $data->{mutation_assessor_score}, $prediction);
  }
}
=head2 get_dbNSFP_row
Arg 1 : String $line from parser
Description: - Join header column with row value
- Use assembly and file version specific header
Returntype : Hashref mapping header column to row value
Exceptions : None
Caller : load_predictions_for_triplets()
Status :
=cut
sub get_dbNSFP_row {
  my $self = shift;
  my $line = shift;
  my @fields = split /\t/, $line;
  my $header = $self->header;
  my $assembly = $self->assembly;
  my $dbnsfp_version = $self->annotation_file_version;

  # Pair every header column with the corresponding field of this row.
  my %raw_data;
  for my $idx (0 .. scalar @{$header} - 1) {
    $raw_data{ $header->[$idx] } = $fields[$idx];
  }

  if (!defined $column_names->{$dbnsfp_version}) {
    die "dbNSFP file column names are not specified for version $dbnsfp_version";
  }

  # Translate raw column names into the readable, version-independent keys.
  my $data = {};
  my $unspecific = $column_names->{$dbnsfp_version}->{assembly_unspecific};
  $data->{$_} = $raw_data{ $unspecific->{$_} } for keys %{$unspecific};
  my $specific = $column_names->{$dbnsfp_version}->{assembly_specific}->{$assembly};
  $data->{$_} = $raw_data{ $specific->{$_} } for keys %{$specific};
  return $data;
}
1;
| Ensembl/ensembl-variation | modules/Bio/EnsEMBL/Variation/Utils/DbNSFPProteinFunctionAnnotation.pm | Perl | apache-2.0 | 9,576 |
package Google::Ads::AdWords::v201809::CampaignFeed::Status;

# SOAP::WSDL-generated wrapper for the CampaignFeed.Status enumeration of the
# AdWords cm/v201809 API; behavior comes entirely from the string base class.
use strict;
use warnings;

# XML namespace this simple type belongs to (used by the SOAP serializer).
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809'};

# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);

1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
CampaignFeed.Status from the namespace https://adwords.google.com/api/adwords/cm/v201809.
Status of the CampaignFeed.
This class is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like its base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/CampaignFeed/Status.pm | Perl | apache-2.0 | 1,105 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::hp::eva::cli::mode::components::diskgrp;
use strict;
use warnings;
# Register the sssu command whose XML output check() will later parse.
sub load {
    my ($self) = @_;
    $self->{ssu_commands}->{'ls disk_group full xml'} = 1;
}
# Walk every diskgroupfolder object from the parsed sssu XML, count it,
# log its operational state and raise an alert when the mapped severity
# is anything other than OK.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking disk groups");
    $self->{components}->{diskgrp} = {name => 'disk groups', total => 0, skip => 0};
    return if ($self->check_filter(section => 'diskgrp'));

    # <object>
    #     <objecttype>diskgroupfolder</objecttype>
    #     <objectname>\Disk Groups\XXX</objectname>
    #     <operationalstate>good</operationalstate>
    foreach my $entry (@{$self->{xml_result}->{object}}) {
        next if ($entry->{objecttype} ne 'diskgroupfolder');

        # Normalize backslash-separated names to slash-separated instances.
        $entry->{objectname} =~ s/\\/\//g;
        my $instance = $entry->{objectname};
        next if ($self->check_filter(section => 'diskgrp', instance => $instance));
        $self->{components}->{diskgrp}->{total}++;

        my $state = $entry->{operationalstate};
        $self->{output}->output_add(
            long_msg => sprintf(
                "disk group '%s' status is '%s' [instance = %s]",
                $instance, $state, $instance,
            )
        );

        my $exit = $self->get_severity(label => 'default', section => 'diskgrp', value => $state);
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(
                severity => $exit,
                short_msg => sprintf("Disk group '%s' status is '%s'", $instance, $state)
            );
        }
    }
}
1;
| centreon/centreon-plugins | storage/hp/eva/cli/mode/components/diskgrp.pm | Perl | apache-2.0 | 2,415 |
package Paws::DMS::DeleteReplicationSubnetGroup;
# Request object for the AWS DMS DeleteReplicationSubnetGroup API call;
# the attribute below is the single call argument.
use Moose;
# Name of the replication subnet group to delete (required by the API).
has ReplicationSubnetGroupIdentifier => (is => 'ro', isa => 'Str', required => 1);

use MooseX::ClassAttribute;
# Paws internal wiring: operation name, response class, and result key.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'DeleteReplicationSubnetGroup');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::DMS::DeleteReplicationSubnetGroupResponse');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::DMS::DeleteReplicationSubnetGroup - Arguments for method DeleteReplicationSubnetGroup on Paws::DMS
=head1 DESCRIPTION
This class represents the parameters used for calling the method DeleteReplicationSubnetGroup on the
AWS Database Migration Service service. Use the attributes of this class
as arguments to method DeleteReplicationSubnetGroup.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DeleteReplicationSubnetGroup.
As an example:
$service_obj->DeleteReplicationSubnetGroup(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> ReplicationSubnetGroupIdentifier => Str
The subnet group name of the replication instance.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DeleteReplicationSubnetGroup in L<Paws::DMS>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/DMS/DeleteReplicationSubnetGroup.pm | Perl | apache-2.0 | 1,789 |
package VMOMI::HostPortGroup;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Ancestor chain of this VMOMI type, nearest superclass first.
our @class_ancestors = ('DynamicData');

# Member table rows: [name, type (undef presumably means builtin scalar),
# flag, flag] — exact flag semantics are defined by the VMOMI
# serializer; confirm there before relying on them.
our @class_members = (
    ['key', undef, 0, 1],
    ['port', 'HostPortGroupPort', 1, 1],
    ['vswitch', undef, 0, 1],
    ['computedPolicy', 'HostNetworkPolicy', 0, ],
    ['spec', 'HostPortGroupSpec', 0, ],
);

# Return the ancestor class names declared above.
sub get_class_ancestors {
    return @class_ancestors;
}

# Inherited member definitions first, then this class's own.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/HostPortGroup.pm | Perl | apache-2.0 | 571 |
% Prolog fact (file is Prolog despite the .pl extension).
% NOTE(review): presumably records an x/y offset pair used by the ECCE
% tooling — confirm the argument meaning against callers of x_and_y_offsets/2.
x_and_y_offsets(37, 2).
| leuschel/ecce | www/CiaoDE/ciao/lib/compiler/emulator_data.pl | Perl | apache-2.0 | 24 |
package VMOMI::VmPoweringOnWithCustomizedDVPortEvent;
use parent 'VMOMI::VmEvent';

use strict;
use warnings;

# Ancestor chain of this VMOMI event type, nearest superclass first.
our @class_ancestors = qw(VmEvent Event DynamicData);

# Members declared by this event type ('vnic' is a list of VnicPortArgument).
our @class_members = (
    ['vnic', 'VnicPortArgument', 1, ],
);

# Return the ancestor class names declared above.
sub get_class_ancestors {
    return @class_ancestors;
}

# Inherited member definitions first, then this class's own.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/VmPoweringOnWithCustomizedDVPortEvent.pm | Perl | apache-2.0 | 471 |
package Error::Pure::Bio;

# Pragmas.
use base qw(Exporter);
use strict;
use warnings;

# Modules.
use Error::Pure::Utils qw(err_helper);
use Error::Pure::Output::Bio qw(err_bio);
use List::MoreUtils qw(none);
use Readonly;

# Exported interface and constants.
Readonly::Array our @EXPORT_OK => qw(err);
Readonly::Scalar my $EVAL => 'eval {...}';

# Version.
our $VERSION = 0.01;

# Suppress the default die handler; err() decides how errors are reported.
$SIG{__DIE__} = 'IGNORE';

# err(@messages): record an error and either die with the full Bio-formatted
# report (when called from main outside of any eval) or re-die with a plain
# one-line message so an enclosing eval can catch it.
sub err {
    my @msg = @_;

    # Build the error structures (message + call stack) for the messages.
    my @errors = err_helper(@msg);

    my $stack_ar = $errors[-1]->{'stack'};
    my $outside_eval = none { $_ eq $EVAL || $_ =~ m/^eval '/ms }
        map { $_->{'sub'} } @{$stack_ar};

    if ($stack_ar->[-1]->{'class'} eq 'main' && $outside_eval) {
        # Top-level error in main: emit the fully formatted report.
        die err_bio(@errors);
    }

    # Inside an eval (or not in main): propagate a single-line message.
    my $e = $errors[-1]->{'msg'}->[0];
    chomp $e;
    die "$e\n";
}

1;
| tupinek/Error-Pure-Bio | Bio.pm | Perl | bsd-2-clause | 842 |
# Contains a function to supply the syslog module with extra logs
do 'squid-lib.pl';
# syslog_getlogs()
# Returns the Squid cache and store log descriptors for the syslog module,
# or an empty list when the configured log directory does not exist.
sub syslog_getlogs
{
my @rv;
if (-d $config{'log_dir'}) {
	foreach my $base ('cache', 'store') {
		push(@rv, { 'file' => "$config{'log_dir'}/$base.log",
			    'desc' => $text{"syslog_$base"},
			    'active' => 1 });
		}
	}
return @rv;
}
| xtso520ok/webmin | squid/syslog_logs.pl | Perl | bsd-3-clause | 462 |
#!/usr/bin/env perl
# Advent of Code 2015, day 9: length of the shortest route that visits
# every city exactly once (brute force over all permutations).
use strict;
use warnings;

use Data::Dumper;
use Algorithm::Permute;
use File::Slurp qw/read_file/;

my @data = read_file('input');

# Parse "A to B = N" lines into a symmetric distance table.
my %conn;
foreach my $line (@data) {
    chomp $line;
    my ($from, $to, $dist) = $line =~ m/^(\w+) to (\w+) = (\d+)$/;
    $conn{$from}{$to} = $dist;
    $conn{$to}{$from} = $dist;
}

my $min = 1e99;
# Direct method call instead of the fragile indirect "new Class(...)" syntax.
my $p = Algorithm::Permute->new([keys %conn]);
while (my @res = $p->next) {
    my $len = 0;
    my $count = 1;
    my $prev = shift @res;
    foreach my $city (@res) {
        # Abort this ordering as soon as a leg has no known distance.
        last unless $conn{$prev}{$city};
        $len += $conn{$prev}{$city};
        $prev = $city;
        $count++;
    }
    # Only complete routes (all cities reached) are candidates.
    next unless $count == scalar keys %conn;
    $min = $len if $len < $min;
}
print Dumper $min;
#!/usr/bin/perl
# Fetch MaxMind's region-code CSV and rebuild fips10_4.txt, replacing the
# US/CA rows with the hand-maintained FIPS 10-4 list in the heredoc below.
use strict;
use warnings;
use LWP::Simple qw(get $ua);
# WPEngine does not allow LWP::Simple clients
$ua->agent("Mozilla/0.1");
our $VERSION = '0.01';
#my $content = get('http://dev.maxmind.com/static/maxmind-region-codes.csv');
my $content = get('http://dev.maxmind.com/static/csv/codes/maxmind/region.csv');
# get() returns undef on any HTTP failure; bail out without touching the file.
exit 5 unless $content;
print "Update fips10_4.txt\n";
open my $fips, '>', 'fips10_4.txt' or die $!;
# Re-read the downloaded CSV from an in-memory filehandle.
open my $content_fh, '<', \$content or die $!;
print $fips q["iso 3166 country","fips 10-4 region code","name"], "\n";
# Keep every row except US and CA; those are appended from the heredoc below.
print $fips grep { !/^(US|CA)/ } <$content_fh>;
print $fips <<__FIPS_CA_US__;
CA,01,"Alberta"
CA,02,"British Columbia"
CA,03,"Manitoba"
CA,04,"New Brunswick"
CA,05,"Newfoundland and Labrador"
CA,07,"Nova Scotia"
CA,08,"Ontario"
CA,09,"Prince Edward Island"
CA,10,"Quebec"
CA,11,"Saskatchewan"
CA,12,"Yukon Territory"
CA,13,"Northwest Territories"
CA,14,"Nunavut"
US,01,"Alabama"
US,02,"Alaska"
US,04,"Arizona"
US,05,"Arkansas"
US,06,"California"
US,08,"Colorado"
US,09,"Connecticut"
US,10,"Delaware"
US,11,"District of Columbia"
US,12,"Florida"
US,13,"Georgia"
US,15,"Hawaii"
US,16,"Idaho"
US,17,"Illinois"
US,18,"Indiana"
US,19,"Iowa"
US,20,"Kansas"
US,21,"Kentucky"
US,22,"Louisiana"
US,23,"Maine"
US,24,"Maryland"
US,25,"Massachusetts"
US,26,"Michigan"
US,27,"Minnesota"
US,28,"Mississippi"
US,29,"Missouri"
US,30,"Montana"
US,31,"Nebraska"
US,32,"Nevada"
US,33,"New Hampshire"
US,34,"New Jersey"
US,35,"New Mexico"
US,36,"New York"
US,37,"North Carolina"
US,38,"North Dakota"
US,39,"Ohio"
US,40,"Oklahoma"
US,41,"Oregon"
US,42,"Pennsylvania"
US,44,"Rhode Island"
US,45,"South Carolina"
US,46,"South Dakota"
US,47,"Tennessee"
US,48,"Texas"
US,49,"Utah"
US,50,"Vermont"
US,51,"Virginia"
US,53,"Washington"
US,54,"West Virginia"
US,55,"Wisconsin"
US,56,"Wyoming"
__FIPS_CA_US__
print <<__TXT__;
Now run
./generate_regionName.pl > ../libGeoIP/regionName.c
__TXT__
exit 0;
| ishmaelthedestroyer/noSeed | node_modules/geoip/deps/geoip-api-c-1.6.0/regioncode/update-region-codes.pl | Perl | mit | 1,933 |
# Copyright 2012, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Custom visitor for generation of typemaps based on
# SOAP::WSDL::Generator::Visitor::Typemap, with some overriden methods via
# inheritance.
package Google::Ads::SOAP::Generator::TypemapVisitor;
use base qw(SOAP::WSDL::Generator::Visitor::Typemap);
use Class::Std::Fast::Storable;
# Store $value in the typemap under the current traversal path, but never
# overwrite an existing entry for the ApiExceptionFault detail path.
sub set_typemap_entry {
    my ($self, $value) = @_;
    my $key = join q{/}, @{$self->get_path()};
    my $typemap = $self->get_typemap();
    return if $typemap->{$key} && $key =~ m/Fault\/detail\/ApiExceptionFault/;
    $typemap->{$key} = $value;
}
# Visit an XSD element node. Overridden solely to break infinite recursion on
# cyclic WSDL type definitions: once the traversal path grows beyond 30
# levels, stop descending. The original also computed an inside-out ident and
# a parent typemap lookup that were never used; both are dropped here.
sub visit_XSD_Element {
    my ($self, $element) = @_;

    # PATCH breaking cycles
    my @path = @{ $self->get_path() };
    if (scalar(@path) > 30) {
        return;
    }
    # END PATCH
    return $self->SUPER::visit_XSD_Element($element);
}
# Visit an XSD complexType node and record typemap entries for it, including
# (via the PATCH section) extra path entries for every type that derives from
# it, so polymorphic responses deserialize into the right subclass.
sub visit_XSD_ComplexType {
    my ($self, $ident, $type) = ($_[0], ident $_[0], $_[1]);
    my $variety = $type->get_variety();
    my $derivation = $type->get_derivation();
    my $content_model = $type->get_contentModel();
    # Nothing to map for typeless or simple-content complex types.
    return if not $variety or ($content_model eq "simpleContent");
    if (grep {$_ eq $variety} qw(all sequence choice)) {
        # Recursively going to visit child element since the type variety is
        # either all sequence choice.
        if (my $type_name = $type->get_base()) {
            my $subtype = $self->get_definitions()->first_types()->find_type(
                $type->expand($type_name));
            for (@{$subtype->get_element() || []}) {
                $_->_accept($self);
            }
        }
        for (@{$type->get_element() || []}) {
            $_->_accept($self);
        }
    }
    # PATCH - We need to also check if the complex type has derivations and
    # include type path for all the types that derived from it.
    my $last_path_elem = pop(@{$self->get_path()});
    my $def_types = $self->get_definitions()->first_types();
    my $schema = @{$def_types->get_schema()}[1];
    my @types = @{$schema->get_type()};
    my $base_type = $type->get_name();
    # ApiExceptionFault entries are rehomed under the SOAP fault detail path.
    if (@{$self->get_path()}[0] &&
        @{$self->get_path()}[0] eq "ApiExceptionFault") {
        @{$self->get_path()}[0] = "Fault/detail/ApiExceptionFault";
    }
    if (defined $base_type) {
        my $schemas =
            @{$self->get_definitions()->get_types()}[0]->get_schema;
        SCHEMA: foreach my $my_schema (@{$schemas}) {
            next SCHEMA if ($my_schema->isa("SOAP::WSDL::XSD::Schema::Builtin"));
            my @types = @{$my_schema->get_type()};
            # NOTE: the loop variable below shadows the outer $type parameter
            # until the TYPE loop ends.
            TYPE: foreach my $type (@types) {
                if ($type->isa("SOAP::WSDL::XSD::ComplexType")) {
                    my $type_name = $type->get_name();
                    my $base = $type->get_base();
                    next TYPE if !$base;
                    $base =~ s{ .*: }{}xms;
                    if ($base eq $base_type) {
                        # Checking for infinite cycles if the type has already been mapped
                        # before we skip to the next one.
                        foreach my $path_elem (@{$self->get_path()}) {
                            next TYPE if $path_elem eq $last_path_elem . "[$type_name]";
                        }
                        # In this case we generate a new path that includes the type name
                        # E.G. /elem1/elem2[type]
                        if ($last_path_elem =~ m/\[[^\]]+\]/) {
                            $last_path_elem =~ s/\[[^\]]+\]/[${type_name}]/;
                            push(@{$self->get_path()}, $last_path_elem);
                        } else{
                            push(@{$self->get_path()}, $last_path_elem . "[$type_name]");
                        }
                        my $typeclass = $self->get_resolver()->create_xsd_name($type);
                        # Setting current typemap class before to allow it to be used from
                        # inside _accept.
                        $self->set_typemap_entry($typeclass);
                        $type->_accept($self);
                        # Setting it afterwards again since accept could have touch it.
                        $self->set_typemap_entry($typeclass);
                        pop(@{$self->get_path()});
                    }
                }
            }
        }
    }
    push(@{$self->get_path()}, $last_path_elem);
    # END OF PATCH.
    return if (!$derivation);
    if ($derivation eq "restriction") {
        # Resolving the base, getting atomic type and runnning on elements.
        if (my $type_name = $type->get_base()) {
            my $subtype = $self->get_definitions()->first_types()->find_type(
                $type->expand($type_name));
            for (@{$subtype->get_element() || []}) {
                $_->_accept($self);
            }
        }
    } elsif ($derivation eq "extension") {
        # Resolving the base, getting atomic type and runnning on elements.
        while (my $type_name = $type->get_base()) {
            $type = $self->get_definitions()->first_types()->find_type(
                $type->expand($type_name));
            for (@{$type->get_element() || []}) {
                $_->_accept($self);
            }
        }
    }
}
# File-level return: this module is loaded via require/use, and a top-level
# "return 1;" supplies the mandatory true value (in place of the usual "1;").
return 1;
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/SOAP/Generator/TypemapVisitor.pm | Perl | apache-2.0 | 5,299 |
package #
Date::Manip::TZ::ansyow00;
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.

# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Fri Nov 21 10:41:41 EST 2014
# Data version: tzdata2014j
# Code version: tzcode2014j

# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz

use strict;
use warnings;
require 5.010000;

our (%Dates,%LastRule);
# Release package-level data at global destruction time.
END {
undef %Dates;
undef %LastRule;
}

our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }

# Transition table: a placeholder +00:00 ('zzz') offset until 1957-01-28
# 23:59:59, then a fixed +03:00 (SYOT) from 1957-01-29 with no further
# transitions (open-ended to 9999-12-31).
%Dates = (
1 =>
[
[ [1,1,2,0,0,0],[1,1,2,0,0,0],'+00:00:00',[0,0,0],
'zzz',0,[1957,1,28,23,59,59],[1957,1,28,23,59,59],
'0001010200:00:00','0001010200:00:00','1957012823:59:59','1957012823:59:59' ],
],
1957 =>
[
[ [1957,1,29,0,0,0],[1957,1,29,3,0,0],'+03:00:00',[3,0,0],
'SYOT',0,[9999,12,31,0,0,0],[9999,12,31,3,0,0],
'1957012900:00:00','1957012903:00:00','9999123100:00:00','9999123103:00:00' ],
],
);

# No ongoing daylight-saving rules for this zone.
%LastRule = (
);

1;
| nriley/Pester | Source/Manip/TZ/ansyow00.pm | Perl | bsd-2-clause | 1,277 |
use 5.008;
package fields;
require 5.005;
use strict;
no strict 'refs';
unless( eval q{require warnings::register; warnings::register->import; 1} ) {
    # Fallback for very old perls without warnings::register: degrade
    # warnings::warnif() to an unconditional Carp::carp warning.
    *warnings::warnif = sub {
        require Carp;
        Carp::carp(@_);
    }
}
use vars qw(%attr $VERSION);
$VERSION = '2.23';
$VERSION =~ tr/_//d;    # strip any dev-release underscore from the version
# constant.pm is slow
sub PUBLIC () { 2**0 }
sub PRIVATE () { 2**1 }
sub INHERITED () { 2**2 }
sub PROTECTED () { 2**3 }
# The %attr hash holds the attributes of the currently assigned fields
# per class. The hash is indexed by class names and the hash value is
# an array reference. The first element in the array is the lowest field
# number not belonging to a base class. The remaining elements' indices
# are the field numbers. The values are integer bit masks, or undef
# in the case of base class private fields (which occupy a slot but are
# otherwise irrelevant to the class).
# fields->import(@names): invoked by "use fields qw(...)" in the caller.
# Registers each field name in the caller's %FIELDS hash (name -> slot
# number) and records its PUBLIC/PRIVATE attribute in %attr. Handles
# module reloads (same field layout allowed) and rejects duplicates.
sub import {
    my $class = shift;
    return unless @_;
    my $package = caller(0);
    # avoid possible typo warnings
    %{"$package\::FIELDS"} = () unless %{"$package\::FIELDS"};
    my $fields = \%{"$package\::FIELDS"};
    # Per-class attribute array; element 0 is the first non-inherited slot.
    my $fattr = ($attr{$package} ||= [1]);
    my $next = @$fattr;
    # Quiet pseudo-hash deprecation warning for uses of fields::new.
    bless \%{"$package\::FIELDS"}, 'pseudohash';
    if ($next > $fattr->[0]
        and ($fields->{$_[0]} || 0) >= $fattr->[0])
    {
        # There are already fields not belonging to base classes.
        # Looks like a possible module reload...
        $next = $fattr->[0];
    }
    foreach my $f (@_) {
        my $fno = $fields->{$f};
        # Allow the module to be reloaded so long as field positions
        # have not changed.
        if ($fno and $fno != $next) {
            require Carp;
            if ($fno < $fattr->[0]) {
                # Redeclares a slot owned by a base class: warn, don't die.
                if ($] < 5.006001) {
                    warn("Hides field '$f' in base class") if $^W;
                } else {
                    warnings::warnif("Hides field '$f' in base class") ;
                }
            } else {
                Carp::croak("Field name '$f' already in use");
            }
        }
        $fields->{$f} = $next;
        # A leading underscore marks a field private to this class.
        $fattr->[$next] = ($f =~ /^_/) ? PRIVATE : PUBLIC;
        $next += 1;
    }
    if (@$fattr > $next) {
        # Well, we gave them the benefit of the doubt by guessing the
        # module was reloaded, but they appear to be declaring fields
        # in more than one place. We can't be sure (without some extra
        # bookkeeping) that the rest of the fields will be declared or
        # have the same positions, so punt.
        require Carp;
        Carp::croak ("Reloaded module must declare all fields at once");
    }
}
# Delegate field inheritance to base.pm. "goto &sub" replaces the current
# call frame, so base::inherit_fields receives the original @_ and caller.
sub inherit {
    require base;
    goto &base::inherit_fields;
}
# Debug helper: print every package known to %attr with its ISA list and a
# table of its fields (slot number, name, and attribute flags).
sub _dump # sometimes useful for debugging
{
    for my $pkg (sort keys %attr) {
        print "\n$pkg";
        if (@{"$pkg\::ISA"}) {
            print " (", join(", ", @{"$pkg\::ISA"}), ")";
        }
        print "\n";
        my $fields = \%{"$pkg\::FIELDS"};
        # Sort by slot number so the dump matches storage order.
        for my $f (sort {$fields->{$a} <=> $fields->{$b}} keys %$fields) {
            my $no = $fields->{$f};
            print " $no: $f";
            my $fattr = $attr{$pkg}[$no];
            # Base-class private fields leave undef attribute slots; skip them.
            if (defined $fattr) {
                my @a;
                push(@a, "public") if $fattr & PUBLIC;
                push(@a, "private") if $fattr & PRIVATE;
                push(@a, "inherited") if $fattr & INHERITED;
                print "\t(", join(", ", @a), ")";
            }
            print "\n";
        }
    }
}
# fields::new($class): construct an empty object of $class. Pre-5.9 perls
# get a pseudo-hash (array whose first element is the class's %FIELDS map);
# modern perls get a restricted hash permitting only declared/inherited keys.
if ($] < 5.009) {
  *new = sub {
    my $class = shift;
    $class = ref $class if ref $class;
    return bless [\%{$class . "::FIELDS"}], $class;
  }
} else {
  *new = sub {
    my $class = shift;
    $class = ref $class if ref $class;
    require Hash::Util;
    my $self = bless {}, $class;

    # The lock_keys() prototype won't work since we require Hash::Util :(
    &Hash::Util::lock_keys(\%$self, _accessible_keys($class));
    return $self;
  }
}
# Collect every field name legal for $class: its own %FIELDS keys plus,
# recursively, those of every class in its @ISA chain.
sub _accessible_keys {
    my ($class) = @_;
    my @keys = keys %{$class . '::FIELDS'};
    push @keys, _accessible_keys($_) for @{$class . '::ISA'};
    return @keys;
}
# Build a plain (unblessed) pseudo-hash: [ {key => index}, @values ].
# Accepts ([\@keys], [\@keys, \@values]) or a flat key/value list.
# On perl 5.9+ pseudo-hashes no longer exist, so this always dies there.
sub phash {
    die "Pseudo-hashes have been removed from Perl" if $] >= 5.009;
    my ($index_map, $values);
    if (!@_) {
        # No arguments: empty pseudo-hash.
        ($index_map, $values) = ({}, []);
    }
    elsif (ref $_[0] eq 'ARRAY') {
        # Key array (optionally followed by a value array).
        my $names = shift;
        @{$index_map}{@$names} = 1 .. @$names;
        if (@_) {
            $values = shift;
            if (@_ or ref $values ne 'ARRAY') {
                require Carp;
                Carp::croak("Expected at most two array refs\n");
            }
        }
        $values ||= [];
    }
    else {
        # Flat key/value pair list.
        if (@_ % 2) {
            require Carp;
            Carp::croak("Odd number of elements initializing pseudo-hash\n");
        }
        my (@names, @vals);
        while (@_) {
            push @names, shift;
            push @vals, shift;
        }
        @{$index_map}{@names} = 1 .. @names;
        $values = \@vals;
    }
    return [ $index_map, @{$values} ];
}
1;
__END__
=head1 NAME
fields - compile-time class fields
=head1 SYNOPSIS
{
package Foo;
use fields qw(foo bar _Foo_private);
sub new {
my Foo $self = shift;
unless (ref $self) {
$self = fields::new($self);
$self->{_Foo_private} = "this is Foo's secret";
}
$self->{foo} = 10;
$self->{bar} = 20;
return $self;
}
}
my $var = Foo->new;
$var->{foo} = 42;
# this will generate a run-time error
$var->{zap} = 42;
# this will generate a compile-time error
my Foo $foo = Foo->new;
$foo->{zap} = 24;
# subclassing
{
package Bar;
use base 'Foo';
use fields qw(baz _Bar_private); # not shared with Foo
sub new {
my $class = shift;
my $self = fields::new($class);
$self->SUPER::new(); # init base fields
$self->{baz} = 10; # init own fields
$self->{_Bar_private} = "this is Bar's secret";
return $self;
}
}
=head1 DESCRIPTION
The C<fields> pragma enables compile-time and run-time verified class
fields.
NOTE: The current implementation keeps the declared fields in the %FIELDS
hash of the calling package, but this may change in future versions.
Do B<not> update the %FIELDS hash directly, because it must be created
at compile-time for it to be fully useful, as is done by this pragma.
If a typed lexical variable (C<my Class
$var>) holding a reference is used to access a
hash element and a package with the same name as the type has
declared class fields using this pragma, then the hash key is
verified at compile time. If the variables are not typed, access is
only checked at run time.
The related C<base> pragma will combine fields from base classes and any
fields declared using the C<fields> pragma. This enables field
inheritance to work properly. Inherited fields can be overridden but
will generate a warning if warnings are enabled.
B<Only valid for Perl 5.8.x and earlier:> Field names that start with an
underscore character are made private to the class and are not visible
to subclasses.
Also, B<in Perl 5.8.x and earlier>, this pragma uses pseudo-hashes, the
effect being that you can have objects with named fields which are as
compact and as fast to access as arrays, as long as the objects are
accessed through properly typed variables.
The following functions are supported:
=over 4
=item new
fields::new() creates and blesses a hash comprised of the fields declared
using the C<fields> pragma into the specified class. It is the
recommended way to construct a fields-based object.
This makes it possible to write a constructor like this:
package Critter::Sounds;
use fields qw(cat dog bird);
sub new {
my $self = shift;
$self = fields::new($self) unless ref $self;
$self->{cat} = 'meow'; # scalar element
@$self{'dog','bird'} = ('bark','tweet'); # slice
return $self;
}
=item phash
B<This function only works in Perl 5.8.x and earlier.> Pseudo-hashes
were removed from Perl as of 5.10. Consider using restricted hashes or
fields::new() instead (which itself uses restricted hashes under 5.10+).
See L<Hash::Util>. Using fields::phash() under 5.10 or higher will
cause an error.
fields::phash() can be used to create and initialize a plain (unblessed)
pseudo-hash. This function should always be used instead of creating
pseudo-hashes directly.
If the first argument is a reference to an array, the pseudo-hash will
be created with keys from that array. If a second argument is supplied,
it must also be a reference to an array whose elements will be used as
the values. If the second array contains fewer elements than the first,
the trailing elements of the pseudo-hash will not be initialized.
This makes it particularly useful for creating a pseudo-hash from
subroutine arguments:
sub dogtag {
my $tag = fields::phash([qw(name rank ser_num)], [@_]);
}
fields::phash() also accepts a list of key-value pairs that will
be used to construct the pseudo hash. Examples:
my $tag = fields::phash(name => "Joe",
rank => "captain",
ser_num => 42);
my $pseudohash = fields::phash(%args);
=back
=head1 SEE ALSO
L<base>, L<Hash::Util>
=cut
| operepo/ope | bin/usr/share/perl5/core_perl/fields.pm | Perl | mit | 9,488 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.