code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::oracle::mode::asmdiskgroupusage;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold);
sub custom_offline_output {
    my ($self, %options) = @_;

    # Human-readable line for the offline-disks counter.
    return 'Offline disks : ' . $self->{result_values}->{offline_disks};
}
sub custom_offline_calc {
    my ($self, %options) = @_;

    # Copy the raw datapoints for the current instance into result_values so
    # the threshold macros (%{offline_disks}, %{type}, ...) can see them.
    my $prefix = $self->{instance} . '_';
    foreach my $key (qw(offline_disks type display)) {
        $self->{result_values}->{$key} = $options{new_datas}->{$prefix . $key};
    }
    return 0;
}
sub custom_status_output {
    my ($self, %options) = @_;

    # Human-readable line for the status counter.
    return 'status: ' . $self->{result_values}->{status};
}
sub custom_status_threshold {
    my ($self, %options) = @_;

    # Evaluate the configured status thresholds, then remember whether the
    # result was OK so a later usage calculation can decide to skip
    # diskgroups that are already flagged by the status check.
    my $status = catalog_status_threshold($self, %options);
    my $is_ok = $self->{output}->is_status(value => $status, compare => 'ok', litteral => 1);
    $self->{instance_mode}->{last_status} = $is_ok ? 0 : 1;
    return $status;
}
sub custom_status_calc {
    my ($self, %options) = @_;

    # Pull the raw status value for this instance into result_values.
    $self->{result_values}->{status} = $options{new_datas}->{ $self->{instance} . '_status' };
    return 0;
}
sub custom_usage_perfdata {
    my ($self, %options) = @_;

    # Report 'free' instead of 'used' when --free was requested; this is only
    # meaningful when the diskgroup reports a real total size.
    my ($label, $value_perf) = ('used', $self->{result_values}->{used});
    if (defined($self->{instance_mode}->{option_results}->{free}) && $self->{result_values}->{total} > 0) {
        ($label, $value_perf) = ('free', $self->{result_values}->{free});
    }
    # The 'usage-failure' counter tags its perfdata with a suffix.
    $label .= '_' . $self->{result_values}->{label_ref} if (defined($self->{result_values}->{label_ref}));

    # Percentage thresholds are expressed against the total size.
    my %total_options;
    if ($self->{instance_mode}->{option_results}->{units} eq '%' && $self->{result_values}->{total} > 0) {
        %total_options = (total => $self->{result_values}->{total}, cast_int => 1);
    }

    $self->{output}->perfdata_add(
        label => $label, unit => 'B',
        instances => $self->use_instances(extra_instance => $options{extra_instance}) ? $self->{result_values}->{display} : undef,
        value => $value_perf,
        warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . $self->{thlabel}, %total_options),
        critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . $self->{thlabel}, %total_options),
        min => 0, max => $self->{result_values}->{total}
    );
}
sub custom_usage_threshold {
    my ($self, %options) = @_;

    my $opts = $self->{instance_mode}->{option_results};
    # '%' units and --free make no sense on a diskgroup whose total size is
    # unknown/unlimited, so report OK without checking anything.
    return 'ok' if ($self->{result_values}->{total} <= 0 && ($opts->{units} eq '%' || $opts->{free}));

    # Select which value the thresholds apply to: used/free, absolute or percent.
    my $threshold_value;
    if ($opts->{units} eq '%') {
        $threshold_value = defined($opts->{free}) ? $self->{result_values}->{prct_free} : $self->{result_values}->{prct_used};
    } else {
        $threshold_value = defined($opts->{free}) ? $self->{result_values}->{free} : $self->{result_values}->{used};
    }

    return $self->{perfdata}->threshold_check(
        value => $threshold_value,
        threshold => [
            { label => 'critical-' . $self->{thlabel}, exit_litteral => 'critical' },
            { label => 'warning-' . $self->{thlabel}, exit_litteral => 'warning' }
        ]
    );
}
sub custom_usage_output {
    my ($self, %options) = @_;

    # 'usage-failure' counter (label_ref set) reports the failure-adjusted usage.
    my $label = defined($self->{result_values}->{label_ref}) ? 'Disk Failure' : 'Disk ';

    my ($used_value, $used_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{used});
    if ($self->{result_values}->{total} <= 0) {
        # No usable total size reported for this diskgroup.
        return sprintf("%s Used: %s (unlimited)", $label, $used_value . " " . $used_unit);
    }

    my ($size_value, $size_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{total});
    my ($free_value, $free_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{free});
    return sprintf("%s Total: %s Used: %s (%.2f%%) Free: %s (%.2f%%)",
        $label,
        $size_value . " " . $size_unit,
        $used_value . " " . $used_unit, $self->{result_values}->{prct_used},
        $free_value . " " . $free_unit, $self->{result_values}->{prct_free});
}
sub custom_usage_calc {
    my ($self, %options) = @_;

    # Skip the usage counters (-10 is declared in skipped_code) when the
    # preceding status check already raised an alarm (last_status == 1):
    # a dismounted or faulty diskgroup does not report meaningful sizes.
    # Bug fix: the comparison used '== 0', which would instead have skipped
    # usage reporting for every healthy diskgroup.
    return -10 if (defined($self->{instance_mode}->{last_status}) && $self->{instance_mode}->{last_status} == 1);

    # 'usage' reads the '_used' datapoint, 'usage-failure' reads '_used_failure'.
    my $label_used = 'used';
    $label_used .= '_' . $options{extra_options}->{label_ref}
        if (defined($options{extra_options}->{label_ref}));
    $self->{result_values}->{label_ref} = defined($options{extra_options}->{label_ref}) ? $options{extra_options}->{label_ref} : undef;
    $self->{result_values}->{display} = $options{new_datas}->{$self->{instance} . '_display'};
    $self->{result_values}->{total} = $options{new_datas}->{$self->{instance} . '_total'};
    $self->{result_values}->{used} = $options{new_datas}->{$self->{instance} . '_' . $label_used};

    # Unknown/unlimited capacity: percentages cannot be derived.
    return 0 if ($self->{result_values}->{total} == 0);

    $self->{result_values}->{prct_used} = $self->{result_values}->{used} * 100 / $self->{result_values}->{total};
    $self->{result_values}->{prct_free} = 100 - $self->{result_values}->{prct_used};
    $self->{result_values}->{free} = $self->{result_values}->{total} - $self->{result_values}->{used};
    return 0;
}

sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'dg', type => 1, cb_prefix_output => 'prefix_dg_output', message_multiple => 'All diskgroups are ok', skipped_code => { -10 => 1 } },
    ];
    $self->{maps_counters}->{dg} = [
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'status' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                # Bug fix: the threshold check must go through
                # custom_status_threshold (which also records last_status for
                # custom_usage_calc), not through the output formatter.
                closure_custom_threshold_check => $self->can('custom_status_threshold'),
            }
        },
        { label => 'offline-disks', set => {
                key_values => [ { name => 'offline_disks' }, { name => 'type' }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_offline_calc'),
                closure_custom_output => $self->can('custom_offline_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
        { label => 'usage', set => {
                key_values => [ { name => 'used' }, { name => 'total' }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_usage_calc'),
                closure_custom_output => $self->can('custom_usage_output'),
                closure_custom_perfdata => $self->can('custom_usage_perfdata'),
                closure_custom_threshold_check => $self->can('custom_usage_threshold'),
            }
        },
        { label => 'usage-failure', set => {
                key_values => [ { name => 'used_failure' }, { name => 'total' }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_usage_calc'), closure_custom_calc_extra_options => { label_ref => 'failure' },
                closure_custom_output => $self->can('custom_usage_output'),
                closure_custom_perfdata => $self->can('custom_usage_perfdata'),
                closure_custom_threshold_check => $self->can('custom_usage_threshold'),
            }
        },
    ];
}
sub prefix_dg_output {
    my ($self, %options) = @_;

    # Prefix every diskgroup line with its name.
    return sprintf("Diskgroup '%s' ", $options{instance_value}->{display});
}
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Command-line options. The offline-disks defaults follow Oracle ASM
    # redundancy semantics (extern / normal / high).
    $options{options}->add_options(arguments => {
        'unknown-status:s'         => { name => 'unknown_status', default => '' },
        'warning-status:s'         => { name => 'warning_status', default => '' },
        'critical-status:s'        => { name => 'critical_status', default => '' },
        'warning-offline-disks:s'  => { name => 'warning_offline_disks', default => '(%{offline_disks} > 0 && %{type} eq "extern") || (%{offline_disks} > 1 && %{type} eq "high")' },
        'critical-offline-disks:s' => { name => 'critical_offline_disks', default => '%{offline_disks} > 0 && %{type} =~ /^normal|high$/' },
        'filter-name:s'            => { name => 'filter_name' },
        'units:s'                  => { name => 'units', default => '%' },
        'free'                     => { name => 'free' },
    });

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    # Compile the %{...} macros used by the status/offline-disks expressions.
    my @macro_options = qw(
        warning_offline_disks critical_offline_disks
        warning_status critical_status unknown_status
    );
    $self->change_macros(macros => \@macro_options);
}
sub manage_selection {
    my ($self, %options) = @_;

    # One row per ASM diskgroup; all sizes come back in megabytes.
    $options{sql}->connect();
    $options{sql}->query(query => q{SELECT name, state, type, total_mb, usable_file_mb, offline_disks, FREE_MB FROM V$ASM_DISKGROUP});
    my $result = $options{sql}->fetchall_arrayref();
    $options{sql}->disconnect();

    $self->{dg} = {};
    foreach my $row (@$result) {
        my ($name, $state, $type, $total_mb, $usable_file_mb, $offline_disks, $free_mb) = @$row;

        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $name !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping '" . $name . "': no matching filter name.", debug => 1);
            next;
        }

        my $total_bytes = $total_mb * 1024 * 1024;
        # usable_file_mb can go negative when redundancy can no longer be
        # restored after a disk failure; clamp failure usage to the total.
        my $used_failure = $total_bytes - ($usable_file_mb * 1024 * 1024);
        $used_failure = $total_bytes if ($usable_file_mb < 0);

        $self->{dg}->{$name} = {
            display => $name,
            total => $total_bytes,
            used => $total_bytes - ($free_mb * 1024 * 1024),
            used_failure => $used_failure,
            status => $state,
            type => lc($type),
            offline_disks => $offline_disks
        };
    }

    if (scalar(keys %{$self->{dg}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No diskgroup found.");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check Oracle ASM diskgroup usage and status.
=over 8
=item B<--warning-usage>
Threshold warning.
=item B<--critical-usage>
Threshold critical.
=item B<--warning-usage-failure>
Threshold warning.
=item B<--critical-usage-failure>
Threshold critical.
=item B<--unknown-status>
Set unknown threshold for status (Default: '').
Can use special variables like: %{status}, %{display}
=item B<--warning-status>
Set warning threshold for status (Default: '').
Can use special variables like: %{status}, %{display}
=item B<--critical-status>
Set critical threshold for status (Default: '').
Can use special variables like: %{status}, %{display}
=item B<--warning-offline-disks>
Set warning threshold for offline disks (Default: '(%{offline_disks} > 0 && %{type} eq "extern") || (%{offline_disks} > 1 && %{type} eq "high")').
Can use special variables like: %{offline_disks}, %{type}, %{display}
=item B<--critical-offline-disks>
Set critical threshold for offline disks (Default: '%{offline_disks} > 0 && %{type} =~ /^normal|high$/').
Can use special variables like: %{offline_disks}, %{type}, %{display}
=item B<--units>
Units of thresholds (Default: '%') ('%', 'B').
=item B<--free>
Thresholds are on free space left.
=item B<--filter-name>
Filter by name (regexp can be used).
=back
=cut
| Sims24/centreon-plugins | database/oracle/mode/asmdiskgroupusage.pm | Perl | apache-2.0 | 13,203 |
# Ensembl module for Bio::EnsEMBL::Funcgen::BindingMatrix::Converter
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Funcgen::BindingMatrix::Converter
=head1 SYNOPSIS
=head1 DESCRIPTION
Bio::EnsEMBL::Funcgen::BindingMatrix objects which are fetched from funcgen
databases contain frequency values.
Bio::EnsEMBL::Funcgen::BindingMatrix::Converter includes methods that convert
frequency values to other units such as probabilities, bits and weights
=head1 SEE ALSO
Bio::EnsEMBL::Funcgen::BindingMatrix
Bio::EnsEMBL::Funcgen::DBSQL::BindingMatrixAdaptor
Bio::EnsEMBL::Funcgen::MotifFeature
=cut
package Bio::EnsEMBL::Funcgen::BindingMatrix::Converter;
use strict;
use warnings;
use autodie;
use feature qw(say);
use Bio::EnsEMBL::Utils::Scalar qw( assert_ref check_ref );
use Bio::EnsEMBL::Utils::Exception qw( throw );
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use Bio::EnsEMBL::Funcgen::BindingMatrix::Constants qw ( :all );
require Bio::EnsEMBL::Funcgen::BindingMatrix;
=head2 new
Example : my $converter =
Bio::EnsEMBL::Funcgen::BindingMatrix::Converter->new();
Description: Creates a new BindingMatrix::Converter object
Returntype : Bio::EnsEMBL::Funcgen::BindingMatrix::Converter
Exceptions : None
Caller : general
Status : Stable
=cut
sub new {
    my $caller = shift;

    # Support both Class->new() and $instance->new().
    my $class = ref($caller) || $caller;
    return bless {}, $class;
}
=head2 from_frequencies_to_probabilities
Arg [1] : Bio::EnsEMBL::Funcgen::BindingMatrix, the frequencies matrix
which will be converted
Arg [2] : (optional) integer, the pseudocount value used in the calculation
of the probabilities
Example : my $probabilities_matrix =
$converter->from_frequencies_to_probabilities($freq_matrix);
Description: Converts a frequencies matrix to a probabilities matrix
Returntype : Bio::EnsEMBL::Funcgen::BindingMatrix
Exceptions : Thrown if the matrix passed is not a
Bio::EnsEMBL::Funcgen::BindingMatrix object.
Thrown if the matrix passed does not contain frequency units.
Thrown if the pseudocount passed is equal to 0.
Caller : general
Status : Stable
=cut
sub from_frequencies_to_probabilities {
    my ( $self, $binding_matrix, $pseudocount ) = @_;

    assert_ref( $binding_matrix, 'Bio::EnsEMBL::Funcgen::BindingMatrix',
        'BindingMatrix' );

    if ( $binding_matrix->unit ne FREQUENCIES ) {
        throw(   'Please supply a binding matrix with '
               . FREQUENCIES
               . ' units instead of '
               . $binding_matrix->unit() );
    }

    # A small pseudocount avoids zero probabilities (and later log(0)).
    if ( defined $pseudocount ) {
        throw('Pseudocount can not be 0.') if $pseudocount == 0;
    }
    else {
        $pseudocount = 0.1;
    }

    my %probabilities;
    for my $position ( 1 .. $binding_matrix->length() ) {
        my $column_sum =
          $self->_get_frequency_sum_by_position( $binding_matrix, $position );

        # Laplace-style smoothing: one pseudocount per nucleotide.
        for my $nucleotide ( @{ $self->_nucleotides() } ) {
            my $frequency =
              $binding_matrix->get_element_by_position_nucleotide( $position,
                $nucleotide );
            $probabilities{$position}{$nucleotide} =
              ( $frequency + $pseudocount ) /
              ( $column_sum + 4 * $pseudocount );
        }
    }

    return $self->_convert_BindingMatrix( $binding_matrix, \%probabilities,
        PROBABILITIES );
}
=head2 from_probabilities_to_weights
Arg [-BINDING_MATRIX]:
Bio::EnsEMBL::Funcgen::BindingMatrix, the probabilities matrix
which will be converted
Arg [-EXPECTED_FREQUENCY_A]:
(optional) integer, the expected frequency for nucleotide A which
is used in the calculation of weights
Arg [-EXPECTED_FREQUENCY_C]:
(optional) integer, the expected frequency for nucleotide C which
is used in the calculation of weights
Arg [-EXPECTED_FREQUENCY_G]:
(optional) integer, the expected frequency for nucleotide G which
is used in the calculation of weights
Arg [-EXPECTED_FREQUENCY_T]:
(optional) integer, the expected frequency for nucleotide T which
is used in the calculation of weights
Example : my $weight_matrix =
$converter->from_probabilities_to_weights($prob_matrix);
Description: Converts a probabilities matrix to a weights matrix
Returntype : Bio::EnsEMBL::Funcgen::BindingMatrix
Exceptions : Thrown if the matrix passed is not a
Bio::EnsEMBL::Funcgen::BindingMatrix object.
Thrown if the matrix passed does not contain probability units.
Thrown if the sum of expected frequencies is not equal to 1.
Thrown if an expected frequency passed is equal to 0.
Caller : general
Status : Stable
=cut
sub from_probabilities_to_weights {
    my $self = shift;

    # Named-argument unpacking; expected frequencies default to a uniform
    # background distribution below.
    my ( $binding_matrix, @expected ) = rearrange(
        [   'BINDING_MATRIX',       'EXPECTED_FREQUENCY_A',
            'EXPECTED_FREQUENCY_C', 'EXPECTED_FREQUENCY_G',
            'EXPECTED_FREQUENCY_T'
        ],
        @_
    );
    my %expected_frequency;
    @expected_frequency{ 'A', 'C', 'G', 'T' } = @expected;

    assert_ref( $binding_matrix, 'Bio::EnsEMBL::Funcgen::BindingMatrix',
        'BindingMatrix' );

    if ( $binding_matrix->unit ne PROBABILITIES ) {
        throw(   'Please supply a binding matrix with '
               . PROBABILITIES
               . ' units instead of '
               . $binding_matrix->unit() );
    }

    # Uniform background (0.25 per base) unless the caller overrides it.
    for my $nucleotide ( keys %expected_frequency ) {
        $expected_frequency{$nucleotide} //= 0.25;
    }
    $self->_check_expected_frequencies_are_valid( \%expected_frequency );

    # weight = log2( observed probability / expected background frequency )
    my %weights;
    for my $position ( 1 .. $binding_matrix->length() ) {
        for my $nucleotide ( @{ $self->_nucleotides() } ) {
            my $probability =
              $binding_matrix->get_element_by_position_nucleotide( $position,
                $nucleotide );
            $weights{$position}{$nucleotide} =
              $self->_log2( $probability / $expected_frequency{$nucleotide} );
        }
    }

    return $self->_convert_BindingMatrix( $binding_matrix, \%weights,
        WEIGHTS );
}
=head2 from_probabilities_to_bits
Arg [1]: Bio::EnsEMBL::Funcgen::BindingMatrix, the probabilities matrix
which will be converted
Example : my $bits_matrix =
$converter->from_probabilities_to_bits($prob_matrix);
Description: Converts a probabilities matrix to a bits matrix
Returntype : Bio::EnsEMBL::Funcgen::BindingMatrix
Exceptions : Thrown if the matrix passed is not a
Bio::EnsEMBL::Funcgen::BindingMatrix object.
Thrown if the matrix passed does not contain probability units.
Caller : general
Status : Stable
=cut
sub from_probabilities_to_bits {
    my ( $self, $binding_matrix ) = @_;

    assert_ref( $binding_matrix, 'Bio::EnsEMBL::Funcgen::BindingMatrix',
        'BindingMatrix' );

    if ( $binding_matrix->unit ne PROBABILITIES ) {
        throw(   'Please supply a binding matrix with '
               . PROBABILITIES
               . ' units instead of '
               . $binding_matrix->unit() );
    }

    my %bits;
    for my $position ( 1 .. $binding_matrix->length() ) {

        # Information content of the column: IC = 2 - H,
        # where H is the Shannon entropy computed by _get_h().
        my $information_content =
          2 - $self->_get_h( $binding_matrix, $position );

        # Scale each probability by the column's information content.
        for my $nucleotide ( @{ $self->_nucleotides() } ) {
            my $probability =
              $binding_matrix->get_element_by_position_nucleotide( $position,
                $nucleotide );
            $bits{$position}{$nucleotide} =
              $probability * $information_content;
        }
    }

    return $self->_convert_BindingMatrix( $binding_matrix, \%bits, BITS );
}
=head2 from_frequencies_to_bits
Arg [1] : Bio::EnsEMBL::Funcgen::BindingMatrix, the frequencies matrix
which will be converted
Arg [2] : (optional) integer, the pseudocount value used in the calculation
of the probabilities
Example : my $bits_matrix =
$converter->from_frequencies_to_bits($freq_matrix);
Description: Converts a frequencies matrix to a bits matrix
Returntype : Bio::EnsEMBL::Funcgen::BindingMatrix
Exceptions : Thrown if the matrix passed is not a
Bio::EnsEMBL::Funcgen::BindingMatrix object.
Thrown if the matrix passed does not contain frequency units.
Thrown if the pseudocount passed is equal to 0.
Caller : general
Status : Stable
=cut
sub from_frequencies_to_bits {
    my ( $self, $binding_matrix, $pseudocount ) = @_;

    # Chain the two conversions: frequencies -> probabilities -> bits.
    return $self->from_probabilities_to_bits(
        $self->from_frequencies_to_probabilities(
            $binding_matrix, $pseudocount
        )
    );
}
=head2 from_frequencies_to_weights
Arg [-BINDING_MATRIX]:
Bio::EnsEMBL::Funcgen::BindingMatrix, the probabilities matrix
which will be converted
Arg [-PSEUDOCOUNT]:
(optional) integer, the pseudocount value used in the calculation
of the probabilities
Arg [-EXPECTED_FREQUENCY_A]:
(optional) integer, the expected frequency for nucleotide A which
is used in the calculation of weights
Arg [-EXPECTED_FREQUENCY_C]:
(optional) integer, the expected frequency for nucleotide C which
is used in the calculation of weights
Arg [-EXPECTED_FREQUENCY_G]:
(optional) integer, the expected frequency for nucleotide G which
is used in the calculation of weights
Arg [-EXPECTED_FREQUENCY_T]:
(optional) integer, the expected frequency for nucleotide T which
is used in the calculation of weights
Example : my $weights_matrix =
$converter->from_frequencies_to_weights($freq_matrix);
Description: Converts a frequencies matrix to a weights matrix
Returntype : Bio::EnsEMBL::Funcgen::BindingMatrix
Exceptions : Thrown if the matrix passed is not a
Bio::EnsEMBL::Funcgen::BindingMatrix object.
Thrown if the matrix passed does not contain frequency units.
Thrown if the sum of expected frequencies is not equal to 1.
Thrown if an expected frequency passed is equal to 0.
Thrown if the pseudocount passed is equal to 0.
Caller : general
Status : Stable
=cut
sub from_frequencies_to_weights {
    my $self = shift;

    my ( $binding_matrix, $pseudocount, @expected ) = rearrange(
        [   'BINDING_MATRIX',       'PSEUDOCOUNT',
            'EXPECTED_FREQUENCY_A', 'EXPECTED_FREQUENCY_C',
            'EXPECTED_FREQUENCY_G', 'EXPECTED_FREQUENCY_T'
        ],
        @_
    );
    my %expected_frequency;
    @expected_frequency{ 'A', 'C', 'G', 'T' } = @expected;

    # Chain the two conversions: frequencies -> probabilities -> weights.
    my $probabilities_matrix =
      $self->from_frequencies_to_probabilities( $binding_matrix,
        $pseudocount );

    return $self->from_probabilities_to_weights(
        -BINDING_MATRIX       => $probabilities_matrix,
        -EXPECTED_FREQUENCY_A => $expected_frequency{A},
        -EXPECTED_FREQUENCY_C => $expected_frequency{C},
        -EXPECTED_FREQUENCY_G => $expected_frequency{G},
        -EXPECTED_FREQUENCY_T => $expected_frequency{T}
    );
}
sub _get_frequency_sum_by_position {
    my ( $self, $binding_matrix, $position ) = @_;

    # Sum of the four nucleotide frequencies in one matrix column.
    assert_ref( $binding_matrix, 'Bio::EnsEMBL::Funcgen::BindingMatrix',
        'BindingMatrix' );
    throw('Must supply a position parameter') if !defined $position;

    # Initialised to 0: the original left this undef, so the first '+='
    # raised an 'uninitialized value' warning under 'use warnings'.
    my $frequency_sum = 0;
    for my $nucleotide ( @{ $self->_nucleotides() } ) {
        $frequency_sum +=
          $binding_matrix->get_element_by_position_nucleotide( $position,
            $nucleotide );
    }

    return $frequency_sum;
}
sub _log2 {
    my ( $self, $n ) = @_;

    throw('Must supply a parameter') if !defined $n;

    # Change of base: log2(n) = ln(n) / ln(2).
    return log($n) / log(2);
}
sub _get_h {
    my ( $self, $binding_matrix, $position ) = @_;

    assert_ref( $binding_matrix, 'Bio::EnsEMBL::Funcgen::BindingMatrix',
        'BindingMatrix' );
    throw('Must specify a position') if !defined $position;

    # Shannon entropy of the column: H = -sum( p * log2(p) ).
    # NOTE(review): a zero probability makes _log2() die on log(0);
    # callers appear to rely on pseudocount smoothing upstream -- confirm.
    my $entropy = 0;
    for my $nucleotide ( @{ $self->_nucleotides() } ) {
        my $probability =
          $binding_matrix->get_element_by_position_nucleotide( $position,
            $nucleotide );
        $entropy -= $probability * $self->_log2($probability);
    }

    return $entropy;
}
sub _nucleotides { return [ 'A', 'C', 'G', 'T' ]; }
sub _convert_BindingMatrix {
    my ( $self, $binding_matrix, $elements, $unit ) = @_;

    # Build a fresh BindingMatrix carrying the converted element values and
    # the new unit, copying all other metadata from the source matrix.
    assert_ref( $binding_matrix, 'Bio::EnsEMBL::Funcgen::BindingMatrix',
        'BindingMatrix' );
    throw('Must supply an -elements parameter') if !defined $elements;
    throw('Must supply a -unit parameter')      if !defined $unit;

    my %constructor_args = (
        -NAME      => $binding_matrix->name(),
        -SOURCE    => $binding_matrix->source(),
        -THRESHOLD => $binding_matrix->threshold(),
        -ELEMENTS  => $elements,
        -UNIT      => $unit,
        -STABLE_ID => $binding_matrix->stable_id(),
        -ASSOCIATED_TRANSCRIPTION_FACTOR_COMPLEXES =>
          $binding_matrix->get_all_associated_TranscriptionFactorComplexes()
    );
    return Bio::EnsEMBL::Funcgen::BindingMatrix->new(%constructor_args);
}
sub _check_expected_frequencies_are_valid {
    my ( $self, $expected_frequency ) = @_;

    # Expected background frequencies must form a probability distribution:
    # the four values sum to 1 and none of them is zero.
    my $sum =
        $expected_frequency->{A}
      + $expected_frequency->{C}
      + $expected_frequency->{G}
      + $expected_frequency->{T};

    # Compare with a small tolerance: values like 0.3 are not exactly
    # representable in binary floating point, so the original exact '!= 1'
    # test wrongly rejected valid distributions such as (0.3, 0.3, 0.2, 0.2).
    if ( abs( $sum - 1 ) > 1e-9 ) {
        throw(
            'Invalid expected frequencies passed. The sum is not equal to 1.');
    }

    for my $ef ( values %{$expected_frequency} ) {
        if ( $ef == 0 ) {
            throw(
                'Invalid expected frequencies passed. No zero (0) values allowed'
            );
        }
    }
}
1;
| Ensembl/ensembl-funcgen | modules/Bio/EnsEMBL/Funcgen/BindingMatrix/Converter.pm | Perl | apache-2.0 | 16,324 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::vmware::connector::mode::statushost;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold);
sub custom_status_output {
    my ($self, %options) = @_;

    # Human-readable line for the connection-state counter.
    return 'status ' . $self->{result_values}->{status};
}
sub custom_status_calc {
    my ($self, %options) = @_;

    # The raw datapoint is named 'state'; expose it as 'status' so the
    # unknown/warning/critical-status macros can reference %{status}.
    $self->{result_values}->{status} = $options{new_datas}->{ $self->{instance} . '_state' };
    return 0;
}
sub custom_overall_output {
    my ($self, %options) = @_;

    # Human-readable line for the overall-status counter.
    return 'overall status is ' . $self->{result_values}->{overall_status};
}
sub custom_overall_calc {
    my ($self, %options) = @_;

    # Pull the raw overall_status value for this instance into result_values.
    $self->{result_values}->{overall_status} = $options{new_datas}->{ $self->{instance} . '_overall_status' };
    return 0;
}
# Declare the counter layout: one instance per ESX host, with two
# status-only counters (threshold => 0 disables the generic numeric
# thresholds; the exit code comes from the status expressions instead).
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'host', type => 1, cb_prefix_output => 'prefix_host_output', message_multiple => 'All ESX Hosts are ok' },
    ];
    $self->{maps_counters}->{host} = [
        # Connection state ('connected', 'disconnected', ...) from custom_status_calc.
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'state' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                # No perfdata for a textual status.
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
        # vCenter overall health colour (gray/green/yellow/red).
        { label => 'overall-status', threshold => 0, set => {
                key_values => [ { name => 'overall_status' } ],
                closure_custom_calc => $self->can('custom_overall_calc'),
                closure_custom_output => $self->can('custom_overall_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
    ];
}
sub prefix_host_output {
    my ($self, %options) = @_;

    # Prefix every host line with its name.
    return sprintf("Host '%s' : ", $options{instance_value}->{display});
}
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Scope options narrow the search; the *-status expressions drive the
    # exit code (defaults follow vCenter's gray/yellow/red colour scheme).
    $options{options}->add_options(arguments => {
        'esx-hostname:s'            => { name => 'esx_hostname' },
        'filter'                    => { name => 'filter' },
        'scope-datacenter:s'        => { name => 'scope_datacenter' },
        'scope-cluster:s'           => { name => 'scope_cluster' },
        'unknown-status:s'          => { name => 'unknown_status', default => '%{status} !~ /^connected$/i' },
        'warning-status:s'          => { name => 'warning_status', default => '' },
        'critical-status:s'         => { name => 'critical_status', default => '' },
        'unknown-overall-status:s'  => { name => 'unknown_overall_status', default => '%{overall_status} =~ /gray/i' },
        'warning-overall-status:s'  => { name => 'warning_overall_status', default => '%{overall_status} =~ /yellow/i' },
        'critical-overall-status:s' => { name => 'critical_overall_status', default => '%{overall_status} =~ /red/i' },
    });

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    # Compile the %{...} macros used by the status threshold expressions.
    my @status_macros = qw(
        unknown_status warning_status critical_status
        unknown_overall_status warning_overall_status critical_overall_status
    );
    $self->change_macros(macros => \@status_macros);
}
sub manage_selection {
    my ($self, %options) = @_;

    # Delegate the data collection to the VMware connector daemon.
    my $response = $options{custom}->execute(
        params => $self->{option_results},
        command => 'statushost'
    );

    # Index the results by ESX hostname for the counter template.
    $self->{host} = {};
    foreach my $entry (values %{$response->{data}}) {
        $self->{host}->{ $entry->{name} } = {
            display => $entry->{name},
            state => $entry->{state},
            overall_status => $entry->{overall_status},
        };
    }
}
1;
__END__
=head1 MODE
Check ESX global status.
=over 8
=item B<--esx-hostname>
ESX hostname to check.
If not set, we check all ESX.
=item B<--filter>
ESX hostname is a regexp.
=item B<--scope-datacenter>
Search in following datacenter(s) (can be a regexp).
=item B<--scope-cluster>
Search in following cluster(s) (can be a regexp).
=item B<--unknown-status>
Set unknown threshold for status (Default: '%{status} !~ /^connected$/i').
Can use special variables like: %{status}
=item B<--warning-status>
Set warning threshold for status (Default: '').
Can use special variables like: %{status}
=item B<--critical-status>
Set critical threshold for status (Default: '').
Can use special variables like: %{status}
=item B<--unknown-overall-status>
Set unknown threshold for status (Default: '%{overall_status} =~ /gray/i').
Can use special variables like: %{overall_status}
=item B<--warning-overall-status>
Set warning threshold for status (Default: '%{overall_status} =~ /yellow/i').
Can use special variables like: %{overall_status}
=item B<--critical-overall-status>
Set critical threshold for status (Default: '%{overall_status} =~ /red/i').
Can use special variables like: %{overall_status}
=back
=cut
| Sims24/centreon-plugins | apps/vmware/connector/mode/statushost.pm | Perl | apache-2.0 | 6,066 |
#!/usr/bin/env perl
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use strict;
use warnings;
use DBI;
use Getopt::Long;
use Bio::AlignIO;
use Bio::EnsEMBL::Utils::IO qw/:slurp :spurt/;
use Bio::EnsEMBL::Compara::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Compara::GenomeDB;
use Bio::EnsEMBL::Compara::Graph::NewickParser;
use Bio::EnsEMBL::Compara::NestedSet;
use Bio::EnsEMBL::Compara::Utils::SpeciesTree;
my $self = {};
$self->{'speciesList'} = ();
$self->{'outputFasta'} = undef;
$self->{'noSplitSeqLines'} = undef;
$self->{'cdna'} = 0;
$self->{'scale'} = 10;
$self->{'extrataxon_sequenced'} = undef;
$self->{'multifurcation_deletes_node'} = undef;
$self->{'multifurcation_deletes_all_subnodes'} = undef;
$self->{'njtree_output_filename'} = undef;
$self->{'no_other_files'} = undef;
$self->{'no_print_tree'} = undef;
my $conf_file;
my ($help, $host, $user, $pass, $dbname, $port, $adaptor);
my $url;
GetOptions('help' => \$help,
'url=s' => \$url,
'tre=s' => \$self->{'newick_file'},
'tree_id=i' => \$self->{'tree_id'},
'gene=s' => \$self->{'gene_stable_id'},
'reroot=i' => \$self->{'new_root_id'},
'align' => \$self->{'print_align'},
'cdna' => \$self->{'cdna'},
'tag=s' => \$self->{'tag'},
'create_species_tree' => \$self->{'create_species_tree'},
'extrataxon_sequenced=s' => \$self->{'extrataxon_sequenced'},
'multifurcation_deletes_node=s' => \$self->{'multifurcation_deletes_node'},
'multifurcation_deletes_all_subnodes=s' => \$self->{'multifurcation_deletes_all_subnodes'},
'njtree_output_filename=s' => \$self->{'njtree_output_filename'}, # we need to be able to feed the filename from outside to make some automation possible
'no_other_files' => \$self->{'no_other_files'}, # and shut up the rest of it :)
'no_print_tree' => \$self->{'no_print_tree'}, # so all output goes to STDERR
'scale=f' => \$self->{'scale'},
);
if ($help) { usage(); }
if ($url) {
$self->{'comparaDBA'} = new Bio::EnsEMBL::Compara::DBSQL::DBAdaptor( -URL => $url );
}
unless(defined($self->{'comparaDBA'})) {
warn "Could not create compara_dba from url '$url'\n\n";
usage();
}
if($self->{'tree_id'}) {
my $treeDBA = $self->{'comparaDBA'}->get_GeneTreeAdaptor;
my $tree = $treeDBA->fetch_by_dbID($self->{'tree_id'});
$self->{'root'} = $tree->root;
}
if ($self->{'tree_id'}) {
print_protein_tree($self);
} elsif ($self->{'gene_stable_id'}) {
fetch_protein_tree_with_gene($self, $self->{'gene_stable_id'});
} elsif ($self->{'newick_file'}) {
parse_newick($self);
} elsif ($self->{'new_root_id'}) {
reroot($self);
} elsif ($self->{'print_align'}) {
dumpTreeMultipleAlignment($self);
} elsif ($self->{'create_species_tree'}) {
create_species_tree($self);
} else {
fetch_compara_ncbi_taxa($self);
}
#cleanup memory
if($self->{'root'}) {
warn("ABOUT TO MANUALLY release tree\n") if ($self->{'debug'});
$self->{'root'}->release_tree;
$self->{'root'} = undef;
warn("DONE\n") if ($self->{'debug'});
}
exit(0);
#######################
#
# subroutines
#
#######################
sub usage {
warn "testTaxonTree.pl [options]\n";
warn " -help : print this help\n";
warn " -url <url> : connect to compara at url\n";
warn " -tree_id <id> : print tree with node_id\n";
warn " -name <string> : search for <name> and print tree from that node\n";
warn " -align : print multiple alignment\n";
warn " -scale <num> : scale factor for printing tree (def: 100)\n";
warn " -mini : minimize tree\n";
warn "testTaxonTree.pl v1.1\n";
exit(1);
}
sub fetch_compara_ncbi_taxa {
my $self = shift;
warn("fetch_compara_ncbi_taxa\n");
my $root = Bio::EnsEMBL::Compara::Utils::SpeciesTree->create_species_tree(
-COMPARA_DBA => $self->{'comparaDBA'},
-RETURN_NCBI_TREE => 1,
);
$root->print_tree($self->{'scale'});
$self->{'root'} = $root;
}
sub create_species_tree {
my $self = shift;
warn("create_species_tree\n");
my @extrataxon_sequenced;
if($self->{'extrataxon_sequenced'}) {
my $temp = $self->{'extrataxon_sequenced'};
@extrataxon_sequenced = split ('_',$temp);
}
my @multifurcation_deletes_node;
if($self->{'multifurcation_deletes_node'}) {
my $temp = $self->{'multifurcation_deletes_node'};
@multifurcation_deletes_node = split ('_',$temp);
}
my @multifurcation_deletes_all_subnodes;
if($self->{'multifurcation_deletes_all_subnodes'}) {
my $temp = $self->{'multifurcation_deletes_all_subnodes'};
@multifurcation_deletes_all_subnodes = split ('_',$temp);
}
my $root = Bio::EnsEMBL::Compara::Utils::SpeciesTree->create_species_tree(
-COMPARA_DBA => $self->{'comparaDBA'},
-RETURN_NCBI_TREE => 1,
-EXTRATAXON_SEQUENCED => \@extrataxon_sequenced,
-MULTIFURCATION_DELETES_NODE => \@multifurcation_deletes_node,
-MULTIFURCATION_DELETES_ALL_SUBNODES => @multifurcation_deletes_all_subnodes,
);
unless($self->{'no_print_tree'}) {
$root->print_tree($self->{'scale'});
}
my $outname = $self->{'comparaDBA'}->dbc->dbname;
$outname .= ".".$self->{'tag'} if (defined($self->{'tag'}));
my $num_leaves = scalar(@{$root->get_all_leaves});
$outname = $num_leaves . "." . $outname;
my $newick_common;
eval {$newick_common = $root->newick_format("full_common");};
unless ($@) {
warn("\n\n$newick_common\n\n");
$newick_common =~ s/\ /\_/g;
unless($self->{'no_other_files'}) {
spurt("newick_common.$outname.nh", $newick_common);
}
}
my $newick = $root->newick_format;
warn("\n\n$newick\n\n");
unless($self->{'no_other_files'}) {
spurt("newick.$outname.nh", $newick);
}
my $newick_simple = $newick;
$newick_simple =~ s/\:\d\.\d+//g;
$newick_simple =~ s/\ /\_/g;
warn "$newick_simple\n\n";
unless($self->{'no_other_files'}) {
spurt("newick_simple.$outname.nh", $newick_simple);
}
my $species_short_name = $root->newick_format('species_short_name');
warn("$species_short_name\n\n");
unless($self->{'no_other_files'}) {
spurt("species_short_name.$outname.nh", $species_short_name);
}
my $njtree_tree = $root->newick_format('ncbi_taxon');
warn "==== Your njtree file njtree.$outname.nh ====\n";
warn "$njtree_tree\n\n";
unless($self->{'no_other_files'}) {
spurt("njtree.$outname.nh". $njtree_tree);
}
if($self->{'njtree_output_filename'}) { # we need to feed the filename from outside for some automation
spurt($self->{'njtree_output_filename'}, $njtree_tree);
}
my $s = join (":", map {$_->name} (@{$root->get_all_leaves}));
$s =~ s/\ /\_/g;
warn "$s\n";
$self->{'root'} = $root;
}
sub print_protein_tree {
my $self = shift;
my $tree = $self->{'root'};
$tree->tree->print_tree($self->{'scale'});
warn sprintf("%d proteins\n", scalar(@{$tree->get_all_leaves}));
my $newick = $tree->newick_format('simple');
warn("$newick\n");
}
sub fetch_protein_tree_with_gene {
my $self = shift;
my $gene_stable_id = shift;
my $member = $self->{'comparaDBA'}->get_GeneMemberAdaptor->fetch_by_stable_id($gene_stable_id);
print $member->toString(), "\n";
print $member->get_canonical_SeqMember->toString(), "\n";
my $treeDBA = $self->{'comparaDBA'}->get_GeneTreeAdaptor;
my $tree = $treeDBA->fetch_default_for_Member($member);
$tree->print_tree($self->{'scale'});
}
sub parse_newick {
my $self = shift;
warn "load from file ". $self->{'newick_file'}. "\n";
my $newick = slurp( $self->{'newick_file'} );
my $tree = Bio::EnsEMBL::Compara::Graph::NewickParser::parse_newick_into_tree($newick);
$tree->print_tree($self->{'scale'});
}
sub reroot {
my $self = shift;
my $node_id = $self->{'new_root_id'};
my $treeDBA = $self->{'comparaDBA'}->get_GeneTreeNodeAdaptor;
my $node = $treeDBA->fetch_node_by_node_id($node_id);
warn "tree at ". $node->root->node_id ."\n";
my $tree = $treeDBA->fetch_node_by_node_id($node->root->node_id);
$tree->print_tree($self->{'scale'});
my $new_root = $tree->find_node_by_node_id($node_id);
return unless $new_root;
my $tmp_root = Bio::EnsEMBL::Compara::NestedSet->new;
$tmp_root->merge_children($tree);
$new_root->re_root;
$tree->merge_children($new_root);
$tree->build_leftright_indexing;
$tree->print_tree($self->{'scale'});
$treeDBA->store($tree);
$treeDBA->delete_node($new_root);
}
sub dumpTreeMultipleAlignment
{
my $self = shift;
warn("missing tree\n") unless($self->{'root'});
my $tree = $self->{'root'};
$self->{'file_root'} = "proteintree_". $tree->node_id;
$self->{'file_root'} =~ s/\/\//\//g; # converts any // in path to /
my $clw_file = $self->{'file_root'} . ".aln";
if($self->{'debug'}) {
my $leafcount = scalar(@{$tree->get_all_leaves});
warn "dumpTreeMultipleAlignmentToWorkdir : $leafcount members\n";
warn "clw_file = '$clw_file'\n";
}
# "interleaved" is BioPerl's default way of printing phylip alignments
$tree->print_alignment_to_file($clw_file,
-FORMAT => 'phylip',
-ID_TYPE => 'MEMBER',
$self->{'cdna'} ? (-SEQ_TYPE => 'cds') : (),
);
}
| Ensembl/ensembl-compara | scripts/tree/testTaxonTree.pl | Perl | apache-2.0 | 10,062 |
#!/usr/bin/env perl
=head1 LICENSE
Copyright [2009-2014] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
use warnings;
use strict;
package Bio::EnsEMBL::GenomeExporter::GenomeExporterApi;
sub export_genes {
my ($self, $dba, $biotypes) = @_;
my @genes;
my $ga = $dba->get_GeneAdaptor();
my $xa = $dba->get_DBEntryAdaptor();
if (defined $biotypes) {
for my $biotype (@{$biotypes}) {
@genes = (@genes, @{$ga->fetch_all_by_biotype($biotype)});
}
}
else {
@genes = @{$ga->fetch_all()};
}
my @output = ();
for my $gene (@genes) {
my $gene_out = $self->_hash_gene($xa, $gene);
push @output, $gene_out;
}
return \@output;
}
sub _hash_gene {
my ($self, $xa, $gene) = @_;
my $gene_out = {id => $gene->stable_id(),
name => $gene->external_name(),
biotype => $gene->biotype(),
description => $gene->description(),
start => $gene->seq_region_start(),
end => $gene->seq_region_end(),
strand => $gene->seq_region_strand(),
seq_region_name => $gene->seq_region_name(),
transcripts => []};
if(defined $gene->display_xref()) {
$gene_out->{synonyms} = $gene->display_xref()->get_all_synonyms();
}
for my $dbentry (@{$xa->fetch_all_by_Gene($gene)}) {
push @{$gene_out->{xrefs}}, $self->_hash_xref($dbentry);
}
for my $transcript (@{$gene->get_all_Transcripts()}) {
push @{$gene_out->{transcripts}},
$self->_hash_transcript($xa, $transcript);
}
return $gene_out;
}
sub _hash_transcript {
my ($self, $xa, $transcript) = @_;
my $transcript_out = {
id => $transcript->stable_id(),
name => $transcript->external_name(),
biotype => $transcript->biotype(),
description => $transcript->description(),
start => $transcript->seq_region_start(),
end => $transcript->seq_region_end(),
strand => $transcript->seq_region_strand(),
seq_region_name => $transcript->seq_region_name()
};
for my $dbentry (@{$xa->fetch_all_by_Transcript($transcript)}) {
push @{$transcript_out->{xrefs}}, $self->_hash_xref($dbentry);
}
my $translation = $transcript->translation();
if (defined $translation) {
push @{$transcript_out->{translations}},
$self->_hash_translation($xa, $translation);
for my $alt_translation (
@{$transcript->get_all_alternative_translations()})
{
push @{$transcript_out->{translations}},
$self->_hash_translation($xa, $alt_translation);
}
}
return $transcript_out;
} ## end sub _hash_transcript
sub _hash_translation {
my ($self, $xa, $translation) = @_;
my $translation_out = {id => $translation->stable_id()};
for my $dbentry (@{$xa->fetch_all_by_Translation($translation)}) {
push @{$translation_out->{xrefs}}, $self->_hash_xref($dbentry);
}
for my $protein_feature (@{$translation->get_all_ProteinFeatures()}) {
push @{$translation_out->{protein_features}},
$self->_hash_protein_feature($protein_feature);
}
return $translation_out;
}
sub _hash_protein_feature {
my ($self, $protein_feature) = @_;
return {start => $protein_feature->start(),
end => $protein_feature->end(),
name => $protein_feature->display_id(),
dbname => $protein_feature->analysis()->db(),
description => $protein_feature->hdescription(),
interpro_ac => $protein_feature->interpro_ac()};
}
sub _hash_xref {
my ($self, $xref) = @_;
my $xref_out = {primary_id => $xref->primary_id(),
display_id => $xref->display_id(),
dbname => $xref->dbname()};
if (ref($xref) eq 'Bio::EnsEMBL::OntologyXref') {
for my $linkage_type (@{$xref->get_all_linkage_info()}) {
push @{$xref_out->{linkage_types}},
{evidence => $linkage_type->[0],
source => $self->_hash_xref($linkage_type->[1])};
}
}
return $xref_out;
}
1;
| EnsemblGenomes/eg-rest | lib/Bio/EnsEMBL/GenomeExporter/GenomeExporter.pm | Perl | apache-2.0 | 4,403 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Component::Gene::GenePhenotypeVariation;
use strict;
use Bio::EnsEMBL::Variation::Utils::Constants;
use base qw(EnsEMBL::Web::Component::Gene);
sub _init {
my $self = shift;
$self->cacheable(1);
$self->ajaxable(1);
}
sub content {
my $self = shift;
my $hub = $self->hub;
my $phenotype = $hub->param('sub_table');
my $object = $self->object;
my ($display_name, $dbname, $ext_id, $dbname_disp, $info_text) = $object->display_xref;
my $html;
# Check if a variation database exists for the species.
if ($hub->database('variation')) {
my $no_data = qq(<h2>Phenotypes, diseases and traits associated with variants in this gene</h2><p>None found.</p>);
# Variation phenotypes
if ($phenotype) {
my $table_rows = $self->variation_table($phenotype, $display_name);
my $table = $table_rows ? $self->make_table($table_rows, $phenotype) : undef;
$html .= $table ? $self->render_content($table, $phenotype) : $no_data;
} else {
# no sub-table selected, just show stats
my $table = $self->stats_table($display_name);
$html .= $table ? $self->render_content($table) : $no_data;
}
}
return $html;
}
sub make_table {
my ($self, $table_rows, $phenotype) = @_;
my $columns = [
{ key => 'ID', sort => 'html', title => 'Variant ID' },
{ key => 'chr' , sort => 'position', title => 'Chr: bp' },
{ key => 'Alleles', sort => 'string', align => 'center' },
{ key => 'class', sort => 'string', title => 'Class', align => 'center' },
{ key => 'psource', sort => 'string', title => 'Phenotype Sources' },
{ key => 'pstudy', sort => 'string', title => 'Phenotype Studies' },
{ key => 'pvalue', sort => 'numeric_hidden', title => 'p-value' },
];
push (@$columns, { key => 'phe', sort => 'string', title => 'Phenotypes' }) if ($phenotype eq 'ALL');
my $table_id = $phenotype;
$table_id =~ s/[^\w]/_/g;
return $self->new_table($columns, $table_rows, { data_table => 1, sorting => [ 'chr asc' ], exportable => 1, id => $table_id."_table" });
}
sub render_content {
my ($self, $table, $phenotype) = @_;
my $stable_id = $self->object->stable_id;
my $html;
if ($phenotype) {
my $table_id = $phenotype;
$table_id =~ s/[^\w]/_/g;
$html = $self->toggleable_table("$phenotype associated variants", $table_id, $table, 1, qq(<span style="float:right"><a href="#$self->{'id'}_top">[back to top]</a></span>));
} else {
$html = qq(<a id="$self->{'id'}_top"></a><h2>Phenotype, disease and trait annotations associated with variants in this gene</h2>) . $table->render;
}
return $html;
}
sub stats_table {
my ($self, $gene_name) = @_;
my $hub = $self->hub;
my $species_defs = $hub->species_defs;
my $pf_adaptor = $self->hub->database('variation')->get_PhenotypeFeatureAdaptor;
my ($total_counts, %phenotypes, @va_ids);
my $skip_phenotypes_link = "non_specified";
my $columns = [
{ key => 'phen', title => 'Phenotype, disease and trait', sort => 'string', width => '35%' },
{ key => 'source', title => 'Source(s)', sort => 'string', width => '11%' },
{ key => 'count', title => 'Number of variants', sort => 'numeric_hidden', width => '10%', align => 'right' },
{ key => 'view', title => 'Show/hide details', sort => 'none', width => '10%', align => 'center' }
];
my $pf_list = $gene_name ? $pf_adaptor->fetch_all_by_associated_gene($gene_name) : [];
foreach my $pf (grep {$_->type eq 'Variation'} @{$pf_list}) {
my $phe_source = $pf->source_name;
next if ($self->check_source($phe_source));
my $var_name = $pf->object_id;
my $phe = $pf->phenotype->description;
my $phe_class= $pf->phenotype_class;
$phenotypes{$phe} ||= { id => $pf->{'_phenotype_id'} , name => $pf->{'_phenotype_name'}};
$phenotypes{$phe}{'count'}{$var_name} = 1;
$phenotypes{$phe}{'source'}{$phe_source} = 1;
$phenotypes{$phe}{skip_link} = $phe_class eq $skip_phenotypes_link ? 1 : 0;
$total_counts->{$var_name} = 1;
}
my $warning_text = qq{<span style="color:red">(WARNING: details table may not load for this number of variants!)</span>};
my ($url, @rows);
my $max_lines = 1000;
# add the row for ALL variations if there are any
if (my $total = scalar keys %$total_counts) {
my $warning = $total > $max_lines ? $warning_text : '';
push @rows, {
phen => "ALL variants with a phenotype annotation $warning",
count => qq{<span class="hidden">-</span>$total}, # create a hidden span to add so that ALL is always last in the table
view => $self->ajax_add($self->ajax_url(undef, { sub_table => 'ALL' }), 'ALL'),
source => '-',
lview => '-'
};
}
foreach (sort {$a cmp $b} keys %phenotypes) {
my $phenotype = $phenotypes{$_};
my $phe_desc = $_;
my $table_id = $phe_desc;
$table_id =~ s/[^\w]/_/g;
my $phe_count = scalar (keys(%{$phenotype->{'count'}}));
my $warning = $phe_count > $max_lines ? $warning_text : '';
my $sources_list = join ', ', map $self->source_link($_, undef, undef, $gene_name, $phe_desc), sort {$a cmp $b} keys(%{$phenotype->{'source'}});
my $phe_url = $_;
# Associate loci link
if ($hub->species_defs->ENSEMBL_CHROMOSOMES) {
$phe_url = sprintf (
'<a href="%s" title="%s">%s</a>',
$hub->url({
type => 'Phenotype',
action => 'Locations',
ph => $phenotype->{'id'}
}),
'View associate loci',
$_
) unless /(HGMD|COSMIC)/ || $phenotypes{$_}{skip_link};
}
push @rows, {
phen => "$phe_url $warning",
count => $phe_count,
view => $self->ajax_add($self->ajax_url(undef, { sub_table => $_ }), $table_id),
source => $sources_list
};
}
if (scalar @rows) {
return $self->new_table($columns, \@rows, { data_table => 'no_col_toggle', data_table_config => {iDisplayLength => 10}, exportable => 0 });
}
}
sub variation_table {
my ($self, $phenotype, $gene_name) = @_;
my $hub = $self->hub;
my $object = $self->object;
my $gene_slice = $object->get_Slice;
my $g_region = $gene_slice->seq_region_name;
my $g_start = $gene_slice->start;
my $g_end = $gene_slice->end;
my $phenotype_sql = $phenotype;
$phenotype_sql =~ s/'/\\'/; # Escape quote character
my $db_adaptor = $hub->database('variation');
my $pf_adaptor = $db_adaptor->get_PhenotypeFeatureAdaptor;
my $max_items_per_source = 2;
my (@rows, %list_sources, %list_phe, $list_variations);
# create some URLs - quicker than calling the url method for every variation
my $base_url = $hub->url({
type => 'Variation',
action => 'Phenotype',
vf => undef,
v => undef,
source => undef,
});
my $all_flag = ($phenotype eq 'ALL') ? 1 : 0;
my $submitter_max_length = 20;
my $pf_list = ($all_flag == 0) ? $pf_adaptor->fetch_all_by_associated_gene_phenotype_description($gene_name,$phenotype) : $pf_adaptor->fetch_all_by_associated_gene($gene_name);
foreach my $pf_gene_asso (grep {$_->type eq 'Variation'} @$pf_list) {
my $pf = $pf_adaptor->fetch_by_dbID($pf_gene_asso->dbID);
#### Phenotype ####
my $phe_source = $pf->source_name;
next if ($self->check_source($phe_source));
my $var = $pf->object;
my $var_name = $var->name;
my $list_sources;
if (!$list_variations->{$var_name}) {
my $location;
my $allele;
foreach my $vf (@{$var->get_all_VariationFeatures()}) {
my $vf_region = $vf->seq_region_name;
my $vf_start = $vf->start;
my $vf_end = $vf->end;
my $vf_allele = $vf->allele_string;
next if ($vf_region =~ /^CHR_/i && $g_region =~ /^\d+$|^X$|^Y$|^MT$/);
$vf_allele =~ s/(.{20})/$1\n/g;
$location .= '<br />' if ($location);
$allele .= '<br />' if ($allele);
if ($vf_region eq $g_region && $vf_start >= $g_start && $vf_end <= $g_end) {
$location = "$vf_region:$vf_start" . ($vf_start == $vf_end ? '' : "-$vf_end");
$allele = $vf_allele;
last;
}
else {
$location .= "$vf_region:$vf_start" . ($vf_start == $vf_end ? '' : "-$vf_end");
$allele .= $vf_allele;
}
}
$list_variations->{$var_name} = { 'class' => $var->var_class,
'chr' => $location,
'allele' => $allele
};
}
# List the phenotype sources for the variation
my $ref_source = $pf->external_reference;
$ref_source = $pf->external_id if (!$ref_source);
$list_phe{$var_name}{$pf->phenotype->description} = 1 if ($all_flag == 1);
if ($list_sources{$var_name}{$phe_source}{'ref'}) {
push (@{$list_sources{$var_name}{$phe_source}{'ref'}}, $ref_source) if $ref_source;
}
else {
if ($ref_source) {
$list_sources{$var_name}{$phe_source}{'ref'} = [$ref_source];
}
else {
$list_sources{$var_name}{$phe_source}{'ref'} = ['no_ref'];
}
}
# List sumbitters and study (ClinVar)
if ($phe_source =~ /clinvar/i) {
my $submitter_names_list = $pf->submitter_names;
if ($submitter_names_list) {
foreach my $submitter_name (@$submitter_names_list) {
$list_sources{$var_name}{$phe_source}{'submitter'}{$ref_source}{$submitter_name} = 1;
}
}
my $attributes = $pf->get_all_attributes();
if ($attributes->{'MIM'}) {
my @studies = split(',',$attributes->{'MIM'});
foreach my $study (@studies) {
$list_sources{$var_name}{$phe_source}{'clinvar_study'}{$ref_source}{$study} = 1;
}
}
}
# List the phenotype association p-values for the variation
my $p_value = $pf->p_value;
if ($p_value) {
if ($list_sources{$var_name}{$phe_source}{'p-value'}) {
push (@{$list_sources{$var_name}{$phe_source}{'p-value'}}, $p_value);
}
else {
$list_sources{$var_name}{$phe_source}{'p-value'} = [$p_value];
}
}
}
foreach my $var_name (sort (keys %list_sources)) {
my %sources_list;
my %source_exp_list;
my @ext_ref_list;
my @pvalues_list;
my ($max_exp, $max_pval);
foreach my $p_source (sort (keys (%{$list_sources{$var_name}}))) {
foreach my $ref (@{$list_sources{$var_name}{$p_source}{'ref'}}) {
# Source link
my $s_link = $self->source_link($p_source, $ref, $var_name, $gene_name, $phenotype);
my $exp_data = $p_source;
# Sumbitter data (ClinVar)
if ($list_sources{$var_name}{$p_source}{'submitter'} && $list_sources{$var_name}{$p_source}{'submitter'}{$ref}) {
my @submitter_names_list = keys(%{$list_sources{$var_name}{$p_source}{'submitter'}{$ref}});
my $submitter_names = join('|',@submitter_names_list);
my $submitter_label = $submitter_names;
$submitter_label = substr($submitter_names,0,$submitter_max_length).'...' if (length($submitter_names) > $submitter_max_length);
my $submitter_prefix = 'Submitter';
$submitter_prefix .= 's' if (scalar(@submitter_names_list) > 1);
$s_link .= " [$submitter_label]";
$s_link = qq{<span class="_ht" title="$submitter_prefix: $submitter_names">$s_link</span>};
$exp_data = "$p_source [$submitter_names]";
}
if (!grep {$s_link eq $_} @{$sources_list{$p_source}}) {
push(@{$sources_list{$p_source}}, $s_link);
}
$source_exp_list{$p_source}{$exp_data} = 1;
# Study link
my $ext_link = $self->external_reference_link($p_source, $ref, $phenotype);
if ($ext_link ne '-' && !grep {$ext_link eq $_} @ext_ref_list) {
push(@ext_ref_list, $ext_link);
}
# Study link (ClinVar)
if ($p_source =~ /clinvar/i) {
if ($list_sources{$var_name}{$p_source}{'clinvar_study'} && $list_sources{$var_name}{$p_source}{'clinvar_study'}{$ref}) {
my @data = keys(%{$list_sources{$var_name}{$p_source}{'clinvar_study'}{$ref}});
foreach my $ext_ref (@data) {
my $clinvar_ext_link = $hub->get_ExtURL_link('MIM:'.$ext_ref, 'OMIM', $ext_ref);
if (!grep {$clinvar_ext_link eq $_} @ext_ref_list) {
push(@ext_ref_list, $clinvar_ext_link);
}
}
}
}
}
# P-value data
if ($list_sources{$var_name}{$p_source}{'p-value'}) {
foreach my $pval (@{$list_sources{$var_name}{$p_source}{'p-value'}}) {
if (!grep {$pval eq $_} @pvalues_list) {
push(@pvalues_list, $pval);
# Get the minimal exponential value of the p-values
$pval =~ /^(\d+)\.?.*e-0?(\d+)$/i;
if (!$max_exp) {
$max_exp = $2;
$max_pval = $1;
}
elsif ($max_exp < $2) {
$max_exp = $2;
$max_pval = $1;
}
}
}
}
}
if (scalar(keys(%sources_list))) {
my $var_url = "$base_url;v=$var_name";
# Sort by the lowest p-value first
@pvalues_list = sort { $b =~ /e-0?$max_exp$/ <=> $a =~ /e-0?$max_exp$/ } @pvalues_list if (scalar @pvalues_list);
@pvalues_list = map { $self->render_p_value($_, 1) } @pvalues_list;
my $source_data = "";
foreach my $source (sort(keys(%sources_list))) {
my $div_id = 'src_'.$var_name.'_'.$source;
$div_id =~ s/ //g;
my @export_data = $source_exp_list{$source};
my $source_content = $self->display_items_list($div_id,"$source entries",$source,$sources_list{$source},\@export_data,1,$max_items_per_source);
$source_data .= ', ' if ($source_data ne "");
if (scalar(@{$sources_list{$source}}) < $max_items_per_source ) {
$source_data .= sprintf(qq{<span class="_no_export">%s</span><span class="hidden export">%s</span>}, $source_content,join(',',@export_data));
}
else {
$source_data .= $source_content;
}
}
my $row = {
ID => qq{<a href="$var_url">$var_name</a>},
class => $list_variations->{$var_name}{'class'},
Alleles => $list_variations->{$var_name}{'allele'},
chr => $list_variations->{$var_name}{'chr'},
psource => $source_data,
pstudy => (scalar @ext_ref_list) ? join(', ',@ext_ref_list) : '-',
pvalue => (scalar @pvalues_list) ? qq{<span class="hidden">$max_exp.$max_pval</span>} . join(', ', @pvalues_list) : '-'
};
$row->{'phe'} = join('; ',keys(%{$list_phe{$var_name}})) if ($all_flag == 1);
push @rows, $row;
}
}
return \@rows;
}
sub source_link {
my ($self, $source, $ext_id, $vname, $gname, $phenotype) = @_;
my $source_uc = uc $source;
$source_uc =~ s/\s/_/g;
if ($ext_id) {
$source_uc .= '_ID' if $source_uc =~ /COSMIC/;
$source_uc = $1 if $source_uc =~ /(HGMD)/;
}
$source_uc .= '_SEARCH' if $source_uc =~ /UNIPROT/;
my $url = $self->hub->species_defs->ENSEMBL_EXTERNAL_URLS->{$source_uc};
if ($url =~/ebi\.ac\.uk\/gwas/) {
my $search = ($vname) ? $vname : $phenotype;
$url =~ s/###ID###/$search/;
}
elsif ($url =~ /omim/ && $ext_id && $ext_id ne 'no-ref') {
$ext_id =~ s/MIM\://;
$url =~ s/###ID###/$ext_id/;
}
elsif ($url =~ /clinvar/ && $ext_id && $ext_id ne 'no-ref') {
$ext_id =~ /^(.+)\.\d+$/;
$ext_id = $1 if ($1);
$url =~ s/###ID###/$ext_id/;
}
elsif ($vname || $gname) {
if ($url =~ /omim/) {
my $search = "search?search=".($vname || $gname);
$url =~ s/###ID###/$search/;
}
elsif ($url =~ /hgmd/) {
$url =~ s/###ID###/$gname/;
$url =~ s/###ACC###/$vname/;
}
elsif ($url =~/cosmic/) {
if ($vname) {
my $cname = ($vname =~ /^COSM(\d+)/) ? $1 : $vname;
$url =~ s/###ID###/$cname/;
}
else {
$url =~ s/###ID###/$gname/;
}
}
else {
$url =~ s/###ID###/$vname/;
}
}
elsif ($url =~ /(.+)\?/) { # Only general source link
$url = $1;
}
else {
$url =~ s/###ID###//;
}
return $url ? qq{<a rel="external" href="$url">$source</a>} : $source;
}
sub external_reference_link {
my ($self, $source, $study, $phenotype) = @_;
my $hub = $self->hub;
if ($study =~ /(pubmed|PMID)/) {
my $study_id = $study;
$study_id =~ s/pubmed\///;
$study_id =~ s/PMID://;
my $link = $self->hub->species_defs->ENSEMBL_EXTERNAL_URLS->{'EPMC_MED'};
$link =~ s/###ID###/$study_id/;
$study =~ s/\//:/g;
$study =~ s/pubmed/PMID/;
return qq{<a rel="external" href="$link">$study</a>};
}
elsif ($study =~ /^MIM\:/) {
my $id = (split /\:/, $study)[-1];
my $link = $hub->get_ExtURL_link($study, 'OMIM', $id);
$link =~ s/^\, //g;
return $link;
}
elsif ($phenotype =~ /cosmic/i) {
my @tumour_info = split /\:/, $phenotype;
my $tissue = pop(@tumour_info);
$tissue =~ s/^\s+//;
my $tissue_formatted = $tissue;
my $source_study = uc($source) . '_STUDY';
$tissue_formatted =~ s/\s+/\_/g;
return $hub->get_ExtURL_link($tissue, $source_study, $tissue_formatted);
}
else {
return '-';
}
}
sub check_source {
my $self = shift;
my $source_name = shift;
return ($source_name eq 'COSMIC') ? 1 : 0;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Component/Gene/GenePhenotypeVariation.pm | Perl | apache-2.0 | 18,752 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::dlink::standard::snmp::mode::memory;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
sub custom_usage_output {
my ($self, %options) = @_;
return sprintf(
'total: %s %s used: %s %s (%.2f%%) free: %s %s (%.2f%%)',
$self->{perfdata}->change_bytes(value => $self->{result_values}->{total}),
$self->{perfdata}->change_bytes(value => $self->{result_values}->{used}),
$self->{result_values}->{prct_used},
$self->{perfdata}->change_bytes(value => $self->{result_values}->{free}),
$self->{result_values}->{prct_free}
);
}
sub set_counters {
my ($self, %options) = @_;
$self->{maps_counters_type} = [
{ name => 'memory', type => 1, cb_prefix_output => 'prefix_memory_output', message_multiple => 'All memory usages are ok', skipped_code => { -10 => 1 } }
];
$self->{maps_counters}->{memory} = [
{ label => 'usage', display_ok => 0, nlabel => 'memory.usage.bytes', set => {
key_values => [ { name => 'used' }, { name => 'free' }, { name => 'prct_used' }, { name => 'prct_free' }, { name => 'total' }, { name => 'display' } ],
closure_custom_output => $self->can('custom_usage_output'),
perfdatas => [
{ template => '%d', min => 0, max => 'total', unit => 'B', cast_int => 1, label_extra_instance => 1 }
]
}
},
{ label => 'usage-free', display_ok => 0, nlabel => 'memory.free.bytes', set => {
key_values => [ { name => 'free' }, { name => 'used' }, { name => 'prct_used' }, { name => 'prct_free' }, { name => 'total' }, { name => 'display' } ],
closure_custom_output => $self->can('custom_usage_output'),
perfdatas => [
{ template => '%d', min => 0, max => 'total', unit => 'B', cast_int => 1, label_extra_instance => 1 }
]
}
},
{ label => 'usage-prct', nlabel => 'memory.usage.percentage', set => {
key_values => [ { name => 'prct_used' }, { name => 'used' }, { name => 'free' }, { name => 'prct_free' }, { name => 'total' }, { name => 'display' } ],
closure_custom_output => $self->can('custom_usage_output'),
perfdatas => [
{ template => '%.2f', min => 0, max => 100, unit => '%', label_extra_instance => 1 }
]
}
}
];
}
sub prefix_memory_output {
my ($self, %options) = @_;
return "Memory '" . $options{instance_value}->{display} . "' ";
}
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
bless $self, $class;
$options{options}->add_options(arguments => {
});
return $self;
}
my $map_memory_type = {
1 => 'dram', 2 => 'flash', 3 => 'nvram'
};
my $mapping_industrial = {
total => { oid => '.1.3.6.1.4.1.171.14.5.1.4.1.3' }, # dEntityExtMemUtilTotal (KB)
used => { oid => '.1.3.6.1.4.1.171.14.5.1.4.1.4' }, # dEntityExtMemUtilUsed (KB)
free => { oid => '.1.3.6.1.4.1.171.14.5.1.4.1.5' } # dEntityExtMemUtilUsed (KB)
};
my $oid_dEntityExtMemoryUtilEntry = '.1.3.6.1.4.1.171.14.5.1.4.1';
my $mapping_common = {
total => { oid => '.1.3.6.1.4.1.171.17.5.1.4.1.3' }, # esEntityExtMemUtilTotal (KB)
used => { oid => '.1.3.6.1.4.1.171.17.5.1.4.1.4' }, # esEntityExtMemUtilUsed (KB)
free => { oid => '.1.3.6.1.4.1.171.17.5.1.4.1.5' } # esEntityExtMemUtilFree (KB)
};
my $oid_esEntityExtMemoryUtilEntry = '.1.3.6.1.4.1.171.17.5.1.4.1';
sub check_memory {
my ($self, %options) = @_;
return if ($self->{checked_memory} == 1);
foreach (keys %{$options{snmp_result}}) {
next if (! /^$options{mapping}->{total}->{oid}\.(\d+)\.(\d+)$/);
my $instance = $1 . '.' . $2;
my $display = 'unit' . $1 . $self->{output}->get_instance_perfdata_separator() . $map_memory_type->{$2};
my $result = $options{snmp}->map_instance(mapping => $options{mapping}, results => $options{snmp_result}, instance => $instance);
$self->{checked_memory} = 1;
$self->{memory}->{$display} = {
display => $display,
used => $result->{used} * 1024,
free => $result->{free} * 1024,
prct_used => ($result->{used} * 1024 * 100) / ($result->{total} * 1024),
prct_free => ($result->{free} * 1024 * 100) / ($result->{total} * 1024),
total => $result->{total} * 1024
};
}
}
sub manage_selection {
my ($self, %options) = @_;
my $snmp_result = $options{snmp}->get_multiple_table(
oids => [
{ oid => $oid_dEntityExtMemoryUtilEntry, start => $mapping_industrial->{total}->{oid} },
{ oid => $oid_esEntityExtMemoryUtilEntry, start => $mapping_common->{total}->{oid} }
],
nothing_quit => 1
);
$self->{checked_memory} = 0;
$self->check_memory(snmp => $options{snmp}, snmp_result => $snmp_result->{$oid_dEntityExtMemoryUtilEntry}, mapping => $mapping_industrial);
$self->check_memory(snmp => $options{snmp}, snmp_result => $snmp_result->{$oid_esEntityExtMemoryUtilEntry}, mapping => $mapping_common);
}
1;
__END__
=head1 MODE
Check memory usages.
=over 8
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'usage' (B), 'usage-free' (B), 'usage-prct' (%).
=back
=cut
| centreon/centreon-plugins | network/dlink/standard/snmp/mode/memory.pm | Perl | apache-2.0 | 6,233 |
package VMOMI::ComplianceResultStatus;
# Empty marker subclass: all behavior is inherited from VMOMI::SimpleType.
# Presumably maps to the vSphere API ComplianceResult status enumeration —
# confirm against the vSphere SDK reference.
use parent 'VMOMI::SimpleType';

use strict;
use warnings;

1;
| stumpr/p5-vmomi | lib/VMOMI/ComplianceResultStatus.pm | Perl | apache-2.0 | 102 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=head1 NAME
Bio::EnsEMBL::Production::Pipeline::Common::SystemCmdRunner;
=head1 DESCRIPTION
=head1 MAINTAINER
ckong@ebi.ac.uk
=cut
package Bio::EnsEMBL::Production::Pipeline::Common::SystemCmdRunner;
use strict;
use Mouse;
has 'debug' => (is => 'rw', isa => 'Bool');
# run_cmd: run a shell command via system(), die (confess) on any failure,
# then optionally run caller-supplied post-condition checks.
#
# Args:
#   $cmd              - shell command string.
#   $test_for_success - optional arrayref of { test => CODE, fail_msg => STR };
#                       each test sub is called after the command and must
#                       return true.
#   $ignore_exit_code - if true, skip all $?-based failure checks.
sub run_cmd {
    my $self = shift;
    my $cmd = shift;
    my $test_for_success = shift;
    my $ignore_exit_code = shift;

    my $param_ok = (!defined $test_for_success) || (ref $test_for_success eq 'ARRAY');
    confess("Parameter error! If test_for_success is set, it must be an array of hashes!") unless ($param_ok);

    # NOTE(review): warning() is not defined in this module; presumably
    # provided by the consuming Hive process class — confirm.
    $self->warning("Running: $cmd") if ($self->debug);
    system($cmd);

    if (!$ignore_exit_code) {
        # Decode $? per perlvar: -1 = exec failed, low 7 bits = killing
        # signal, bit 8 = coredump flag, high byte = exit status.
        my $execution_failed = $? == -1;
        confess("Could not execute command:\n$cmd\n")
            if ($execution_failed);

        my $program_died = $? & 127;
        confess(
            sprintf (
                "Child died with signal %d, %s coredump\n",
                ($? & 127), ($? & 128) ? 'with' : 'without'
            )
        ) if ($program_died);

        my $exit_value = $? >> 8;
        my $program_completed_successfully = $exit_value == 0;
        confess("The command\n\n$cmd\n\nexited with value $exit_value")
            if (!$program_completed_successfully);
    }
    if ($test_for_success) {
        foreach my $current_test_for_success (@$test_for_success) {
            confess('Type error') unless(ref $current_test_for_success eq 'HASH');
            # lock_hash catches typo'd keys; 'use' is compile-time so its
            # placement inside the loop is harmless.
            use Hash::Util qw( lock_hash );
            lock_hash(%$current_test_for_success);

            my $current_test = $current_test_for_success->{test};
            confess('Test must be a sub!') unless (ref $current_test eq 'CODE');

            my $test_succeeded = $current_test->();

            confess(
                "The following command failed:\n"
                . "\n" . $cmd . "\n\n"
                . "Reason: " . $current_test_for_success->{fail_msg} . "\n"
            ) unless($test_succeeded);
        }
    }
}
1;
| Ensembl/ensembl-production | modules/Bio/EnsEMBL/Production/Pipeline/Common/SystemCmdRunner.pm | Perl | apache-2.0 | 2,577 |
#!/usr/bin/env perl
use strict;
use warnings;
no warnings('once');
use PipelineAWE;
use Getopt::Long;
umask 000;
# Bowtie reference index ids, keyed by the screen names accepted via -index.
my $index_ids = {
    'a_thaliana' => 201,
    'b_taurus' => 202,
    'd_melanogaster' => 203,
    'e_coli' => 204,
    'h_sapiens' => 205,
    'm_musculus' => 206,
    's_scrofa' => 207,
    'r_norvegicus' => 208
};

# options
my $fasta = "";
my $output = "";
my $run_bowtie = 1;          # set -bowtie=0 to pass input through unscreened
my $index = "";              # comma-separated list of $index_ids keys
my $proc = 8;                # bowtie2 thread count
my $help = 0;
my $do_not_create_index_files = 0;
my $options = GetOptions (
    "input=s" => \$fasta,
    "output=s" => \$output,
    "index=s" => \$index,
    "proc=i" => \$proc,
    "bowtie=i" => \$run_bowtie,
    "no-shock" => \$do_not_create_index_files,
    "help!" => \$help
);

# Validate required arguments; exit non-zero so the workflow marks failure.
if ($help){
    print get_usage();
    exit 0;
}elsif (length($fasta)==0){
    PipelineAWE::logger('error', "input file was not specified");
    exit 1;
}elsif (length($output)==0){
    PipelineAWE::logger('error', "output file was not specified");
    exit 1;
}elsif (! -e $fasta){
    PipelineAWE::logger('error', "input sequence file [$fasta] does not exist");
    exit 1;
}
# Main screening flow. Fix applied below: the user-facing failure email
# read "settings were to stringent" — corrected to "too stringent".
# get api variable
my $api_key = $ENV{'MGRAST_WEBKEY'} || undef;

# skip it: screening disabled, pass the input straight through
if ($run_bowtie == 0) {
    PipelineAWE::run_cmd("mv $fasta $output");
}
# run it
else {
    # check indexes
    my @indexes = split(/,/, $index);
    if (scalar(@indexes) == 0) {
        PipelineAWE::logger('error', "missing index");
        exit 1;
    }
    for my $i (@indexes) {
        unless ( defined $index_ids->{$i} ) {
            PipelineAWE::logger('error', "undefined index name: $i");
            exit 1;
        }
    }
    # get index dir
    my $index_dir = ".";
    if ($ENV{'REFDBPATH'}) {
        $index_dir = "$ENV{'REFDBPATH'}";
    }
    # truncate input to 1000 bp
    my $input_file = $fasta.'.trunc';
    PipelineAWE::run_cmd("seqUtil --truncate 1000 -i $fasta -o $input_file");
    # run bowtie2: each screen's unaligned ("passed") output feeds the next
    my $tmp_input_var = $input_file;
    for my $index_name (@indexes) {
        my $unaligned = $index_ids->{$index_name}.".".$index_name.".passed.fna";
        # 'reorder' option outputs sequences in same order as input file
        PipelineAWE::run_cmd("bowtie2 -f --reorder -p $proc --un $unaligned -x $index_dir/$index_name -U $tmp_input_var > /dev/null", 1);
        $tmp_input_var = $unaligned;
    }
    PipelineAWE::run_cmd("mv $tmp_input_var $output");
    # die if nothing passed
    if (-z $output) {
        # send email
        if ($api_key) {
            my $user_attr = PipelineAWE::get_userattr();
            my $job_name  = $user_attr->{name};
            my $job_id    = $user_attr->{id};
            my $proj_name = $user_attr->{project_name};
            my $subject = "MG-RAST Job Failed";
            my $body_txt = qq(
The annotation job that you submitted for $job_name ($job_id) belonging to study $proj_name has failed.
No sequences passed our QC screening steps. Either your sequences were too short or your pipeline QC settings were too stringent.
This is an automated message. Please contact help\@mg-rast.org if you have any questions or concerns.
);
            PipelineAWE::post_data($PipelineAWE::default_api."/user/".$user_attr->{owner}."/notify", $api_key, {'subject' => $subject, 'body' => $body_txt});
        }
        PipelineAWE::logger('error', "pipeline failed, no sequences passed bowtie screening, index=".$index);
        # exit failed-permanent
        exit 42;
    }
    # create subset record list
    # note: parent and child files in same order
    if (not $do_not_create_index_files ) {
        PipelineAWE::run_cmd("index_subset_seq.py -p $input_file -c $output -s -m 20");
        PipelineAWE::run_cmd("mv $output.index $output");
    }
}

exit 0;
# Build and return the one-line usage banner shown for --help.
sub get_usage {
    my $usage = "USAGE: mgrast_bowtie_screen.pl -input=<input fasta> -output=<output fasta> -index=<bowtie indexes separated by ,> [-proc=<number of threads, default: 8>]\n";
    return $usage;
}
| wgerlach/pipeline | mgcmd/mgrast_bowtie_screen.pl | Perl | bsd-2-clause | 3,975 |
package Blocks;
use strict;
# Internal representation:
# [ list of tiles from left to right, top to bottom, with 0 for the blank ]
# External representation:
# Index into array of known nodes
my @node; # known nodes
my @h; # heuristic
my %node_map; # map node key to node number
my $width;
my $height;
my $goal;
my $USE_ASTAR = 0;
#
# initialize is called once to set things up. It processes the command line
# arguments, and it returns the external representation of the start node.
#
sub initialize
{
    my @tiles;

    # Consume the command line: "-h" switches on the A* heuristic; the
    # remaining arguments are width, height, then the tile list.
    while (@ARGV) {
        my $arg = shift @ARGV;
        if ( $arg eq '-h' ) {
            $USE_ASTAR = 1;
            next;
        }
        push @tiles, $arg;
    }

    $width = shift @tiles;
    $height = shift @tiles;

    my $start = node_for(\@tiles);

    # Goal state: tiles in ascending order with the blank (0) moved from
    # the front (where the numeric sort puts it) to the end.
    my @goal = sort {$a <=> $b} @tiles;
    push @goal, 0;
    shift @goal;
    $goal = node_for(\@goal);

    return $start;
}
#
# heuristic returns the heuristic for the given node, which is specified
# by its external representation.
#
sub heuristic
{
    my($n) = @_;
    # Precomputed by node_for()/node_h(); 0 for every node unless -h (A*).
    return $h[$n];
}
#
# is_goal returns 1 if the given node, given by its external representation, is the
# goal node. Otherwise, it returns 0.
#
sub is_goal
{
    my($n) = @_;
    # Node numbers are interned (one per distinct layout), so numeric
    # identity comparison suffices.
    return $n == $goal;
}
#
# Given a node, named by its external representation, neighbors returns a list of
# neighboring nodes. Each neighbor is represented by a reference to an array of
# two items: the external representation of the neighbor, and the cost of the link
# to that neighbor.
#
# neighbors: return [node, cost] pairs for every board reachable by one
# slide of the blank tile, in the fixed order left, right, up, down.
# Each move costs 1.
sub neighbors
{
    my($n) = @_;

    my @tiles = @{$node[$n]};

    # Locate the blank (0) tile; its position determines the legal moves.
    my ($blank) = grep { $tiles[$_] == 0 } 0 .. $#tiles;
    my $row = int($blank / $width);
    my $col = $blank % $width;

    # Index offsets for each legal direction, same order as before.
    my @offsets;
    push @offsets, -1      if $col > 0;            # blank moves left
    push @offsets, 1       if $col < $width - 1;   # blank moves right
    push @offsets, -$width if $row > 0;            # blank moves up
    push @offsets, $width  if $row < $height - 1;  # blank moves down

    my @neighbors;
    for my $delta (@offsets) {
        my @swapped = @tiles;
        @swapped[$blank, $blank + $delta] = @swapped[$blank + $delta, $blank];
        push @neighbors, [ node_for(\@swapped), 1 ];
    }

    return @neighbors;
}
#
# to_string returns a human-friendly string representation of the given
# node, which is given by its external representation. This is used when
# printing the solution.
#
# to_string: render a node's board as a tab-separated grid, one puzzle row
# per line, preceded by a separator rule. Used when printing the solution.
sub to_string
{
    my($n) = @_;

    my @tiles = @{$node[$n]};
    my $text = "\n-------------\n";
    for (1 .. $height) {
        # Peel off one row's worth of tiles at a time.
        my @cells = splice(@tiles, 0, $width);
        $text .= join("\t", @cells) . "\n";
    }
    return $text;
}
##############################################################################
# Internal functions. These are not called by search.pl.
##############################################################################
# node_for: intern a tile layout, returning its stable node number.
# New layouts are appended to @node with their heuristic cached in @h.
sub node_for
{
    my($tiles) = @_;

    # The comma-joined tile list is the layout's identity key.
    my $key = join(",", @$tiles);
    if ( ! defined $node_map{$key} ) { # first time seeing this node
        my @tiles = @$tiles;
        # @node in scalar context yields the next free index.
        $node_map{$key} = @node;
        push @node, \@tiles;
        push @h, node_h(\@tiles);
    }
    return $node_map{$key};
}
# node_h: admissible A* heuristic — sum of Manhattan distances of every
# tile from its goal position. Returns 0 (uniform-cost) unless -h was set.
sub node_h
{
    my($tiles) = @_;
    return 0 unless $USE_ASTAR;

    my $total = 0;
    for my $pos (0 .. $#{$tiles}) {
        my $tile = $tiles->[$pos];
        next if $tile == 0;    # the blank does not contribute
        my $goal_pos = $tile - 1;
        $total += abs(int($pos / $width) - int($goal_pos / $width))
                + abs(($pos % $width) - ($goal_pos % $width));
    }
    return $total;
}
#
# Generate a sample problem. Invoke from the command line:
#
# perl -MBlocks -e Blocks::sample width height moves
#
# This will generate a sample problem with the given width and height
# by setting up a solved puzzle of that size and then scrambling it by
# making random moves. The last parameter gives the number of random
# moves. Example:
#
# perl -MBlocks -e Blocks::sample 4 4 50
#
# generates a 4x4 puzzle (e.g., 15 blocks) that is scrambled by making
# 50 random moves.
#
# The output is a space separated list giving the width, the height,
# and then the tiles in left to right, top to bottom order with 0
# representing the blank tile.
#
sub sample
{
    my $width = shift @ARGV || 4;
    my $height = shift @ARGV || 4;
    my $moves = shift @ARGV || 8;

    # Start from the solved configuration: 1..N followed by the blank (0).
    my $tiles = $width * $height - 1;
    my @tiles = (1..$tiles);
    push @tiles, 0;

    # Scramble by sliding the blank in a random legal direction each move;
    # scrambling via legal moves guarantees the puzzle stays solvable.
    while ($moves-- > 0) {
        for (my $z = 0; $z < @tiles; ++$z) {
            if ( $tiles[$z] == 0 ) {
                my $row = int($z/$width);
                my $col = $z % $width;
                my @allowed;
                push @allowed, -1 if $col > 0;
                push @allowed, 1 if $col < $width-1;
                push @allowed, -$width if $row > 0;
                push @allowed, $width if $row < $height-1;
                my $move = $allowed[int(rand(@allowed))];
                ($tiles[$z], $tiles[$z+$move]) = ($tiles[$z+$move], $tiles[$z]);
                last;
            }
        }
    }
    print join(" ", $width, $height, @tiles), "\n";
}
1;
| tzs/AIClass-Search-Heuristic-Examples | Blocks.pm | Perl | bsd-2-clause | 5,769 |
package Ndn::Environment::Builder::Perl;
use strict;
use warnings;
use autodie;
use File::Temp qw/tempfile/;
use Ndn::Environment qw/builder/;
use Ndn::Environment::Config;
use Ndn::Environment::Util qw/accessor/;
# Lazily-created scratch file capturing build output when not --verbose.
accessor outfile => sub {
    my ( $oh, $outfile ) = tempfile;
    close($oh);
    return $outfile;
};

# Install sub-directory (under the environment destination) for this perl.
accessor dest => sub {
    my $self = shift;
    return "perl";
};
# One-line summary shown in the builder listing.
sub description {
    return "Build a perl install.";
}
# Map of Getopt::Long option specs to their help-text descriptions.
# Fix: corrected the "verson" typo in the --version help string.
sub option_details {
    return (
        'rebuild'         => 'Rebuild perl even if it is already built',
        'version=s'       => 'Download the specified perl version',
        'verbose'         => 'Show complete perl build output',
        'skip-perl-tests' => 'Do not run "make test" when building perl',
    );
}
sub ready { 1 }
# Failure hook: in verbose mode the output already reached the terminal;
# otherwise dump the captured build log so the failure cause is visible.
sub on_error {
    my $self = shift;
    return if $self->args->{'verbose'};
    my $outfile = $self->outfile;
    system( "cat $outfile" );
}
# steps: return the ordered list of shell commands / code refs that
# configure, build, test and install perl into the environment. Returns an
# empty list when a previously built perl is found.
sub steps {
    my $self = shift;

    my $dest = NDN_ENV->dest . '/' . $self->dest;

    return () if $self->check_built_version($dest);

    my $cwd = NDN_ENV->cwd;
    my $tmp = NDN_ENV->temp;
    my $source = $self->source;
    my $outfile = $self->outfile;

    # Redirect build noise to the log file unless --verbose was given.
    my $io = $self->args->{'verbose'} ? "" : " >> $outfile 2>&1";

    my $config_args = config->{perl}->{config_args} ? join " " => @{config->{perl}->{config_args}} : "";
    my $make_args = config->{perl}->{make_args} ? join " " => @{config->{perl}->{make_args}} : "";
    my $env = config->{perl}->{environment} || {};

    # Guard the upcoming "rm -rf $dest" against an empty or root path.
    die "Dest cannot be root, really I mean it!" if !$dest || $dest =~ m{^/+$};

    $self->environment($env);

    return (
        "rm -rf $dest",
        "mkdir '$tmp/perl'",
        "tar -zxf '$source' -C '$tmp/perl' --strip-components=1",
        sub { chdir "$tmp/perl" || die "Could not chdir to temp '$tmp/perl': $!" },
        sub { return if $self->args->{'verbose'}; print "Configuring and building perl, use 'tail -f $outfile' to watch\n" },
        # -fPIC so the built perl can be linked into shared objects.
        "./Configure -de -Dprefix='$dest' -Accflags='-fPIC' $config_args $io",
        "make $make_args $io",
        $self->args->{'skip-perl-tests'} ? () : ("make $make_args test $io"),
        "make $make_args install $io",
        sub { chdir $cwd || die "Could not chdir to working directory '$cwd': $!" },
    );
}
# source: resolve the perl tarball to build from. Uses --version (or the
# configured version), downloading from CPAN if the tarball is not cached;
# falls back to a pre-supplied source/perl.tar.gz when no version is set.
sub source {
    my $self = shift;

    my $version = $self->args->{'version=s'} || config->{perl}->{version};

    die "No perl version specified, and no source/perl.tar.gz file provided"
        unless $version || -e 'source/perl.tar.gz';

    return 'source/perl.tar.gz' unless $version;

    my $perl = "perl-$version.tar.gz";
    my $file = "source/$perl";

    # NOTE(review): plain-http download with no checksum verification —
    # consider https and validating the tarball against CPAN checksums.
    $self->run_shell("wget http://www.cpan.org/src/5.0/$perl -O $file")
        unless -e $file;

    return $file;
}
# check_built_version: return a true value ('done') when a usable perl
# already exists at $dest, which makes steps() return an empty step list.
# --rebuild forces a fresh build regardless of what is on disk.
sub check_built_version {
    my $self = shift;
    my ($dest) = @_;

    return if $self->args->{rebuild};

    print "Checking for pre-built perl...\n";
    return unless -d $dest;

    print "Perl already built, not rebuilding.\n";
    return 'done';
}
1;
__END__
=head1 NAME
Ndn::Environment::Builder::Perl - Builder that configures and compiles the
environments perl.
=head1 COPYRIGHT
Copyright (C) 2013 New Dream Network LLC
Ndn-Environment is free software, distributed under the FreeBSD License.
NDN-Environment is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
| dreamhost/ndn-environment | lib/Ndn/Environment/Builder/Perl.pm | Perl | bsd-2-clause | 3,502 |
use utf8;
package vcspr::schema::Result::VCSPushLog;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
vcspr::schema::Result::VCSPushLog
=cut
use strict;
use warnings;
#use base 'DBIx::Class::Core';
use base qw/DBIx::Class/; #1
__PACKAGE__->load_components(qw/UTF8Columns PK::Auto Core /); #1
=head1 TABLE: C<VCSPushLog>
=cut
__PACKAGE__->table("VCSPushLog");
=head1 ACCESSORS
=head2 id
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
=head2 creater
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 create_date
data_type: 'varchar'
is_nullable: 0
size: 255
=head2 create_uuid
data_type: 'varchar'
is_nullable: 0
size: 255
=head2 vcs_name
data_type: 'varchar'
is_nullable: 0
size: 255
=head2 vcs_alias_name
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 vcs_source
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 repo
data_type: 'varchar'
is_nullable: 0
size: 255
=head2 s_command_type
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_commit_id
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_commit_timestamp
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_commit_author
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_commit_email
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_ref_type
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_ref_value
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 s_branch
data_type: 'varchar'
default_value: 'master'
is_nullable: 0
size: 255
=head2 s_commit_msg
data_type: 'varchar'
is_nullable: 1
size: 1000
=head2 is_trigger
data_type: 'integer'
is_nullable: 1
=cut
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"creater",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"create_date",
{ data_type => "varchar", is_nullable => 0, size => 255 },
"create_uuid",
{ data_type => "varchar", is_nullable => 0, size => 255 },
"vcs_name",
{ data_type => "varchar", is_nullable => 0, size => 255 },
"vcs_alias_name",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"vcs_source",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"repo",
{ data_type => "varchar", is_nullable => 0, size => 255 },
"s_command_type",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_commit_id",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_commit_timestamp",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_commit_author",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_commit_email",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_ref_type",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_ref_value",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"s_branch",
{
data_type => "varchar",
default_value => "master",
is_nullable => 0,
size => 255,
},
"s_commit_msg",
{ data_type => "varchar", is_nullable => 1, size => 1000 },
"is_trigger",
{ data_type => "integer", is_nullable => 1 },
);
__PACKAGE__->utf8_columns(qw/ id creater create_date create_uuid vcs_name vcs_alias_name vcs_source repo s_command_type s_commit_id s_commit_timestamp s_commit_author s_commit_email s_ref_type s_ref_value s_branch s_commit_msg is_trigger /); #1
=head1 PRIMARY KEY
=over 4
=item * L</id>
=back
=cut
__PACKAGE__->set_primary_key("id");
=head1 UNIQUE CONSTRAINTS
=head2 C<id_create_uuid_unique>
=over 4
=item * L</id>
=item * L</create_uuid>
=back
=cut
__PACKAGE__->add_unique_constraint("id_create_uuid_unique", ["id", "create_uuid"]);
# Created by DBIx::Class::Schema::Loader v0.07035 @ 2013-08-12 18:26:08
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:LBi85NXCREhr38koaczKnA
# You can replace this text with custom code or comments, and it will be preserved on regeneration
1;
| spunkmars/VCSPR | share/vcsprwebconsole/lib/vcspr/schema/Result/VCSPushLog.pm | Perl | bsd-3-clause | 4,050 |
#!/usr/bin/env perl
# This scripts fixes the supporting evidence links
# as part of the align feature tables optimisation.
use strict;
use warnings;
use Getopt::Long;
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning info);
use Bio::EnsEMBL::DBSQL::DBAdaptor;
# Renumber dafs sequentially and rewrite supporting-feature links to the
# new ids. Fixes applied: lexical filehandles with 3-arg open (the old
# 2-arg bareword opens were vulnerable to mode injection via filenames),
# checked closes on the write handles, and an explicit error when a daf
# line has no leading numeric id (previously produced an undef key and a
# silently wrong mapping).
$! = 1;

my $daf_outfile;
my $daf_infile;
my $sf_infile;
my $sf_outfile;

GetOptions(
    'outdaf=s' => \$daf_outfile,
    'outsf=s' => \$sf_outfile,
    'indaf=s' => \$daf_infile,
    'insf=s' => \$sf_infile,
);

if (!$daf_outfile || !$sf_outfile ) {
    throw("Need 2 outfiles: -outdaf and -outsf");
}
if (!$daf_infile || !$sf_infile ) {
    throw("Need 2 infiles: -indaf and -insf");
}

print STDERR "DAFIN\t$daf_infile\nSFIN\t$sf_infile\nDAFOUT\t$daf_outfile\nSFOUT\t$sf_outfile\n";

# open infiles / outfiles (3-arg open, lexical handles)
open(my $daf_in_fh, '<', $daf_infile) or die ("Can't read $daf_infile $! \n");
open(my $sf_in_fh, '<', $sf_infile) or die ("Can't read $sf_infile $! \n");
open(my $daf_out_fh, '>', $daf_outfile) or die ("couldn't open file ".$daf_outfile." $!");
open(my $sf_out_fh, '>', $sf_outfile) or die ("couldn't open file ".$sf_outfile." $!");

# Renumber dafs 1..N in file order, remembering old-id -> new-id.
my %daf_positions;
my $count = 0;
while (my $line = <$daf_in_fh>) {
    my ($old_daf_id) = $line =~ /^(\d+)/;
    throw("Malformed daf line, no leading numeric id: $line")
        unless defined $old_daf_id;
    $count++;
    $line =~ s/^\d+/$count/;
    print $daf_out_fh $line;
    # key = old daf id
    # value = new daf id
    $daf_positions{$old_daf_id} = $count;
}
close ($daf_in_fh);
close ($daf_out_fh) or die ("couldn't close file ".$daf_outfile." $!");

# Rewrite each supporting-feature record to point at the new daf id.
# note: parent and child files are in the same order.
while (my $line = <$sf_in_fh>) {
    chomp $line;
    my @fields = split (/\t/,$line);
    if (scalar(@fields) != 3) {
        throw("Must have 3 fields for -infile: $line");
    }
    my ($supporting_feature_id, $feature_type, $feature_id) = @fields;
    if (!exists $daf_positions{$feature_id} || !defined $daf_positions{$feature_id}) {
        throw("Cannot find daf matching to supporting feature: $feature_id $feature_type $supporting_feature_id");
    }
    print $sf_out_fh "$supporting_feature_id\t$feature_type\t".$daf_positions{$feature_id}."\n";
}
close ($sf_out_fh) or die ("couldn't close file ".$sf_outfile." $!");
close ($sf_in_fh);
| james-monkeyshines/ensembl-analysis | scripts/genebuild/fix_supporting_evidence_links.pl | Perl | apache-2.0 | 2,193 |
package Google::Ads::AdWords::v201409::ExperimentSummaryStats;

# Machine-generated SOAP::WSDL complex-type stub (see POD below); edits
# here will be lost if the stubs are regenerated from the WSDL.
use strict;
use warnings;

__PACKAGE__->_set_element_form_qualified(1);

sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' };

# This type carries no XML attributes, so the attribute class stays undef.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out attribute storage (Class::Std::Fast style), one hash per field.
my %adGroupsCount_of :ATTR(:get<adGroupsCount>);
my %adGroupCriteriaCount_of :ATTR(:get<adGroupCriteriaCount>);
my %adGroupAdsCount_of :ATTR(:get<adGroupAdsCount>);

# _factory wires up: field order, storage hashes, XSD types, element names.
__PACKAGE__->_factory(
    [ qw(        adGroupsCount
        adGroupCriteriaCount
        adGroupAdsCount

    ) ],
    {
        'adGroupsCount' => \%adGroupsCount_of,
        'adGroupCriteriaCount' => \%adGroupCriteriaCount_of,
        'adGroupAdsCount' => \%adGroupAdsCount_of,
    },
    {
        'adGroupsCount' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
        'adGroupCriteriaCount' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
        'adGroupAdsCount' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
    },
    {

        'adGroupsCount' => 'adGroupsCount',
        'adGroupCriteriaCount' => 'adGroupCriteriaCount',
        'adGroupAdsCount' => 'adGroupAdsCount',
    }
);

} # end BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::ExperimentSummaryStats
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
ExperimentSummaryStats from the namespace https://adwords.google.com/api/adwords/cm/v201409.
Contains how many changes were made associated with an experiment at the adgroup, critieria and creative levels
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * adGroupsCount
=item * adGroupCriteriaCount
=item * adGroupAdsCount
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/ExperimentSummaryStats.pm | Perl | apache-2.0 | 2,052 |
package Test2::API::Context;
use strict;
use warnings;
our $VERSION = '1.302015';
use Carp qw/confess croak longmess/;
use Scalar::Util qw/weaken/;
use Test2::Util qw/get_tid try pkg_to_file get_tid/;
use Test2::Util::Trace();
use Test2::API();
# Preload some key event types
my %LOADED = (
map {
my $pkg = "Test2::Event::$_";
my $file = "Test2/Event/$_.pm";
require $file unless $INC{$file};
( $pkg => $pkg, $_ => $pkg )
} qw/Ok Diag Note Plan Bail Exception Waiting Skip Subtest/
);
use Test2::Util::ExternalMeta qw/meta get_meta set_meta delete_meta/;
use Test2::Util::HashBase qw{
stack hub trace _on_release _depth _is_canon _is_spawn _aborted
errno eval_error child_error
};
# Private, not package vars
# It is safe to cache these.
my $ON_RELEASE = Test2::API::_context_release_callbacks_ref();
my $CONTEXTS = Test2::API::_contexts_ref();
sub init {
    my $self = shift;

    # Trace (creation location) and hub (event destination) are mandatory.
    confess "The 'trace' attribute is required"
        unless $self->{+TRACE};

    confess "The 'hub' attribute is required"
        unless $self->{+HUB};

    $self->{+_DEPTH} = 0 unless defined $self->{+_DEPTH};

    # Snapshot the global error variables at creation time so release()
    # can restore them, insulating test code from tools that clobber them.
    $self->{+ERRNO}       = $! unless exists $self->{+ERRNO};
    $self->{+EVAL_ERROR}  = $@ unless exists $self->{+EVAL_ERROR};
    $self->{+CHILD_ERROR} = $? unless exists $self->{+CHILD_ERROR};
}
sub snapshot { bless {%{$_[0]}, _is_canon => undef, _is_spawn => undef, _aborted => undef}, __PACKAGE__ }
sub restore_error_vars {
    my $self = shift;
    # Put $!, $@ and $? back to their values at context creation.
    ($!, $@, $?) = @$self{+ERRNO, +EVAL_ERROR, +CHILD_ERROR};
}
sub DESTROY {
    # Only a canonical or spawned context has cleanup duties.
    return unless $_[0]->{+_IS_CANON} || $_[0]->{+_IS_SPAWN};
    # If the context aborted on purpose (throw/skip plan/bail), destruction
    # without release() is expected — stay quiet.
    return if $_[0]->{+_ABORTED} && ${$_[0]->{+_ABORTED}};
    my ($self) = @_;

    my $hub = $self->{+HUB};
    my $hid = $hub->{hid};

    # Do not show the warning if it looks like an exception has been thrown, or
    # if the context is not local to this process or thread.
    if($self->{+EVAL_ERROR} eq $@ && $hub->is_local) {
        my $frame = $self->{+_IS_SPAWN} || $self->{+TRACE}->frame;
        warn <<" EOT";
A context appears to have been destroyed without first calling release().
Based on \$@ it does not look like an exception was thrown (this is not always
a reliable test)

This is a problem because the global error variables (\$!, \$@, and \$?) will
not be restored. In addition some release callbacks will not work properly from
inside a DESTROY method.

Here are the context creation details, just in case a tool forgot to call
release():
  File: $frame->[1]
  Line: $frame->[2]
  Tool: $frame->[3]

Cleaning up the CONTEXT stack...
 EOT
    }

    # Spawned copies never own the CONTEXTS slot; nothing more to do.
    return if $self->{+_IS_SPAWN};

    # Remove the key itself to avoid a slow memory leak
    delete $CONTEXTS->{$hid};
    $self->{+_IS_CANON} = undef;

    # Run release callbacks: context-local, then hub-level, then global —
    # each set in reverse registration order.
    if (my $cbk = $self->{+_ON_RELEASE}) {
        $_->($self) for reverse @$cbk;
    }
    if (my $hcbk = $hub->{_context_release}) {
        $_->($self) for reverse @$hcbk;
    }
    $_->($self) for reverse @$ON_RELEASE;
}
# release exists to implement behaviors like die-on-fail. In die-on-fail you
# want to die after a failure, but only after diagnostics have been reported.
# The ideal time for the die to happen is when the context is released.
# Unfortunately die does not work in a DESTROY block.
# release exists to implement behaviors like die-on-fail. In die-on-fail you
# want to die after a failure, but only after diagnostics have been reported.
# The ideal time for the die to happen is when the context is released.
# Unfortunately die does not work in a DESTROY block.
sub release {
    my ($self) = @_;

    # A spawned (non-canon) copy only restores the error vars and clears its
    # spawn flag; the list assignment is always true (3 RHS elements), so
    # the 'and return' path is taken whenever _IS_SPAWN is set.
    ($!, $@, $?) = @$self{+ERRNO, +EVAL_ERROR, +CHILD_ERROR} and return $self->{+_IS_SPAWN} = undef
        if $self->{+_IS_SPAWN};

    croak "release() should not be called on context that is neither canon nor a child"
        unless $self->{+_IS_CANON};

    my $hub = $self->{+HUB};
    my $hid = $hub->{hid};

    croak "context thinks it is canon, but it is not"
        unless $CONTEXTS->{$hid} && $CONTEXTS->{$hid} == $self;

    # Remove the key itself to avoid a slow memory leak
    $self->{+_IS_CANON} = undef;
    delete $CONTEXTS->{$hid};

    # Release callbacks: context-local, hub-level, then global, each in
    # reverse registration order.
    if (my $cbk = $self->{+_ON_RELEASE}) {
        $_->($self) for reverse @$cbk;
    }
    if (my $hcbk = $hub->{_context_release}) {
        $_->($self) for reverse @$hcbk;
    }
    $_->($self) for reverse @$ON_RELEASE;

    # Do this last so that nothing else changes them.
    # If one of the hooks dies then these do not get restored, this is
    # intentional
    ($!, $@, $?) = @$self{+ERRNO, +EVAL_ERROR, +CHILD_ERROR};

    return;
}
# do_in_context: run $sub as though it were executing inside this context,
# temporarily installing a refreshed snapshot as the canonical context for
# the hub, then restoring whatever context was current before.
sub do_in_context {
    my $self = shift;
    my ($sub, @args) = @_;

    # We need to update the pid/tid and error vars.
    my $clone = $self->snapshot;
    @$clone{+ERRNO, +EVAL_ERROR, +CHILD_ERROR} = ($!, $@, $?);
    $clone->{+TRACE} = $clone->{+TRACE}->snapshot;
    $clone->{+TRACE}->set_pid($$);
    $clone->{+TRACE}->set_tid(get_tid());

    my $hub = $clone->{+HUB};
    my $hid = $hub->hid;

    # Remember any context that is already canonical for this hub.
    my $old = $CONTEXTS->{$hid};

    $clone->{+_IS_CANON} = 1;
    $CONTEXTS->{$hid} = $clone;
    weaken($CONTEXTS->{$hid});
    my ($ok, $err) = &try($sub, @args);
    my ($rok, $rerr) = try { $clone->release };
    delete $clone->{+_IS_CANON};

    # Restore the previous canonical context (weakened, as it was before).
    if ($old) {
        $CONTEXTS->{$hid} = $old;
        weaken($CONTEXTS->{$hid});
    }
    else {
        delete $CONTEXTS->{$hid};
    }

    # Re-raise any failure only after the context stack has been restored.
    die $err unless $ok;
    die $rerr unless $rok;
}
sub done_testing {
    my $self = shift;
    # Finalize the hub at this trace; the true flag presumably requests the
    # closing plan — confirm against Test2::Hub::finalize documentation.
    $self->hub->finalize($self->trace, 1);
    return;
}
sub throw {
    my ($self, $msg) = @_;
    # Record the abort so DESTROY knows this context died intentionally.
    ${$self->{+_ABORTED}}++ if $self->{+_ABORTED};
    # Release first so callbacks run before the exception propagates.
    $self->release if $self->{+_IS_CANON} || $self->{+_IS_SPAWN};
    $self->trace->throw($msg);
}
sub alert {
    my ($self, $msg) = @_;
    # Warn, attributed to the file/line where this context was created.
    $self->trace->alert($msg);
}
# send_event: construct an event of the named type (short name relative to
# Test2::Event::, or '+Full::Class') and send it through this context's hub.
sub send_event {
    my $self = shift;
    my $event = shift;
    my %args = @_;

    # %LOADED caches resolved event classes; _parse_event handles misses.
    my $pkg = $LOADED{$event} || $self->_parse_event($event);

    $self->{+HUB}->send(
        $pkg->new(
            trace => $self->{+TRACE}->snapshot,
            %args,
        )
    );
}
# build_event: like send_event, but return the constructed event instead of
# sending it — the caller decides what to do with it.
sub build_event {
    my $self = shift;
    my $event = shift;
    my %args = @_;

    my $pkg = $LOADED{$event} || $self->_parse_event($event);

    $pkg->new(
        trace => $self->{+TRACE}->snapshot,
        %args,
    );
}
# ok: emit a pass/fail Ok event; on failure also emit the standard failure
# diagnostics plus any extra diag lines supplied in $diag (arrayref).
# Returns the event.
sub ok {
    my $self = shift;
    my ($pass, $name, $diag) = @_;

    my $hub = $self->{+HUB};

    # Hand-rolled bless + init instead of ->new(); presumably a hot-path
    # optimization — confirm against Test2::Event::Ok before changing.
    my $e = bless {
        trace => bless( {%{$self->{+TRACE}}}, 'Test2::Util::Trace'),
        pass => $pass,
        name => $name,
    }, 'Test2::Event::Ok';
    $e->init;

    $hub->send($e);
    return $e if $pass;

    $self->failure_diag($e);

    if ($diag && @$diag) {
        $self->diag($_) for @$diag
    }

    return $e;
}
# failure_diag: emit the standard "Failed test ..." diagnostics for a
# failing Ok event, mirroring Test::Builder's historical formatting.
sub failure_diag {
    my $self = shift;
    my ($e) = @_;

    # Test::Builder historically injected a leading newline when a harness
    # is active but not verbose, so the diagnostics do not get appended to
    # the in-progress output line. A verbose harness does not need it.
    my $prefix = "";
    if ($ENV{HARNESS_ACTIVE} && !$ENV{HARNESS_IS_VERBOSE}) {
        $prefix = "\n";
    }

    # Debug info is normally "at FILE line N"; without a trace object there
    # is nothing useful to show.
    my $name  = $e->name;
    my $trace = $e->trace;
    my $debug;
    if ($trace) {
        $debug = $trace->debug;
    }
    else {
        $debug = "[No trace info available]";
    }

    # Named tests get the debug info on a second line (Test::Builder style).
    if (defined $name) {
        $self->diag(qq[${prefix}Failed test '$name'\n$debug.\n]);
    }
    else {
        $self->diag(qq[${prefix}Failed test $debug.\n]);
    }
}
sub skip {
    my $self = shift;
    my ($name, $reason, @extra) = @_;
    # A skip is reported as a passing Skip event carrying the reason;
    # @extra lets callers override/extend the event attributes.
    $self->send_event(
        'Skip',
        name => $name,
        reason => $reason,
        pass => 1,
        @extra,
    );
}
sub note {
    my $self = shift;
    my ($message) = @_;
    # Emit an informational Note event from this context.
    $self->send_event('Note', message => $message);
}
# diag: emit a diagnostics (Diag) event from this context.
# Fix: removed an unused local ($hub was fetched but never used);
# send_event already routes through this context's hub.
sub diag {
    my $self = shift;
    my ($message) = @_;
    $self->send_event(
        'Diag',
        message => $message,
    );
}
sub plan {
    my ($self, $max, $directive, $reason) = @_;
    # SKIP/skip_all plans abort the test; record the abort so DESTROY does
    # not warn about a context destroyed without release().
    ${$self->{+_ABORTED}}++ if $self->{+_ABORTED} && $directive && $directive =~ m/^(SKIP|skip_all)$/;
    $self->send_event('Plan', max => $max, directive => $directive, reason => $reason);
}
sub bail {
    my ($self, $reason) = @_;
    # Bail-out terminates testing; record the abort so DESTROY stays quiet.
    ${$self->{+_ABORTED}}++ if $self->{+_ABORTED};
    $self->send_event('Bail', reason => $reason);
}
# _parse_event: resolve an event name to a loaded package and cache it.
# A leading '+' means a fully-qualified class name; otherwise the name is
# taken relative to Test2::Event::. Loads the module on first use and
# verifies it subclasses Test2::Event.
sub _parse_event {
    my $self = shift;
    my $event = shift;

    my $pkg;
    if ($event =~ m/^\+(.*)/) {
        $pkg = $1;
    }
    else {
        $pkg = "Test2::Event::$event";
    }

    unless ($LOADED{$pkg}) {
        my $file = pkg_to_file($pkg);
        my ($ok, $err) = try { require $file };
        $self->throw("Could not load event module '$pkg': $err")
            unless $ok;

        $LOADED{$pkg} = $pkg;
    }

    confess "'$pkg' is not a subclass of 'Test2::Event'"
        unless $pkg->isa('Test2::Event');

    # Also cache under the caller's spelling so send_event hits %LOADED
    # directly next time.
    $LOADED{$event} = $pkg;

    return $pkg;
}
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Test2::API::Context - Object to represent a testing context.
=head1 DESCRIPTION
The context object is the primary interface for authors of testing tools
written with L<Test2>. The context object represents the context in
which a test takes place (File and Line Number), and provides a quick way to
generate events from that context. The context object also takes care of
sending events to the correct L<Test2::Hub> instance.
=head1 SYNOPSIS
In general you will not be creating contexts directly. To obtain a context you
should always use C<context()> which is exported by the L<Test2::API> module.
use Test2::API qw/context/;
sub my_ok {
my ($bool, $name) = @_;
my $ctx = context();
$ctx->ok($bool, $name);
$ctx->release; # You MUST do this!
return $bool;
}
Context objects make it easy to wrap other tools that also use context. Once
you grab a context, any tool you call before releasing your context will
inherit it:
sub wrapper {
my ($bool, $name) = @_;
my $ctx = context();
$ctx->diag("wrapping my_ok");
my $out = my_ok($bool, $name);
$ctx->release; # You MUST do this!
return $out;
}
=head1 CRITICAL DETAILS
=over 4
=item you MUST always use the context() sub from Test2::API
Creating your own context via C<< Test2::API::Context->new() >> will almost never
produce a desirable result. Use C<context()> which is exported by L<Test2>.
There are a handful of cases where a tool author may want to create a new
context by hand, which is why the C<new> method exists. Unless you really know
what you are doing you should avoid this.
=item You MUST always release the context when done with it
Releasing the context tells the system you are done with it. This gives it a
chance to run any necessary callbacks or cleanup tasks. If you forget to
release the context it will try to detect the problem and warn you about it.
=item You MUST NOT pass context objects around
When you obtain a context object it is made specifically for your tool and any
tools nested within. If you pass a context around you run the risk of polluting
other tools with incorrect context information.
If you are certain that you want a different tool to use the same context you
may pass it a snapshot. C<< $ctx->snapshot >> will give you a shallow clone of
the context that is safe to pass around or store.
=item You MUST NOT store or cache a context for later
As long as a context exists for a given hub, all tools that try to get a
context will get the existing instance. If you try to store the context you
will pollute other tools with incorrect context information.
If you are certain that you want to save the context for later, you can use a
snapshot. C<< $ctx->snapshot >> will give you a shallow clone of the context
that is safe to pass around or store.
C<context()> has some mechanisms to protect you if you do cause a context to
persist beyond the scope in which it was obtained. In practice you should not
rely on these protections, and they are fairly noisy with warnings.
=item You SHOULD obtain your context as soon as possible in a given tool
You never know what tools you call from within your own tool will need a
context. Obtaining the context early ensures that nested tools can find the
context you want them to find.
=back
=head1 METHODS
=over 4
=item $ctx->done_testing;
Note that testing is finished. If no plan has been set this will generate a
Plan event.
=item $clone = $ctx->snapshot()
This will return a shallow clone of the context. The shallow clone is safe to
store for later.
=item $ctx->release()
This will release the context. This runs cleanup tasks, and several important
hooks. It will also restore C<$!>, C<$?>, and C<$@> to what they were when the
context was created.
B<Note:> If a context is acquired more than once an internal refcount is kept.
C<release()> decrements the ref count, none of the other actions of
C<release()> will occur unless the refcount hits 0. This means only the last
call to C<release()> will reset C<$?>, C<$!>, C<$@>, and run the cleanup tasks.
=item $ctx->throw($message)
This will throw an exception reporting to the file and line number of the
context. This will also release the context for you.
=item $ctx->alert($message)
This will issue a warning from the file and line number of the context.
=item $stack = $ctx->stack()
This will return the L<Test2::API::Stack> instance the context used to find
the current hub.
=item $hub = $ctx->hub()
This will return the L<Test2::Hub> instance the context recognises as
the current one to which all events should be sent.
=item $dbg = $ctx->trace()
This will return the L<Test2::Util::Trace> instance used by the context.
=item $ctx->do_in_context(\&code, @args);
Sometimes you have a context that is not current, and you want things to use it
as the current one. In these cases you can call
C<< $ctx->do_in_context(sub { ... }) >>. The codeblock will be run, and
anything inside of it that looks for a context will find the one on which the
method was called.
This B<DOES NOT> affect context on other hubs, only the hub used by the context
will be affected.
my $ctx = ...;
$ctx->do_in_context(sub {
my $ctx = context(); # returns the $ctx the sub is called on
});
B<Note:> The context will actually be cloned, the clone will be used instead of
the original. This allows the TID, PID, and error vars to be correct without
modifying the original context.
=item $ctx->restore_error_vars()
This will set C<$!>, C<$?>, and C<$@> to what they were when the context was
created. There is no localization or anything done here, calling this method
will actually set these vars.
=item $! = $ctx->errno()
The (numeric) value of C<$!> when the context was created.
=item $? = $ctx->child_error()
The value of C<$?> when the context was created.
=item $@ = $ctx->eval_error()
The value of C<$@> when the context was created.
=back
=head2 EVENT PRODUCTION METHODS
=over 4
=item $event = $ctx->ok($bool, $name)
=item $event = $ctx->ok($bool, $name, \@diag)
This will create an L<Test2::Event::Ok> object for you. If C<$bool> is false
then an L<Test2::Event::Diag> event will be sent as well with details about the
failure. If you do not want automatic diagnostics you should use the
C<send_event()> method directly.
The C<\@diag> can contain diagnostics messages you wish to have displayed in the
event of a failure. For a passing test the diagnostics array will be ignored.
=item $event = $ctx->note($message)
Send an L<Test2::Event::Note>. This event prints a message to STDOUT.
=item $event = $ctx->diag($message)
Send an L<Test2::Event::Diag>. This event prints a message to STDERR.
=item $event = $ctx->plan($max)
=item $event = $ctx->plan(0, 'SKIP', $reason)
This can be used to send an L<Test2::Event::Plan> event. This event
usually takes the number of tests you expect to run. Optionally you can
set the expected count to 0 and give the 'SKIP' directive with a reason to
cause all tests to be skipped.
=item $event = $ctx->skip($name, $reason);
Send an L<Test2::Event::Skip> event.
=item $event = $ctx->bail($reason)
This sends an L<Test2::Event::Bail> event. This event will completely
terminate all testing.
=item $event = $ctx->send_event($Type, %parameters)
This lets you build and send an event of any type. The C<$Type> argument should
be the event package name with C<Test2::Event::> left off, or a fully
qualified package name prefixed with a '+'. The event is returned after it is
sent.
my $event = $ctx->send_event('Ok', ...);
or
my $event = $ctx->send_event('+Test2::Event::Ok', ...);
=item $event = $ctx->build_event($Type, %parameters)
This is the same as C<send_event()>, except it builds and returns the event
without sending it.
=back
=head1 HOOKS
There are 2 types of hooks, init hooks, and release hooks. As the names
suggest, these hooks are triggered when contexts are created or released.
=head2 INIT HOOKS
These are called whenever a context is initialized. That means when a new
instance is created. These hooks are B<NOT> called every time something
requests a context, just when a new one is created.
=head3 GLOBAL
This is how you add a global init callback. Global callbacks happen for every
context for any hub or stack.
Test2::API::test2_add_callback_context_init(sub {
my $ctx = shift;
...
});
=head3 PER HUB
This is how you add an init callback for all contexts created for a given hub.
These callbacks will not run for other hubs.
$hub->add_context_init(sub {
my $ctx = shift;
...
});
=head3 PER CONTEXT
This is how you specify an init hook that will only run if your call to
C<context()> generates a new context. The callback will be ignored if
C<context()> is returning an existing context.
my $ctx = context(on_init => sub {
my $ctx = shift;
...
});
=head2 RELEASE HOOKS
These are called whenever a context is released. That means when the last
reference to the instance is about to be destroyed. These hooks are B<NOT>
called every time C<< $ctx->release >> is called.
=head3 GLOBAL
This is how you add a global release callback. Global callbacks happen for every
context for any hub or stack.
Test2::API::test2_add_callback_context_release(sub {
my $ctx = shift;
...
});
=head3 PER HUB
This is how you add a release callback for all contexts created for a given
hub. These callbacks will not run for other hubs.
$hub->add_context_release(sub {
my $ctx = shift;
...
});
=head3 PER CONTEXT
This is how you add release callbacks directly to a context. The callback will
B<ALWAYS> be added to the context that gets returned, it does not matter if a
new one is generated, or if an existing one is returned.
my $ctx = context(on_release => sub {
my $ctx = shift;
...
});
=head1 THIRD PARTY META-DATA
This object consumes L<Test2::Util::ExternalMeta> which provides a consistent
way for you to attach meta-data to instances of this class. This is useful for
tools, plugins, and other extensions.
=head1 SOURCE
The source code repository for Test2 can be found at
F<http://github.com/Test-More/test-more/>.
=head1 MAINTAINERS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=back
=head1 AUTHORS
=over 4
=item Chad Granum E<lt>exodist@cpan.orgE<gt>
=item Kent Fredric E<lt>kentnl@cpan.orgE<gt>
=back
=head1 COPYRIGHT
Copyright 2016 Chad Granum E<lt>exodist@cpan.orgE<gt>.
This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
See F<http://dev.perl.org/licenses/>
=cut
| ashkanx/binary-mt | scripts/local/lib/perl5/Test2/API/Context.pm | Perl | apache-2.0 | 19,875 |
#!perl
# Copyright (c) 2015 Timm Murray
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
use v5.14;
use warnings;
use Attach::Stuff;
# Build a drill/attachment template for the Raspberry Pi Model B+ board.
# Values below appear to be millimeters (Attach::Stuff's unit -- TODO
# confirm against the module's documentation).
my $attach = Attach::Stuff->new({
# Board outline: 85 x 56.
width => 85,
height => 56,
# Hole radius: half of a 2.75 drill plus 0.25 clearance on the radius.
screw_default_radius => (2.75 / 2) + 0.25,
# Four mounting holes, each 3.5 in from the board corner; hole centers
# are therefore spaced 58 apart horizontally and 49 vertically.
screw_holes => [
[ 3.5, 3.5 ],
[ 58 + 3.5, 3.5 ],
[ 3.5, 49 + 3.5 ],
[ 58 + 3.5, 49 + 3.5 ],
],
});
# Render the template and emit the SVG document on STDOUT.
my $svg = $attach->draw;
print $svg->xmlify;
| frezik/race_record | svg_creators/rpi_model_b_plus.pl | Perl | bsd-2-clause | 1,784 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package APR;
use DynaLoader ();
our $VERSION = '0.009000';
our @ISA = qw(DynaLoader);
#dlopen("APR.so", RTDL_GLOBAL); so we only need to link libapr.a once
# XXX: see xs/ModPerl/Const/Const.pm for issues of using 0x01
use Config ();
# DL_GLOBAL is the dl_load_flags() value handed back to DynaLoader.
# 0x01 requests global symbol visibility for the loaded shared object, but
# only on platforms whose dynamic loader is dl_dlopen.xs and that are not
# OpenBSD (see the XXX note above for why).
use constant DL_GLOBAL =>
( $Config::Config{dlsrc} eq 'dl_dlopen.xs' && $^O ne 'openbsd' ) ? 0x01 : 0x0;
# DynaLoader hook: flags used when dlopen()ing this module's shared object.
sub dl_load_flags { DL_GLOBAL }
# Bootstrap the XS code exactly once. The BOOTSTRAP stub records that the
# shared object has been loaded, so a repeated bootstrap attempt is skipped.
unless (defined &APR::XSLoader::BOOTSTRAP) {
__PACKAGE__->bootstrap($VERSION);
*APR::XSLoader::BOOTSTRAP = sub () { 1 };
}
1;
__END__
| gitpan/mod_perl | xs/APR/APR/APR.pm | Perl | apache-2.0 | 1,301 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
# The name this swash is to be known by, with the format of the mappings in
# the main body of the table, and what all code points missing from this file
# map to.
$utf8::SwashInfo{'ToCombiningClass'}{'format'} = 'i'; # integer
$utf8::SwashInfo{'ToCombiningClass'}{'missing'} = '0';
return <<'END';
0300 0314 230
0315 232
0316 0319 220
031A 232
031B 216
031C 0320 220
0321 0322 202
0323 0326 220
0327 0328 202
0329 0333 220
0334 0338 1
0339 033C 220
033D 0344 230
0345 240
0346 230
0347 0349 220
034A 034C 230
034D 034E 220
0350 0352 230
0353 0356 220
0357 230
0358 232
0359 035A 220
035B 230
035C 233
035D 035E 234
035F 233
0360 0361 234
0362 233
0363 036F 230
0483 0487 230
0591 220
0592 0595 230
0596 220
0597 0599 230
059A 222
059B 220
059C 05A1 230
05A2 05A7 220
05A8 05A9 230
05AA 220
05AB 05AC 230
05AD 222
05AE 228
05AF 230
05B0 10
05B1 11
05B2 12
05B3 13
05B4 14
05B5 15
05B6 16
05B7 17
05B8 18
05B9 05BA 19
05BB 20
05BC 21
05BD 22
05BF 23
05C1 24
05C2 25
05C4 230
05C5 220
05C7 18
0610 0617 230
0618 30
0619 31
061A 32
064B 27
064C 28
064D 29
064E 30
064F 31
0650 32
0651 33
0652 34
0653 0654 230
0655 0656 220
0657 065B 230
065C 220
065D 065E 230
065F 220
0670 35
06D6 06DC 230
06DF 06E2 230
06E3 220
06E4 230
06E7 06E8 230
06EA 220
06EB 06EC 230
06ED 220
0711 36
0730 230
0731 220
0732 0733 230
0734 220
0735 0736 230
0737 0739 220
073A 230
073B 073C 220
073D 230
073E 220
073F 0741 230
0742 220
0743 230
0744 220
0745 230
0746 220
0747 230
0748 220
0749 074A 230
07EB 07F1 230
07F2 220
07F3 230
0816 0819 230
081B 0823 230
0825 0827 230
0829 082D 230
0859 085B 220
08E4 08E5 230
08E6 220
08E7 08E8 230
08E9 220
08EA 08EC 230
08ED 08EF 220
08F0 27
08F1 28
08F2 29
08F3 08F5 230
08F6 220
08F7 08F8 230
08F9 08FA 220
08FB 08FE 230
093C 7
094D 9
0951 230
0952 220
0953 0954 230
09BC 7
09CD 9
0A3C 7
0A4D 9
0ABC 7
0ACD 9
0B3C 7
0B4D 9
0BCD 9
0C4D 9
0C55 84
0C56 91
0CBC 7
0CCD 9
0D4D 9
0DCA 9
0E38 0E39 103
0E3A 9
0E48 0E4B 107
0EB8 0EB9 118
0EC8 0ECB 122
0F18 0F19 220
0F35 220
0F37 220
0F39 216
0F71 129
0F72 130
0F74 132
0F7A 0F7D 130
0F80 130
0F82 0F83 230
0F84 9
0F86 0F87 230
0FC6 220
1037 7
1039 103A 9
108D 220
135D 135F 230
1714 9
1734 9
17D2 9
17DD 230
18A9 228
1939 222
193A 230
193B 220
1A17 230
1A18 220
1A60 9
1A75 1A7C 230
1A7F 220
1B34 7
1B44 9
1B6B 230
1B6C 220
1B6D 1B73 230
1BAA 1BAB 9
1BE6 7
1BF2 1BF3 9
1C37 7
1CD0 1CD2 230
1CD4 1
1CD5 1CD9 220
1CDA 1CDB 230
1CDC 1CDF 220
1CE0 230
1CE2 1CE8 1
1CED 220
1CF4 230
1DC0 1DC1 230
1DC2 220
1DC3 1DC9 230
1DCA 220
1DCB 1DCC 230
1DCD 234
1DCE 214
1DCF 220
1DD0 202
1DD1 1DE6 230
1DFC 233
1DFD 220
1DFE 230
1DFF 220
20D0 20D1 230
20D2 20D3 1
20D4 20D7 230
20D8 20DA 1
20DB 20DC 230
20E1 230
20E5 20E6 1
20E7 230
20E8 220
20E9 230
20EA 20EB 1
20EC 20EF 220
20F0 230
2CEF 2CF1 230
2D7F 9
2DE0 2DFF 230
302A 218
302B 228
302C 232
302D 222
302E 302F 224
3099 309A 8
A66F 230
A674 A67D 230
A69F 230
A6F0 A6F1 230
A806 9
A8C4 9
A8E0 A8F1 230
A92B A92D 220
A953 9
A9B3 7
A9C0 9
AAB0 230
AAB2 AAB3 230
AAB4 220
AAB7 AAB8 230
AABE AABF 230
AAC1 230
AAF6 9
ABED 9
FB1E 26
FE20 FE26 230
101FD 220
10A0D 220
10A0F 230
10A38 230
10A39 1
10A3A 220
10A3F 9
11046 9
110B9 9
110BA 7
11100 11102 230
11133 11134 9
111C0 9
116B6 9
116B7 7
1D165 1D166 216
1D167 1D169 1
1D16D 226
1D16E 1D172 216
1D17B 1D182 220
1D185 1D189 230
1D18A 1D18B 220
1D1AA 1D1AD 230
1D242 1D244 230
END
| efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/perl5/5.16.2/unicore/CombiningClass.pl | Perl | apache-2.0 | 3,914 |
print "hello unicode 👾\n";
| oyvindsk/perl | test/unicode.pl | Perl | mit | 30 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/ympzZnp0Uq/africa. Olson data version 2012c
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Africa::Kampala;
{
$DateTime::TimeZone::Africa::Kampala::VERSION = '1.46';
}
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Africa::Kampala::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY,
60825937820,
DateTime::TimeZone::NEG_INFINITY,
60825945600,
7780,
0,
'LMT'
],
[
60825937820,
60873368400,
60825948620,
60873379200,
10800,
0,
'EAT'
],
[
60873368400,
61441363800,
60873377400,
61441372800,
9000,
0,
'BEAT'
],
[
61441363800,
61725446100,
61441373700,
61725456000,
9900,
0,
'BEAUT'
],
[
61725446100,
DateTime::TimeZone::INFINITY,
61725456900,
DateTime::TimeZone::INFINITY,
10800,
0,
'EAT'
],
];
# Version of the Olson/IANA time zone database this table was generated from.
sub olson_version { '2012c' }
# The generated span table for Kampala contains no DST transitions.
sub has_dst_changes { 0 }
# Last year covered by the pre-computed spans above.
sub _max_year { 2022 }
# Class::Singleton hook: construct the singleton using the generated spans.
sub _new_instance
{
return shift->_init( @_, spans => $spans );
}
1;
| leighpauls/k2cro4 | third_party/perl/perl/vendor/lib/DateTime/TimeZone/Africa/Kampala.pm | Perl | bsd-3-clause | 1,254 |
# test/cms-test.pl
# Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
# project.
#
# ====================================================================
# Copyright (c) 2008 The OpenSSL Project. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by the OpenSSL Project
# for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
#
# 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
# endorse or promote products derived from this software without
# prior written permission. For written permission, please contact
# licensing@OpenSSL.org.
#
# 5. Products derived from this software may not be called "OpenSSL"
# nor may "OpenSSL" appear in their names without prior written
# permission of the OpenSSL Project.
#
# 6. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by the OpenSSL Project
# for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
#
# THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
# CMS, PKCS7 consistency test script. Run extensive tests on
# OpenSSL PKCS#7 and CMS implementations.
my $ossl_path;
# Every test command appends this redirection so output can be inspected.
my $redir = " 2> cms.err > cms.out";
# Locate the openssl binary for the current platform, and record the
# platform's null device plus the raw wait-status ($?) value that the
# "openssl no-<alg>" probe commands return on failure.
# Make VMS work
if ( $^O eq "VMS" && -f "OSSLX:openssl.exe" ) {
$ossl_path = "pipe mcr OSSLX:openssl";
$null_path = "NL:";
# On VMS, the lowest 3 bits of the exit code indicates severity
# 1 is success (perl translates it to 0 for $?), 2 is error
# (perl doesn't translate it)
$failure_code = 512; # 2 << 8 = 512
}
# Make MSYS work
elsif ( $^O eq "MSWin32" && -f "../apps/openssl.exe" ) {
$ossl_path = "cmd /c ..\\apps\\openssl";
$null_path = "NUL";
$failure_code = 256;
}
elsif ( -f "../apps/openssl$ENV{EXE_EXT}" ) {
$ossl_path = "../util/shlib_wrap.sh ../apps/openssl";
$null_path = "/dev/null";
$failure_code = 256;
}
elsif ( -f "..\\out32dll\\openssl.exe" ) {
$ossl_path = "..\\out32dll\\openssl.exe";
$null_path = "NUL";
$failure_code = 256;
}
elsif ( -f "..\\out32\\openssl.exe" ) {
$ossl_path = "..\\out32\\openssl.exe";
$null_path = "NUL";
$failure_code = 256;
}
else {
die "Can't find OpenSSL executable";
}
# Command prefixes for the two implementations under test.
my $pk7cmd = "$ossl_path smime ";
my $cmscmd = "$ossl_path cms ";
# Directory holding the S/MIME test certificates and keys.
my $smdir = "smime-certs";
# When set, abort the whole script on the first failing test.
my $halt_err = 1;
# Running count of failed tests.
my $badcmd = 0;
my $no_ec;
my $no_ec2m;
my $no_ecdh;
# True when the binary is from the 0.9.8 branch (cms backport build).
my $ossl8 = `$ossl_path version -v` =~ /0\.9\.8/;
# Probe for compiled-out algorithm support: "openssl no-ec" exits with
# status 0 when EC is absent, and with $failure_code when it is present.
system ("$ossl_path no-ec > $null_path");
if ($? == 0)
{
$no_ec = 1;
}
elsif ($? == $failure_code)
{
$no_ec = 0;
}
else
{
die "Error checking for EC support\n";
}
# Same probe for binary-field (EC2M) curve support.
system ("$ossl_path no-ec2m > $null_path");
if ($? == 0)
{
$no_ec2m = 1;
}
elsif ($? == $failure_code)
{
$no_ec2m = 0;
}
else
{
die "Error checking for EC2M support\n";
}
# Same probe for ECDH support.
system ("$ossl_path no-ecdh > $null_path");
if ($? == 0)
{
$no_ecdh = 1;
}
elsif ($? == $failure_code)
{
$no_ecdh = 0;
}
else
{
die "Error checking for ECDH support\n";
}
my @smime_pkcs7_tests = (
[
"signed content DER format, RSA key",
"-sign -in smcont.txt -outform \"DER\" -nodetach"
. " -certfile $smdir/smroot.pem"
. " -signer $smdir/smrsa1.pem -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed detached content DER format, RSA key",
"-sign -in smcont.txt -outform \"DER\""
. " -signer $smdir/smrsa1.pem -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt -content smcont.txt"
],
[
"signed content test streaming BER format, RSA",
"-sign -in smcont.txt -outform \"DER\" -nodetach"
. " -stream -signer $smdir/smrsa1.pem -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content DER format, DSA key",
"-sign -in smcont.txt -outform \"DER\" -nodetach"
. " -signer $smdir/smdsa1.pem -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed detached content DER format, DSA key",
"-sign -in smcont.txt -outform \"DER\""
. " -signer $smdir/smdsa1.pem -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt -content smcont.txt"
],
[
"signed detached content DER format, add RSA signer",
"-resign -inform \"DER\" -in test.cms -outform \"DER\""
. " -signer $smdir/smrsa1.pem -out test2.cms",
"-verify -in test2.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt -content smcont.txt"
],
[
"signed content test streaming BER format, DSA key",
"-sign -in smcont.txt -outform \"DER\" -nodetach"
. " -stream -signer $smdir/smdsa1.pem -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming BER format, 2 DSA and 2 RSA keys",
"-sign -in smcont.txt -outform \"DER\" -nodetach"
. " -signer $smdir/smrsa1.pem -signer $smdir/smrsa2.pem"
. " -signer $smdir/smdsa1.pem -signer $smdir/smdsa2.pem"
. " -stream -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming BER format, 2 DSA and 2 RSA keys, no attributes",
"-sign -in smcont.txt -outform \"DER\" -noattr -nodetach"
. " -signer $smdir/smrsa1.pem -signer $smdir/smrsa2.pem"
. " -signer $smdir/smdsa1.pem -signer $smdir/smdsa2.pem"
. " -stream -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming S/MIME format, 2 DSA and 2 RSA keys",
"-sign -in smcont.txt -nodetach"
. " -signer $smdir/smrsa1.pem -signer $smdir/smrsa2.pem"
. " -signer $smdir/smdsa1.pem -signer $smdir/smdsa2.pem"
. " -stream -out test.cms",
"-verify -in test.cms " . " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming multipart S/MIME format, 2 DSA and 2 RSA keys",
"-sign -in smcont.txt"
. " -signer $smdir/smrsa1.pem -signer $smdir/smrsa2.pem"
. " -signer $smdir/smdsa1.pem -signer $smdir/smdsa2.pem"
. " -stream -out test.cms",
"-verify -in test.cms " . " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, 3 recipients",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " $smdir/smrsa1.pem $smdir/smrsa2.pem $smdir/smrsa3.pem ",
"-decrypt -recip $smdir/smrsa1.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, 3 recipients, 3rd used",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " $smdir/smrsa1.pem $smdir/smrsa2.pem $smdir/smrsa3.pem ",
"-decrypt -recip $smdir/smrsa3.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, 3 recipients, key only used",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " $smdir/smrsa1.pem $smdir/smrsa2.pem $smdir/smrsa3.pem ",
"-decrypt -inkey $smdir/smrsa3.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, AES-256 cipher, 3 recipients",
"-encrypt -in smcont.txt"
. " -aes256 -stream -out test.cms"
. " $smdir/smrsa1.pem $smdir/smrsa2.pem $smdir/smrsa3.pem ",
"-decrypt -recip $smdir/smrsa1.pem -in test.cms -out smtst.txt"
],
);
my @smime_cms_tests = (
[
"signed content test streaming BER format, 2 DSA and 2 RSA keys, keyid",
"-sign -in smcont.txt -outform \"DER\" -nodetach -keyid"
. " -signer $smdir/smrsa1.pem -signer $smdir/smrsa2.pem"
. " -signer $smdir/smdsa1.pem -signer $smdir/smdsa2.pem"
. " -stream -out test.cms",
"-verify -in test.cms -inform \"DER\" "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming PEM format, 2 DSA and 2 RSA keys",
"-sign -in smcont.txt -outform PEM -nodetach"
. " -signer $smdir/smrsa1.pem -signer $smdir/smrsa2.pem"
. " -signer $smdir/smdsa1.pem -signer $smdir/smdsa2.pem"
. " -stream -out test.cms",
"-verify -in test.cms -inform PEM "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content MIME format, RSA key, signed receipt request",
"-sign -in smcont.txt -signer $smdir/smrsa1.pem -nodetach"
. " -receipt_request_to test\@openssl.org -receipt_request_all"
. " -out test.cms",
"-verify -in test.cms "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed receipt MIME format, RSA key",
"-sign_receipt -in test.cms"
. " -signer $smdir/smrsa2.pem"
. " -out test2.cms",
"-verify_receipt test2.cms -in test.cms"
. " \"-CAfile\" $smdir/smroot.pem"
],
[
"enveloped content test streaming S/MIME format, 3 recipients, keyid",
"-encrypt -in smcont.txt"
. " -stream -out test.cms -keyid"
. " $smdir/smrsa1.pem $smdir/smrsa2.pem $smdir/smrsa3.pem ",
"-decrypt -recip $smdir/smrsa1.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming PEM format, KEK",
"-encrypt -in smcont.txt -outform PEM -aes128"
. " -stream -out test.cms "
. " -secretkey 000102030405060708090A0B0C0D0E0F "
. " -secretkeyid C0FEE0",
"-decrypt -in test.cms -out smtst.txt -inform PEM"
. " -secretkey 000102030405060708090A0B0C0D0E0F "
. " -secretkeyid C0FEE0"
],
[
"enveloped content test streaming PEM format, KEK, key only",
"-encrypt -in smcont.txt -outform PEM -aes128"
. " -stream -out test.cms "
. " -secretkey 000102030405060708090A0B0C0D0E0F "
. " -secretkeyid C0FEE0",
"-decrypt -in test.cms -out smtst.txt -inform PEM"
. " -secretkey 000102030405060708090A0B0C0D0E0F "
],
[
"data content test streaming PEM format",
"-data_create -in smcont.txt -outform PEM -nodetach"
. " -stream -out test.cms",
"-data_out -in test.cms -inform PEM -out smtst.txt"
],
[
"encrypted content test streaming PEM format, 128 bit RC2 key",
"\"-EncryptedData_encrypt\" -in smcont.txt -outform PEM"
. " -rc2 -secretkey 000102030405060708090A0B0C0D0E0F"
. " -stream -out test.cms",
"\"-EncryptedData_decrypt\" -in test.cms -inform PEM "
. " -secretkey 000102030405060708090A0B0C0D0E0F -out smtst.txt"
],
[
"encrypted content test streaming PEM format, 40 bit RC2 key",
"\"-EncryptedData_encrypt\" -in smcont.txt -outform PEM"
. " -rc2 -secretkey 0001020304"
. " -stream -out test.cms",
"\"-EncryptedData_decrypt\" -in test.cms -inform PEM "
. " -secretkey 0001020304 -out smtst.txt"
],
[
"encrypted content test streaming PEM format, triple DES key",
"\"-EncryptedData_encrypt\" -in smcont.txt -outform PEM"
. " -des3 -secretkey 000102030405060708090A0B0C0D0E0F1011121314151617"
. " -stream -out test.cms",
"\"-EncryptedData_decrypt\" -in test.cms -inform PEM "
. " -secretkey 000102030405060708090A0B0C0D0E0F1011121314151617"
. " -out smtst.txt"
],
[
"encrypted content test streaming PEM format, 128 bit AES key",
"\"-EncryptedData_encrypt\" -in smcont.txt -outform PEM"
. " -aes128 -secretkey 000102030405060708090A0B0C0D0E0F"
. " -stream -out test.cms",
"\"-EncryptedData_decrypt\" -in test.cms -inform PEM "
. " -secretkey 000102030405060708090A0B0C0D0E0F -out smtst.txt"
],
);
my @smime_cms_comp_tests = (
[
"compressed content test streaming PEM format",
"-compress -in smcont.txt -outform PEM -nodetach"
. " -stream -out test.cms",
"-uncompress -in test.cms -inform PEM -out smtst.txt"
]
);
my @smime_cms_param_tests = (
[
"signed content test streaming PEM format, RSA keys, PSS signature",
"-sign -in smcont.txt -outform PEM -nodetach"
. " -signer $smdir/smrsa1.pem -keyopt rsa_padding_mode:pss"
. " -out test.cms",
"-verify -in test.cms -inform PEM "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming PEM format, RSA keys, PSS signature, no attributes",
"-sign -in smcont.txt -outform PEM -nodetach -noattr"
. " -signer $smdir/smrsa1.pem -keyopt rsa_padding_mode:pss"
. " -out test.cms",
"-verify -in test.cms -inform PEM "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"signed content test streaming PEM format, RSA keys, PSS signature, SHA384 MGF1",
"-sign -in smcont.txt -outform PEM -nodetach"
. " -signer $smdir/smrsa1.pem -keyopt rsa_padding_mode:pss"
. " -keyopt rsa_mgf1_md:sha384 -out test.cms",
"-verify -in test.cms -inform PEM "
. " \"-CAfile\" $smdir/smroot.pem -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, OAEP default parameters",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " -recip $smdir/smrsa1.pem -keyopt rsa_padding_mode:oaep",
"-decrypt -recip $smdir/smrsa1.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, OAEP SHA256",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " -recip $smdir/smrsa1.pem -keyopt rsa_padding_mode:oaep"
. " -keyopt rsa_oaep_md:sha256",
"-decrypt -recip $smdir/smrsa1.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, ECDH",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " -recip $smdir/smec1.pem",
"-decrypt -recip $smdir/smec1.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, ECDH, AES128, SHA256 KDF",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " -recip $smdir/smec1.pem -aes128 -keyopt ecdh_kdf_md:sha256",
"-decrypt -recip $smdir/smec1.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, ECDH, K-283, cofactor DH",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " -recip $smdir/smec2.pem -aes128"
. " -keyopt ecdh_kdf_md:sha256 -keyopt ecdh_cofactor_mode:1",
"-decrypt -recip $smdir/smec2.pem -in test.cms -out smtst.txt"
],
[
"enveloped content test streaming S/MIME format, X9.42 DH",
"-encrypt -in smcont.txt"
. " -stream -out test.cms"
. " -recip $smdir/smdh.pem -aes128",
"-decrypt -recip $smdir/smdh.pem -in test.cms -out smtst.txt"
]
);
# Run each test list in both directions (generate with one implementation,
# verify with the other) to check interoperability, then CMS<=>CMS for
# self-consistency.
print "CMS => PKCS#7 compatibility tests\n";
run_smime_tests( \$badcmd, \@smime_pkcs7_tests, $cmscmd, $pk7cmd );
print "CMS <= PKCS#7 compatibility tests\n";
run_smime_tests( \$badcmd, \@smime_pkcs7_tests, $pk7cmd, $cmscmd );
print "CMS <=> CMS consistency tests\n";
run_smime_tests( \$badcmd, \@smime_pkcs7_tests, $cmscmd, $cmscmd );
run_smime_tests( \$badcmd, \@smime_cms_tests, $cmscmd, $cmscmd );
print "CMS <=> CMS consistency tests, modified key parameters\n";
run_smime_tests( \$badcmd, \@smime_cms_param_tests, $cmscmd, $cmscmd );
# Compression tests require an openssl built with zlib support.
if ( `$ossl_path version -f` =~ /ZLIB/ ) {
run_smime_tests( \$badcmd, \@smime_cms_comp_tests, $cmscmd, $cmscmd );
}
else {
print "Zlib not supported: compression tests skipped\n";
}
print "Running modified tests for OpenSSL 0.9.8 cms backport\n" if($ossl8);
if ($badcmd) {
print "$badcmd TESTS FAILED!!\n";
}
else {
print "ALL TESTS SUCCESSFUL.\n";
}
# Remove the work files left behind by the test runs.
unlink "test.cms";
unlink "test2.cms";
unlink "smtst.txt";
unlink "cms.out";
unlink "cms.err";
# Run one list of S/MIME round-trip tests.
#
#   $rv   - reference to the failure counter; incremented on each failure
#   $aref - reference to a list of [ name, generate-args, verify-args ]
#   $scmd - command prefix used to generate (sign/encrypt) test.cms
#   $vcmd - command prefix used to verify/decrypt it
#
# Each test generates a structure with $scmd, processes it with $vcmd, and
# compares the recovered content against smcont.txt. When the file-level
# $halt_err flag is set, the script exits on the first failure.
sub run_smime_tests {
    my ( $rv, $aref, $scmd, $vcmd ) = @_;

    # FIX: the loop variable was an undeclared package global ($smtst);
    # declare it lexically so it cannot leak or be clobbered elsewhere.
    foreach my $smtst (@$aref) {
        my ( $tnam, $rscmd, $rvcmd ) = @$smtst;

        if ($ossl8) {
            # Skip smime resign: 0.9.8 smime doesn't support -resign
            next if ( $scmd =~ /smime/ && $rscmd =~ /-resign/ );

            # Disable streaming: option not supported in 0.9.8
            $tnam  =~ s/streaming//;
            $rscmd =~ s/-stream//;
            $rvcmd =~ s/-stream//;
        }

        # Skip algorithm-specific tests when support is compiled out.
        if ( $no_ec && $tnam =~ /ECDH/ ) {
            print "$tnam: skipped, EC disabled\n";
            next;
        }
        if ( $no_ecdh && $tnam =~ /ECDH/ ) {
            print "$tnam: skipped, ECDH disabled\n";
            next;
        }
        if ( $no_ec2m && $tnam =~ /K-283/ ) {
            print "$tnam: skipped, EC2M disabled\n";
            next;
        }

        system("$scmd$rscmd$redir");
        if ($?) {
            print "$tnam: generation error\n";
            $$rv++;
            exit 1 if $halt_err;
            next;
        }

        system("$vcmd$rvcmd$redir");
        if ($?) {
            print "$tnam: verify error\n";
            $$rv++;
            exit 1 if $halt_err;
            next;
        }

        # The verify step writes the recovered plaintext to smtst.txt;
        # it must match the original input byte for byte.
        if ( !cmp_files( "smtst.txt", "smcont.txt" ) ) {
            print "$tnam: content verify error\n";
            $$rv++;
            exit 1 if $halt_err;
            next;
        }

        print "$tnam: OK\n";
    }
}
# Compare two files byte for byte.
# Returns 1 when the files are identical, 0 when they differ or when
# either file cannot be opened or read.
sub cmp_files {
    use FileHandle;

    my ( $f1, $f2 ) = @_;

    my $fp1 = FileHandle->new();
    my $fp2 = FileHandle->new();

    # Three-arg open: avoids mode injection via a crafted filename.
    if ( !open( $fp1, '<', $f1 ) ) {
        print STDERR "Can't Open file $f1\n";
        return 0;
    }
    if ( !open( $fp2, '<', $f2 ) ) {
        print STDERR "Can't Open file $f2\n";
        close $fp1;
        return 0;
    }
    binmode $fp1;
    binmode $fp2;

    my $ret = 0;
    for ( ; ; ) {
        # FIX: read lengths and buffers are now lexicals (the originals
        # were undeclared globals), and a sysread error (undef return) is
        # treated as "not equal" instead of comparing stale data.
        my $n1 = sysread $fp1, my $rd1, 4096;
        my $n2 = sysread $fp2, my $rd2, 4096;
        last unless defined $n1 && defined $n2;    # read error
        last if ( $n1 != $n2 );                    # different lengths
        last if ( $rd1 ne $rd2 );                  # different content
        if ( $n1 == 0 ) {                          # simultaneous EOF: equal
            $ret = 1;
            last;
        }
    }

    close $fp1;
    close $fp2;
    return $ret;
}
| GaloisInc/hacrypto | src/C/openssl/openssl-1.0.2a/test/cms-test.pl | Perl | bsd-3-clause | 20,023 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/rnClxBLdxJ/europe. Olson data version 2013a
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Europe::Simferopol;
{
$DateTime::TimeZone::Europe::Simferopol::VERSION = '1.57';
}
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Europe::Simferopol::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY, # utc_start
59295534216, # utc_end 1879-12-31 21:43:36 (Wed)
DateTime::TimeZone::NEG_INFINITY, # local_start
59295542400, # local_end 1880-01-01 00:00:00 (Thu)
8184,
0,
'LMT',
],
[
59295534216, # utc_start 1879-12-31 21:43:36 (Wed)
60694523040, # utc_end 1924-05-01 21:44:00 (Thu)
59295542376, # local_start 1879-12-31 23:59:36 (Wed)
60694531200, # local_end 1924-05-02 00:00:00 (Fri)
8160,
0,
'SMT',
],
[
60694523040, # utc_start 1924-05-01 21:44:00 (Thu)
60888146400, # utc_end 1930-06-20 22:00:00 (Fri)
60694530240, # local_start 1924-05-01 23:44:00 (Thu)
60888153600, # local_end 1930-06-21 00:00:00 (Sat)
7200,
0,
'EET',
],
[
60888146400, # utc_start 1930-06-20 22:00:00 (Fri)
61246789200, # utc_end 1941-10-31 21:00:00 (Fri)
60888157200, # local_start 1930-06-21 01:00:00 (Sat)
61246800000, # local_end 1941-11-01 00:00:00 (Sat)
10800,
0,
'MSK',
],
[
61246789200, # utc_start 1941-10-31 21:00:00 (Fri)
61278426000, # utc_end 1942-11-02 01:00:00 (Mon)
61246796400, # local_start 1941-10-31 23:00:00 (Fri)
61278433200, # local_end 1942-11-02 03:00:00 (Mon)
7200,
1,
'CEST',
],
[
61278426000, # utc_start 1942-11-02 01:00:00 (Mon)
61291126800, # utc_end 1943-03-29 01:00:00 (Mon)
61278429600, # local_start 1942-11-02 02:00:00 (Mon)
61291130400, # local_end 1943-03-29 02:00:00 (Mon)
3600,
0,
'CET',
],
[
61291126800, # utc_start 1943-03-29 01:00:00 (Mon)
61307456400, # utc_end 1943-10-04 01:00:00 (Mon)
61291134000, # local_start 1943-03-29 03:00:00 (Mon)
61307463600, # local_end 1943-10-04 03:00:00 (Mon)
7200,
1,
'CEST',
],
[
61307456400, # utc_start 1943-10-04 01:00:00 (Mon)
61323181200, # utc_end 1944-04-03 01:00:00 (Mon)
61307460000, # local_start 1943-10-04 02:00:00 (Mon)
61323184800, # local_end 1944-04-03 02:00:00 (Mon)
3600,
0,
'CET',
],
[
61323181200, # utc_start 1944-04-03 01:00:00 (Mon)
61324034400, # utc_end 1944-04-12 22:00:00 (Wed)
61323188400, # local_start 1944-04-03 03:00:00 (Mon)
61324041600, # local_end 1944-04-13 00:00:00 (Thu)
7200,
1,
'CEST',
],
[
61324034400, # utc_start 1944-04-12 22:00:00 (Wed)
62490603600, # utc_end 1981-03-31 21:00:00 (Tue)
61324045200, # local_start 1944-04-13 01:00:00 (Thu)
62490614400, # local_end 1981-04-01 00:00:00 (Wed)
10800,
0,
'MSK',
],
[
62490603600, # utc_start 1981-03-31 21:00:00 (Tue)
62506411200, # utc_end 1981-09-30 20:00:00 (Wed)
62490618000, # local_start 1981-04-01 01:00:00 (Wed)
62506425600, # local_end 1981-10-01 00:00:00 (Thu)
14400,
1,
'MSD',
],
[
62506411200, # utc_start 1981-09-30 20:00:00 (Wed)
62522139600, # utc_end 1982-03-31 21:00:00 (Wed)
62506422000, # local_start 1981-09-30 23:00:00 (Wed)
62522150400, # local_end 1982-04-01 00:00:00 (Thu)
10800,
0,
'MSK',
],
[
62522139600, # utc_start 1982-03-31 21:00:00 (Wed)
62537947200, # utc_end 1982-09-30 20:00:00 (Thu)
62522154000, # local_start 1982-04-01 01:00:00 (Thu)
62537961600, # local_end 1982-10-01 00:00:00 (Fri)
14400,
1,
'MSD',
],
[
62537947200, # utc_start 1982-09-30 20:00:00 (Thu)
62553675600, # utc_end 1983-03-31 21:00:00 (Thu)
62537958000, # local_start 1982-09-30 23:00:00 (Thu)
62553686400, # local_end 1983-04-01 00:00:00 (Fri)
10800,
0,
'MSK',
],
[
62553675600, # utc_start 1983-03-31 21:00:00 (Thu)
62569483200, # utc_end 1983-09-30 20:00:00 (Fri)
62553690000, # local_start 1983-04-01 01:00:00 (Fri)
62569497600, # local_end 1983-10-01 00:00:00 (Sat)
14400,
1,
'MSD',
],
[
62569483200, # utc_start 1983-09-30 20:00:00 (Fri)
62585298000, # utc_end 1984-03-31 21:00:00 (Sat)
62569494000, # local_start 1983-09-30 23:00:00 (Fri)
62585308800, # local_end 1984-04-01 00:00:00 (Sun)
10800,
0,
'MSK',
],
[
62585298000, # utc_start 1984-03-31 21:00:00 (Sat)
62601030000, # utc_end 1984-09-29 23:00:00 (Sat)
62585312400, # local_start 1984-04-01 01:00:00 (Sun)
62601044400, # local_end 1984-09-30 03:00:00 (Sun)
14400,
1,
'MSD',
],
[
62601030000, # utc_start 1984-09-29 23:00:00 (Sat)
62616754800, # utc_end 1985-03-30 23:00:00 (Sat)
62601040800, # local_start 1984-09-30 02:00:00 (Sun)
62616765600, # local_end 1985-03-31 02:00:00 (Sun)
10800,
0,
'MSK',
],
[
62616754800, # utc_start 1985-03-30 23:00:00 (Sat)
62632479600, # utc_end 1985-09-28 23:00:00 (Sat)
62616769200, # local_start 1985-03-31 03:00:00 (Sun)
62632494000, # local_end 1985-09-29 03:00:00 (Sun)
14400,
1,
'MSD',
],
[
62632479600, # utc_start 1985-09-28 23:00:00 (Sat)
62648204400, # utc_end 1986-03-29 23:00:00 (Sat)
62632490400, # local_start 1985-09-29 02:00:00 (Sun)
62648215200, # local_end 1986-03-30 02:00:00 (Sun)
10800,
0,
'MSK',
],
[
62648204400, # utc_start 1986-03-29 23:00:00 (Sat)
62663929200, # utc_end 1986-09-27 23:00:00 (Sat)
62648218800, # local_start 1986-03-30 03:00:00 (Sun)
62663943600, # local_end 1986-09-28 03:00:00 (Sun)
14400,
1,
'MSD',
],
[
62663929200, # utc_start 1986-09-27 23:00:00 (Sat)
62679654000, # utc_end 1987-03-28 23:00:00 (Sat)
62663940000, # local_start 1986-09-28 02:00:00 (Sun)
62679664800, # local_end 1987-03-29 02:00:00 (Sun)
10800,
0,
'MSK',
],
[
62679654000, # utc_start 1987-03-28 23:00:00 (Sat)
62695378800, # utc_end 1987-09-26 23:00:00 (Sat)
62679668400, # local_start 1987-03-29 03:00:00 (Sun)
62695393200, # local_end 1987-09-27 03:00:00 (Sun)
14400,
1,
'MSD',
],
[
62695378800, # utc_start 1987-09-26 23:00:00 (Sat)
62711103600, # utc_end 1988-03-26 23:00:00 (Sat)
62695389600, # local_start 1987-09-27 02:00:00 (Sun)
62711114400, # local_end 1988-03-27 02:00:00 (Sun)
10800,
0,
'MSK',
],
[
62711103600, # utc_start 1988-03-26 23:00:00 (Sat)
62726828400, # utc_end 1988-09-24 23:00:00 (Sat)
62711118000, # local_start 1988-03-27 03:00:00 (Sun)
62726842800, # local_end 1988-09-25 03:00:00 (Sun)
14400,
1,
'MSD',
],
[
62726828400, # utc_start 1988-09-24 23:00:00 (Sat)
62742553200, # utc_end 1989-03-25 23:00:00 (Sat)
62726839200, # local_start 1988-09-25 02:00:00 (Sun)
62742564000, # local_end 1989-03-26 02:00:00 (Sun)
10800,
0,
'MSK',
],
[
62742553200, # utc_start 1989-03-25 23:00:00 (Sat)
62758278000, # utc_end 1989-09-23 23:00:00 (Sat)
62742567600, # local_start 1989-03-26 03:00:00 (Sun)
62758292400, # local_end 1989-09-24 03:00:00 (Sun)
14400,
1,
'MSD',
],
[
62758278000, # utc_start 1989-09-23 23:00:00 (Sat)
62766824400, # utc_end 1989-12-31 21:00:00 (Sun)
62758288800, # local_start 1989-09-24 02:00:00 (Sun)
62766835200, # local_end 1990-01-01 00:00:00 (Mon)
10800,
0,
'MSK',
],
[
62766824400, # utc_start 1989-12-31 21:00:00 (Sun)
62782470000, # utc_end 1990-06-30 23:00:00 (Sat)
62766835200, # local_start 1990-01-01 00:00:00 (Mon)
62782480800, # local_end 1990-07-01 02:00:00 (Sun)
10800,
0,
'MSK',
],
[
62782470000, # utc_start 1990-06-30 23:00:00 (Sat)
62829900000, # utc_end 1991-12-31 22:00:00 (Tue)
62782477200, # local_start 1990-07-01 01:00:00 (Sun)
62829907200, # local_end 1992-01-01 00:00:00 (Wed)
7200,
0,
'EET',
],
[
62829900000, # utc_start 1991-12-31 22:00:00 (Tue)
62837503200, # utc_end 1992-03-28 22:00:00 (Sat)
62829907200, # local_start 1992-01-01 00:00:00 (Wed)
62837510400, # local_end 1992-03-29 00:00:00 (Sun)
7200,
0,
'EET',
],
[
62837503200, # utc_start 1992-03-28 22:00:00 (Sat)
62853224400, # utc_end 1992-09-26 21:00:00 (Sat)
62837514000, # local_start 1992-03-29 01:00:00 (Sun)
62853235200, # local_end 1992-09-27 00:00:00 (Sun)
10800,
1,
'EEST',
],
[
62853224400, # utc_start 1992-09-26 21:00:00 (Sat)
62868952800, # utc_end 1993-03-27 22:00:00 (Sat)
62853231600, # local_start 1992-09-26 23:00:00 (Sat)
62868960000, # local_end 1993-03-28 00:00:00 (Sun)
7200,
0,
'EET',
],
[
62868952800, # utc_start 1993-03-27 22:00:00 (Sat)
62884674000, # utc_end 1993-09-25 21:00:00 (Sat)
62868963600, # local_start 1993-03-28 01:00:00 (Sun)
62884684800, # local_end 1993-09-26 00:00:00 (Sun)
10800,
1,
'EEST',
],
[
62884674000, # utc_start 1993-09-25 21:00:00 (Sat)
62900402400, # utc_end 1994-03-26 22:00:00 (Sat)
62884681200, # local_start 1993-09-25 23:00:00 (Sat)
62900409600, # local_end 1994-03-27 00:00:00 (Sun)
7200,
0,
'EET',
],
[
62900402400, # utc_start 1994-03-26 22:00:00 (Sat)
62903422800, # utc_end 1994-04-30 21:00:00 (Sat)
62900413200, # local_start 1994-03-27 01:00:00 (Sun)
62903433600, # local_end 1994-05-01 00:00:00 (Sun)
10800,
1,
'EEST',
],
[
62903422800, # utc_start 1994-04-30 21:00:00 (Sat)
62916120000, # utc_end 1994-09-24 20:00:00 (Sat)
62903437200, # local_start 1994-05-01 01:00:00 (Sun)
62916134400, # local_end 1994-09-25 00:00:00 (Sun)
14400,
1,
'MSD',
],
[
62916120000, # utc_start 1994-09-24 20:00:00 (Sat)
62931848400, # utc_end 1995-03-25 21:00:00 (Sat)
62916130800, # local_start 1994-09-24 23:00:00 (Sat)
62931859200, # local_end 1995-03-26 00:00:00 (Sun)
10800,
0,
'MSK',
],
[
62931848400, # utc_start 1995-03-25 21:00:00 (Sat)
62947569600, # utc_end 1995-09-23 20:00:00 (Sat)
62931862800, # local_start 1995-03-26 01:00:00 (Sun)
62947584000, # local_end 1995-09-24 00:00:00 (Sun)
14400,
1,
'MSD',
],
[
62947569600, # utc_start 1995-09-23 20:00:00 (Sat)
62963902800, # utc_end 1996-03-30 21:00:00 (Sat)
62947580400, # local_start 1995-09-23 23:00:00 (Sat)
62963913600, # local_end 1996-03-31 00:00:00 (Sun)
10800,
0,
'MSK',
],
[
62963902800, # utc_start 1996-03-30 21:00:00 (Sat)
62963913600, # utc_end 1996-03-31 00:00:00 (Sun)
62963917200, # local_start 1996-03-31 01:00:00 (Sun)
62963928000, # local_end 1996-03-31 04:00:00 (Sun)
14400,
1,
'MSD',
],
[
62963913600, # utc_start 1996-03-31 00:00:00 (Sun)
62982057600, # utc_end 1996-10-27 00:00:00 (Sun)
62963928000, # local_start 1996-03-31 04:00:00 (Sun)
62982072000, # local_end 1996-10-27 04:00:00 (Sun)
14400,
1,
'MSD',
],
[
62982057600, # utc_start 1996-10-27 00:00:00 (Sun)
62987749200, # utc_end 1996-12-31 21:00:00 (Tue)
62982068400, # local_start 1996-10-27 03:00:00 (Sun)
62987760000, # local_end 1997-01-01 00:00:00 (Wed)
10800,
0,
'MSK',
],
[
62987749200, # utc_start 1996-12-31 21:00:00 (Tue)
62995366800, # utc_end 1997-03-30 01:00:00 (Sun)
62987760000, # local_start 1997-01-01 00:00:00 (Wed)
62995377600, # local_end 1997-03-30 04:00:00 (Sun)
10800,
0,
'MSK',
],
[
62995366800, # utc_start 1997-03-30 01:00:00 (Sun)
63013510800, # utc_end 1997-10-26 01:00:00 (Sun)
62995377600, # local_start 1997-03-30 04:00:00 (Sun)
63013521600, # local_end 1997-10-26 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63013510800, # utc_start 1997-10-26 01:00:00 (Sun)
63026816400, # utc_end 1998-03-29 01:00:00 (Sun)
63013518000, # local_start 1997-10-26 03:00:00 (Sun)
63026823600, # local_end 1998-03-29 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63026816400, # utc_start 1998-03-29 01:00:00 (Sun)
63044960400, # utc_end 1998-10-25 01:00:00 (Sun)
63026827200, # local_start 1998-03-29 04:00:00 (Sun)
63044971200, # local_end 1998-10-25 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63044960400, # utc_start 1998-10-25 01:00:00 (Sun)
63058266000, # utc_end 1999-03-28 01:00:00 (Sun)
63044967600, # local_start 1998-10-25 03:00:00 (Sun)
63058273200, # local_end 1999-03-28 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63058266000, # utc_start 1999-03-28 01:00:00 (Sun)
63077014800, # utc_end 1999-10-31 01:00:00 (Sun)
63058276800, # local_start 1999-03-28 04:00:00 (Sun)
63077025600, # local_end 1999-10-31 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63077014800, # utc_start 1999-10-31 01:00:00 (Sun)
63089715600, # utc_end 2000-03-26 01:00:00 (Sun)
63077022000, # local_start 1999-10-31 03:00:00 (Sun)
63089722800, # local_end 2000-03-26 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63089715600, # utc_start 2000-03-26 01:00:00 (Sun)
63108464400, # utc_end 2000-10-29 01:00:00 (Sun)
63089726400, # local_start 2000-03-26 04:00:00 (Sun)
63108475200, # local_end 2000-10-29 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63108464400, # utc_start 2000-10-29 01:00:00 (Sun)
63121165200, # utc_end 2001-03-25 01:00:00 (Sun)
63108471600, # local_start 2000-10-29 03:00:00 (Sun)
63121172400, # local_end 2001-03-25 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63121165200, # utc_start 2001-03-25 01:00:00 (Sun)
63139914000, # utc_end 2001-10-28 01:00:00 (Sun)
63121176000, # local_start 2001-03-25 04:00:00 (Sun)
63139924800, # local_end 2001-10-28 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63139914000, # utc_start 2001-10-28 01:00:00 (Sun)
63153219600, # utc_end 2002-03-31 01:00:00 (Sun)
63139921200, # local_start 2001-10-28 03:00:00 (Sun)
63153226800, # local_end 2002-03-31 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63153219600, # utc_start 2002-03-31 01:00:00 (Sun)
63171363600, # utc_end 2002-10-27 01:00:00 (Sun)
63153230400, # local_start 2002-03-31 04:00:00 (Sun)
63171374400, # local_end 2002-10-27 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63171363600, # utc_start 2002-10-27 01:00:00 (Sun)
63184669200, # utc_end 2003-03-30 01:00:00 (Sun)
63171370800, # local_start 2002-10-27 03:00:00 (Sun)
63184676400, # local_end 2003-03-30 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63184669200, # utc_start 2003-03-30 01:00:00 (Sun)
63202813200, # utc_end 2003-10-26 01:00:00 (Sun)
63184680000, # local_start 2003-03-30 04:00:00 (Sun)
63202824000, # local_end 2003-10-26 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63202813200, # utc_start 2003-10-26 01:00:00 (Sun)
63216118800, # utc_end 2004-03-28 01:00:00 (Sun)
63202820400, # local_start 2003-10-26 03:00:00 (Sun)
63216126000, # local_end 2004-03-28 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63216118800, # utc_start 2004-03-28 01:00:00 (Sun)
63234867600, # utc_end 2004-10-31 01:00:00 (Sun)
63216129600, # local_start 2004-03-28 04:00:00 (Sun)
63234878400, # local_end 2004-10-31 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63234867600, # utc_start 2004-10-31 01:00:00 (Sun)
63247568400, # utc_end 2005-03-27 01:00:00 (Sun)
63234874800, # local_start 2004-10-31 03:00:00 (Sun)
63247575600, # local_end 2005-03-27 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63247568400, # utc_start 2005-03-27 01:00:00 (Sun)
63266317200, # utc_end 2005-10-30 01:00:00 (Sun)
63247579200, # local_start 2005-03-27 04:00:00 (Sun)
63266328000, # local_end 2005-10-30 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63266317200, # utc_start 2005-10-30 01:00:00 (Sun)
63279018000, # utc_end 2006-03-26 01:00:00 (Sun)
63266324400, # local_start 2005-10-30 03:00:00 (Sun)
63279025200, # local_end 2006-03-26 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63279018000, # utc_start 2006-03-26 01:00:00 (Sun)
63297766800, # utc_end 2006-10-29 01:00:00 (Sun)
63279028800, # local_start 2006-03-26 04:00:00 (Sun)
63297777600, # local_end 2006-10-29 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63297766800, # utc_start 2006-10-29 01:00:00 (Sun)
63310467600, # utc_end 2007-03-25 01:00:00 (Sun)
63297774000, # local_start 2006-10-29 03:00:00 (Sun)
63310474800, # local_end 2007-03-25 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63310467600, # utc_start 2007-03-25 01:00:00 (Sun)
63329216400, # utc_end 2007-10-28 01:00:00 (Sun)
63310478400, # local_start 2007-03-25 04:00:00 (Sun)
63329227200, # local_end 2007-10-28 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63329216400, # utc_start 2007-10-28 01:00:00 (Sun)
63342522000, # utc_end 2008-03-30 01:00:00 (Sun)
63329223600, # local_start 2007-10-28 03:00:00 (Sun)
63342529200, # local_end 2008-03-30 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63342522000, # utc_start 2008-03-30 01:00:00 (Sun)
63360666000, # utc_end 2008-10-26 01:00:00 (Sun)
63342532800, # local_start 2008-03-30 04:00:00 (Sun)
63360676800, # local_end 2008-10-26 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63360666000, # utc_start 2008-10-26 01:00:00 (Sun)
63373971600, # utc_end 2009-03-29 01:00:00 (Sun)
63360673200, # local_start 2008-10-26 03:00:00 (Sun)
63373978800, # local_end 2009-03-29 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63373971600, # utc_start 2009-03-29 01:00:00 (Sun)
63392115600, # utc_end 2009-10-25 01:00:00 (Sun)
63373982400, # local_start 2009-03-29 04:00:00 (Sun)
63392126400, # local_end 2009-10-25 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63392115600, # utc_start 2009-10-25 01:00:00 (Sun)
63405421200, # utc_end 2010-03-28 01:00:00 (Sun)
63392122800, # local_start 2009-10-25 03:00:00 (Sun)
63405428400, # local_end 2010-03-28 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63405421200, # utc_start 2010-03-28 01:00:00 (Sun)
63424170000, # utc_end 2010-10-31 01:00:00 (Sun)
63405432000, # local_start 2010-03-28 04:00:00 (Sun)
63424180800, # local_end 2010-10-31 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63424170000, # utc_start 2010-10-31 01:00:00 (Sun)
63436870800, # utc_end 2011-03-27 01:00:00 (Sun)
63424177200, # local_start 2010-10-31 03:00:00 (Sun)
63436878000, # local_end 2011-03-27 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63436870800, # utc_start 2011-03-27 01:00:00 (Sun)
63455619600, # utc_end 2011-10-30 01:00:00 (Sun)
63436881600, # local_start 2011-03-27 04:00:00 (Sun)
63455630400, # local_end 2011-10-30 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63455619600, # utc_start 2011-10-30 01:00:00 (Sun)
63468320400, # utc_end 2012-03-25 01:00:00 (Sun)
63455626800, # local_start 2011-10-30 03:00:00 (Sun)
63468327600, # local_end 2012-03-25 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63468320400, # utc_start 2012-03-25 01:00:00 (Sun)
63487069200, # utc_end 2012-10-28 01:00:00 (Sun)
63468331200, # local_start 2012-03-25 04:00:00 (Sun)
63487080000, # local_end 2012-10-28 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63487069200, # utc_start 2012-10-28 01:00:00 (Sun)
63500374800, # utc_end 2013-03-31 01:00:00 (Sun)
63487076400, # local_start 2012-10-28 03:00:00 (Sun)
63500382000, # local_end 2013-03-31 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63500374800, # utc_start 2013-03-31 01:00:00 (Sun)
63518518800, # utc_end 2013-10-27 01:00:00 (Sun)
63500385600, # local_start 2013-03-31 04:00:00 (Sun)
63518529600, # local_end 2013-10-27 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63518518800, # utc_start 2013-10-27 01:00:00 (Sun)
63531824400, # utc_end 2014-03-30 01:00:00 (Sun)
63518526000, # local_start 2013-10-27 03:00:00 (Sun)
63531831600, # local_end 2014-03-30 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63531824400, # utc_start 2014-03-30 01:00:00 (Sun)
63549968400, # utc_end 2014-10-26 01:00:00 (Sun)
63531835200, # local_start 2014-03-30 04:00:00 (Sun)
63549979200, # local_end 2014-10-26 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63549968400, # utc_start 2014-10-26 01:00:00 (Sun)
63563274000, # utc_end 2015-03-29 01:00:00 (Sun)
63549975600, # local_start 2014-10-26 03:00:00 (Sun)
63563281200, # local_end 2015-03-29 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63563274000, # utc_start 2015-03-29 01:00:00 (Sun)
63581418000, # utc_end 2015-10-25 01:00:00 (Sun)
63563284800, # local_start 2015-03-29 04:00:00 (Sun)
63581428800, # local_end 2015-10-25 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63581418000, # utc_start 2015-10-25 01:00:00 (Sun)
63594723600, # utc_end 2016-03-27 01:00:00 (Sun)
63581425200, # local_start 2015-10-25 03:00:00 (Sun)
63594730800, # local_end 2016-03-27 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63594723600, # utc_start 2016-03-27 01:00:00 (Sun)
63613472400, # utc_end 2016-10-30 01:00:00 (Sun)
63594734400, # local_start 2016-03-27 04:00:00 (Sun)
63613483200, # local_end 2016-10-30 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63613472400, # utc_start 2016-10-30 01:00:00 (Sun)
63626173200, # utc_end 2017-03-26 01:00:00 (Sun)
63613479600, # local_start 2016-10-30 03:00:00 (Sun)
63626180400, # local_end 2017-03-26 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63626173200, # utc_start 2017-03-26 01:00:00 (Sun)
63644922000, # utc_end 2017-10-29 01:00:00 (Sun)
63626184000, # local_start 2017-03-26 04:00:00 (Sun)
63644932800, # local_end 2017-10-29 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63644922000, # utc_start 2017-10-29 01:00:00 (Sun)
63657622800, # utc_end 2018-03-25 01:00:00 (Sun)
63644929200, # local_start 2017-10-29 03:00:00 (Sun)
63657630000, # local_end 2018-03-25 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63657622800, # utc_start 2018-03-25 01:00:00 (Sun)
63676371600, # utc_end 2018-10-28 01:00:00 (Sun)
63657633600, # local_start 2018-03-25 04:00:00 (Sun)
63676382400, # local_end 2018-10-28 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63676371600, # utc_start 2018-10-28 01:00:00 (Sun)
63689677200, # utc_end 2019-03-31 01:00:00 (Sun)
63676378800, # local_start 2018-10-28 03:00:00 (Sun)
63689684400, # local_end 2019-03-31 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63689677200, # utc_start 2019-03-31 01:00:00 (Sun)
63707821200, # utc_end 2019-10-27 01:00:00 (Sun)
63689688000, # local_start 2019-03-31 04:00:00 (Sun)
63707832000, # local_end 2019-10-27 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63707821200, # utc_start 2019-10-27 01:00:00 (Sun)
63721126800, # utc_end 2020-03-29 01:00:00 (Sun)
63707828400, # local_start 2019-10-27 03:00:00 (Sun)
63721134000, # local_end 2020-03-29 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63721126800, # utc_start 2020-03-29 01:00:00 (Sun)
63739270800, # utc_end 2020-10-25 01:00:00 (Sun)
63721137600, # local_start 2020-03-29 04:00:00 (Sun)
63739281600, # local_end 2020-10-25 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63739270800, # utc_start 2020-10-25 01:00:00 (Sun)
63752576400, # utc_end 2021-03-28 01:00:00 (Sun)
63739278000, # local_start 2020-10-25 03:00:00 (Sun)
63752583600, # local_end 2021-03-28 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63752576400, # utc_start 2021-03-28 01:00:00 (Sun)
63771325200, # utc_end 2021-10-31 01:00:00 (Sun)
63752587200, # local_start 2021-03-28 04:00:00 (Sun)
63771336000, # local_end 2021-10-31 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63771325200, # utc_start 2021-10-31 01:00:00 (Sun)
63784026000, # utc_end 2022-03-27 01:00:00 (Sun)
63771332400, # local_start 2021-10-31 03:00:00 (Sun)
63784033200, # local_end 2022-03-27 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63784026000, # utc_start 2022-03-27 01:00:00 (Sun)
63802774800, # utc_end 2022-10-30 01:00:00 (Sun)
63784036800, # local_start 2022-03-27 04:00:00 (Sun)
63802785600, # local_end 2022-10-30 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63802774800, # utc_start 2022-10-30 01:00:00 (Sun)
63815475600, # utc_end 2023-03-26 01:00:00 (Sun)
63802782000, # local_start 2022-10-30 03:00:00 (Sun)
63815482800, # local_end 2023-03-26 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63815475600, # utc_start 2023-03-26 01:00:00 (Sun)
63834224400, # utc_end 2023-10-29 01:00:00 (Sun)
63815486400, # local_start 2023-03-26 04:00:00 (Sun)
63834235200, # local_end 2023-10-29 04:00:00 (Sun)
10800,
1,
'EEST',
],
[
63834224400, # utc_start 2023-10-29 01:00:00 (Sun)
63847530000, # utc_end 2024-03-31 01:00:00 (Sun)
63834231600, # local_start 2023-10-29 03:00:00 (Sun)
63847537200, # local_end 2024-03-31 03:00:00 (Sun)
7200,
0,
'EET',
],
[
63847530000, # utc_start 2024-03-31 01:00:00 (Sun)
63865674000, # utc_end 2024-10-27 01:00:00 (Sun)
63847540800, # local_start 2024-03-31 04:00:00 (Sun)
63865684800, # local_end 2024-10-27 04:00:00 (Sun)
10800,
1,
'EEST',
],
];
# Version of the Olson tz database this table was generated from.
sub olson_version { return '2013a' }
# Number of DST transitions precomputed in the span table.
sub has_dst_changes { return 47 }
# Last year covered by the precomputed spans; later dates are
# computed from the rules instead.
sub _max_year { return 2023 }
# Construct the singleton instance, seeding it with the generated
# span table above.
sub _new_instance
{
    my $class = shift;
    return $class->_init( @_, spans => $spans );
}
# Standard-time UTC offset (seconds) after the final span: EET, UTC+2.
sub _last_offset { return 7200 }
my $last_observance = bless( {
'format' => 'EE%sT',
'gmtoff' => '2:00',
'local_start_datetime' => bless( {
'formatter' => undef,
'local_rd_days' => 729113,
'local_rd_secs' => 14400,
'offset_modifier' => 0,
'rd_nanosecs' => 0,
'tz' => bless( {
'name' => 'floating',
'offset' => 0
}, 'DateTime::TimeZone::Floating' ),
'utc_rd_days' => 729113,
'utc_rd_secs' => 14400,
'utc_year' => 1998
}, 'DateTime' ),
'offset_from_std' => 0,
'offset_from_utc' => 7200,
'until' => [],
'utc_start_datetime' => bless( {
'formatter' => undef,
'local_rd_days' => 729113,
'local_rd_secs' => 3600,
'offset_modifier' => 0,
'rd_nanosecs' => 0,
'tz' => bless( {
'name' => 'floating',
'offset' => 0
}, 'DateTime::TimeZone::Floating' ),
'utc_rd_days' => 729113,
'utc_rd_secs' => 3600,
'utc_year' => 1998
}, 'DateTime' )
}, 'DateTime::TimeZone::OlsonDB::Observance' )
;
# Observance in effect once the precomputed spans run out.
sub _last_observance { return $last_observance }
my $rules = [
bless( {
'at' => '1:00u',
'from' => '1996',
'in' => 'Oct',
'letter' => '',
'name' => 'EU',
'offset_from_std' => 0,
'on' => 'lastSun',
'save' => '0',
'to' => 'max',
'type' => undef
}, 'DateTime::TimeZone::OlsonDB::Rule' ),
bless( {
'at' => '1:00u',
'from' => '1981',
'in' => 'Mar',
'letter' => 'S',
'name' => 'EU',
'offset_from_std' => 3600,
'on' => 'lastSun',
'save' => '1:00',
'to' => 'max',
'type' => undef
}, 'DateTime::TimeZone::OlsonDB::Rule' )
]
;
# DST rules (EU) used to extend the zone past the precomputed spans.
sub _rules { return $rules }
1;
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/DateTime/TimeZone/Europe/Simferopol.pm | Perl | mit | 26,732 |
#! /usr/bin/env perl
##**************************************************************
##
## Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
## University of Wisconsin-Madison, WI.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##**************************************************************
##########################################################################
# For information on command line options of this script, call it with -h
#
# The logic in this code is divided into two functions. They are parse()
# and reconstitute().
# parse() takes one string as an argument. It will parse the string into
# one giant associative array, and return a reference.
# reconstitute() takes the parsed parameter hash (plus a defaults hash)
# and emits the generated output.  (The original comment was truncated here.)
# Because this deals a great deal with specifying meta-information on
# parameters, comments and variable naming can be confusing. So I will
# try to use the term "property" to only refer to the meta-information,
# and the term "parameter" or "params" to refer to those parameters that
# are actually referenced in Condor code, and (will be) actually
# configured by users.
# This main sub simply specifies a default, and calls parse.
# NOTE: $option contains values of command line options. See configure()
# at the bottom of this script if you want more info.
# main() — top-level driver: read the input parameter-definition file,
# parse it into one big hash of parameters, then emit the generated
# output via reconstitute().
sub main {
    # fetch contents of input file into string $input_contents
    my $input_contents = file_get_contents($options{input});
    # parse contents, and put into associative array $params
    my $params = parse($input_contents);
    # Properties assumed for parameters that do not specify them.
    my $defaults = {
        version => '7.1.0',
    };
    # Call reconstitute on the params to do all of the outputting.
    # BUG FIX: this previously passed $default (singular) — a variable that
    # is never declared anywhere, i.e. undef — so the $defaults hash built
    # just above was never actually handed to reconstitute().
    reconstitute($params, $defaults) unless $options{on_the_fly};
    # reconstitute() is skipped when on_the_fly is set because in that mode
    # &parse issues the reconstitute calls itself while parsing; calling it
    # here as well would repeat the output.
    # hack for our build system
    # This #includes param_info_init.c
    #`touch param_info.c`;
}
##########################################################################
use strict;
use warnings;
no warnings 'closure';
use Data::Dumper;
use Getopt::Std;
use Storable qw(dclone);
# Global variables. The first three are used internally by &parse,
# and the %options is set immediately after execution with the command
# line options for easy access, as specified in &configure.
# 'our' supersedes the obsolete 'use vars' pragma (perldoc vars); the effect
# here is the same: declare these package globals so the code below can use
# them under 'use strict'.
our ( $remaining_text, $parameters, $current_parameter, %options );
# You may be surprised to see $remaining_text, $parameters, and
# $current_parameter listed here as global variables, even though they
# are used exclusively by the &parse sub. While it probably isn't as
# clean as it (c|sh)ould be, it ended up being a step in the simplest
# solution to making recursive calls to &parse function as expected,
# due to a variety of subtleties involving scoping in subs contained
# Here we define the syntax rules for the parser. Each character class
# consists of an array ref of two elements, the first being a regular
# expression meant to match at least one of those characters, the latter
# being a string name for the character class.
# These constants will be used in the &parse function.
# Character classes used by the tokenizer in &parse.  Each class is a
# two-element array ref: [ regex matching a run of such characters,
# human-readable name used when reporting parse errors ].
use constant {
WHITESPACE => ['[\r\n\s]+','whitespace'], # TODO: combine these into one class (original comment was truncated here)
COMMENTS => ['\#[^\n]+\n', 'comments'],
LINEBREAK => ['\n\r?','linebreak'],
SPACES => ['[ ]+','spaces'],
ASSIGNMENT => ['\:?\=?','assignment operator'],
ASSIGNMENT_EQUALS => ['\=?','assignment equals operator'],
ASSIGNMENT_COLON => ['\:?','assignment colon operator'],
ASSIGNMENT_HEREDOC => ['[A-Za-z]+', 'heredoc deliminator'],
PARAMETER_TITLE => ['[a-zA-Z0-9_\.]+','parameter title'],
PROPERTY_NAME => ['[a-zA-Z0-9_-]+','property name'],
PROPERTY_VALUE => ['[^\n]+','property value'],
DATACLASS_NAME => ['[a-zA-Z0-9_-]+','dataclass name'],
OPEN_BRACKET => ['\[','open bracket'],
CLOSE_BRACKET => ['\]','close bracket'],
OPEN_PARENTHESIS => ['\(', 'open parenthesis'],
CLOSE_PARENTHESIS => ['\)','close parenthesis'],
};
##################################################################################
# This is the template to be used when substituting for the parameters properties.
# The string that is substituted is in the format of %property%, where property is
# the name of the property to be substituted.
# (property types and names are defined farther below in $property_types)
##################################################################################
use constant { RECONSTITUTE_TEMPLATE_FUNC =>
'param_info_insert(%parameter_name%, %version%, %default%, %range%,
%state%, %type%, %is_macro%, %reconfig%, %customization%,
%friendly_name%, %usage%,
%url%,
%tags%);
'
};
use constant { RECONSTITUTE_TEMPLATE_OLD =>
'static const param_info_%typequal% param_def_info_%parameter_var% = {
{%parameter_name%, %default%, %version%,
%friendly_name%, %usage%,
%url%, %tags%,
%type%, %state%, %customization%, %reconfig%, %is_macro%, %def_valid%, %range_valid%},
%cooked_values%
};
'
};
use constant { RECONSTITUTE_TEMPLATE =>
'
static const char PVAR_%parameter_var%_default [] = %default%;
'
};
use constant { RECONSTITUTE_TEMPLATE_WIN =>
'
#ifdef WIN32
static const char PVAR_%parameter_var%_default [] = %win32_default%;
#else
static const char PVAR_%parameter_var%_default [] = %default%;
#endif
'
};
use constant { PARAM_INIT_HEADER =>
'void
param_info_init()
{
static int done_once = 0;
// guard against multiple initializations of the default table.
if (done_once == 1) {
return;
}
done_once = 1;
param_info_hash_create(¶m_info);
param_info_storage_t tmp;
'
};
# PARAM_INIT_INFO: per-parameter body emitted inside param_info_init(); fills
# the shared "tmp" record and inserts it into the hash table.  %cooked_values%
# carries the pre-parsed int/double assignment (and optional range limits).
use constant { PARAM_INIT_INFO =>
'
tmp.type_string.hdr.name = %parameter_name%;
tmp.type_string.hdr.str_val = %default%;
tmp.type_string.hdr.type = %type%;
tmp.type_string.hdr.default_valid = %def_valid%;
tmp.type_string.hdr.range_valid = %range_valid%;
%cooked_values%
param_info_hash_insert(param_info, &tmp);
'
};
# PARAM_INIT_INFO_WIN: as PARAM_INIT_INFO, but chooses between the Windows
# and unix default value/validity at C compile time via #ifdef WIN32.
use constant { PARAM_INIT_INFO_WIN =>
'
tmp.type_string.hdr.name = %parameter_name%;
tmp.type_string.hdr.type = %type%;
tmp.type_string.hdr.range_valid = %range_valid%;
#ifdef WIN32
tmp.type_string.hdr.str_val = %win32_default%;
tmp.type_string.hdr.default_valid = %win_valid%;
#else
tmp.type_string.hdr.str_val = %default%;
tmp.type_string.hdr.default_valid = %def_valid%;
#endif
param_info_hash_insert(param_info, &tmp);
'
};
# PARAM_INIT_FOOTER: C epilogue closing param_info_init() after all
# parameters have been inserted.
use constant { PARAM_INIT_FOOTER =>
'
param_info_hash_optimize(param_info);
}
'
};
##################################################################################
# $property_types customizes the type and options of the properties. Each property is
# pointing toward a hash, containing the following metadata:
# type => (String specifying the type of that property. Types are defined in
# the $type_subs variable below)
# optional => (Set this to 1 to make this property optional.)
# dont_trim => (Set this to 1 to not trim trailing whitespace on value.)
##################################################################################
# Declarative schema for the per-parameter properties; consumed by the
# property loop inside reconstitute().  Keys are property names, values are
# metadata hashes whose fields (type / optional / dont_trim) are described in
# the comment block above.  The type names index into $type_subs below.
my $property_types = {
daemon_name => { type => "literal", optional => 1 },
parameter_name => { type => "char[]" },
parameter_var => { type => "nodots" },
default => { type => "char[]", dont_trim => 1 },
# win32_default => { type => "char[]", dont_trim => 1, optional => 1 },
friendly_name => { type => "char[]" },
type => { type => "param_type" },
state => { type => "state_type" },
version => { type => "char[]", optional => 1 },
tags => { type => "char[]" },
usage => { type => "char[]" },
# id => { type => "int", optional => 1},
aliases => { type => "char[]", optional => 1 },
range => { type => "char[]", optional => 1 },
is_macro => { type => "is_macro_type", optional => 1 },
reconfig => { type => "reconfig_type", optional => 1 },
customization => { type => "customization_type", optional => 1 },
url => { type => "char[]", optional => 1 }
};
##################################################################################
# $type_subs tells this script how to treat all the different types of parameters
# Each sub takes the value as an argument and returns the properly formatted value.
# It should be formatted such that it can be inserted without problem in the
# RECONSTITUTE_TEMPLATE.
# Also, it should be in charge of dying if it encounters a bad value.
# When writing these subs, you have the following subs available:
# escape( $ ): takes one argument, escapes all potentially problematic characters.
# enum($, @_ ...): The first argument should be the input value. The remaining
# arguments should be acceptable values. It will try to
# (case-insensitively) match the user input with the remaining
# acceptable values. If it cannot find a match, it will die.
# Otherwise, it will correct the capitalization.
# type_error($, $): Dies with a nice error message. The first argument should be
# the value, the second the type.
##################################################################################
# $type_subs maps each property type name (from $property_types) to a
# formatter sub.  Each sub takes the raw value and returns text ready for
# template insertion, calling type_error()/enum() (package-level subs defined
# inside do_one_property below) on bad input.
my $type_subs = {
'char[]' => sub { return '"'.escape($_[0]).'"'; },
'literal' => sub { return $_[0]; },
'bool' => sub { return enum($_[0],'true','false'); },
'int' => sub { return $_[0]=~/^\d+$/?$_[0]:type_error($_[0], 'int'); },
'float' => sub { return $_[0]=~/^\d+\.\d+$/?$_[0]:type_error($_[0], 'float'); },
'double' => sub { return $_[0]=~/^\d+\.\d+$/?$_[0]: type_error($_[0], 'double');},
# BUGFIX: this used to read "return $_[0]=~/^\d+$/ and $_[0]<256 ? ... :
# type_error(...)".  Low-precedence "and" binds looser than "return", so
# that parsed as "(return MATCH) and ...": the sub returned the boolean
# match result instead of the value, and the <256 range check and
# type_error() call were unreachable.
'char' => sub { return ($_[0]=~/^\d+$/ && $_[0]<256) ? $_[0] : type_error($_[0], 'char'); },
'state_type' => sub {
my $state = enum($_[0],'USER','AUTODEFAULT','DEFAULT', 'RUNTIME');
return "STATE_".$state;
},
# nodots: dots are illegal in C identifiers, so map them to underscores.
'nodots' => sub {
my $param_var = $_[0];
$param_var =~ s/\./_/g;
return $param_var;
},
'param_type' => sub {
my $type = enum($_[0],'STRING','INT','BOOL', 'DOUBLE');
return "PARAM_TYPE_".$type;
},
'is_macro_type' => sub {
my $is_macro = enum($_[0],'true','false');
return ($is_macro =~ /true/) ? 1 : 0;
},
'reconfig_type' => sub {
my $reconfig = enum($_[0],'true', 'false');
return ($reconfig =~ /true/) ? 1 : 0;
},
'customization_type' => sub {
my $customization = enum($_[0], 'NORMAL', 'SELDOM', 'EXPERT');
return "CUSTOMIZATION_".$customization;
},
};
###############################################################################################
# The reconstitute function takes a hash of parameters as its first argument, and a default
# parameter structure as its second. The hash of parameters should be in the same format as
# the one that is generated by the &parse function. The default parameters should be a hash,
# with the keys being property names and the values being the actual default property
# values.
# Possible TODO: Allow &parse to load default structure from a magic "[_default]" parameter.
sub reconstitute {
my $structure = shift;
my $default_structure = shift;
my $output_filename = $options{output};
# NOTE(review): the named subs nested below are package-level in Perl, not
# closures; they capture enclosing lexicals such as $output_filename from the
# first call only ("my ... will not stay shared").  Harmless as long as
# reconstitute is driven with a single output file per run - confirm before
# reusing it differently.
###########################################################################
## All of the actual file output is contained in this section. ##
###########################################################################
sub begin_output {
open REC_OUT, ($options{append}?'>':'').">$output_filename" unless $options{stdout};
$options{append} = 1;
}
sub continue_output {
if ($options{stdout}) { print $_[0]; }
else { print REC_OUT $_[0]; }
}
sub end_output {
print REC_OUT "\n" unless $options{stdout};
close REC_OUT unless $options{stdout};
}
############################################################################
# replace_by_hash takes a hash as the first argument and a string as a second.
# It then replaces all keys contained in the hash by their respective values.
sub replace_by_hash {
my ($replace,$context) = @_;
while(my($key, $val) = each %{$replace} ) {
$key =~ s/\W/\\$&/mg;
$context =~ s/$key/$val/g ;
}
return $context;
}
# param_err is just a simple wrapper for errors encountered.
# BUGFIX: this used to test $options{ignore}, a key that no switch in
# configure() ever sets, so the -e ("errors": do not die on some errors)
# flag was dead and every parameter error was fatal.  The option actually
# recorded by configure() is 'errors'.
sub param_err {
print STDERR $_[0]."\n\t\t" and die unless($options{errors});
}
#####################################################################
# do_one_property
# This function gets the correct replacement value of one property.
# It is called with a ref to the parameters substructure, with the
# type (ie, char[], int, etc) of the property, and with the name
# of the property. If it cannot find the property, it will return
# the default value instead.
sub do_one_property {
# $s is a ref to the structure of this parameter (ie, {name=>'foo',usage=>'bar'})
# $i is the metadata of the field (ie, {type=>'char[]',optional=1})
# $p is the name of the property (ie, 'friendly_name')
my ($s,$i,$p) = @_;
##############################################################################
# escape and enum are two functions useful for subs contained in type_subs. #
# They assist with common user input formatting needs. #
##############################################################################
# type_error generates a nice error message for wrong types
sub type_error {
my ($value, $type) = @_;
param_err("PARAMETER TYPE ERROR: '$value' is not a valid type $type.");
}
# escape will escape various control characters from a string so that it
# can be safely used in quotes in C code.
sub escape {
my $input = shift;
return $input unless $input;
# trim trailing whitespace
# NOTE(review): trimming only happens when the dont_trim key exists AND is
# not 1; properties without the key are never trimmed - confirm intended.
if (exists($i->{dont_trim})) {
$input =~ s/\s+$// if $i->{dont_trim} != 1;
}
$input =~ s/\\/\\\\/g;
$input =~ s/\n/\\n/g;
$input =~ s/\t/\\t/g;
$input =~ s/\r/\\r/g;
$input =~ s/\f/\\f/g;
$input =~ s/'/\\\'/g;
$input =~ s/"/\\\"/g;
$input =~ s/\?/\\\?/g;
return $input;
}
# The first argument of enum is a user inputted value that is matched
# in a case-insensitive manner with the remaining arguments. If there is
# a match, then it returns the match, using the capitalization of the
# latter argument. If there is not a match, it will explode with an error.
sub enum {
my $c = shift;
my @list = @_;
foreach (@list) { return $_ if lc($c) eq lc($_); }
return param_err("$p isn't valid ".$i->{type}.". Use one of '@_' instead of $c.");
}
# All the logic in this function is contained in the line below. It calls the
# type_sub for proper type, with either the param's value for that property,
# or the default value for that property (if the param does not contain that
# property).
return $type_subs->{$i->{type}}(exists $s->{$p} ? $s->{$p} : $default_structure->{$p} );
}
#####################################################################
# Here we have the main logic of this function.
begin_output(); # opening the file, and beginning output
continue_output("#include \"param_info.h\"\n");
my @var_info;
# Loop through each of the parameters in the structure passed as an argument
while(my ($param_name, $sub_structure) = each %{$structure}){
#my $daemon_name = "";
#if ($param_name =~ /\./) {
# my @aaa = split(/\./, $param_name);
# $param_name = $aaa[1];
# $daemon_name = $aaa[0];
# print "$aaa[1] of $aaa[0]\n";
#}
my %replace=();
# Quickly add the pseudo-property "parameter_name" for the name of the
# parameter, so that it can be treated just like any other property.
$sub_structure->{'parameter_name'} = $param_name;
$sub_structure->{'parameter_var'} = $param_name;
my $typequal = "t";
my $cooked_values = "";
my $win_cooked_values = "";
my $typequal_ranged = "";
my $cooked_range = "";
my $nix_default = $sub_structure->{'default'};
my $win_default = $sub_structure->{'win32_default'};
my $def_valid = (defined $nix_default && $nix_default ne "") ? "1" : "0";
my $win_valid = (defined $win_default && $win_default ne "") ? "1" : "0";
my $range_valid = "0";
my $var_name = $param_name;
my $range_type = "";
$var_name =~ s/\./_/g;
print Dumper($sub_structure) if $options{debug};
# Loop through each of the properties in the hash specifying property
# rules. (This hash is defined at the top of this file and it details
# how every property should be treated).
#while(my($name, $info) = each %{$property_types}){
for my $name ( sort keys %{$property_types} ){
my $info = $property_types->{$name};
# unless the $sub_structure contains the property or if that property
# is optional, summon an error.
unless(defined $sub_structure->{$name} or $info->{'optional'}){
param_err ("$param_name does not have required property $name.");}
# Get the property value; processed, formatted, and ready for insertion
# by do_one_property().
$replace{"%$name%"}=do_one_property($sub_structure,$info,$name);
# TYPECHECK: certain parameters types must have a non-empty default
# this is also where we set convert string default value to int or double as needed
# and decide whether to set the default_valid flag or not.
if ($name eq "type")
{
$typequal = do_one_property($sub_structure,$info,$name);
#print "type1: ".($type_subs->{$info->{type}})."\n";
#print "type: ".($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}))."\n";
# Integer parameters
if ($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}) eq "PARAM_TYPE_INT")
{
$cooked_values = $nix_default;
if ($cooked_values =~ /^[0-9\-\*\/\(\) \t]*$/) {
$def_valid = "1";
} else {
#print "$param_name default is expression $cooked_values\n";
$cooked_values = "0";
$def_valid = "0";
}
if (defined $win_default)
{
$win_cooked_values = $win_default;
if ($win_cooked_values =~ /^[0-9\-\*\/\(\) \t]*$/) {
$win_valid = "1";
} else {
#print "$param_name default is expression $win_cooked_values\n";
$win_cooked_values = "0";
$win_valid = "0";
}
$win_cooked_values = "tmp.type_int.int_val = ".$win_cooked_values.";";
}
if ($nix_default eq "") {
print "ERROR: Integer parameter $param_name needs " .
"a default!\n";
}
#print "$param_name cooked is $cooked_values\n";
$cooked_values = "tmp.type_int.int_val = ".$cooked_values.";";
}
# Boolean parameters
# NOTE(review): the TRUE|FALSE|... alternation below is not grouped, so
# only the first branch is anchored at the start and only the last at the
# end; any default merely *containing* one of the middle words is accepted
# as a valid boolean.  Left as-is to avoid changing accepted inputs.
if ($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}) eq "PARAM_TYPE_BOOL")
{
$cooked_values = $nix_default;
if ($cooked_values =~ /^[ \t]*TRUE|FALSE|true|false|0|1[ \t]*$/) {
$def_valid = "1";
} else {
#print "$param_name default is expression $cooked_values\n";
$cooked_values = "0";
$def_valid = "0";
}
if (defined $win_default)
{
$win_cooked_values = $win_default;
if ($win_cooked_values =~ /^[ \t]*TRUE|FALSE|true|false|0|1[ \t]*$/) {
$win_valid = "1";
} else {
#print "$param_name default is expression $win_cooked_values\n";
$win_cooked_values = "0";
$win_valid = "0";
}
$win_cooked_values = "tmp.type_int.int_val = ".$win_cooked_values.";";
}
$cooked_values = "tmp.type_int.int_val = ".$cooked_values.";";
}
# Double parameters
if ($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}) eq "PARAM_TYPE_DOUBLE")
{
$cooked_values = $nix_default;
if ($cooked_values =~ /^[0-9\.\-eE+\*\/\(\) \t]*$/) {
$def_valid = "1";
} else {
#print "$param_name default is expression $cooked_values\n";
$cooked_values = "0";
$def_valid = "0";
}
if (defined $win_default)
{
# BUGFIX: this branch cooked $nix_default where the int and bool
# branches cook $win_default, so a Windows-specific double default
# was validated and emitted from the unix value instead.
$win_cooked_values = $win_default;
if ($win_cooked_values =~ /^[0-9\.\-eE+\*\/\(\) \t]*$/) {
$win_valid = "1";
} else {
#print "$param_name default is expression $win_cooked_values\n";
$win_cooked_values = "0";
$win_valid = "0";
}
$win_cooked_values = "tmp.type_double.dbl_val = ".$win_cooked_values.";";
}
if ($nix_default eq "") {
print "ERROR: Double parameter $param_name needs " .
"a default!\n";
}
$cooked_values = "tmp.type_double.dbl_val = ".$cooked_values.";";
}
}
# convert ranges from string to int or double if we can
# if range can be set a compile time, then we need to emit a xxx_ranged
# structure and two additional data values. plus we set the
# range_valid flag.
#
if ($name eq "range")
{
if (($sub_structure->{"type"}) eq "int") {
$range_type = "PARAM_TYPE_INT";
} else {
$range_type = "PARAM_TYPE_DOUBLE";
}
#print "range_type: $range_type\n";
my $range_raw = ".*";
if (exists $sub_structure->{'range'}) {
$range_raw = $sub_structure->{'range'};
}
if ($range_raw ne ".*")
{
if ($range_raw =~ /^[0-9\.\-eE+, \t]*$/)
{
#print "$param_name range is numeric $range_raw\n";
my @range_list = split(/,/, $range_raw);
$typequal_ranged = "_ranged";
if ($range_type eq "PARAM_TYPE_DOUBLE") {
$cooked_range = "\n\ttmp.type_double_ranged.dbl_min = ".$range_list[0].";\n";
} else {
$cooked_range = "\n\ttmp.type_int_ranged.int_min = ".$range_list[0].";\n";
#print "$param_name range min is ".$range_list[0]."\n";
}
if (scalar(@range_list) > 1 && length($range_list[1]) > 0) {
if ($range_type eq "PARAM_TYPE_DOUBLE") {
$cooked_range .= "\ttmp.type_double_ranged.dbl_max = ".$range_list[1].";\n";
} else {
$cooked_range .= "\ttmp.type_int_ranged.int_max = ".$range_list[1].";\n";
#print "$param_name range max is ".$range_list[1]."\n";
}
} else {
# open-ended range: default the max to the type's largest value
if ($range_type eq "PARAM_TYPE_DOUBLE") {
$cooked_range .= "\ttmp.type_double_ranged.dbl_max = DBL_MAX;\n";
} else {
$cooked_range .= "\ttmp.type_int_ranged.int_max = INT_MAX;\n";
#print "$param_name range default max \n";
}
}
$range_valid = "1";
}
else
{
#print "$param_name range is expression $range_raw\n";
}
}
}
}
$replace{"%def_valid%"} = $def_valid;
$replace{"%range_valid%"} = $range_valid;
$replace{"%cooked_values%"} = $cooked_values.$cooked_range;
$replace{"%typequal%"} = $typequal.$typequal_ranged;
# Here we actually apply the template and output the parameter.
if (defined $win_default) {
$replace{"%win32_default%"} = '"'.escape($win_default).'"';
$replace{"%win_valid%"} = $win_valid;
$replace{"%win_cooked_values%"} = $win_cooked_values.$cooked_range;
#continue_output(replace_by_hash(\%replace, RECONSTITUTE_TEMPLATE_WIN));
} else {
#continue_output(replace_by_hash(\%replace, RECONSTITUTE_TEMPLATE));
}
push @var_info, dclone(\%replace);
}
# Output the table into a function.
# Why a function instead of as symbols? Because the function goes into
# the text segment (increasing the sharing for the shadow). Getting
# structs into the read-only data segment is difficult.
continue_output(PARAM_INIT_HEADER);
for (@var_info) {
my %temp = %{$_};
if (exists $temp{"%win32_default%"}) {
continue_output(replace_by_hash($_, PARAM_INIT_INFO_WIN));
} else {
continue_output(replace_by_hash($_, PARAM_INIT_INFO));
}
}
continue_output(PARAM_INIT_FOOTER);
# wrap things up.
end_output();
}
##########################################################################
# &parse parses a string. It is totally self-contained, using no outside functions (although
# it does use the character type constants such as PARAMETER_NAME defined in the top of this
# file). It accepts a string as its only argument, and returns a hash structure. No attempt
# is made (in *this* function) to check any of the data; it ONLY parses strings into more
# readable formats.
# The following string...
# - - - - - - - - - -
# [TEST_PARAM]
# # Comment, I am ignored
# var1 = vala
# var2 = valb
#
# [NEXT_PARAM]
# var1 = blah a
# var2 : EOF
# multiline string line 1
# line 2
# EOF
# recursive_structure : (classname) EOF
# sub_val1 = 1
# sub_val2 = 2
# EOF
# - - - - - - - - - -
# ...would be parsed into...
# - - - - - - - - - -
# {
# NEXT_PARAM => { var1 => 'blah a', var2 => "multiline string line 1\nline 2"
# recursive_structure=>{ '_dataclass'=>'classname', sub_val1=>'1', sub_val2=>'2' }
# },
# TEST_PARAM => { var1 => 'vala', var2 => 'valb'}
# }
# - - - - - - - - - -
##########################################################################
# parse(text)
# Parses one ini-style string (format described in the comment block above)
# into the hash-of-hashes structure consumed by reconstitute().  Operates on
# the file-scoped globals $remaining_text, $current_parameter and
# $parameters, which is why add_property() must freeze/restore them around
# its recursive call.
sub parse {
# TODO:
# it would be best if WHITESPACE and COMMENT types were
# combined, as anywhere there is a WHITESPACE ignored, comments should be
# ignored also.
# get argument and put into $remaining_text
$remaining_text = shift;
# reset current_parameter and parameters variables
$current_parameter = {};
$parameters = {};
#########################################################
# Main parser logic functions #
#########################################################
sub error { die ("ERROR! Expected valid ".$_[0]." at '".$_[1]."'\n"); }
# ignore will ignore the supplied character class
sub ignore {
my $regex = $_[0]->[0];
$remaining_text =~ s/\A$regex//s;
}
# next_is will look ahead and return true if the next character(s) match
# the given character class
# NOTE(review): when the match fails, $1 keeps the capture from an earlier
# successful match in this dynamic scope, so the "$1 && length($1)" test
# can in principle see a stale value - confirm this cannot misfire for the
# charclasses used here.
sub next_is {
my $regex = $_[0]->[0];
$remaining_text =~ /^($regex)/s;
return $1 && length($1) > 0;
}
# not_ignore is used by &accept and &until. It requires that the next characters
# be of the supplied regex and will return them, otherwise erroring.
sub not_ignore {
my ($regex, $context, $chartype) = @_;
($context ? $context : $remaining_text) =~ s/$regex//s;
return (length($1)>0) ? $1 : error($chartype, substr($remaining_text,0,90));
}
# accept will require that the next characters be of the supplied character class,
# returning the matching string.
sub accept { return not_ignore("^(".$_[0]->[0].")", $_[1], $_[0]->[1]); }
# until will require that there be somewhere ahead the supplied character class, and
# will return the text leading up to that supplied class
# (note: "until" shadows the Perl keyword; every call site must use the
# explicit &until(...) form, while the loop below uses the real keyword)
sub until { return not_ignore("^(.*?)(?=".$_[0]->[0].")", $_[1], $_[0]->[1]); }
##########################################
# Array building functions here #
##########################################
# add_property will add a property to $current_parameter. It is called with the
# property name, the property value, and the dataclass name (only if there is one.
# The dataclass name is not specified for normal strings or normal multiline strings).
sub add_property {
my ($property, $value, $dataclass) = @_;
if ($dataclass) {
# TODO: the [FOOBAR] thing is sloppy. It is only added to make
# the recursive call to &parse happy with the text and parse it.
# Actually, this entire block is rather sloppy...
$value = "[FOOBAR]$value";
# All of the $h_.+ type variables below are used to temporarily "freeze"
# the global variables in the execution stack, so that calling &parse
# recursively below will function as expected. It's pretty messy,
# but it works for now at least...
# TODO: Very sloppy
my $h_remaining_text = $remaining_text;
my %h_current_parameter = %{$current_parameter};
my %h_parameters = %{$parameters};
$remaining_text = $value; # reassigning $remaining_text to equal $value
$value = parse("$value")->{'FOOBAR'}; # actual parse call
$value->{'_dataclass'} = $dataclass;
$remaining_text = $h_remaining_text;
$current_parameter = \%h_current_parameter;
$parameters = \%h_parameters;
}
$current_parameter->{$property} = $value;
}
# add_parameter is called after a parameter is added. It resets $current_parameter.
# It then adds $current_parameter to the %parameters hash.
# If on_the_fly is set to 1, it will call reconstitute on the parameter right away.
sub add_parameter {
my ($title) = @_;
$parameters->{$title} = $current_parameter;
reconstitute({"$title"=>$current_parameter}) if $options{on_the_fly};
$current_parameter = {};
}
#################################################################
# Actual parser logic contained here... #
#################################################################
&ignore(WHITESPACE); # First, ignore all whitespace and comments
&ignore(COMMENTS);
&ignore(WHITESPACE);
while(length($remaining_text)>1){ ### Main loop, through the entire text
# We first get the name of the next parameter, enclosed in brackets
&accept(OPEN_BRACKET);
my $parameter_title = &accept(PARAMETER_TITLE);
&accept(CLOSE_BRACKET);
&ignore(WHITESPACE);
&ignore(COMMENTS);
&ignore(WHITESPACE);
until(&next_is(OPEN_BRACKET)){
# Now we get all of its properties, looping through until we hit the
# next parameter definition.
if(length($remaining_text)<1){ last; } # End of file
# Get the property name...
my $property_name = &accept(PROPERTY_NAME);
&ignore(WHITESPACE);
my $assignment = &accept(ASSIGNMENT);
# Get the assignment operator
my ($property_value, $dataclass_name);
if($assignment eq '=') {
# If it is an equals sign (normal assignment)...
&ignore(SPACES);
$property_value = "" if &next_is(LINEBREAK);
$property_value = &accept(PROPERTY_VALUE) unless &next_is(LINEBREAK);
&ignore(LINEBREAK);
} else {
# If it is a colon (multiline and special
# dataclass assignment, such as for roles)...
&ignore(SPACES);
if(&next_is(OPEN_PARENTHESIS)){
# This means that it is NOT simply a multiline string,
# but rather a dataclass (such as, default : (role) EOF)
&accept(OPEN_PARENTHESIS);
&ignore(SPACES);
$dataclass_name = &accept(DATACLASS_NAME);
&ignore(SPACES);
&accept(CLOSE_PARENTHESIS);
&ignore(SPACES);
}
# This code grabs heredoc delimiter, and then the text until
# the heredoc delimiter. It will be used for both multiline
# strings and dataclass assignments.
my $heredoc = &accept(ASSIGNMENT_HEREDOC);
&ignore(SPACES);
&accept(LINEBREAK);
my $heredoc_charclass = ['\r?\n'.$heredoc.'\r?\n', $heredoc];
$property_value = &until($heredoc_charclass);
&ignore($heredoc_charclass);
}
# add_property will add the newly created property to
# @current_parameter. If it is a single or multiline string, it
# will simply set the new parameter to equal the string.
# However, if $dataclass is longer than 0 characters, it will
# attempt to parse the string.
add_property($property_name, $property_value, $dataclass_name);
ignore(WHITESPACE);
&ignore(COMMENTS);
&ignore(WHITESPACE);
if(length($remaining_text)<1){ last; } # End of file
}
# add_parameter will add @current_parameter (the parameter implicitly
# constructed with add_property) to the hash $parameters. If on_the_fly
# is set, it will also call the reconstruct function on this structure
# and output the results on the fly.
add_parameter($parameter_title);
}
return $parameters;
}
# file_get_contents(file-path)
# Really simple function that just brutally gets the contents of an entire
# file into a string.
# If, however, the option stdin is set, then it will instead get input from
# standard in (the path argument is then ignored).
sub file_get_contents {
my $file_path = shift;
my @text;
if ($options{stdin}){
@text = <STDIN>;
} else {
# three-arg open with a lexical handle: the old two-arg
# open(FILE_NAME, "<$file_path") allowed mode characters embedded in
# the path to change the open mode, and leaked a global handle.
open my $fh, '<', $file_path or die "Cannot find $file_path...";
@text = <$fh>;
close $fh;
}
return join "", @text;
}
############################################################
# Some generic configuration code... #
# This makes adding / removing new switches much easier #
# To add new command line options, just add them to the #
# list @switches contained below. #
############################################################
# configure() parses @ARGV via Getopt::Std and fills the global %options
# hash, keyed by each switch's long name, with either the supplied argument,
# the default, or (for flags) the inverted default.
sub configure {
my @switches = (
# flag, arg, short name, default, usage description
['h', 0, 'help', 0, 'print this usage information'],
['f', 0, 'on_the_fly', 0, 'output the result as it is parsed'],
['i', 1, 'input', 'param_info.in', 'input file (default: "param_info.in")'],
# NOTE(review): the usage text below says default "param_info.c" but the
# actual default is 'param_info_init.c'.
['o', 1, 'output', 'param_info_init.c', 'output file (default: "param_info.c")'],
['I', 0, 'stdin', 0, 'input from standard in instead of file'],
['O', 0, 'stdout', 0, 'print to standard out instead of file'],
['a', 0, 'append', 0, "append: don't clobber output file"],
['e', 0, 'errors', 0, 'do not die on some errors'],
['d', 0, 'debug', 0, 0], # 0 makes it hidden on -h
);
# usage prints the help screen built from @switches.  Note that $s aliases
# the rows of @switches, so the s/_/ /g below mutates them in place; this
# is only safe because usage() is invoked once, right before exiting.
sub usage {
my $switches;
# goes through all of the flags, generating a "help" line for each item
foreach my $s(@_) {
$s->[2]=~s/_/ /g; # replace underscores
# (the "$switch->[4] and" allows options to be disabled from display by setting the usage description to a false value)
$s->[4] and $switches .= "\t-".$s->[0].($s->[1]?" [".($s->[2])."]\t":"\t\t")."\t".$s->[4]."\n";
}
print << "EOF";
Parameter Parser for Condor
Example usage:
perl $0 -i param_table -o output_source.C -f
Full argument list:
$switches
EOF
}
# bomb: print usage and exit successfully (used for -h).
sub bomb { usage(@_); exit 0; }
my %opts;
getopts(join ('', map { $_->[0].($_->[1]?':':'') } @switches),\%opts); # get CLI options, with ':' properly specifying arguments
$opts{'h'} and bomb(@switches);
for my $switch (@switches) {
if( !defined $opts{$switch->[0]} or $opts{$switch->[0]}=~/^$/ ) { # If argument was not set...
$options{$switch->[2]} = $switch->[3]; # ...set the options value equal to the default value.
} else { # Otherwise, set the options value equal to either the argument value, or in the case of...
$options{$switch->[2]} = $switch->[1] ? $opts{$switch->[0]} : !$switch->[3]; # ...a flag style switch...
} # ...instead invert the default value.
}
}
# Script entry point: process the command-line switches into %options, then
# run main() (defined earlier in this file), exiting 0 on success.
configure();
main();
exit(0);
| djw8605/htcondor | src/condor_utils/param_info_c_generator.pl | Perl | apache-2.0 | 35,135 |
#!/usr/bin/env perl
use strict;
use warnings;
my $usage = "\n\n\tusage: $0 acc_list_file.txt target_db.fasta\n\n";

my $acc_list_file = $ARGV[0] or die $usage;
my $target_db = $ARGV[1] or die $usage;

# Pull each accession listed in $acc_list_file out of the fasta $target_db
# using "samtools faidx", printing hits to stdout and reporting misses on
# stderr.  Exits non-zero if any accession could not be retrieved.
main: {

    my $samtools = `which samtools`;
    unless ($samtools =~ /\w/) {
        die "Error, need samtools in your PATH setting.";
    }
    chomp $samtools;

    # Read the accession list ourselves instead of shelling out to `cat`,
    # which passed a user-supplied filename through the shell unquoted.
    open my $acc_fh, '<', $acc_list_file or die "Error, cannot open $acc_list_file: $!";
    my @accs = <$acc_fh>;
    close $acc_fh;
    chomp @accs;

    # BUGFIX: samtools faidx writes its index to "<fasta>.fai"; the old
    # check tested "<fasta>.fasta.fai" and so rebuilt the index on every run.
    if (! -s "$target_db.fai") {
        my $cmd = "$samtools faidx $target_db";
        my $ret = system $cmd;
        if ($ret) {
            die "Error, cmd: $cmd died with ret $ret";
        }
    }

    my $ret = 0;

    foreach my $acc (@accs) {
        $acc =~ s/\s//g;
        unless ($acc =~ /\w/) { next; }

        my $cmd = "$samtools faidx $target_db \"$acc\"";
        my $result = `$cmd`;
        if ($result) {
            print $result;
        }
        else {
            print STDERR "No entry retrieved for acc: $acc\n";
            $ret = 1;
        }
    }

    exit($ret);
}
| mr-c/trinityrnaseq | util/retrieve_sequences_from_fasta.pl | Perl | bsd-3-clause | 1,058 |
package TAP::Formatter::Console;
use strict;
use TAP::Formatter::Base ();
use POSIX qw(strftime);
use vars qw($VERSION @ISA);
@ISA = qw(TAP::Formatter::Base);
=head1 NAME
TAP::Formatter::Console - Harness output delegate for default console output
=head1 VERSION
Version 3.26
=cut
$VERSION = '3.26';
=head1 DESCRIPTION
This provides console orientated output formatting for TAP::Harness.
=head1 SYNOPSIS
use TAP::Formatter::Console;
my $harness = TAP::Formatter::Console->new( \%args );
=head2 C<< open_test >>
See L<TAP::Formatter::Base>
=cut
# open_test(test-name, parser)
# Creates and returns a per-test session object; the parallel session class
# is chosen when more than one job is configured.  The class is loaded on
# demand via string eval, and a load failure is re-thrown through _croak.
# The session's header is printed before it is returned.
sub open_test {
    my ( $self, $test, $parser ) = @_;
    my $class
      = $self->jobs > 1
      ? 'TAP::Formatter::Console::ParallelSession'
      : 'TAP::Formatter::Console::Session';
    # load the session class lazily; $class is one of the two literals above
    eval "require $class";
    $self->_croak($@) if $@;
    my $session = $class->new(
        { name => $test,
            formatter => $self,
            parser => $parser,
            show_count => $self->show_count,
        }
    );
    $session->header;
    return $session;
}
# Ask the _colorizer delegate (if any) to switch terminal output to each of
# the given colors in turn; a NOP when no colorizer is configured.  The
# output callback handed to the colorizer is built once per formatter and
# cached in $self->{_output_func}.
sub _set_colors {
    my ( $self, @colors ) = @_;
    my $colorizer = $self->_colorizer
      or return;
    my $emit = $self->{_output_func} ||= sub { $self->_output(@_) };
    for my $color (@colors) {
        $colorizer->set_color( $emit, $color );
    }
}
# Print the given message in green, restoring the default color afterwards
# so subsequent output is unaffected.
sub _output_success {
    my ( $self, $text ) = @_;
    $self->_set_colors('green');
    $self->_output($text);
    $self->_set_colors('reset');
}
# Print a failure message in red.  Any trailing record separator is emitted
# *after* the color reset, so the newline itself is never colored.
sub _failure_output {
    my ( $self, @chunks ) = @_;
    my $text = join q{}, @chunks;
    my $trailing = chomp $text;    # true if a trailing $/ was removed
    $self->_set_colors('red');
    $self->_output($text);
    $self->_set_colors('reset');
    $self->_output($/) if $trailing;
}
1;
| Dokaponteam/ITF_Project | xampp/perl/lib/TAP/Formatter/Console.pm | Perl | mit | 1,789 |
# format-lib.pl
# Common functions for partitioning and formatting disks under solaris
BEGIN { push(@INC, ".."); };
use WebminCore;
&init_config();
# Pull in the Webmin mount and proc modules, whose functions are used below
# for filesystem status queries and for driving interactive commands.
&foreign_require("mount", "mount-lib.pl");
&foreign_require("proc", "proc-lib.pl");
%access = &get_module_acl();
$| = 1;	# unbuffer output, needed while interacting with format(1m)
# list_disks()
# Returns a list of structures, one per disk, as reported by the Solaris
# format(1m) command.  Each structure has device, type, cyl, alt, hd, sec
# and volume keys, plus a human-readable 'desc' for c#t#d# / c#d# names.
# &wait_for scrapes format's menu output and fills the global @matches.
sub list_disks
{
local(@rv);
local $temp = &transname();
# feed format a command file that just selects the "disk" menu, so it
# prints the disk list and then waits at the selection prompt
open(TEMP, ">$temp");
print TEMP "disk\n";
close(TEMP);
open(FORMAT, "format -f $temp |");
while(1) {
	local $rv = &wait_for(FORMAT, 'Specify', '\s+\d+\. (\S+) <(.*) cyl (\d+) alt (\d+) hd (\d+) sec (\d+)>\s*(\S*)', '\s+\d+\. (\S+) <drive type unknown>', 'space for more');
	if ($rv <= 0) { last; }
	elsif ($rv == 1) {
		# matched one fully-described disk line; build its structure
		local $disk = { 'device' => "/dev/dsk/$matches[1]",
				'type' => $matches[2] eq 'DEFAULT' ?
					undef : $matches[2],
				'cyl' => $matches[3],
				'alt' => $matches[4],
				'hd' => $matches[5],
				'sec' => $matches[6],
				'volume' => $matches[7] };
		if ($matches[1] =~ /c(\d+)t(\d+)d(\d+)$/) {
			$disk->{'desc'} = &text('select_device',
					"$1", "$2", "$3");
		}
		elsif ($matches[1] =~ /c(\d+)d(\d+)$/) {
			$disk->{'desc'} = &text('select_idedevice',
					chr($1*2 + $2 + 65));
		}
		push(@rv, $disk);
	}
}
close(FORMAT);
unlink($temp);
return @rv;
}
# disk_info(disk)
# Returns an array containing a disks vendor, product and revision, as
# reported by format(1m)'s "inquiry" command.  Uses the interactive format
# session handle ($fh) opened by &open_format, and the global @matches
# array filled in by &wait_for.
sub disk_info
{
local(@rv);
&open_format();
&choose_disk($_[0]);
&fast_wait_for($fh, 'format>');
&wprint("inquiry\n");
&wait_for($fh, 'Vendor:\s+(.*)\r\nProduct:\s+(.*)\r\nRevision:\s+(.*)\r\n');
@rv = ($matches[1],$matches[2],$matches[3]);
# consume the trailing prompt so the session is ready for the next command
&wait_for($fh, 'format>');
return @rv;
}
# list_partitions(device)
# Returns a list of structures, one per partition (slices 0..7, or more if
# prtvtoc reports them).  Each has tag, flag, start/end (in cylinders) and
# device keys; slices with no VTOC entry are filled in as unassigned.
# Parses "prtvtoc <device>s0" rather than driving format interactively.
sub list_partitions
{
local(@rv, $secs, $i);
local @tag = &list_tags();
open(VTOC, "prtvtoc $_[0]s0 |");
while(<VTOC>) {
	if (/(\d+)\s+sectors\/cylinder/) {
		# remember the sector/cylinder factor for converting offsets
		$secs = $1;
		}
	if (/^\s+(\d+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)/) {
		# partition row: slice, tag number, flag bits, first sector,
		# sector count, last sector
		local $n = $1;
		local $part = { 'tag' => $tag[$2],
				'flag' => $3 eq "00" ? "wm" :
					  $3 eq "01" ? "wu" :
					  $3 eq "10" ? "rm" : "ru",
				'start' => int($4 / $secs),
				'end' => int($6 / $secs),
				'device' => $_[0]."s$n" };
		$rv[$n] = $part;
		}
	}
close(VTOC);
# fill in any missing slices (always report at least 8) and build the
# human-readable descriptions
for($i=0; $i<8 || $i<@rv; $i++) {
	$rv[$i] = { 'tag' => 'unassigned',
		    'flag' => 'wm',
		    'device' => $_[0]."s$i" } if (!$rv[$i]);
	if ($_[0] =~ /c(\d+)t(\d+)d(\d+)$/) {
		$rv[$i]->{'desc'} = &text('select_part',
					  "$1", "$2", "$3", $i);
		}
	elsif ($_[0] =~ /c(\d+)d(\d+)$/) {
		$rv[$i]->{'desc'} = &text('select_idepart',
					  chr($1*2 + $2 + 65), $i);
		}
	}
return @rv;
# Old implementation that scraped the interactive format(1m) partition
# menu instead of prtvtoc; kept for reference.
#&open_format();
#&choose_disk($_[0]);
#if (!&wait_for($fh, 'unformatted', 'formatted')) { return (); }
#&wait_for($fh, 'format>');
#&wprint("partition\n");
#&wait_for($fh, 'partition>');
#&wprint("print\n");
#&wait_for($fh, 'Blocks\r\n');
#while(&wait_for($fh, 'partition>', '\s+\d+\s+(\S+)\s+(\S+)\s+(\d+)(\s+-\s+(\d+))?.*\r\n')) {
#	local $part = { 'tag' => $matches[1],
#			'flag' => $matches[2],
#			'start' => $matches[3],
#			'end' => $matches[5] ? $matches[5] : $matches[3] };
#	if ($matches[1] =~ /c(\d+)t(\d+)d(\d+)s(\d+)$/) {
#		$part->{'desc'} = &text('select_part', "$1", "$2", "$3", "$4");
#		}
#	push(@rv, $part);
#	}
#&wprint("quit\n");
#&wait_for($fh, 'format>');
#return @rv[0..7];
}
# modify_partition(disk, partition, tag, flag, start, end)
# Changes an existing partition by driving format(1M)'s interactive
# 'partition' sub-menu. start and end are cylinder numbers; passing 0
# for both deletes the partition. Calls &error (fatal) if the new label
# cannot be written, e.g. because a filesystem is mounted.
sub modify_partition
{
local(@rv);
&open_format();
&choose_disk($_[0]);
&wait_for($fh, 'format>');
&wprint("partition\n");
local $fd = &wait_for($fh, 'partition>', 'run fdisk');
if ($fd == 1) {
	# format asked us to run fdisk first - accept
	&wprint("fdisk\n");
	&wprint("y\n");
	&wait_for($fh, 'partition>');
	}
# select the slice, then answer the tag, flag and starting cylinder prompts
&wprint("$_[1]\n");
&wait_for($fh, 'Enter.*:'); &wprint("$_[2]\n");
&wait_for($fh, 'Enter.*:'); &wprint("$_[3]\n");
&wait_for($fh, 'Enter.*:'); &wprint("$_[4]\n");
&wait_for($fh, 'Enter.*:');
# answer the size prompt, given in cylinders (the 'c' suffix)
if ($_[4] || $_[5]) { &wprint(($_[5]-$_[4]+1)."c\n"); }
else {
	# deleting this partition..
	&wprint("0\n");
	}
&wait_for($fh, 'partition>');
# write the updated label back to the disk
&wprint("label\n");
if (&wait_for($fh, 'continue', 'Cannot')) {
	&error($text{'emounted'});
	}
&wprint("y\n");
if (&wait_for($fh, 'partition>', 'no backup labels')) {
	&error($text{'elast'});
	}
&wprint("quit\n");
&wait_for($fh, 'format>');
}
# list_tags()
# Returns the list of partition tag names known to this module, indexed
# by the numeric tag value reported by prtvtoc.
sub list_tags
{
local @tags = ("unassigned", "boot", "root", "swap", "usr",
	       "backup", "stand", "var", "home", "alternates", "cache");
return @tags;
}
# device_status(device)
# Returns the mount point, type and status of some device. Uses the mount
# module to query the list of known and mounted filesystems.
# Return value is one of:
#   ( dir, type, 1, mounts-index, mounted-index )  currently mounted
#   ( dir, type, 0, mounts-index )                 known but not mounted
#   ( "meta", "meta"|"metadb", 1 )                 used by metadevices
#   ()                                             not in use
sub device_status
{
# cache the mount module's lists in package globals across calls
@mounted = &foreign_call("mount", "list_mounted") if (!@mounted);
@mounts = &foreign_call("mount", "list_mounts") if (!@mounts);
# element [1] of each mount structure is the device being compared
local ($mounted) = grep { $_->[1] eq $_[0] } @mounted;
local ($mount) = grep { $_->[1] eq $_[0] } @mounts;
if ($mounted) { return ($mounted->[0], $mounted->[2], 1,
			&indexof($mount, @mounts),
			&indexof($mounted, @mounted)); }
elsif ($mount) { return ($mount->[0], $mount->[2], 0,
			 &indexof($mount, @mounts)); }
else {
	# not in any filesystem table .. check the metastat/metadb maps
	&metamap_init();
	if ($metastat{$_[0]}) { return ("meta", "meta", 1); }
	if ($metadb{$_[0]}) { return ("meta", "metadb", 1); }
	return ();
	}
}
# fstype_name(type)
# Converts a filesystem type code into its human-readable name from the
# module's translation table, falling back to the generic 'unknown' label.
sub fstype_name
{
local $name = $text{"fstype_$_[0]"};
return $name ? $name : $text{'fstype_unknown'};
}
# filesystem_type(device)
# Calls fstyp to identify the filesystem on some device. Returns the
# filesystem type as a single word, or undef if it could not be
# determined (no output, an error message, or multi-word output).
# XXX the device path is interpolated into a shell command; callers must
# only pass trusted device names (see can_edit_disk).
sub filesystem_type
{
local($out);
# use chomp, not chop: only strip a trailing newline, and never eat the
# last real character if fstyp output is not newline-terminated
chomp($out = `fstyp $_[0] 2>&1`);
if ($out =~ /^\S+$/) { return $out; }
return undef;
}
# fsck_error(code)
# Maps an fsck exit code to its translated error message, or to the
# generic 'unknown' message when the code is not recognised.
sub fsck_error
{
local $msg = $text{"fsck_$_[0]"};
return $msg ? $msg : $text{'fsck_unknown'};
}
#############################################################################
# Internal functions
#############################################################################
# open_format()
# Internal function to run the 'format' command on a pseudo-terminal,
# storing the pty filehandle and pid in the globals $fh and $fpid.
# Selects disk 0 so the session sits at the 'format>' prompt.
# Calls &error (fatal) if format reports no disks. Only runs format
# once per request; subsequent calls are no-ops.
sub open_format
{
return if ($format_already_open);
($fh, $fpid) = &foreign_call("proc", "pty_process_exec", "format");
while(1) {
	local $rv = &wait_for($fh, 'Specify.*:', 'no disks found', 'space for more');
	if ($rv == 0) { last; }				# at the disk-selection prompt
	elsif ($rv == 1) { &error($text{'eformat'}); }	# no disks at all
	else { &wprint(" "); }				# page through the disk list
	}
&wprint("0\n");
&wait_for($fh, 'format>');
$format_already_open++;
}
# wprint(string)
# Writes a string to the format(1M) pseudo-terminal handle, unbuffered.
sub wprint
{
local $str = $_[0];
syswrite($fh, $str, length $str);
}
# opt_input(name, units, in-row)
# Outputs HTML for an optional field: a 'Default' radio button pair and
# a small text input, optionally opening its own table row.
sub opt_input
{
local ($name, $units, $row) = @_;
print "<tr>" if ($row);
print "<td align=right><b>$text{$name}</b></td> <td nowrap>\n",
      "<input type=radio name=${name}_def value=1 checked> $text{'default'}\n",
      " <input type=radio name=${name}_def value=0>\n",
      "<input name=$name size=6> $units</td>";
print $row ? "\n" : "</tr>\n";
}
# opt_check(name, regex, flag)
# Validates an optional field from %in, as generated by opt_input.
# Returns the empty string when the 'Default' radio was chosen, returns
# " flag value" when the entered value matches the pattern, and calls
# &error (fatal) otherwise.
sub opt_check
{
local ($name, $pat, $flag) = @_;
return "" if ($in{$name."_def"});
&error(&text('opt_error', $in{$name}, $text{$name}))
	if ($in{$name} !~ /^$pat$/);
return " $flag $in{$name}";
}
# metamap_init()
# Internal function to build the %metastat and %metadb hashes, keyed by
# /dev/dsk/cXtXdXsX device path, from the output of the configured
# metastat and metadb commands. Only runs once per request, and only if
# both commands are executable.
sub metamap_init
{
if ($done_metamap_init) { return; }
$done_metamap_init = 1;
if (-x $config{metastat_path} && -x $config{metadb_path}) {
	# any cXtXdXsX name appearing in metastat output is in use by
	# a metadevice
	open(METASTAT, "$config{metastat_path} 2>&1 |");
	while(<METASTAT>) {
		if (/(c\d+t\d+d\d+s\d+)/) { $metastat{"/dev/dsk/$1"}++; }
		}
	close(METASTAT);
	# likewise for metadb -i output and state database replicas
	open(METADB, "$config{metadb_path} -i 2>&1 |");
	while(<METADB>) {
		if (/(c\d+t\d+d\d+s\d+)/) { $metadb{"/dev/dsk/$1"}++; }
		}
	close(METADB);
	}
}
# choose_disk(disk)
# Internal function to select the given disk at the format> prompt,
# paging through any 'space for more' prompts until the disk-selection
# prompt appears.
sub choose_disk
{
&wprint("disk\n");
while(&wait_for($fh, 'Specify.*:', 'space for more')) {
	&wprint(" ");
	}
&wprint("$_[0]\n");
}
# can_edit_disk(device)
# Returns 1 if the current user's ACL allows editing of the given disk
# device, 0 if not. The ACL is a space-separated list of cXtXdX names,
# or * to allow all disks.
sub can_edit_disk
{
$_[0] =~ /(c\d+t\d+d\d+)/;
local $disk = $1;
return (grep { $_ eq "*" || $_ eq $disk }
	     split(/\s+/, $access{'disks'})) ? 1 : 0;
}
# partition_select(name, value, mode, &found)
# Returns HTML for a <select> for choosing a disk or partition.
# mode 0 = disk partitions only
#      1 = whole disks only
#      2 = whole disks and disk partitions
# If the &found scalar ref is given, it is set to the number of options
# matching value; otherwise an unknown value is added as an extra option.
sub partition_select
{
local $rv = "<select name=$_[0]>\n";
local ($found, $d, $p);
local @dlist = &list_disks();
foreach $d (@dlist) {
	# whole-disk options, shown in modes 1 and 2.
	# NB: the mode is $_[2] - $_[0] is the select box name
	if ($_[2] > 0) {
		local $name = $d->{'desc'};
		$name .= " ($d->{'type'})" if ($d->{'type'});
		$rv .= sprintf "<option value=%s %s>%s</option>\n",
			$d->{'device'},
			$_[1] eq $d->{'device'} ? "selected" : "", $name;
		$found++ if ($_[1] eq $d->{'device'});
		}
	# partition options, shown in modes 0 and 2
	if ($_[2] != 1) {
		local @parts = &list_partitions($d->{'device'});
		foreach $p (@parts) {
			local $name = $p->{'desc'};
			next if (!$p->{'end'});		# skip empty slices
			$name .= " ($p->{'tag'})" if ($p->{'tag'});
			$rv .= sprintf "<option %s value=%s>%s</option>\n",
				$_[1] eq $p->{'device'} ? "selected" : "",
				$p->{'device'}, $name;
			$found++ if ($_[1] eq $p->{'device'});
			}
		}
	}
if (!$found && $_[1] && !$_[3]) {
	# current value is not any known disk or partition .. keep it anyway
	$rv .= "<option selected>$_[1]</option>\n";
	}
if ($_[3]) {
	${$_[3]} = $found;
	}
$rv .= "</select>\n";
return $rv;
}
# disk_space(device)
# Runs df on a mounted filesystem device and returns a 2-element list
# of ( total-kb, free-kb ), or an empty list if the device could not be
# matched in the df output.
sub disk_space
{
local $df = `df -k $_[0] 2>&1`;
if ($df =~ /(\/dev\/\S+)\s+(\d+)\s+\S+\s+(\d+)/) {
	return ($2, $3);
	}
return ();
}
| HasClass0/webmin | format/format-lib.pl | Perl | bsd-3-clause | 9,178 |
package Archive::Extract;
use if $] > 5.017, 'deprecate';
use strict;
use Cwd qw[cwd chdir];
use Carp qw[carp];
use IPC::Cmd qw[run can_run];
use FileHandle;
use File::Path qw[mkpath];
use File::Spec;
use File::Basename qw[dirname basename];
use Params::Check qw[check];
use Module::Load::Conditional qw[can_load check_install];
use Locale::Maketext::Simple Style => 'gettext';
### solaris has silly /bin/tar output ###
use constant ON_SOLARIS => $^O eq 'solaris' ? 1 : 0;
use constant ON_NETBSD => $^O eq 'netbsd' ? 1 : 0;
use constant ON_OPENBSD => $^O =~ m!^(openbsd|bitrig)$! ? 1 : 0;
use constant ON_FREEBSD => $^O =~ m!^(free|midnight|dragonfly)(bsd)?$! ? 1 : 0;
use constant ON_LINUX => $^O eq 'linux' ? 1 : 0;
use constant FILE_EXISTS => sub { -e $_[0] ? 1 : 0 };
### VMS may require quoting upper case command options
use constant ON_VMS => $^O eq 'VMS' ? 1 : 0;
### Windows needs special treatment of Tar options
use constant ON_WIN32 => $^O eq 'MSWin32' ? 1 : 0;
### we can't use this extraction method, because of missing
### modules/binaries:
use constant METHOD_NA => [];
### If these are changed, update @TYPES and the new() POD
use constant TGZ => 'tgz';
use constant TAR => 'tar';
use constant GZ => 'gz';
use constant ZIP => 'zip';
use constant BZ2 => 'bz2';
use constant TBZ => 'tbz';
use constant Z => 'Z';
use constant LZMA => 'lzma';
use constant XZ => 'xz';
use constant TXZ => 'txz';
use vars qw[$VERSION $PREFER_BIN $PROGRAMS $WARN $DEBUG
            $_ALLOW_BIN $_ALLOW_PURE_PERL $_ALLOW_TAR_ITER
           ];

### user-tweakable package defaults ###
$VERSION            = '0.76';
$PREFER_BIN         = 0;    # try pure perl extractors before binaries
$WARN               = 1;    # propagated to $Archive::Tar::WARN in _untar_at
$DEBUG              = 0;    # print debug messages, run commands verbosely
$_ALLOW_PURE_PERL   = 1;    # allow pure perl extractors
$_ALLOW_BIN         = 1;    # allow binary extractors
$_ALLOW_TAR_ITER    = 1;    # try to use Archive::Tar->iter if available

# same as all constants
my @Types           = ( TGZ, TAR, GZ, ZIP, BZ2, TBZ, Z, LZMA, XZ, TXZ );

# double assignment avoids a 'used only once' warning for the
# Params::Check package variable
local $Params::Check::VERBOSE = $Params::Check::VERBOSE = 1;
=pod
=head1 NAME
Archive::Extract - A generic archive extracting mechanism
=head1 SYNOPSIS
use Archive::Extract;
### build an Archive::Extract object ###
my $ae = Archive::Extract->new( archive => 'foo.tgz' );
### extract to cwd() ###
my $ok = $ae->extract;
### extract to /tmp ###
my $ok = $ae->extract( to => '/tmp' );
### what if something went wrong?
my $ok = $ae->extract or die $ae->error;
### files from the archive ###
my $files = $ae->files;
### dir that was extracted to ###
my $outdir = $ae->extract_path;
### quick check methods ###
$ae->is_tar # is it a .tar file?
$ae->is_tgz # is it a .tar.gz or .tgz file?
$ae->is_gz; # is it a .gz file?
$ae->is_zip; # is it a .zip file?
$ae->is_bz2; # is it a .bz2 file?
$ae->is_tbz; # is it a .tar.bz2 or .tbz file?
$ae->is_lzma; # is it a .lzma file?
$ae->is_xz; # is it a .xz file?
$ae->is_txz; # is it a .tar.xz or .txz file?
### absolute path to the archive you provided ###
$ae->archive;
### commandline tools, if found ###
$ae->bin_tar # path to /bin/tar, if found
$ae->bin_gzip # path to /bin/gzip, if found
$ae->bin_unzip # path to /bin/unzip, if found
$ae->bin_bunzip2 # path to /bin/bunzip2 if found
$ae->bin_unlzma # path to /bin/unlzma if found
$ae->bin_unxz # path to /bin/unxz if found
=head1 DESCRIPTION
Archive::Extract is a generic archive extraction mechanism.
It allows you to extract any archive file of the type .tar, .tar.gz,
.gz, .Z, .tar.bz2, .tbz, .bz2, .zip, .xz, .txz, .tar.xz or .lzma
without having to worry how it
does so, or use different interfaces for each type by using either
perl modules, or commandline tools on your system.
See the C<HOW IT WORKS> section further down for details.
=cut
### see what /bin/programs are available ###
### probe for each helper binary once at load time; the results are
### consulted by the bin_* accessors and the *_bin extractors
$PROGRAMS = {};
CMD: for my $pgm (qw[tar unzip gzip bunzip2 uncompress unlzma unxz]) {
    ### on FreeBSD, prefer an installed 'info-unzip' binary if present
    if ( $pgm eq 'unzip' and ON_FREEBSD and my $unzip = can_run('info-unzip') ) {
      $PROGRAMS->{$pgm} = $unzip;
      next CMD;
    }
    ### on NetBSD/FreeBSD, only accept an unzip from the pkgsrc/ports
    ### install prefix, not the base system one
    if ( $pgm eq 'unzip' and ( ON_NETBSD or ON_FREEBSD ) ) {
      local $IPC::Cmd::INSTANCES = 1;
      ($PROGRAMS->{$pgm}) = grep { ON_NETBSD ? m!/usr/pkg/! : m!/usr/local! } can_run($pgm);
      next CMD;
    }
    if ( $pgm eq 'unzip' and ON_LINUX ) {
      # Check if 'unzip' is busybox masquerading: keep only candidates
      # that accept the info-zip '-Z -1' listing options
      local $IPC::Cmd::INSTANCES = 1;
      my $opt = ON_VMS ? '"-Z"' : '-Z';
      ($PROGRAMS->{$pgm}) = grep { scalar run(command=> [ $_, $opt, '-1' ]) } can_run($pgm);
      next CMD;
    }
    ### these platforms ship a non-GNU tar in the base system
    if ( $pgm eq 'tar' and ( ON_OPENBSD || ON_SOLARIS || ON_NETBSD ) ) {
      # try gtar first
      next CMD if $PROGRAMS->{$pgm} = can_run('gtar');
    }
    $PROGRAMS->{$pgm} = can_run($pgm);
}
### mapping from types to extractor methods ###
### each is_* predicate maps to a binary-program based extractor and a
### pure perl fallback; extract() picks between them at runtime based
### on $_ALLOW_BIN, $_ALLOW_PURE_PERL and $PREFER_BIN
my $Mapping = {  # binary program           # pure perl module
    is_tgz  => { bin => '_untar_bin',       pp => '_untar_at'   },
    is_tar  => { bin => '_untar_bin',       pp => '_untar_at'   },
    is_gz   => { bin => '_gunzip_bin',      pp => '_gunzip_cz'  },
    is_zip  => { bin => '_unzip_bin',       pp => '_unzip_az'   },
    is_tbz  => { bin => '_untar_bin',       pp => '_untar_at'   },
    is_bz2  => { bin => '_bunzip2_bin',     pp => '_bunzip2_bz2'},
    is_Z    => { bin => '_uncompress_bin',  pp => '_gunzip_cz'  },
    is_lzma => { bin => '_unlzma_bin',      pp => '_unlzma_cz'  },
    is_xz   => { bin => '_unxz_bin',        pp => '_unxz_cz'    },
    is_txz  => { bin => '_untar_bin',       pp => '_untar_at'   },
};
{   ### use subs so we re-generate array refs etc for the no-override flags
    ### if we don't, then we reuse the same arrayref, meaning objects store
    ### previous errors
    my $tmpl = {
        archive         => sub { { required => 1, allow => FILE_EXISTS } },
        type            => sub { { default => '', allow => [ @Types ] } },
        _error_msg      => sub { { no_override => 1, default => [] } },
        _error_msg_long => sub { { no_override => 1, default => [] } },
    };

    ### build accessors ###
    ### generates a simple get/set accessor for every template key plus
    ### the internal bookkeeping fields
    for my $method( keys %$tmpl,
                    qw[_extractor _gunzip_to files extract_path],
    ) {
        no strict 'refs';
        *$method = sub {
            my $self = shift;
            $self->{$method} = $_[0] if @_;
            return $self->{$method};
        }
    }

=head1 METHODS

=head2 $ae = Archive::Extract->new(archive => '/path/to/archive',[type => TYPE])

Creates a new C<Archive::Extract> object based on the archive file you
passed it. Automatically determines the type of archive based on the
extension, but you can override that by explicitly providing the
C<type> argument.

Valid values for C<type> are:

=over 4

=item tar

Standard tar files, as produced by, for example, C</bin/tar>.
Corresponds to a C<.tar> suffix.

=item tgz

Gzip compressed tar files, as produced by, for example C</bin/tar -z>.
Corresponds to a C<.tgz> or C<.tar.gz> suffix.

=item gz

Gzip compressed file, as produced by, for example C</bin/gzip>.
Corresponds to a C<.gz> suffix.

=item Z

Lempel-Ziv compressed file, as produced by, for example C</bin/compress>.
Corresponds to a C<.Z> suffix.

=item zip

Zip compressed file, as produced by, for example C</bin/zip>.
Corresponds to a C<.zip>, C<.jar> or C<.par> suffix.

=item bz2

Bzip2 compressed file, as produced by, for example, C</bin/bzip2>.
Corresponds to a C<.bz2> suffix.

=item tbz

Bzip2 compressed tar file, as produced by, for example C</bin/tar -j>.
Corresponds to a C<.tbz> or C<.tar.bz2> suffix.

=item lzma

Lzma compressed file, as produced by C</bin/lzma>.
Corresponds to a C<.lzma> suffix.

=item xz

Xz compressed file, as produced by C</bin/xz>.
Corresponds to a C<.xz> suffix.

=item txz

Xz compressed tar file, as produced by, for example C</bin/tar -J>.
Corresponds to a C<.txz> or C<.tar.xz> suffix.

=back

Returns a C<Archive::Extract> object on success, or false on failure.

=cut

    ### constructor ###
    sub new {
        my $class   = shift;
        my %hash    = @_;

        ### see above why we use subs here and generate the template;
        ### it's basically to not re-use arrayrefs
        my %utmpl   = map { $_ => $tmpl->{$_}->() } keys %$tmpl;

        my $parsed = check( \%utmpl, \%hash ) or return;

        ### make sure we have an absolute path ###
        my $ar = $parsed->{archive} = File::Spec->rel2abs( $parsed->{archive} );

        ### figure out the type, if it wasn't already specified ###
        ### the order matters: compound suffixes (.tar.gz, .tar.bz2,
        ### .tar.xz) must be tested before their single counterparts
        unless ( $parsed->{type} ) {
            $parsed->{type} =
                $ar =~ /.+?\.(?:tar\.gz|tgz)$/i         ? TGZ   :
                $ar =~ /.+?\.gz$/i                      ? GZ    :
                $ar =~ /.+?\.tar$/i                     ? TAR   :
                $ar =~ /.+?\.(zip|jar|ear|war|par)$/i   ? ZIP   :
                $ar =~ /.+?\.(?:tbz2?|tar\.bz2?)$/i     ? TBZ   :
                $ar =~ /.+?\.bz2$/i                     ? BZ2   :
                $ar =~ /.+?\.Z$/                        ? Z     :
                $ar =~ /.+?\.lzma$/                     ? LZMA  :
                $ar =~ /.+?\.(?:txz|tar\.xz)$/i         ? TXZ   :
                $ar =~ /.+?\.xz$/                       ? XZ    :
                '';
        }

        bless $parsed, $class;

        ### don't know what type of file it is
        ### XXX this *has* to be an object call, not a package call
        return $parsed->_error(loc("Cannot determine file type for '%1'",
                                   $parsed->{archive} )) unless $parsed->{type};
        return $parsed;
    }
}
=head2 $ae->extract( [to => '/output/path'] )
Extracts the archive represented by the C<Archive::Extract> object to
the path of your choice as specified by the C<to> argument. Defaults to
C<cwd()>.
Since C<.gz> files never hold a directory, but only a single file; if
the C<to> argument is an existing directory, the file is extracted
there, with its C<.gz> suffix stripped.
If the C<to> argument is not an existing directory, the C<to> argument
is understood to be a filename, if the archive type is C<gz>.
In the case that you did not specify a C<to> argument, the output
file will be the name of the archive file, stripped from its C<.gz>
suffix, in the current working directory.
C<extract> will try a pure perl solution first, and then fall back to
commandline tools if they are available. See the C<GLOBAL VARIABLES>
section below on how to alter this behaviour.
It will return true on success, and false on failure.
On success, it will also set the follow attributes in the object:
=over 4
=item $ae->extract_path
This is the directory that the files were extracted to.
=item $ae->files
This is an array ref with the paths of all the files in the archive,
relative to the C<to> argument you specified.
To get the full path to an extracted file, you would use:
File::Spec->catfile( $to, $ae->files->[0] );
Note that all files from a tar archive will be in unix format, as per
the tar specification.
=back
=cut
### extract( to => $dir_or_file )
### extracts the archive to the given location (default: cwd), picking
### a pure perl or binary extractor per $Mapping and the global flags.
### Returns true on success, false on failure; sets ->files and
### ->extract_path on success.
sub extract {
    my $self = shift;
    my %hash = @_;

    ### reset error messages
    $self->_error_msg( [] );
    $self->_error_msg_long( [] );

    my $to;
    my $tmpl = {
        to  => { default => '.', store => \$to }
    };

    check( $tmpl, \%hash ) or return;

    ### so 'to' could be a file or a dir, depending on whether it's a .gz
    ### file, or basically anything else.
    ### so, check that, then act accordingly.
    ### set an accessor specifically so _gunzip can know what file to extract
    ### to.
    my $dir;
    {   ### a foo.gz file
        if( $self->is_gz or $self->is_bz2 or $self->is_Z or $self->is_lzma or $self->is_xz ) {
            ### the output file is the archive name minus its suffix
            my $cp = $self->archive; $cp =~ s/\.(?:gz|bz2?|Z|lzma|xz)$//i;

            ### to is a dir?
            if ( -d $to ) {
                $dir = $to;
                $self->_gunzip_to( basename($cp) );

            ### then it's a filename
            } else {
                $dir = dirname($to);
                $self->_gunzip_to( basename($to) );
            }

        ### not a foo.gz file
        } else {
            $dir = $to;
        }
    }

    ### make the dir if it doesn't exist ###
    unless( -d $dir ) {
        eval { mkpath( $dir ) };
        return $self->_error(loc("Could not create path '%1': %2", $dir, $@))
            if $@;
    }

    ### get the current dir, to restore later ###
    my $cwd = cwd();

    my $ok = 1;
    EXTRACT: {
        ### chdir to the target dir ###
        unless( chdir $dir ) {
            $self->_error(loc("Could not chdir to '%1': %2", $dir, $!));
            $ok = 0; last EXTRACT;
        }

        ### set files to an empty array ref, so there's always an array
        ### ref IN the accessor, to avoid errors like:
        ### Can't use an undefined value as an ARRAY reference at
        ### ../lib/Archive/Extract.pm line 742. (rt #19815)
        $self->files( [] );

        ### find out the dispatch methods needed for this type of
        ### archive. Do a $self->is_XXX to figure out the type, then
        ### get the hashref with bin + pure perl dispatchers.
        my ($map) = map { $Mapping->{$_} } grep { $self->$_ } keys %$Mapping;

        ### add pure perl extractor if allowed & add bin extractor if allowed
        my @methods;
        push @methods, $map->{'pp'}  if $_ALLOW_PURE_PERL;
        push @methods, $map->{'bin'} if $_ALLOW_BIN;

        ### reverse it if we prefer bin extractors
        @methods = reverse @methods if $PREFER_BIN;

        my($na, $fail);
        for my $method (@methods) {
            $self->debug( "# Extracting with ->$method\n" );

            my $rv = $self->$method;

            ### a positive extraction
            ### (METHOD_NA is an array-ref constant, so 'eq'/'ne' compare
            ### against its stringified address, not a plain string)
            if( $rv and $rv ne METHOD_NA ) {
                $self->debug( "# Extraction succeeded\n" );
                $self->_extractor($method);
                last;

            ### method is not available
            } elsif ( $rv and $rv eq METHOD_NA ) {
                $self->debug( "# Extraction method not available\n" );
                $na++;
            } else {
                $self->debug( "# Extraction method failed\n" );
                $fail++;
            }
        }

        ### warn something went wrong if we didn't get an extractor
        unless( $self->_extractor ) {
            my $diag = $fail ? loc("Extract failed due to errors") :
                       $na   ? loc("Extract failed; no extractors available") :
                       '';

            $self->_error($diag);
            $ok = 0;
        }
    }

    ### and chdir back ###
    unless( chdir $cwd ) {
        $self->_error(loc("Could not chdir back to start dir '%1': %2'",
                          $cwd, $!));
    }

    return $ok;
}
=pod
=head1 ACCESSORS
=head2 $ae->error([BOOL])
Returns the last encountered error as string.
Pass it a true value to get the C<Carp::longmess()> output instead.
=head2 $ae->extract_path
This is the directory the archive got extracted to.
See C<extract()> for details.
=head2 $ae->files
This is an array ref holding all the paths from the archive.
See C<extract()> for details.
=head2 $ae->archive
This is the full path to the archive file represented by this
C<Archive::Extract> object.
=head2 $ae->type
This is the type of archive represented by this C<Archive::Extract>
object. See accessors below for an easier way to use this.
See the C<new()> method for details.
=head2 $ae->types
Returns a list of all known C<types> for C<Archive::Extract>'s
C<new> method.
=cut
### class method: returns the list of all supported archive type strings
sub types {
    return @Types;
}
=head2 $ae->is_tgz
Returns true if the file is of type C<.tar.gz>.
See the C<new()> method for details.
=head2 $ae->is_tar
Returns true if the file is of type C<.tar>.
See the C<new()> method for details.
=head2 $ae->is_gz
Returns true if the file is of type C<.gz>.
See the C<new()> method for details.
=head2 $ae->is_Z
Returns true if the file is of type C<.Z>.
See the C<new()> method for details.
=head2 $ae->is_zip
Returns true if the file is of type C<.zip>.
See the C<new()> method for details.
=head2 $ae->is_lzma
Returns true if the file is of type C<.lzma>.
See the C<new()> method for details.
=head2 $ae->is_xz
Returns true if the file is of type C<.xz>.
See the C<new()> method for details.
=cut
### quick check methods ###
### each predicate simply compares the object's type against the
### corresponding type constant
sub is_tgz  { my $self = shift; return $self->type eq TGZ  }
sub is_tar  { my $self = shift; return $self->type eq TAR  }
sub is_gz   { my $self = shift; return $self->type eq GZ   }
sub is_zip  { my $self = shift; return $self->type eq ZIP  }
sub is_tbz  { my $self = shift; return $self->type eq TBZ  }
sub is_bz2  { my $self = shift; return $self->type eq BZ2  }
sub is_Z    { my $self = shift; return $self->type eq Z    }
sub is_lzma { my $self = shift; return $self->type eq LZMA }
sub is_xz   { my $self = shift; return $self->type eq XZ   }
sub is_txz  { my $self = shift; return $self->type eq TXZ  }
=pod
=head2 $ae->bin_tar
Returns the full path to your tar binary, if found.
=head2 $ae->bin_gzip
Returns the full path to your gzip binary, if found
=head2 $ae->bin_unzip
Returns the full path to your unzip binary, if found
=head2 $ae->bin_unlzma
Returns the full path to your unlzma binary, if found
=head2 $ae->bin_unxz
Returns the full path to your unxz binary, if found
=cut
### paths to commandline tools ###
### each accessor returns the probed path from $PROGRAMS, or an empty
### value when the program was not found at load time
sub bin_gzip       { my $p = $PROGRAMS->{'gzip'};       return $p if $p; return }
sub bin_unzip      { my $p = $PROGRAMS->{'unzip'};      return $p if $p; return }
sub bin_tar        { my $p = $PROGRAMS->{'tar'};        return $p if $p; return }
sub bin_bunzip2    { my $p = $PROGRAMS->{'bunzip2'};    return $p if $p; return }
sub bin_uncompress { my $p = $PROGRAMS->{'uncompress'}; return $p if $p; return }
sub bin_unlzma     { my $p = $PROGRAMS->{'unlzma'};     return $p if $p; return }
sub bin_unxz       { my $p = $PROGRAMS->{'unxz'};       return $p if $p; return }
=head2 $bool = $ae->have_old_bunzip2
Older versions of C</bin/bunzip2>, from before the C<bunzip2 1.0> release,
require all archive names to end in C<.bz2> or it will not extract
them. This method checks if you have a recent version of C<bunzip2>
that allows any extension, or an older one that doesn't.
=cut
### returns true if the installed bunzip2 is a pre-1.0 release (which
### requires archive names to end in .bz2), false/empty otherwise or
### when no determination could be made
sub have_old_bunzip2 {
    my $self = shift;

    ### no bunzip2? no old bunzip2 either :)
    return unless $self->bin_bunzip2;

    ### if we can't run this, we can't be sure if it's too old or not
    ### XXX stupid stupid stupid bunzip2 doesn't understand --version
    ### is not a request to extract data:
    ### $ bunzip2 --version
    ### bzip2, a block-sorting file compressor.  Version 1.0.2, 30-Dec-2001.
    ### [...]
    ### bunzip2: I won't read compressed data from a terminal.
    ### bunzip2: For help, type: `bunzip2 --help'.
    ### $ echo $?
    ### 1
    ### HATEFUL!

    ### double hateful: bunzip2 --version also hangs if input is a pipe
    ### See #32370: Archive::Extract will hang if stdin is a pipe [+PATCH]
    ### So, we have to provide *another* argument which is a fake filename,
    ### just so it wont try to read from stdin to print its version..
    ### *sigh*
    ### Even if the file exists, it won't clobber or change it.
    my $buffer;
    scalar run(
         command => [$self->bin_bunzip2, '--version', 'NoSuchFile'],
         verbose => 0,
         buffer  => \$buffer
    );

    ### no output -- can't tell, so don't claim it's old
    return unless $buffer;

    ### only the major version number matters here
    my ($version) = $buffer =~ /version \s+ (\d+)/ix;

    return 1 if $version < 1;
    return;
}
#################################
#
# Untar code
#
#################################
### annoying issue with (gnu) tar on win32, as illustrated by this
### bug: https://rt.cpan.org/Ticket/Display.html?id=40138
### which shows that (gnu) tar will interpret a file name with a :
### in it as a remote file name, so C:\tmp\foo.txt is interpreted
### as a remote shell, and the extract fails.
{   my @ExtraTarFlags;
    if( ON_WIN32 and my $cmd = __PACKAGE__->bin_tar ) {

        ### if this is gnu tar we are running, we need to use --force-local
        ### (so that C:\foo is not treated as a remote host name)
        push @ExtraTarFlags, '--force-local' if `$cmd --version` =~ /gnu tar/i;
    }

    ### use /bin/tar to extract ###
    ### lists the archive first (-t) to collect file names, then
    ### extracts it (-x); decompression is piped through gzip/bunzip2/
    ### unxz for the compressed tar variants. Returns 1 on success,
    ### METHOD_NA when a needed binary is missing, false on failure.
    sub _untar_bin {
        my $self = shift;

        ### check for /bin/tar ###
        ### check for /bin/gzip if we need it ###
        ### if any of the binaries are not available, return NA
        {   my $diag =  !$self->bin_tar ?
                            loc("No '%1' program found", '/bin/tar') :
                        $self->is_tgz && !$self->bin_gzip ?
                            loc("No '%1' program found", '/bin/gzip') :
                        $self->is_tbz && !$self->bin_bunzip2 ?
                            loc("No '%1' program found", '/bin/bunzip2') :
                        $self->is_txz && !$self->bin_unxz ?
                            loc("No '%1' program found", '/bin/unxz') :
                        '';

            if( $diag ) {
                $self->_error( $diag );
                return METHOD_NA;
            }
        }

        ### XXX figure out how to make IPC::Run do this in one call --
        ### currently i don't know how to get output of a command after a pipe
        ### trapped in a scalar. Mailed barries about this 5th of june 2004.

        ### see what command we should run, based on whether
        ### it's a .tgz or .tar

        ### GNU tar can't handled VMS filespecs, but VMSTAR can handle Unix filespecs.
        my $archive = $self->archive;
        $archive = VMS::Filespec::unixify($archive) if ON_VMS;

        ### XXX solaris tar and bsdtar are having different outputs
        ### depending whether you run with -x or -t
        ### compensate for this insanity by running -t first, then -x
        {   my $cmd =
                $self->is_tgz ? [$self->bin_gzip, '-c', '-d', '-f', $archive, '|',
                                 $self->bin_tar, '-tf', '-'] :
                $self->is_tbz ? [$self->bin_bunzip2, '-cd', $archive, '|',
                                 $self->bin_tar, '-tf', '-'] :
                $self->is_txz ? [$self->bin_unxz, '-cd', $archive, '|',
                                 $self->bin_tar, '-tf', '-'] :
                [$self->bin_tar, @ExtraTarFlags, '-tf', $archive];

            ### run the command
            ### newer versions of 'tar' (1.21 and up) now print record size
            ### to STDERR as well if v OR t is given (used to be both). This
            ### is a 'feature' according to the changelog, so we must now only
            ### inspect STDOUT, otherwise, failures like these occur:
            ### http://www.cpantesters.org/cpan/report/3230366
            my $buffer  = '';
            my @out     = run(  command => $cmd,
                                buffer  => \$buffer,
                                verbose => $DEBUG );

            ### command was unsuccessful
            unless( $out[0] ) {
                return $self->_error(loc(
                                "Error listing contents of archive '%1': %2",
                                $archive, $buffer ));
            }

            ### no buffers available?
            if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
                $self->_error( $self->_no_buffer_files( $archive ) );

            } else {
                ### if we're on solaris we /might/ be using /bin/tar, which has
                ### a weird output format... we might also be using
                ### /usr/local/bin/tar, which is gnu tar, which is perfectly
                ### fine... so we have to do some guessing here =/
                my @files = map { chomp;
                              !ON_SOLARIS ? $_
                                          : (m|^ x \s+  # 'xtract' -- sigh
                                                (.+?),  # the actual file name
                                                \s+ [\d,.]+ \s bytes,
                                                \s+ [\d,.]+ \s tape \s blocks
                                            |x ? $1 : $_);

                        ### only STDOUT, see above. Sometimes, extra whitespace
                        ### is present, so make sure we only pick lines with
                        ### a length
                        } grep { length } map { split $/, $_ } join '', @{$out[3]};

                ### store the files that are in the archive ###
                $self->files(\@files);
            }
        }

        ### now actually extract it ###
        {   my $cmd =
                $self->is_tgz ? [$self->bin_gzip, '-c', '-d', '-f', $archive, '|',
                                 $self->bin_tar, '-xf', '-'] :
                $self->is_tbz ? [$self->bin_bunzip2, '-cd', $archive, '|',
                                 $self->bin_tar, '-xf', '-'] :
                $self->is_txz ? [$self->bin_unxz, '-cd', $archive, '|',
                                 $self->bin_tar, '-xf', '-'] :
                [$self->bin_tar, @ExtraTarFlags, '-xf', $archive];

            my $buffer = '';
            unless( scalar run( command => $cmd,
                                buffer  => \$buffer,
                                verbose => $DEBUG )
            ) {
                return $self->_error(loc("Error extracting archive '%1': %2",
                                $archive, $buffer ));
            }

            ### we might not have them, due to lack of buffers
            if( $self->files ) {
                ### now that we've extracted, figure out where we extracted to
                my $dir = $self->__get_extract_dir( $self->files );

                ### store the extraction dir ###
                $self->extract_path( $dir );
            }
        }

        ### we got here, no error happened
        return 1;
    }
}
### use archive::tar to extract ###
### pure perl extractor for .tar/.tgz/.tbz/.txz archives. Returns 1 on
### success, METHOD_NA when a required module is missing, false on
### failure.
sub _untar_at {
    my $self = shift;

    ### Loading Archive::Tar is going to set it to 1, so make it local
    ### within this block, starting with its initial value. Whatever
    ### Archive::Tar does will be undone when we return.
    ###
    ### Also, later, set $Archive::Tar::WARN to $Archive::Extract::WARN
    ### so users don't have to even think about this variable. If they
    ### do, they still get their set value outside of this call.
    local $Archive::Tar::WARN = $Archive::Tar::WARN;

    ### we definitely need Archive::Tar, so load that first
    {   my $use_list = { 'Archive::Tar' => '0.0' };

        unless( can_load( modules => $use_list ) ) {
            $self->_error(loc("You do not have '%1' installed - " .
                              "Please install it as soon as possible.",
                              'Archive::Tar'));
            return METHOD_NA;
        }
    }

    ### we might pass it a filehandle if it's a .tbz file..
    my $fh_to_read = $self->archive;

    ### we will need Compress::Zlib too, if it's a tgz... and IO::Zlib
    ### if A::T's version is 0.99 or higher
    if( $self->is_tgz ) {
        my $use_list = { 'Compress::Zlib' => '0.0' };
        $use_list->{ 'IO::Zlib' } = '0.0'
            if $Archive::Tar::VERSION >= '0.99';

        unless( can_load( modules => $use_list ) ) {
            my $which = join '/', sort keys %$use_list;

            $self->_error(loc(
                "You do not have '%1' installed - Please ".
                "install it as soon as possible.", $which)
            );

            return METHOD_NA;
        }

    } elsif ( $self->is_tbz ) {
        ### bzip2 decompression is done by handing A::T a filehandle
        my $use_list = { 'IO::Uncompress::Bunzip2' => '0.0' };
        unless( can_load( modules => $use_list ) ) {
            $self->_error(loc(
                "You do not have '%1' installed - Please " .
                "install it as soon as possible.",
                'IO::Uncompress::Bunzip2')
            );

            return METHOD_NA;
        }

        my $bz = IO::Uncompress::Bunzip2->new( $self->archive ) or
            return $self->_error(loc("Unable to open '%1': %2",
                        $self->archive,
                        $IO::Uncompress::Bunzip2::Bunzip2Error));

        $fh_to_read = $bz;
    } elsif ( $self->is_txz ) {
        ### likewise for xz
        my $use_list = { 'IO::Uncompress::UnXz' => '0.0' };
        unless( can_load( modules => $use_list ) ) {
            $self->_error(loc(
                "You do not have '%1' installed - Please " .
                "install it as soon as possible.",
                'IO::Uncompress::UnXz')
            );

            return METHOD_NA;
        }

        my $xz = IO::Uncompress::UnXz->new( $self->archive ) or
            return $self->_error(loc("Unable to open '%1': %2",
                        $self->archive,
                        $IO::Uncompress::UnXz::UnXzError));

        $fh_to_read = $xz;
    }

    my @files;
    {
        ### $Archive::Tar::WARN is 1 by default in Archive::Tar, but we've
        ### localized $Archive::Tar::WARN already.
        $Archive::Tar::WARN = $Archive::Extract::WARN;

        ### only tell it it's compressed if it's a .tgz, as we give it a file
        ### handle if it's a .tbz
        my @read = ( $fh_to_read, ( $self->is_tgz ? 1 : 0 ) );

        ### for version of Archive::Tar > 1.04
        local $Archive::Tar::CHOWN = 0;

        ### use the iterator if we can. it's a feature of A::T 1.40 and up
        if ( $_ALLOW_TAR_ITER && Archive::Tar->can( 'iter' ) ) {

            my $next;
            unless ( $next = Archive::Tar->iter( @read ) ) {
                return $self->_error(loc(
                            "Unable to read '%1': %2", $self->archive,
                            $Archive::Tar::error));
            }

            while ( my $file = $next->() ) {
                push @files, $file->full_path;

                $file->extract or return $self->_error(loc(
                        "Unable to read '%1': %2",
                        $self->archive,
                        $Archive::Tar::error));
            }

        ### older version, read the archive into memory
        } else {

            my $tar = Archive::Tar->new();

            unless( $tar->read( @read ) ) {
                return $self->_error(loc("Unable to read '%1': %2",
                            $self->archive, $Archive::Tar::error));
            }

            ### workaround to prevent Archive::Tar from setting uid, which
            ### is a potential security hole. -autrijus
            ### have to do it here, since A::T needs to be /loaded/ first ###
            {   no strict 'refs'; local $^W;

                ### older versions of archive::tar <= 0.23
                *Archive::Tar::chown = sub {};
            }

            {   local $^W;  # quell 'splice() offset past end of array' warnings
                            # on older versions of A::T

                ### older archive::tar always returns $self, return value
                ### slightly fux0r3d because of it.
                $tar->extract or return $self->_error(loc(
                        "Unable to extract '%1': %2",
                        $self->archive, $Archive::Tar::error ));
            }

            @files = $tar->list_files;
        }
    }

    my $dir = $self->__get_extract_dir( \@files );

    ### store the files that are in the archive ###
    $self->files(\@files);

    ### store the extraction dir ###
    $self->extract_path( $dir );

    ### check if the dir actually appeared ###
    return 1 if -d $self->extract_path;

    ### no dir, we failed ###
    return $self->_error(loc("Unable to extract '%1': %2",
                             $self->archive, $Archive::Tar::error ));
}
#################################
#
# Gunzip code
#
#################################
### Decompress a gzip archive by shelling out to the gzip binary.
### The decompressed stream goes to $self->_gunzip_to; returns true on
### success, METHOD_NA when no gzip program is available, or false (via
### _error) on any other failure.
sub _gunzip_bin {
my $self = shift;
### check for /bin/gzip -- we need it ###
unless( $self->bin_gzip ) {
$self->_error(loc("No '%1' program found", '/bin/gzip'));
return METHOD_NA;
}
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
### -c write to stdout, -d decompress, -f force (ignore suffix checks)
my $cmd = [ $self->bin_gzip, '-c', '-d', '-f', $self->archive ];
my $buffer;
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to gunzip '%1': %2",
$self->archive, $buffer));
}
### no buffers available?
if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
$self->_error( $self->_no_buffer_content( $self->archive ) );
}
$self->_print($fh, $buffer) if defined $buffer;
close $fh;
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
### Pure-perl gunzip via Compress::Zlib; decompresses $self->archive into
### $self->_gunzip_to. Returns true on success, METHOD_NA when the module
### is not installed, or false (via _error) on open failure.
sub _gunzip_cz {
my $self = shift;
my $use_list = { 'Compress::Zlib' => '0.0' };
unless( can_load( modules => $use_list ) ) {
$self->_error(loc("You do not have '%1' installed - Please " .
"install it as soon as possible.", 'Compress::Zlib'));
return METHOD_NA;
}
my $gz = Compress::Zlib::gzopen( $self->archive, "rb" ) or
return $self->_error(loc("Unable to open '%1': %2",
$self->archive, $Compress::Zlib::gzerrno));
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
my $buffer;
### stream chunk by chunk until gzread() reports EOF (0) or an error (<0)
$self->_print($fh, $buffer) while $gz->gzread($buffer) > 0;
$fh->close;
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
#################################
#
# Uncompress code
#
#################################
### Decompress a .Z (compress) archive with the external uncompress binary.
### Output goes to $self->_gunzip_to; returns true on success, METHOD_NA
### when no uncompress program is available.
sub _uncompress_bin {
my $self = shift;
### check for /bin/uncompress -- we need it ###
unless( $self->bin_uncompress ) {
$self->_error(loc("No '%1' program found", '/bin/uncompress'));
return METHOD_NA;
}
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
### -c writes the decompressed data to stdout, captured in $buffer
my $cmd = [ $self->bin_uncompress, '-c', $self->archive ];
my $buffer;
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to uncompress '%1': %2",
$self->archive, $buffer));
}
### no buffers available?
if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
$self->_error( $self->_no_buffer_content( $self->archive ) );
}
$self->_print($fh, $buffer) if defined $buffer;
close $fh;
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
#################################
#
# Unzip code
#
#################################
### Extract a zip archive with the external unzip binary. Needs two runs:
### 'unzip -Z -1' to list the members (stored via $self->files), then
### 'unzip -qq -o' to actually extract them into the current directory.
sub _unzip_bin {
my $self = shift;
### check for /bin/unzip -- we need it ###
unless( $self->bin_unzip ) {
$self->_error(loc("No '%1' program found", '/bin/unzip'));
return METHOD_NA;
}
### first, get the files.. it must be 2 different commands with 'unzip' :(
{ ### on VMS, capital letter options have to be quoted. This is
### reported by John Malmberg on P5P Tue 21 Aug 2007 05:05:11
### Subject: [patch@31735]Archive Extract fix on VMS.
my $opt = ON_VMS ? '"-Z"' : '-Z';
my $cmd = [ $self->bin_unzip, $opt, '-1', $self->archive ];
my $buffer = '';
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to unzip '%1': %2",
$self->archive, $buffer));
}
### no buffers available?
if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
$self->_error( $self->_no_buffer_files( $self->archive ) );
} else {
### Annoyingly, pesky MSWin32 can either have 'native' tools
### which have \r\n line endings or Cygwin-based tools which
### have \n line endings. Jan Dubois suggested using this fix
my $split = ON_WIN32 ? qr/\r?\n/ : "\n";
$self->files( [split $split, $buffer] );
}
}
### now, extract the archive ###
{ my $cmd = [ $self->bin_unzip, '-qq', '-o', $self->archive ];
my $buffer;
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to unzip '%1': %2",
$self->archive, $buffer));
}
### derive the extraction dir from the member list captured above
if( scalar @{$self->files} ) {
my $files = $self->files;
my $dir = $self->__get_extract_dir( $files );
$self->extract_path( $dir );
}
}
return 1;
}
### Pure-perl zip extraction via Archive::Zip. Every member is extracted
### individually, with an explicit absolute target path (see #43278 below).
### Returns true on success, METHOD_NA when Archive::Zip is missing.
sub _unzip_az {
my $self = shift;
my $use_list = { 'Archive::Zip' => '0.0' };
unless( can_load( modules => $use_list ) ) {
$self->_error(loc("You do not have '%1' installed - Please " .
"install it as soon as possible.", 'Archive::Zip'));
return METHOD_NA;
}
my $zip = Archive::Zip->new();
unless( $zip->read( $self->archive ) == &Archive::Zip::AZ_OK ) {
return $self->_error(loc("Unable to read '%1'", $self->archive));
}
my @files;
### Address: #43278: Explicitly tell Archive::Zip where to put the files:
### "In my BackPAN indexing, Archive::Zip was extracting things
### in my script's directory instead of the current working directory.
### I traced this back through Archive::Zip::_asLocalName which
### eventually calls File::Spec::Win32::rel2abs which on Windows might
### call Cwd::getdcwd. getdcwd returns the wrong directory in my
### case, even though I think I'm on the same drive.
###
### To fix this, I pass the optional second argument to
### extractMember using the cwd from Archive::Extract." --bdfoy
## store cwd() before looping; calls to cwd() can be expensive, and
### it won't change during the loop
my $extract_dir = cwd();
### have to extract every member individually ###
for my $member ($zip->members) {
push @files, $member->{fileName};
### file to extract to, to avoid the above problem
my $to = File::Spec->catfile( $extract_dir, $member->{fileName} );
unless( $zip->extractMember($member, $to) == &Archive::Zip::AZ_OK ) {
return $self->_error(loc("Extraction of '%1' from '%2' failed",
$member->{fileName}, $self->archive ));
}
}
my $dir = $self->__get_extract_dir( \@files );
### record which files were extracted, and where they went ###
$self->files( \@files );
$self->extract_path( File::Spec->rel2abs($dir) );
return 1;
}
### Work out the directory an archive extracted into, given the list of
### member paths. Compares the directory of the first and last entry; if
### they differ, falls back to their shared top-level directory, or '.'.
### Returns an absolute path, or nothing for an empty list.
sub __get_extract_dir {
    my $self  = shift;
    my $files = shift || [];

    return unless @$files;

    ### Normalise an entry to its directory: catdir() strips trailing
    ### slashes and rewrites './dir/foo' as 'dir/foo' (see bug #23999).
    my $dir_of = sub {
        my $entry = shift;
        return -d $entry
            ? File::Spec->catdir( $entry, '' )
            : File::Spec->catdir( dirname( $entry ) );
    };

    my $first_dir = $dir_of->( $files->[0]  );
    my $last_dir  = $dir_of->( $files->[-1] );

    my $dir;
    if( $first_dir eq $last_dir ) {
        ### identical -- that is unambiguously the extraction dir
        $dir = $first_dir;
    }
    else {
        ### different -- use a shared top-level dir if there is one,
        ### otherwise fall back to the current directory
        my ($top_first) = File::Spec->splitdir( $first_dir );
        my ($top_last)  = File::Spec->splitdir( $last_dir  );
        $dir = File::Spec->rel2abs(
                    $top_first eq $top_last ? $top_first : '.' );
    }

    return File::Spec->rel2abs( $dir );
}
#################################
#
# Bunzip2 code
#
#################################
### Decompress a bzip2 archive with the external bunzip2 binary into
### $self->_gunzip_to. Guards against old bunzip2 versions that refuse
### files without a .bz2 suffix. Returns true or METHOD_NA.
sub _bunzip2_bin {
my $self = shift;
### check for /bin/bunzip2 -- we need it ###
unless( $self->bin_bunzip2 ) {
$self->_error(loc("No '%1' program found", '/bin/bunzip2'));
return METHOD_NA;
}
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
### guard against broken bunzip2. See ->have_old_bunzip2()
### for details
if( $self->have_old_bunzip2 and $self->archive !~ /\.bz2$/i ) {
return $self->_error(loc("Your bunzip2 version is too old and ".
"can only extract files ending in '%1'",
'.bz2'));
}
my $cmd = [ $self->bin_bunzip2, '-cd', $self->archive ];
my $buffer;
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to bunzip2 '%1': %2",
$self->archive, $buffer));
}
### no buffers available?
if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
$self->_error( $self->_no_buffer_content( $self->archive ) );
}
$self->_print($fh, $buffer) if defined $buffer;
close $fh;
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
### using cz2, the compact versions... this we use mainly in archive::tar
### extractor..
# sub _bunzip2_cz1 {
# my $self = shift;
#
# my $use_list = { 'IO::Uncompress::Bunzip2' => '0.0' };
# unless( can_load( modules => $use_list ) ) {
# return $self->_error(loc("You do not have '%1' installed - Please " .
# "install it as soon as possible.",
# 'IO::Uncompress::Bunzip2'));
# }
#
# my $bz = IO::Uncompress::Bunzip2->new( $self->archive ) or
# return $self->_error(loc("Unable to open '%1': %2",
# $self->archive,
# $IO::Uncompress::Bunzip2::Bunzip2Error));
#
# my $fh = FileHandle->new('>'. $self->_gunzip_to) or
# return $self->_error(loc("Could not open '%1' for writing: %2",
# $self->_gunzip_to, $! ));
#
# my $buffer;
# $fh->print($buffer) while $bz->read($buffer) > 0;
# $fh->close;
#
# ### set what files where extract, and where they went ###
# $self->files( [$self->_gunzip_to] );
# $self->extract_path( File::Spec->rel2abs(cwd()) );
#
# return 1;
# }
### Pure-perl bzip2 decompression via IO::Uncompress::Bunzip2, writing
### directly to $self->_gunzip_to. Returns true on success, METHOD_NA
### when the module is not installed.
sub _bunzip2_bz2 {
my $self = shift;
my $use_list = { 'IO::Uncompress::Bunzip2' => '0.0' };
unless( can_load( modules => $use_list ) ) {
$self->_error(loc("You do not have '%1' installed - Please " .
"install it as soon as possible.",
'IO::Uncompress::Bunzip2'));
return METHOD_NA;
}
### one-shot file-to-file decompression
IO::Uncompress::Bunzip2::bunzip2($self->archive => $self->_gunzip_to)
or return $self->_error(loc("Unable to uncompress '%1': %2",
$self->archive,
$IO::Uncompress::Bunzip2::Bunzip2Error));
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
#################################
#
# UnXz code
#
#################################
### Decompress an xz archive with the external unxz binary into
### $self->_gunzip_to. Returns true on success, METHOD_NA when no
### unxz program is available.
sub _unxz_bin {
my $self = shift;
### check for /bin/unxz -- we need it ###
unless( $self->bin_unxz ) {
$self->_error(loc("No '%1' program found", '/bin/unxz'));
return METHOD_NA;
}
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
### -c stdout, -d decompress, -f force
my $cmd = [ $self->bin_unxz, '-c', '-d', '-f', $self->archive ];
my $buffer;
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to unxz '%1': %2",
$self->archive, $buffer));
}
### no buffers available?
if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
$self->_error( $self->_no_buffer_content( $self->archive ) );
}
$self->_print($fh, $buffer) if defined $buffer;
close $fh;
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
### Pure-perl xz decompression via IO::Uncompress::UnXz, writing directly
### to $self->_gunzip_to. Returns true on success, METHOD_NA when the
### module is not installed.
sub _unxz_cz {
my $self = shift;
my $use_list = { 'IO::Uncompress::UnXz' => '0.0' };
unless( can_load( modules => $use_list ) ) {
$self->_error(loc("You do not have '%1' installed - Please " .
"install it as soon as possible.",
'IO::Uncompress::UnXz'));
return METHOD_NA;
}
### one-shot file-to-file decompression
IO::Uncompress::UnXz::unxz($self->archive => $self->_gunzip_to)
or return $self->_error(loc("Unable to uncompress '%1': %2",
$self->archive,
$IO::Uncompress::UnXz::UnXzError));
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
#################################
#
# unlzma code
#
#################################
### Decompress an lzma archive with the external unlzma binary into
### $self->_gunzip_to. Returns true on success, METHOD_NA when no
### unlzma program is available.
sub _unlzma_bin {
my $self = shift;
### check for /bin/unlzma -- we need it ###
unless( $self->bin_unlzma ) {
$self->_error(loc("No '%1' program found", '/bin/unlzma'));
return METHOD_NA;
}
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
### -c writes the decompressed data to stdout, captured in $buffer
my $cmd = [ $self->bin_unlzma, '-c', $self->archive ];
my $buffer;
unless( scalar run( command => $cmd,
verbose => $DEBUG,
buffer => \$buffer )
) {
return $self->_error(loc("Unable to unlzma '%1': %2",
$self->archive, $buffer));
}
### no buffers available?
if( !IPC::Cmd->can_capture_buffer and !$buffer ) {
$self->_error( $self->_no_buffer_content( $self->archive ) );
}
$self->_print($fh, $buffer) if defined $buffer;
close $fh;
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
### Pure-perl lzma decompression. Prefers IO::Uncompress::UnLzma
### (streaming, file-to-file); falls back to Compress::unLZMA, which
### loads the whole decompressed content into memory first. Returns
### true on success, METHOD_NA when neither module is installed.
sub _unlzma_cz {
my $self = shift;
my $use_list1 = { 'IO::Uncompress::UnLzma' => '0.0' };
my $use_list2 = { 'Compress::unLZMA' => '0.0' };
if (can_load( modules => $use_list1 ) ) {
IO::Uncompress::UnLzma::unlzma($self->archive => $self->_gunzip_to)
or return $self->_error(loc("Unable to uncompress '%1': %2",
$self->archive,
$IO::Uncompress::UnLzma::UnLzmaError));
}
elsif (can_load( modules => $use_list2 ) ) {
my $fh = FileHandle->new('>'. $self->_gunzip_to) or
return $self->_error(loc("Could not open '%1' for writing: %2",
$self->_gunzip_to, $! ));
### uncompressfile() returns the whole content, or undef on failure
my $buffer;
$buffer = Compress::unLZMA::uncompressfile( $self->archive );
unless ( defined $buffer ) {
return $self->_error(loc("Could not unlzma '%1': %2",
$self->archive, $@));
}
$self->_print($fh, $buffer) if defined $buffer;
close $fh;
}
else {
$self->_error(loc("You do not have '%1' or '%2' installed - Please " .
"install it as soon as possible.", 'Compress::unLZMA', 'IO::Uncompress::UnLzma'));
return METHOD_NA;
}
### record which files were extracted, and where they went ###
$self->files( [$self->_gunzip_to] );
$self->extract_path( File::Spec->rel2abs(cwd()) );
return 1;
}
#################################
#
# Error code
#
#################################
# For printing binaries that avoids interfering globals
### Write raw chunks to a filehandle with the global output separators
### neutralised, so binary data is emitted verbatim regardless of what
### the caller set $\ / $" / $, to. Returns print()'s result.
sub _print {
    my ($self, $fh, @chunks) = @_;

    local $\ = undef;   # no output record separator
    local $" = ' ';     # default list separator
    local $, = '';      # no output field separator between chunks

    return print {$fh} @chunks;
}
### Record an error: stores both the short message and a Carp::longmess
### stack trace, and carp()s one of them depending on $WARN / $DEBUG.
### Always returns false, so callers can 'return $self->_error(...)'.
sub _error {
my $self = shift;
my $error = shift;
my $lerror = Carp::longmess($error);
push @{$self->_error_msg}, $error;
push @{$self->_error_msg_long}, $lerror;
### set $Archive::Extract::WARN to 0 to disable printing
### of errors
if( $WARN ) {
carp $DEBUG ? $lerror : $error;
}
return;
}
### Return the accumulated error messages joined on $/. A true argument
### selects the long (stack-trace) variants; falls back to an empty list
### so join() never sees undef.
sub error {
    my $self      = shift;
    my $want_long = shift;

    my $messages = ( $want_long ? $self->_error_msg_long
                                : $self->_error_msg ) || [];

    return join( $/, @$messages );
}
=head2 debug( MESSAGE )
This method outputs MESSAGE to the default filehandle if C<$DEBUG> is
true. It's a small method, but it's here if you'd like to subclass it
so you can so something else with any debugging output.
=cut
### this is really a stub for subclassing
### Print a debug message when $DEBUG is on; a deliberate stub so
### subclasses can redirect debugging output elsewhere.
sub debug {
    my (undef, $message) = @_;   # first arg is the invocant; unused here
    return unless $DEBUG;        # debugging disabled -- stay silent
    print $message;
}
### Localised message used when IPC::Cmd could not capture command output,
### so the extracted file list / extraction dir is unknown for $file.
sub _no_buffer_files {
    my $self = shift;
    my $file = shift;
    return unless $file;   # no filename, nothing to report
    return loc("No buffer captured, unable to tell ".
               "extracted files or extraction dir for '%1'", $file);
}
### Localised message used when IPC::Cmd could not capture command output,
### so the decompressed content of $file is unavailable.
sub _no_buffer_content {
    my $self = shift;
    my $file = shift;
    return unless $file;   # no filename, nothing to report
    return loc("No buffer captured, unable to get content for '%1'", $file);
}
1;
=pod
=head1 HOW IT WORKS
C<Archive::Extract> tries first to determine what type of archive you
are passing it, by inspecting its suffix. It does not do this by using
Mime magic, or something related. See C<CAVEATS> below.
Once it has determined the file type, it knows which extraction methods
it can use on the archive. It will try a perl solution first, then fall
back to a commandline tool if that fails. If that also fails, it will
return false, indicating it was unable to extract the archive.
See the section on C<GLOBAL VARIABLES> to see how to alter this order.
=head1 CAVEATS
=head2 File Extensions
C<Archive::Extract> relies on the extension of the archive to determine
what type it is, and what extractor methods therefore can be used. If
your archives do not have any of the extensions as described in the
C<new()> method, you will have to specify the type explicitly, or
C<Archive::Extract> will not be able to extract the archive for you.
=head2 Supporting Very Large Files
C<Archive::Extract> can use either pure perl modules or command line
programs under the hood. Some of the pure perl modules (like
C<Archive::Tar> and C<Compress::unLZMA>) take the entire contents of the archive into memory,
which may not be feasible on your system. Consider setting the global
variable C<$Archive::Extract::PREFER_BIN> to C<1>, which will prefer
the use of command line programs and won't consume so much memory.
See the C<GLOBAL VARIABLES> section below for details.
=head2 Bunzip2 support of arbitrary extensions.
Older versions of C</bin/bunzip2> do not support arbitrary file
extensions and insist on a C<.bz2> suffix. Although we do our best
to guard against this, if you experience a bunzip2 error, it may
be related to this. For details, please see the C<have_old_bunzip2>
method.
=head1 GLOBAL VARIABLES
=head2 $Archive::Extract::DEBUG
Set this variable to C<true> to have all calls to command line tools
be printed out, including all their output.
This also enables C<Carp::longmess> errors, instead of the regular
C<carp> errors.
Good for tracking down why things don't work with your particular
setup.
Defaults to C<false>.
=head2 $Archive::Extract::WARN
This variable controls whether errors encountered internally by
C<Archive::Extract> should be C<carp>'d or not.
Set to false to silence warnings. Inspect the output of the C<error()>
method manually to see what went wrong.
Defaults to C<true>.
=head2 $Archive::Extract::PREFER_BIN
This variable controls whether C<Archive::Extract> should prefer the
use of perl modules, or commandline tools to extract archives.
Set to C<true> to have C<Archive::Extract> prefer commandline tools.
Defaults to C<false>.
=head1 TODO / CAVEATS
=over 4
=item Mime magic support
Maybe this module should use something like C<File::Type> to determine
the type, rather than blindly trust the suffix.
=item Thread safety
Currently, C<Archive::Extract> does a C<chdir> to the extraction dir before
extraction, and a C<chdir> back again after. This is not necessarily
thread safe. See C<rt.cpan.org> bug C<#45671> for details.
=back
=head1 BUG REPORTS
Please report bugs or other issues to E<lt>bug-archive-extract@rt.cpan.orgE<gt>.
=head1 AUTHOR
This module by Jos Boumans E<lt>kane@cpan.orgE<gt>.
=head1 COPYRIGHT
This library is free software; you may redistribute and/or modify it
under the same terms as Perl itself.
=cut
# Local variables:
# c-indentation-style: bsd
# c-basic-offset: 4
# indent-tabs-mode: nil
# End:
# vim: expandtab shiftwidth=4:
| weipinghe/elasticsearch | dev-tools/src/main/resources/license-check/lib/Archive/Extract.pm | Perl | apache-2.0 | 54,176 |
# Luka plugin: probes and reports host system information.
# FIX: code references stored in %utility must be invoked with '->'; the
# original bare $utility{...}(...) form is a Perl syntax error. Also use a
# checked 3-arg open with a lexical filehandle when counting plugin lines.
addPlug('System', {
    'creator' => 'Caaz',
    'version' => '1.2',
    'description' => "It's for System information!",
    'name' => 'System Info',
    'dependencies' => ['Core_Utilities'],
    # On plugin load, probe and cache this host's system info once per OS key.
    'code' => { 'load' => sub { if(!$lk{tmp}{plugin}{'System'}{$lk{os}}) { %{$lk{tmp}{plugin}{'System'}{$lk{os}}} = %{&{$lk{plugin}{'System'}{utilities}{get}}()}; } } },
    'utilities' => {
        'get' => sub {
            # Input: None
            # Output: hashref (name, os, version, manufacturer, model, type, memory)
            if($lk{os} =~ /MSWin32/) {
                # Windows: parse the output of 'systeminfo' line by line.
                my $output = `systeminfo`;
                my @lines = split /\n/, $output;
                my %system = ();
                foreach(@lines) {
                    if(/^Host Name\:\s+(.+)$/) { $system{name} = $1; }
                    elsif(/^OS Name\:\s+(.+)$/) { $system{os} = $1; }
                    elsif(/^OS Version\:\s+(.+)$/) { $system{version} = $1; }
                    elsif(/^System Manufacturer\:\s+(.+)$/) { $system{manufacturer} = $1; }
                    elsif(/^System Model\:\s+(.+)$/) { $system{model} = $1; }
                    elsif(/^System Type\:\s+(.+)$/) { $system{type} = $1; }
                    elsif(/^Total Physical Memory\:\s+(.+)$/) { $system{memory} = $1; }
                    # Grab processors, it's on a different line!
                }
                return \%system;
            }
            elsif($lk{os} =~ /linux|darwin/i) {
                # Unix-likes: first three fields of 'uname -a' are
                # os-name, host-name, kernel-version.
                my %system = ();
                my @output = (split ' ', `uname -a`)[0..2];
                foreach(['name',1],['os',0],['version',2]) { $system{${$_}[0]} = $output[${$_}[1]];}
                # Start working on more linux stuff, possibly with the help of Cinos.
                return \%system;
            }
            else {
                lkDebug("Need something for $lk{os}");
            }
        },
        'info' => sub {
            # Args: handle, where (reply target)
            if(!$lk{tmp}{plugin}{'System'}{$lk{os}}) { &{$utility{'Fancify_say'}}($_[0],$_[1],"Information hasn't been set. Perhaps there's no code for \x04$lk{os}\x04 yet?"); }
            else {
                my %sys = %{$lk{tmp}{plugin}{'System'}{$lk{os}}};
                my @output = ();
                foreach('name','os','version','memory','model','type') { push(@output, "[$_: \x04$sys{$_}\x04]") if($sys{$_}); }
                &{$utility{'Fancify_say'}}($_[0],$_[1],join " ", @output);
            }
        },
    },
    'commandsV2' => {
        'Meta|Version' => {
            'tags' => ['utility'],
            'description' => "Gets various information about the bot. Caaz's favorite command",
            'code' => sub {
                my @files = (<./Plugins/*.pl>,$0);
                my %plugins = %{&{$utility{'Core_getAllPlugins'}}};
                my %sys = %{$lk{tmp}{plugin}{'System'}{$lk{os}}};
                my %count = ('lines'=>0,'comments'=>0);
                # Count total lines and comment-bearing lines across every
                # plugin file plus the main script.
                foreach my $file (@files) {
                    open my $src, '<', $file or next;   # skip unreadable files
                    my @lines = <$src>;
                    $count{lines} += @lines+0;
                    foreach(@lines) { if($_ =~ /\#/) {$count{comments}++;} }
                    close $src;
                }
                # Args: key, handle, where, nick, username, host.
                # Uptime: element 4 of Core_Utilities_getTime's list is the
                # formatted duration string.
                $utility{'Fancify_say'}->($_[1],$_[2],$utility{'Core_Utilities_list'}->('Luka'=>$lk{version},'System'=>$sys{os},'Perl'=>$^V,'Uptime'=>($utility{'Core_Utilities_getTime'}->(time-$^T))[4],'Files'=>@files+0,'Lines'=>$count{lines},'Comments'=>$count{comments},'Plugins'=>@{$plugins{loaded}}+0));
            }
        }
    }
});
#!/usr/bin/perl -w
use strict;
use Bio::DB::Fasta;
use Parallel::ForkManager;
use Statistics::Descriptive;

# For every short-named sequence (<= 2 chars, e.g. chromosome names) in the
# fasta, write each k-mer's position as a BAM record -- one forked worker per
# sequence -- then merge the per-sequence BAMs with samtools.
# inputs
die "perl $0 <fasta (txt or zipped)> <fasta_info> <kmer_size>\n" unless @ARGV == 3;
my ($fasta_file, $fasta_info, $ksize) = @ARGV;
my $max_threads = 10;    # upper bound on concurrent forked workers

my $time = localtime(time);
print STDERR $time, "\t", "Starting ...\n";

# Transparently unzip .gz input once; reuse an existing unzipped copy.
if ($fasta_file =~ /gz$/){
    my $unzipped = $fasta_file;
    $unzipped =~ s/.gz$//;
    unless (-e $unzipped){
        die $! if system("gunzip -c $fasta_file >$unzipped");
    }
    $fasta_file = $unzipped;
}

## index the fasta and keep only short sequence ids
my $fasta = Bio::DB::Fasta->new($fasta_file);
my @ids = $fasta->get_all_primary_ids;
@ids = grep{length $_ <= 2}@ids;
exit if @ids == 0;

my $pm = Parallel::ForkManager->new($max_threads);
my @pos_files;
foreach my $id (sort{$a cmp $b} @ids){
    my $out = $fasta_file . "_" . $id . "_${ksize}mer_pos.bam";
    push @pos_files , $out;
    my $pid = $pm->start and next;    # parent: remember the file, move on
    my $t = localtime(time);
    print STDERR $t , "\t", "Processing $id ...\n";
    &get_kmer_positions($fasta_info, $fasta, $id, $ksize, $out);
    $pm->finish;
}
$pm->wait_all_children;
# BUG FIX: localtime() in print's list context returns a 9-element list;
# force scalar context to get the formatted timestamp string.
print STDERR scalar localtime(time), "\t Done Processing ...\n";

# combine the per-sequence BAMs into a single output and tidy up
my $combine = "samtools merge -f " . $fasta_file . "_${ksize}mer_pos.bam " . join(" ", @pos_files);
print STDERR $combine, "\n";
die if system($combine);
unlink($_) for @pos_files;    # was: map{...} in void context

$time = localtime(time);
print STDERR $time ,"\t", "Done!!!\n";
### Return every k-mer over the alphabet {A,T,G,C}, in the order produced
### by repeatedly extending each existing word with each base (so the list
### has 4**$k elements, beginning 'AA..', ending 'CC..').
sub kmer_generator {
    my $k = shift;
    my @alphabet = qw(A T G C);

    ### start with the 1-mers, then extend every word by one base per round
    my @words = @alphabet;
    for my $round ( 2 .. $k ) {
        @words = map {
            my $prefix = $_;
            map { $prefix . $_ } @alphabet;
        } @words;
    }
    return @words;
}
sub get_kmer_positions {
    # Stream every k-mer of sequence $id (read via the Bio::DB::Fasta handle
    # $db) as a SAM record into 'samtools view -Sb', producing BAM file $out.
    # $fasta_info supplies the pre-built sequence dictionary (@SQ lines).
    my ($fasta_info, $db, $id, $k, $out) = @_;
    my $seq_len = $db->length($id);

    open (my $O, "|samtools view -Sb - >$out") or die $!;

    # SAM header: @HD line followed by the sequence dictionary file.
    my $header = `cat $fasta_info`;
    print $O "\@HD\n", $header;

    # Slide a window of width $k across the sequence (1-based coordinates).
    # NOTE(review): the final window starting at $seq_len-$k+1 is never
    # emitted by this bound -- confirm the off-by-one is intentional.
    for (my $i = 1; $i <= ($seq_len - $k); $i++){
        my $subseq = uc( $db->seq($id, $i => ($i+$k-1)) );
        next if $subseq =~ /[^ATGC]/;   # skip windows with N/ambiguity codes
        # Record layout: QNAME=kmer FLAG=0 RNAME=$id POS=$i MAPQ=100 CIGAR=kM
        print $O join("\t", $subseq, 0, $id, $i, 100, $k."M", "*", 0, 0, $subseq, "H"x$k), "\n";
    }
    # Check the pipe close so samtools failures are not silently ignored.
    close $O or die $!;
}
sub max {
    # Return the numerically largest argument (undef for an empty list).
    # FIX: the original used map in void context for its side effect;
    # a plain foreach expresses the intent directly.
    my ($max) = @_;
    for my $value (@_) {
        $max = $value if $value > $max;
    }
    return $max;
}
| swang8/Perl_scripts_misc | get_kmer_in_fasta.pl | Perl | mit | 2,755 |
package NAVBUTTON;
use strict;
use Storable;
use Digest::MD5;
use Storable;
use Image::Magick qw ();
use POSIX qw (ceil);
#$NAVBUTTON::CACHE_USER = ''; # username of the user currently in cache
#$NAVBUTTON::CACHE_INFO = {}; # cache currently on disk.
## note: you must set FLOW::USERNAME before calling this!
### Memoised front-end for button_info(): results are keyed by an MD5 of
### (type | width | height | messages) and cached in memcached per user.
### FIXES: guard the memcached get() result against undef before the string
### compare (cache misses warned under -w); use a checked lexical 3-arg open
### for the debug breadcrumb; re-enable the unlink so successful renders no
### longer leak files in /dev/shm (the breadcrumb survives only on a crash).
sub cached_button_info {
    my ($USERNAME,$type,$width,$height,$messages) = @_;
    if (not defined $USERNAME) { die("USERNAME NOT SET"); }
    if (not defined $type) { $type = 'default'; }
    if (not defined $width) { $width = ''; }
    if (not defined $height) { $height = ''; }
    if ((not defined $messages) || (not scalar @{$messages})) { return []; }

    my $UUID = &Digest::MD5::md5_hex($type.'|'.$width.'|'.$height.'|'.join('.',@{$messages}));

    my $REF = undef;
    my ($memd) = &ZOOVY::getMemd($USERNAME);
    if (defined $memd) {
        my $YAML = $memd->get("$USERNAME.BUTTON.$UUID");
        # get() returns undef on a cache miss -- check before comparing.
        if ((defined $YAML) && ($YAML ne '')) {
            $REF = YAML::Syck::Load($YAML);
        }
    }
    if (not defined $REF) {
        # Crash breadcrumb: dump the request so a render that dies can be
        # reproduced; removed again below once button_info() succeeds.
        if (open my $dumpfh, '>', "/dev/shm/button.$UUID") {
            use Data::Dumper; print $dumpfh Dumper($$,$USERNAME,$type,$width,$height,$messages);
            close $dumpfh;
        }
        $REF = &NAVBUTTON::button_info($USERNAME,$type,$width,$height,$messages);
        if (defined $memd) {
            $memd->set("$USERNAME.BUTTON.$UUID",YAML::Syck::Dump($REF));
        }
        unlink("/dev/shm/button.$UUID");
    }
    return($REF);
}
### Compute layout information for one or more navigation-button messages.
### For each message: word-wraps the text to the button width (when fixed),
### measures each wrapped line with ImageMagick font metrics, and derives
### the final image dimensions. Returns an arrayref of
### [config-hashref, \@line_widths, \@line_heights, \@lines] tuples, one
### per message, consumed later by the button renderer (SITE::Static).
sub button_info {
my ($merchant_id,$type,$width,$height,$messages) = @_;
if (not defined $merchant_id) { $merchant_id = ''; }
if (not defined $type) { $type = 'default'; }
if (not defined $width) { $width = ''; }
if (not defined $height) { $height = ''; }
if ((not defined $messages) || (not scalar @{$messages})) { return []; }
### template settings come from the pre-built button.bin Storable blob
my $iniref = {}; ## this should be whatever is in the .bin file
if (-f "/httpd/static/navbuttons/$type/button.bin") {
$iniref = retrieve("/httpd/static/navbuttons/$type/button.bin");
$iniref->{'dir'} = "/httpd/static/navbuttons/$type";
}
########################################
# Get the image width and height
# (If we have it at this point); get_width/get_height flag whether the
# dimension still has to be derived from the measured text below.
$iniref->{'get_width'} = 1;
if ($width ne '') {
$iniref->{'width'} = int($width);
$iniref->{'get_width'} = 0;
}
$iniref->{'get_height'} = 1;
if ($height ne '') {
$iniref->{'height'} = int($height);
$iniref->{'get_height'} = 0;
}
### relative font names resolve against the shared fonts directory
if (substr($iniref->{'font'},0,1) ne '/') {
$iniref->{'font'} = '/httpd/static/fonts/'.$iniref->{'font'};
}
my $out = [];
foreach my $message (@{$messages}) {
my $cfg = {}; # this is stuff SPECIFIC to this message.
## make a copy of all settings in $iniref since we'll need the settings for SITE::Static when it
## actually generates the button (yeah I know this is retarded)
## NOTE: we could do this better, but it'd require some substantial testing.
foreach my $k (keys %{$iniref}) {
$cfg->{$k} = $iniref->{$k};
}
$cfg->{'text_width'} = 0;
$cfg->{'text_height'} = 0;
### empty messages render as a space; leading whitespace gets a dash so
### the metrics query does not strip it
if ((not defined $message) || ($message eq '')) { $message = ' '; }
if ($message =~ m/^\s/) { $message = '-' . $message; }
##############################################################################
# Get width and height for the text, and extrapolate for the image if
# neccessary
my (@widths,@heights,@lines);
# Put this in a temporary block
### wrap limit: effectively unbounded unless the button width is fixed,
### in which case it is the width minus padding and borders
my $limit = 2000;
if (not $iniref->{'get_width'}) {
$limit = (
$cfg->{'width'} -
$iniref->{'padding_left'} -
$iniref->{'padding_right'} -
($iniref->{'border_x'} * 2)
);
}
my $line_count = 0;
my @results = ();
my @words = split /\s/, $message;
### a 1x1 canvas in the background colour is enough for metrics queries
my $temp = Image::Magick->new();
$temp->Read('xc:'.$iniref->{'background_color'});
### greedy word wrap: seed each line with one word, then keep appending
### words while the rendered width stays inside $limit
while (my $line = shift @words) {
## ImageMagick 5.5.6 and higher require us to hard code /httpd/fonts
# if (($Image::Magick::VERSION eq '5.5.7') || ($Image::Magick::VERSION eq '5.5.6') ||
@results = $temp->QueryFontMetrics(
'text' => $line,
'font' => '@'.$iniref->{'font'},
'pointsize' => $iniref->{'font_size'},
);
### results[4]/[5] are the rendered text width/height in pixels
$widths[$line_count] = POSIX::ceil($results[4]);
$heights[$line_count] = POSIX::ceil($results[5]);
my @check_words = @words;
while (my $word = shift @check_words) {
@results = $temp->QueryFontMetrics(
'text' => "$line $word",
'font' => '@'.$iniref->{'font'},
'pointsize' => $iniref->{'font_size'},
);
last if ($results[4] > $limit);
shift @words;
$line = "$line $word";
$widths[$line_count] = POSIX::ceil($results[4]);
$heights[$line_count] = POSIX::ceil($results[5]);
}
## added 4 pixels of padding per line for shadowed text 7/27/04 - BH
if ((defined $iniref->{'shadow'}) || (defined $iniref->{'shadowpad'})) {
if ((defined $iniref->{'shadowpad'}) || (lc($iniref->{'shadow'}) eq 'true')) {
$heights[$line_count] += 4; }
}
### track the widest line and the running total height
if ($widths[$line_count] > $cfg->{'text_width'}) {
$cfg->{'text_width'} = $widths[$line_count];
}
$cfg->{'text_height'} = $cfg->{'text_height'} + $heights[$line_count];
$lines[$line_count] = $line;
$line_count++;
}
### font metrics of the last measured line (ascender/descender/max advance)
$cfg->{'f_ascender'} = $results[2];
$cfg->{'f_descender'} = $results[3];
$cfg->{'f_max_advance'} = $results[6];
## forced padding -- required since upgrade to version ImageMagick 6.5.3-3 2009-07-03 Q16
if ($iniref->{'padding_left'}==0) { $iniref->{'padding_left'} = 1; }
if ($iniref->{'padding_right'}==0) { $iniref->{'padding_right'} = 1; }
### derive the image dimensions from the measured text when not fixed
if ($iniref->{'get_width'}) {
$cfg->{'width'} = (
$cfg->{'text_width'} +
$iniref->{'padding_left'} +
$iniref->{'padding_right'} +
($iniref->{'border_x'} * 2)
);
}
if ($iniref->{'get_height'}) {
$cfg->{'height'} = (
$cfg->{'text_height'} +
$iniref->{'padding_top'} +
$iniref->{'padding_bottom'} +
($iniref->{'border_y'} * 2)
);
}
push @{$out}, [$cfg,\@widths,\@heights,\@lines];
}
# use Data::Dumper; print STDERR Dumper($out);
return ($out);
}
1;
| CommerceRack/backend | lib/NAVBUTTON.pm | Perl | mit | 5,935 |
#!/usr/bin/env perl
use bytes;
#=====================================================================================
# CrossEntropy.perl
#                                                          by Shinsuke Mori
#                                                          Last change 24 June 2014
#=====================================================================================
# Purpose : compute the cross entropy of a test corpus under a word (surface
#           form) 2-gram language model, with a character model for unknown words.
#           (The original header comments were EUC-JP and arrived garbled; the
#           descriptions here are reconstructed from the code — verify.)
#
# Usage   : CrossEntropy.perl STEP [TEST]
#
# Example : CrossEntropy.perl 0 ../../corpus/TRL10.morp
#
# Input   : each (filestem).morp file is expected to consist of "surface/POS ..."
#           tokens; training apparently samples sentences at a step of 4**ARGV[0]
#           (NOTE(review): reconstructed from garbled comments — confirm).
#-------------------------------------------------------------------------------------
# require
#-------------------------------------------------------------------------------------
use Env;                 # imports environment variables such as $HOME and $HOST
use English;             # readable names for punctuation variables (e.g. $PID)
use File::Basename;
unshift(@INC, "$HOME/usr/lib/perl", "$HOME/SLM/lib/perl");
require "Help.pm"; # In $HOME/usr/lib/perl
require "class/IntStr.pm";          # string <-> integer vocabulary mapping
require "class/MarkovHashMemo.pm";  # in-memory Markov model
require "class/MarkovHashDisk.pm";  # on-disk Markov model (disabled below)
require "class/MarkovDiadMemo.pm";
#-------------------------------------------------------------------------------------
# check arguments
#-------------------------------------------------------------------------------------
(((@ARGV == 1) || (@ARGV == 2)) && ($ARGV[0] ne "-help")) || &Help($0);
print STDERR join(" ", basename($0), @ARGV), "\n";
print STDERR join(":", $HOST, $PID), "\n";
$STEP = 4**shift;        # training-sentence sampling step = 4**ARGV[0] (orig. comment garbled)
$TEST = (@ARGV) ? shift : undef;    # optional explicit test-corpus path
#-------------------------------------------------------------------------------------
# load shared variables and functions from do-files (presumably they define
# Line2Units, Line2Chars, $CTEMPL, @Kcross, $BT, $UT, &UWlogP, etc. — TODO confirm)
#-------------------------------------------------------------------------------------
use constant VRAI => 1;  # true  (French)
use constant FAUX => 0;  # false (French)
do "dofile/CrossEntropyBy.perl";
do "dofile/CrossEntropyByWord.perl";
#-------------------------------------------------------------------------------------
# shared variables
#-------------------------------------------------------------------------------------
$MO = 1;                 # presumably the Markov model order (bigram) — orig. comment garbled
@WordMarkovTest = (&Line2Units($WordMarkovTest))[0 .. $MO];
@CharMarkovTest = (split(" ", $CharMarkovTest))[0 .. $MO];
#-------------------------------------------------------------------------------------
# build $WordIntStr (the word vocabulary)
#-------------------------------------------------------------------------------------
(-e ($FILE = "WordIntStr.text")) ||                       # reuse the file if present,
&WordIntStr($FILE, map(sprintf($CTEMPL, $_), @Kcross));   # otherwise build it
$WordIntStr = new IntStr($FILE);
#goto CharMarkov;
#-------------------------------------------------------------------------------------
# smoothing (lambda) coefficients for the word Markov model — orig. comment garbled
#-------------------------------------------------------------------------------------
$LAMBDA = "WordLambda";                        # coefficient file
(-r $LAMBDA) || &CalcWordLambda($MO, $LAMBDA); # compute coefficients if file is missing
@LforWord = &ReadLambda($LAMBDA);
#exit(0);
#-------------------------------------------------------------------------------------
# build $WordMarkov (word 2-gram model); cached on disk after the first run
#-------------------------------------------------------------------------------------
if (-e (($FILE = "WordMarkov") . $MarkovHash::SUFFIX)){
$WordMarkov = new MarkovHashMemo($WordIntStr->size, $FILE);
# $WordMarkov = new MarkovHashDisk($WordIntStr->size, $FILE);
}else{
$WordMarkov = new MarkovHashMemo($WordIntStr->size);
&WordMarkov($WordMarkov, map(sprintf($CTEMPL, $_), @Kcross));
# $DIRE = "/dev/shm";     # build on a RAM disk first (disabled)
# $WordMarkov->put("$DIRE/$FILE");
$WordMarkov->put($FILE);
# system("/bin/mv $DIRE/$FILE.db .");
}
$WordMarkov->test($WordIntStr, @WordMarkovTest);
warn "\n";
#-------------------------------------------------------------------------------------
# build $CharIntStr (the character vocabulary)
#-------------------------------------------------------------------------------------
CharMarkov:
(-e ($FILE = "CharIntStr.text")) ||                       # reuse the file if present,
&CharIntStr($FILE, map(sprintf($CTEMPL, $_), @Kcross));   # otherwise build it
$CharIntStr = new IntStr($FILE);
$CharUT = $CharAlphabetSize-($CharIntStr->size-2);# apparently the number of unseen characters — orig. comment garbled
#-------------------------------------------------------------------------------------
# smoothing (lambda) coefficients for the character Markov model
#-------------------------------------------------------------------------------------
$LAMBDA = "CharLambda";                      # coefficient file
(-r $LAMBDA) || &CalcCharLambda(1, $LAMBDA); # compute coefficients if file is missing
@LforChar = &ReadLambda($LAMBDA);
#-------------------------------------------------------------------------------------
# build $CharMarkov (character 2-gram model); cached on disk after the first run
#-------------------------------------------------------------------------------------
if (-e (($FILE = "CharMarkov") . $MarkovHash::SUFFIX)){
$CharMarkov = new MarkovHashMemo($CharIntStr->size, $FILE);
}else{
$CharMarkov = new MarkovHashMemo($CharIntStr->size);
&CharMarkov($CharMarkov, map(sprintf($CTEMPL, $_), @Kcross));
$CharMarkov->put($FILE);
}
$CharMarkov->test($CharIntStr, @CharMarkovTest);
warn "\n";
#-------------------------------------------------------------------------------------
# build @ExDict (external dictionary) — currently DISABLED: the goto below jumps
# straight to the NoExDict label, so this whole section is dead code.
#-------------------------------------------------------------------------------------
goto NoExDict;
$EXDICT = "ExDict.text";
@ExDict = ();
open(EXDICT) || die "Can't open $EXDICT: $!\n";
while (chop($word = <EXDICT>)){
next if ($WordIntStr->int($word) ne $WordIntStr->int($UT));  # keep only words mapping to the unknown token
push(@ExDict, $word);                          # collect dictionary entries
}
close(EXDICT);
#----------------------- total probability mass of the unknown-word model ------------
$prob = 0;
foreach $word ($WordIntStr->strs){
$prob += exp(-&UWlogP($word));                 # accumulate unknown-word model probability
# printf("STDERR %s %6.4f\n", $word, &UWlogP($word)); # for debug
}
#----------------------- build %ExDict ----------------------------------------------
%ExDict;                                       # NOTE(review): no-op statement (hash in void context)
$prob /= scalar(@ExDict);                      # spread the mass evenly over the entries
#printf("STDERR prob = %20.18f\n", $prob); # for debug
foreach $word (@ExDict){
$logP = -log(exp(-&UWlogP($word))+$prob);      # negative log probability for this entry
# printf(STDERR "%20s %6.3f\n", $word, $logP); # for debug
$EXDICT{$word} = $logP;
}
NoExDict:
#-------------------------------------------------------------------------------------
# cross-entropy computation over the test corpus
#-------------------------------------------------------------------------------------
$FLAG = VRAI;                                  # per-sentence reporting on ...
$FLAG = FAUX;                                  # ... immediately overridden: reporting off
$CORPUS = $TEST ? $TEST : sprintf($CTEMPL, 10);# test-corpus path (default: corpus part 10)
open(CORPUS) || die "Can't open $CORPUS: $!\n";
warn "Reading $CORPUS\n";
# Accumulators: $logP total -log prob, $UMlogP unknown-word portion,
# $Cnum/$Wnum total character/word counts.
for ($logP = $UMlogP = 0, $Cnum = $Wnum = 0; <CORPUS>; ){
# print;
# @word = split(/[ \-]/);
# $_ = join(" ", @word), "\n";
$cnum = scalar(&Line2Chars($_))+1;             # characters to predict (incl. boundary symbol)
$wnum = scalar(&Line2Units($_))+1;             # words to predict (incl. boundary symbol)
$logp = $UMlogp = 0;
my(@stat) = ($WordIntStr->int($BT)) x $MO;     # history initialised with boundary symbols
foreach $word (&Line2Units($_), $BT){          # each word, then the boundary symbol
    push(@stat, $WordIntStr->int($word));
# printf(STDERR "f(%s) = %d\n", $word, $WordMarkov->_1gram($word));
    $logp += -log($WordMarkov->prob(@stat, @LforWord));
    if ($stat[1] == $WordIntStr->int($UT)){    # current word is unknown:
        $temp = defined($EXDICT{$word}) ? $EXDICT{$word} : &UWlogP($word);
        $UMlogp += $temp;                      # charge the unknown-word (character) model
        $logp += $temp;
    }
    shift(@stat);                              # slide the history window
}
# Optional per-sentence report (disabled via $FLAG); format strings are
# original EUC-JP text preserved byte-for-byte.
$FLAG && printf(STDERR "%s", $_);
$FLAG && printf(STDERR " ʸ���� = %d, H = %8.6f\n", $cnum, $logp/$cnum/log(2));
$FLAG && printf(STDERR " �� = %d, PP = %8.6f\n\n", $wnum, exp($logp/$wnum));
$Cnum += $cnum;
$Wnum += $wnum;
$logP += $logp;
$UMlogP += $UMlogp;
}
close(CORPUS);
# Totals: H is per-character entropy in bits, PP is per-word perplexity.
printf(STDERR "ʸ���� = %d, H = %8.6f ", $Cnum, $logP/$Cnum/log(2));
printf(STDERR "(̤�θ���ɽ��ͽ¬: %8.6f)\n", $UMlogP/$Cnum/log(2));
printf(STDERR "�� = %d, PP = %8.6f\n", $Wnum, exp($logP/$Wnum));
#-------------------------------------------------------------------------------------
# close
#-------------------------------------------------------------------------------------
exit(0);
#=====================================================================================
# END
#=====================================================================================
| tkd53/KKConv | dictionary/Word-2/bin/CrossEntropy.perl | Perl | mit | 10,196 |
#!/usr/bin/perl -w
use strict;
use Getopt::Long;
use FindBin qw($Bin $Script);
use File::Basename qw(basename dirname);
use File::Path qw(make_path);
use Data::Dumper;
use Cwd qw(abs_path);
&usage if @ARGV<1;
# Emit the command-line help text on STDOUT, then abort with exit status 1.
sub usage {
    print << "USAGE";
Create reference database for LncFunNet analysis.
Author: zhoujj2013\@gmail.com, Thu Apr 13 15:09:25 HKT 2017
Usage: $0 <mm9|mm10|hg19|hg38> <outdir> [novel.lncrna.gtf] [new_db_name]
# build db with integrating novel lncRNAs
Example: perl $0 mm9 ./ novel_lncrna.gtf newdb > log 2>err
# build db without integrating novel lncRNAs(build the NCBI refseq)
Example: perl $0 mm9 ./ > log 2>err
USAGE
    exit(1);
}
# lncfuntk reference-database builder — dispatch on argument count:
#   2 args : <species> <outdir>                       -> build the reference db only
#   4 args : <species> <outdir> <novel.gtf> <new_db>  -> also integrate novel lncRNAs
# Fix over the original: the exit status of every child process invoked via
# backticks is now checked ($?), so a failing sub-script aborts the build
# instead of silently producing an incomplete database.
my $software_version = "lncfuntk";
my $datestring = localtime();
print STDERR "$datestring\n";    # log start time
if(scalar(@ARGV) == 2){
	my $spe = shift;
	my $outdir = shift;
	$outdir = abs_path($outdir);
	mkdir "$outdir" unless(-e "$outdir");
	#mkdir "$outdir/$spe" unless(-e "$outdir/$spe");
	print STDERR "Collect http/ftp addresses for each dataset: start\n";
	`perl $Bin/get_config_setting.pl $outdir/$spe.config.txt`;
	$? == 0 or die "ERROR: get_config_setting.pl failed with exit code ".($? >> 8).".\n";
	print STDERR "$outdir/$spe.config.txt\n";
	print STDERR "Collect http/ftp addresses for each dataset: done\n";
	print STDERR "Prepare dataset for $software_version analysis ($spe): start\n";
	`perl $Bin/build_db.pl $spe $outdir`;
	$? == 0 or die "ERROR: build_db.pl failed with exit code ".($? >> 8).".\n";
	print STDERR "Prepare dataset for $software_version analysis ($spe): finished\n";
}elsif(scalar(@ARGV) == 4){
	my $spe = shift;
	my $outdir = shift;
	$outdir = abs_path($outdir);
	mkdir "$outdir" unless(-e "$outdir");
	#mkdir "$outdir/$spe" unless(-e "$outdir/$spe");
	my $gtf_f = shift;
	$gtf_f = abs_path($gtf_f);
	my $new_db_name = shift;
	print STDERR "Prepare dataset for $software_version analysis ($spe with novo lncRNAs): start\n";
	# Refuse to overwrite an existing database directory.
	if(-d "$outdir/$new_db_name"){
		die "ERROR: $new_db_name exists. Please enter a new db name or remove the existing database $outdir/$new_db_name.\n";
	}
	# Build the base reference db first if it is not present yet.
	unless(-d "$outdir/$spe"){
		print STDERR "Dataset for $spe does not exist. Preparing database for $spe.\n";
		print STDERR "Collect http/ftp addresses for each dataset: start\n";
		`perl $Bin/get_config_setting.pl $outdir/$spe.config.txt`;
		$? == 0 or die "ERROR: get_config_setting.pl failed with exit code ".($? >> 8).".\n";
		print STDERR "$outdir/$spe.config.txt\n";
		print STDERR "Collect http/ftp addresses for each dataset: done\n";
		`perl $Bin/build_db.pl $spe $outdir`;
		$? == 0 or die "ERROR: build_db.pl failed with exit code ".($? >> 8).".\n";
	}
	# Integrate the novel lncRNA annotation into a new database.
	# Usage: ../bin/BuildDb/build_db_with_novel_lncrna.pl spe reference_db_dir novel_lncrna.gtf new_db_name outdir
	`perl $Bin/build_db_with_novel_lncrna.pl $spe $outdir/$spe $gtf_f $new_db_name $outdir > newdb.log 2>newdb.err`;
	$? == 0 or die "ERROR: build_db_with_novel_lncrna.pl failed with exit code ".($? >> 8)." (see newdb.err).\n";
	print STDERR "Prepare dataset for $software_version analysis ($spe with novo lncRNAs): finished\n";
}else{
	&usage;
}
$datestring = localtime();
print STDERR "$datestring\n";    # log end time
| zhoujj2013/lncfuntk | bin/BuildDb/BuildDb.pl | Perl | mit | 2,879 |
=head1 NAME
Mail::Box::Dbx::Message - one message in a Dbx folder
=head1 INHERITANCE
Mail::Box::Dbx::Message
is a Mail::Box::File::Message
is a Mail::Box::Message
is a Mail::Message
is a Mail::Reporter
=head1 SYNOPSIS
my $folder = new Mail::Box::Dbx ...
my $message = $folder->message(10);
=head1 DESCRIPTION
=head1 METHODS
=head2 Constructors
$obj-E<gt>B<clone>(OPTIONS)
=over 4
See L<Mail::Message/"Constructors">
=back
Mail::Box::Dbx::Message-E<gt>B<new>(OPTIONS)
=over 4
Option --Defined in --Default
account_name <from dbx_record>
account_nr <from dbx_record>
body Mail::Message undef
body_type Mail::Box::Message <from folder>
dbx_record <required>
deleted Mail::Message <false>
field_type Mail::Message undef
folder Mail::Box::Message <required>
from_line Mail::Box::File::Message undef
head Mail::Message undef
head_type Mail::Message Mail::Message::Head::Complete
labels Mail::Message {}
log Mail::Reporter 'WARNINGS'
messageId Mail::Message undef
modified Mail::Message <false>
seen <from dbx_record>
size Mail::Box::Message undef
trace Mail::Reporter 'WARNINGS'
trusted Mail::Message <false>
. account_name => STRING
=over 4
The string representation of the account which was used to retrieve the
message.
=back
. account_nr => INTEGER
=over 4
The numeric representation of the account which was used to retrieve
the message.
=back
. body => OBJECT
. body_type => CODE|CLASS
. dbx_record => C<Mail::Transport::Dbx::Email>
. deleted => BOOLEAN
. field_type => CLASS
. folder => FOLDER
. from_line => STRING
. head => OBJECT
. head_type => CLASS
. labels => ARRAY|HASH
. log => LEVEL
. messageId => STRING
. modified => BOOLEAN
. seen => BOOLEAN
=over 4
A flag (see L<label()|Mail::Message/"Flags">) which tells whether this message has been read
by the user. If read, the message is I<old>, which is the same as
I<seen>. Folders store this flag in different ways.
=back
. size => INTEGER
. trace => LEVEL
. trusted => BOOLEAN
=back
=head2 Constructing a message
$obj-E<gt>B<bounce>([RG-OBJECT|OPTIONS])
=over 4
See L<Mail::Message::Construct::Bounce/"Constructing a message">
=back
Mail::Box::Dbx::Message-E<gt>B<build>([MESSAGE|PART|BODY], CONTENT)
=over 4
See L<Mail::Message::Construct::Build/"Constructing a message">
=back
Mail::Box::Dbx::Message-E<gt>B<buildFromBody>(BODY, [HEAD], HEADERS)
=over 4
See L<Mail::Message::Construct::Build/"Constructing a message">
=back
$obj-E<gt>B<forward>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardAttach>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardEncapsulate>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardInline>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardNo>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardPostlude>
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardPrelude>
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
$obj-E<gt>B<forwardSubject>(STRING)
=over 4
See L<Mail::Message::Construct::Forward/"Constructing a message">
=back
Mail::Box::Dbx::Message-E<gt>B<read>(FILEHANDLE|SCALAR|REF-SCALAR|ARRAY-OF-LINES, OPTIONS)
=over 4
See L<Mail::Message::Construct::Read/"Constructing a message">
=back
$obj-E<gt>B<rebuild>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Rebuild/"Constructing a message">
=back
$obj-E<gt>B<reply>(OPTIONS)
=over 4
See L<Mail::Message::Construct::Reply/"Constructing a message">
=back
$obj-E<gt>B<replyPrelude>([STRING|FIELD|ADDRESS|ARRAY-OF-THINGS])
=over 4
See L<Mail::Message::Construct::Reply/"Constructing a message">
=back
$obj-E<gt>B<replySubject>(STRING)
Mail::Box::Dbx::Message-E<gt>B<replySubject>(STRING)
=over 4
See L<Mail::Message::Construct::Reply/"Constructing a message">
=back
=head2 The message
$obj-E<gt>B<container>
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<copyTo>(FOLDER, OPTIONS)
=over 4
See L<Mail::Box::Message/"The message">
=back
$obj-E<gt>B<dbxRecord>
=over 4
Returns the Mail::Transport::Dbx::Email record of the message.
=back
$obj-E<gt>B<escapedBody>
=over 4
See L<Mail::Box::File::Message/"The message">
=back
$obj-E<gt>B<folder>([FOLDER])
=over 4
See L<Mail::Box::Message/"The message">
=back
$obj-E<gt>B<fromLine>([LINE])
=over 4
See L<Mail::Box::File::Message/"The message">
=back
$obj-E<gt>B<isDummy>
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<isPart>
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<messageId>
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<moveTo>(FOLDER, OPTIONS)
=over 4
See L<Mail::Box::Message/"The message">
=back
$obj-E<gt>B<print>([FILEHANDLE])
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<send>([MAILER], OPTIONS)
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<seqnr>([INTEGER])
=over 4
See L<Mail::Box::Message/"The message">
=back
$obj-E<gt>B<size>
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<toplevel>
=over 4
See L<Mail::Message/"The message">
=back
$obj-E<gt>B<write>([FILEHANDLE])
=over 4
See L<Mail::Box::File::Message/"METHODS">
=back
=head2 The header
$obj-E<gt>B<bcc>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<cc>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<date>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<destinations>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<from>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<get>(FIELDNAME)
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<guessTimestamp>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<head>([HEAD])
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<nrLines>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<sender>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<study>(FIELDNAME)
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<subject>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<timestamp>
=over 4
See L<Mail::Message/"The header">
=back
$obj-E<gt>B<to>
=over 4
See L<Mail::Message/"The header">
=back
=head2 The body
$obj-E<gt>B<body>([BODY])
=over 4
See L<Mail::Message/"The body">
=back
$obj-E<gt>B<contentType>
=over 4
See L<Mail::Message/"The body">
=back
$obj-E<gt>B<decoded>(OPTIONS)
=over 4
See L<Mail::Message/"The body">
=back
$obj-E<gt>B<encode>(OPTIONS)
=over 4
See L<Mail::Message/"The body">
=back
$obj-E<gt>B<isMultipart>
=over 4
See L<Mail::Message/"The body">
=back
$obj-E<gt>B<isNested>
=over 4
See L<Mail::Message/"The body">
=back
$obj-E<gt>B<parts>(['ALL'|'ACTIVE'|'DELETED'|'RECURSE'|FILTER])
=over 4
See L<Mail::Message/"The body">
=back
=head2 Flags
$obj-E<gt>B<delete>
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<deleted>([BOOLEAN])
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<isDeleted>
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<isModified>
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<label>(LABEL|PAIRS)
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<labels>
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<labelsToStatus>
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<modified>([BOOLEAN])
=over 4
See L<Mail::Message/"Flags">
=back
$obj-E<gt>B<statusToLabels>
=over 4
See L<Mail::Message/"Flags">
=back
=head2 The whole message as text
$obj-E<gt>B<file>
=over 4
See L<Mail::Message::Construct::Text/"The whole message as text">
=back
$obj-E<gt>B<lines>
=over 4
See L<Mail::Message::Construct::Text/"The whole message as text">
=back
$obj-E<gt>B<printStructure>([FILEHANDLE|undef],[INDENT])
=over 4
See L<Mail::Message::Construct::Text/"The whole message as text">
=back
$obj-E<gt>B<string>
=over 4
See L<Mail::Message::Construct::Text/"The whole message as text">
=back
=head2 Internals
$obj-E<gt>B<clonedFrom>
=over 4
See L<Mail::Message/"Internals">
=back
Mail::Box::Dbx::Message-E<gt>B<coerce>(MESSAGE, OPTIONS)
=over 4
See L<Mail::Message/"Internals">
=back
$obj-E<gt>B<diskDelete>
=over 4
See L<Mail::Box::Message/"Internals">
=back
$obj-E<gt>B<fileLocation>
=over 4
See L<Mail::Box::File::Message/"Internals">
=back
$obj-E<gt>B<isDelayed>
=over 4
See L<Mail::Message/"Internals">
=back
$obj-E<gt>B<loadBody>
=over 4
See L<Mail::Box::File::Message/"Internals">
=back
$obj-E<gt>B<moveLocation>(DISTANCE)
=over 4
See L<Mail::Box::File::Message/"Internals">
=back
$obj-E<gt>B<readBody>(PARSER, HEAD [, BODYTYPE])
=over 4
See L<Mail::Box::Message/"Internals">
=back
$obj-E<gt>B<readFromParser>(PARSER)
=over 4
See L<Mail::Box::File::Message/"Internals">
=back
$obj-E<gt>B<readHead>(PARSER [,CLASS])
=over 4
See L<Mail::Message/"Internals">
=back
$obj-E<gt>B<recursiveRebuildPart>(PART, OPTIONS)
=over 4
See L<Mail::Message::Construct::Rebuild/"Internals">
=back
$obj-E<gt>B<storeBody>(BODY)
=over 4
See L<Mail::Message/"Internals">
=back
$obj-E<gt>B<takeMessageId>([STRING])
=over 4
See L<Mail::Message/"Internals">
=back
=head2 Error handling
$obj-E<gt>B<AUTOLOAD>
=over 4
See L<Mail::Message::Construct/"METHODS">
=back
$obj-E<gt>B<addReport>(OBJECT)
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<defaultTrace>([LEVEL]|[LOGLEVEL, TRACELEVEL]|[LEVEL, CALLBACK])
Mail::Box::Dbx::Message-E<gt>B<defaultTrace>([LEVEL]|[LOGLEVEL, TRACELEVEL]|[LEVEL, CALLBACK])
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<errors>
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<log>([LEVEL [,STRINGS]])
Mail::Box::Dbx::Message-E<gt>B<log>([LEVEL [,STRINGS]])
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<logPriority>(LEVEL)
Mail::Box::Dbx::Message-E<gt>B<logPriority>(LEVEL)
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<logSettings>
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<notImplemented>
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<report>([LEVEL])
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<reportAll>([LEVEL])
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<shortSize>([VALUE])
Mail::Box::Dbx::Message-E<gt>B<shortSize>([VALUE])
=over 4
See L<Mail::Message/"Error handling">
=back
$obj-E<gt>B<shortString>
=over 4
See L<Mail::Message/"Error handling">
=back
$obj-E<gt>B<trace>([LEVEL])
=over 4
See L<Mail::Reporter/"Error handling">
=back
$obj-E<gt>B<warnings>
=over 4
See L<Mail::Reporter/"Error handling">
=back
=head2 Cleanup
$obj-E<gt>B<DESTROY>
=over 4
See L<Mail::Message/"Cleanup">
=back
$obj-E<gt>B<destruct>
=over 4
See L<Mail::Box::Message/"Cleanup">
=back
$obj-E<gt>B<inGlobalDestruction>
=over 4
See L<Mail::Reporter/"Cleanup">
=back
=head2 The flags
$obj-E<gt>B<accountName>
=over 4
Returns the Outlook Express account name which was used to retrieve
this message, represented as a string. The L<accountNr()|Mail::Box::Dbx::Message/"The flags"> returns
a numerical representation of the same fact.
=back
$obj-E<gt>B<accountNr>
=over 4
Returns the Outlook Express account which was used to retrieve
this message, represented as a number. The L<accountName()|Mail::Box::Dbx::Message/"The flags"> returns
a string representation of the same fact.
=back
=head1 DETAILS
=head1 DIAGNOSTICS
Error: Cannot coerce a $class object into a $class object
=over 4
=back
Error: Cannot include forward source as $include.
=over 4
Unknown alternative for the L<forward(include)|Mail::Message::Construct::Forward/"Constructing a message">. Valid choices are
C<NO>, C<INLINE>, C<ATTACH>, and C<ENCAPSULATE>.
=back
Error: Cannot include reply source as $include.
=over 4
Unknown alternative for the C<include> option of L<reply()|Mail::Message::Construct::Reply/"Constructing a message">. Valid
choices are C<NO>, C<INLINE>, and C<ATTACH>.
=back
Error: Dbx record required to create the message.
=over 4
=back
Error: Method bounce requires To, Cc, or Bcc
=over 4
The message L<bounce()|Mail::Message::Construct::Bounce/"Constructing a message"> method forwards a received message off to someone
else without modification; you must specified it's new destination.
If you have the urge not to specify any destination, you probably
are looking for L<reply()|Mail::Message::Construct::Reply/"Constructing a message">. When you wish to modify the content, use
L<forward()|Mail::Message::Construct::Forward/"Constructing a message">.
=back
Error: Method forwardAttach requires a preamble
=over 4
=back
Error: Method forwardEncapsulate requires a preamble
=over 4
=back
Error: No address to create forwarded to.
=over 4
If a forward message is created, a destination address must be specified.
=back
Error: No default mailer found to send message.
=over 4
The message L<send()|Mail::Message/"The message"> mechanism had not enough information to automatically
find a mail transfer agent to sent this message. Specify a mailer
explicitly using the C<via> options.
=back
Error: No rebuild rule $name defined.
=over 4
=back
Error: Only build() Mail::Message's; they are not in a folder yet
=over 4
You may wish to construct a message to be stored in a some kind
of folder, but you need to do that in two steps. First, create a
normal L<Mail::Message|Mail::Message>, and then add it to the folder. During this
L<Mail::Box::addMessage()|Mail::Box/"The folder"> process, the message will get L<coerce()|Mail::Message/"Internals">-d
into the right message type, adding storage information and the like.
=back
Error: Package $package does not implement $method.
=over 4
Fatal error: the specific package (or one of its superclasses) does not
implement this method where it should. This message means that some other
related classes do implement this method however the class at hand does
not. Probably you should investigate this and probably inform the author
of the package.
=back
Error: Unable to read delayed body.
=over 4
=back
Error: coercion starts with some object
=over 4
=back
=head1 SEE ALSO
This module is part of Mail-Box distribution version 2.082,
built on April 28, 2008. Website: F<http://perl.overmeer.net/mailbox/>
=head1 LICENSE
Copyrights 2001-2008 by Mark Overmeer. For other contributors see ChangeLog.
This program is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
See F<http://www.perl.com/perl/misc/Artistic.html>
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Mail/Box/Dbx/Message.pod | Perl | mit | 15,213 |
package ParameterException;
# Exception class signalling that a method argument failed validation.
# It adds no behaviour of its own; everything is inherited from BaseException.
use strict;
use warnings;
$ParameterException::VERSION = "1.0";
use BaseException;
# NOTE(review): 'use parent' already loads BaseException, so the explicit
# 'use BaseException' above is redundant (but harmless).
use parent qw(BaseException);
1;
__END__
=begin markdown
# ParameterException
The ParameterException package defines the base parameter
error exception. It is used to signal that a method parameter
does not satisfy the assertions expected of it.
Child of [BaseException](./BaseException.md)
Use [strict](http://perldoc.perl.org/strict.html)
Use [warnings](http://perldoc.perl.org/warnings.html)
Version: 1.0
Date: 2016/04/09
Author: Matthieu vallance <matthieu.vallance@cscfa.fr>
Module: [configurationManager](../../configurationManager.md)
License: MIT
## Attributes
name | scope | type
---- | ----- | ----
message | public | text
code | public | integer
previous | public | Exception
## Methods
#### New
Base exception default constructor
**param:** Give arguments into a Hash object
* text 'message' the message of the exception
* integer 'code' the exception code
* exception 'previous' the previous exception
**return:** Exception
#### Get previous
This method return the object previous exception
**return:** Exception
#### Set previous
This method allow to update the object previous exception
**param:** Exception previous The previous exception of the current object
**return:** Exception
#### Get code
This method return the object code status
**return:** text
#### Set code
This method allow to update the object code status
**param:** integer code The object code status to set
**return:** Exception
#### Get message
This method return the object message value
**return:** text
#### Set message
This method allow to update the object message value
**param:** text message The message text to set
**return:** Exception
=end markdown
| cscfa/perl-module-configurationManager | configurationManagerLib/Exception/ParameterException.pm | Perl | mit | 1,813 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Object::Search;
# Thin web "object" backing the site search pages; it only supplies captions
# and the default action — the framework base class does the rest.
use strict;
use base qw(EnsEMBL::Web::Object);
# Default action (sub-page) shown for this object type.
sub default_action { return 'New'; }
# Tab/menu caption: the second argument is the caption context; 'global' yields
# a generic "New Search", anything else names the site type (e.g. "Search Ensembl").
sub short_caption { my $sitetype = $_[0]->species_defs->ENSEMBL_SITETYPE || 'Ensembl'; return $_[1] eq 'global' ? 'New Search' : "Search $sitetype"; }
# Page caption: on the Results action, the query string truncated to 20
# characters (suffixed with '...'); otherwise a false value (no caption).
sub caption { my $q = $_[0]->hub->action eq 'Results' && $_[0]->hub->param('q'); return $q && sprintf '%s%s', substr($q, 0, 20), length $q > 20 ? '...' : ''; }
# Intentionally a no-op: search objects expose no entity counts.
sub counts {}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Object/Search.pm | Perl | apache-2.0 | 1,181 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::LocationViewService::GetLocationViewRequest;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructs a GetLocationViewRequest from a hash-ref of arguments.
# Only the 'resourceName' field is supported; fields that were never assigned
# are stripped so the serialized JSON payload stays concise.
sub new {
  my ($class, $params) = @_;

  my $self = {
    resourceName => $params->{resourceName},
  };

  # Remove any field that was never assigned a value.
  remove_unassigned_fields($self, $params);

  return bless $self, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/LocationViewService/GetLocationViewRequest.pm | Perl | apache-2.0 | 1,057 |
## OpenXPKI::Server::Authentication.pm
##
## Written 2003 by Michael Bell
## Rewritten 2005 and 2006 by Michael Bell for the OpenXPKI project
## adapted to new Service::Default semantics 2007 by Alexander Klink
## for the OpenXPKI project
## (C) Copyright 2003-2007 by The OpenXPKI Project
package OpenXPKI::Server::Authentication;
use strict;
use warnings;
use utf8;
use English;
use OpenXPKI::Debug;
use Data::Dumper;
use OpenXPKI::Exception;
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Server::Authentication::Anonymous;
use OpenXPKI::Server::Authentication::External;
use OpenXPKI::Server::Authentication::Password;
use OpenXPKI::Server::Authentication::ChallengeX509;
use OpenXPKI::Server::Authentication::ClientSSO;
use OpenXPKI::Server::Authentication::ClientX509;
use OpenXPKI::Server::Authentication::Connector;
## constructor and destructor stuff
# Constructor. Builds the authentication layer object and eagerly loads
# the complete stack/handler configuration for every PKI realm.
sub new {
    ##! 1: "start"
    my $proto = shift;
    my $args  = shift;

    my $self = bless {}, ref($proto) || $proto;
    $self->__load_config($args);

    ##! 1: "end"
    return $self;
}
#############################################################################
## load the configuration ##
## (caching support) ##
#############################################################################
# Load the authentication configuration for every PKI realm known to the
# system configuration. Called once from new(); returns 1 on success.
sub __load_config {
    ##! 4: "start"
    my ($self, $keys) = @_;

    ##! 8: "load all PKI realms"
    for my $realm (CTX('config')->get_keys('system.realms')) {
        $self->__load_pki_realm({ PKI_REALM => $realm });
    }

    ##! 4: "leaving function successfully"
    return 1;
}
# Load stack and handler configuration for a single PKI realm (named
# parameter PKI_REALM). Temporarily switches the session to that realm
# so the config layer resolves realm-relative paths, then restores the
# caller's realm before returning.
sub __load_pki_realm
{
    ##! 4: "start"
    my $self = shift;
    my $keys = shift;
    my $realm = $keys->{PKI_REALM};
    my $config = CTX('config');
    # Remember the caller's realm so it can be restored at the end.
    my $restore_realm = CTX('session')->get_pki_realm();
    # Fake Session for Config!
    CTX('session')->set_pki_realm( $realm );
    # Instantiate every configured authentication handler for this realm.
    my @handlers = $config->get_keys('auth.handler');
    foreach my $handler (@handlers) {
        $self->__load_handler ({
            HANDLER => $handler
        });
    }
    # Record description, label and the ordered handler list per stack.
    my @stacks = $config->get_keys('auth.stack');
    foreach my $stack (@stacks) {
        $self->{PKI_REALM}->{$realm}->{STACK}->{$stack}->{DESCRIPTION} =
            $config->get("auth.stack.$stack.description");
        # Fall back to the stack name when no label is configured.
        $self->{PKI_REALM}->{$realm}->{STACK}->{$stack}->{LABEL} =
            $config->get("auth.stack.$stack.label") || $stack;
        ##! 8: "determine all used handlers"
        my @supported_handler = $config->get_scalar_as_list("auth.stack.$stack.handler");
        ##! 32: " supported_handler " . Dumper @supported_handler
        $self->{PKI_REALM}->{$realm}->{STACK}->{$stack}->{HANDLER} = \@supported_handler;
    }
    ##! 64: "Realm auth config " . Dumper $self->{PKI_REALM}->{$realm}
    CTX('session')->set_pki_realm( $restore_realm );
    ##! 4: "end"
    return 1;
}
# Instantiate one authentication handler (named parameter HANDLER) and
# register it under the current session's realm. The implementation
# class is derived from the handler's "type" config entry and must live
# under OpenXPKI::Server::Authentication::. Throws an OpenXPKI::Exception
# (LOAD_HANDLER_FAILED / LOAD_HANDLER_CRASHED) when construction fails.
sub __load_handler
{
    ##! 4: "start"
    my $self = shift;
    my $keys = shift;
    my $handler = $keys->{HANDLER};
    my $realm = CTX('session')->get_pki_realm();
    my $config = CTX('config');
    ##! 8: "load handler type"
    my $type = $config->get("auth.handler.$handler.type");
    ##! 8: "name ::= $handler"
    ##! 8: "type ::= $type"
    my $class = "OpenXPKI::Server::Authentication::$type";
    $self->{PKI_REALM}->{$realm}->{HANDLER}->{$handler} = eval {
        $class->new( "auth.handler.$handler" );
    };
    if (my $exc = OpenXPKI::Exception->caught())
    {
        ##! 16: "exception from authentication sub module $class detected"
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_LOAD_HANDLER_FAILED",
            children => [ $exc ]);
    }
    elsif ($EVAL_ERROR)
    {
        # BUGFIX: $EVAL_ERROR may be a plain string (e.g. from a die()
        # inside the handler constructor or a failed class load); calling
        # ->message() on a string would crash here and mask the original
        # error. Only call ->message() on objects that support it.
        my $errval = (ref $EVAL_ERROR && eval { $EVAL_ERROR->can('message') })
            ? $EVAL_ERROR->message()
            : "$EVAL_ERROR";
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_LOAD_HANDLER_CRASHED",
            params => {ERRVAL => $errval});
    }
    ##! 4: "end"
    return 1;
}
########################################################################
## identify the user ##
########################################################################
# Return a hashref describing every authentication stack configured for
# the caller's PKI realm:
#   { stack_name => { NAME => ..., DESCRIPTION => ..., LABEL => ... } }
sub list_authentication_stacks {
    my $self = shift;
    ##! 1: "start"
    ##! 2: "get PKI realm"
    my $realm = CTX('session')->get_pki_realm();
    ##! 2: "get authentication stack"
    my $configured = $self->{PKI_REALM}->{$realm}->{STACK};
    my %stacks;
    for my $name (sort keys %{$configured}) {
        $stacks{$name} = {
            NAME        => $name,
            DESCRIPTION => $configured->{$name}->{DESCRIPTION},
            LABEL       => $configured->{$name}->{LABEL},
        };
    }
    ##! 1: 'end'
    return \%stacks;
}
# Perform one step of the (possibly multi-step) login dialogue for the
# given authentication stack. Named parameters (in $arg_ref):
#   STACK   - name of the authentication stack to use
#   MESSAGE - the message received from the service
# Returns the triple (user, role, reply). Authentication is complete
# only once both user and role are defined; until then the reply must
# be sent back to the client and this method called again.
sub login_step {
    my $self = shift;
    my $arg_ref = shift;
    my $msg = $arg_ref->{MESSAGE};
    my $stack = $arg_ref->{STACK};
    my $realm = CTX('session')->get_pki_realm();
    ##! 16: 'realm: ' . $realm
    ##! 16: 'stack: ' . $stack
    # Refuse stacks that are unknown or have an empty handler list.
    if (! exists $self->{PKI_REALM}->{$realm}->{STACK}->{$stack} ||
        ! scalar @{$self->{PKI_REALM}->{$realm}->{STACK}->{$stack}->{HANDLER}}) {
        OpenXPKI::Exception->throw(
            message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_LOGIN_INVALID_STACK",
            params => {
                STACK => $stack
            },
            log => {
                logger => CTX('log'),
                priority => 'warn',
                facility => 'auth'
            },
        );
    }
    ##! 2: "try the different available handlers for the stack $stack"
    # Try each handler in configured order; the first one whose
    # login_step() does not die wins and ends the loop.
    my $ok = 0;
    my $user;
    my $role;
    my $return_msg = {};
  HANDLER:
    foreach my $handler (@{$self->{PKI_REALM}->{$realm}->{STACK}->{$stack}->{HANDLER}}) {
        ##! 4: "handler $handler from stack $stack"
        my $ref = $self->{PKI_REALM}->{$realm}->{HANDLER}->{$handler};
        # A non-reference entry means __load_handler never produced an
        # object for this handler in this realm - a config error.
        if (! ref $ref) { # note the great choice of variable name ...
            OpenXPKI::Exception->throw (
                message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_INCORRECT_HANDLER",
                params => {
                    PKI_REALM => $realm,
                    HANDLER => $handler,
                },
                log => {
                    logger => CTX('log'),
                    priority => 'error',
                    facility => 'auth',
                },
            );
        }
        eval {
            ($user, $role, $return_msg) = $ref->login_step({
                HANDLER => $handler,
                MESSAGE => $msg,
            });
        };
        if (! $EVAL_ERROR) {
            ##! 8: "login step ok"
            $ok = 1;
            ##! 8: "session configured"
            last HANDLER;
        } else {
            ##! 8: "EVAL_ERROR detected"
            ##! 64: '$EVAL_ERROR = ' . $EVAL_ERROR
        }
    }
    # Every handler died: wrap the last error in a LOGIN_FAILED exception.
    # NOTE(review): the else branch calls ->message() on $EVAL_ERROR,
    # which crashes if the last handler died with a plain string rather
    # than an exception object - verify before relying on this path.
    if (! $ok) {
        ##! 4: "show at minimum the last error message"
        if (my $exc = OpenXPKI::Exception->caught()) {
            OpenXPKI::Exception->throw (
                message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_LOGIN_FAILED",
                children => [ $exc ],
                log => {
                    logger => CTX('log'),
                    priority => 'warn',
                    facility => 'auth',
                },
            );
        }
        else {
            OpenXPKI::Exception->throw (
                message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_LOGIN_FAILED",
                children => [ $EVAL_ERROR->message() ],
                log => {
                    logger => CTX('log'),
                    priority => 'warn',
                    facility => 'auth',
                },
            );
        }
    }
    # Both user and role are known: authentication is complete.
    if (defined $user && defined $role) {
        CTX('log')->log(
            MESSAGE => "Login successful using authentication stack '$stack' (user: '$user', role: '$role')",
            PRIORITY => 'info',
            FACILITY => 'auth',
        );
        return ($user, $role, $return_msg);
    }
    # Not finished yet; the caller must continue the dialogue.
    return (undef, undef, $return_msg);
};
1;
__END__
=head1 Name
OpenXPKI::Server::Authentication
=head1 Description
This module is the top class of OpenXPKI's authentication
framework. Every authentication method is implemented in an
extra class but you only have to init this class and then
you have to call login if you need an authentication. The
XML configuration and session handling are done via the server's
global context.
=head1 Functions
=head2 new
is the constructor and accepts no parameters.
If you call new then the complete
configuration is loaded. This makes it possible to cache
this object and to use login when it is required in a very
fast way.
=head2 login_step
is the function which performs the authentication.
Named parameters are STACK (the authentication stack to use)
and MESSAGE (the message received by the service).
It returns a triple (user, role, reply). The authentication
is not finished until user and role are defined. Multiple
calls can then be made until this state is achieved.
Reply is the reply message that is to be sent to the user
(i.e. a challenge, or the 'SERVICE_READY' message in case
the authentication has been successful).
=head1 See Also
OpenXPKI::Server::Authentication::Anonymous
OpenXPKI::Server::Authentication::External
OpenXPKI::Server::Authentication::LDAP
OpenXPKI::Server::Authentication::Password
OpenXPKI::Server::Authentication::X509
| durko/openxpki | core/server/OpenXPKI/Server/Authentication.pm | Perl | apache-2.0 | 9,315 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::hitachi::hnas::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Constructor. Declares the plugin version and maps every supported
# mode name to the class implementing it.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    my %modes = (
        'cluster-status' => 'centreon::common::bluearc::snmp::mode::clusterstatus',
        'hardware' => 'centreon::common::bluearc::snmp::mode::hardware',
        'interfaces' => 'snmp_standard::mode::interfaces',
        'list-interfaces' => 'snmp_standard::mode::listinterfaces',
        'volume-usage' => 'centreon::common::bluearc::snmp::mode::volumeusage',
    );
    %{$self->{modes}} = %modes;

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Hitachi HNAS in SNMP.
=cut
| Sims24/centreon-plugins | storage/hitachi/hnas/snmp/plugin.pm | Perl | apache-2.0 | 1,666 |
#!/usr/bin/perl
# create_pnor_image.pl - drives buildpnor.pl to assemble a PNOR flash
# image from individually built partition binaries.
use strict;
use File::Basename;
my $program_name = File::Basename::basename $0;
# Command line options; all default to the empty string and are filled
# in by the argument parsing loop below.
my $outdir = "";
my $scratch_dir = "";
my $pnor_data_dir = "";
my $pnor_filename = "";
my $payload = "";
my $bootkernel = "";
my $hb_image_dir = "";
my $xml_layout_file = "";
my $targeting_binary_filename = "";
my $sbec_binary_filename = "";
my $sbe_binary_filename = "";
my $wink_binary_filename = "";
my $occ_binary_filename = "";
my $openpower_version_filename = "";
# Parse command line arguments. Each recognized "-name value" pair
# fills one of the configuration variables above; -h prints usage and
# exits 0; an unrecognized flag aborts with exit code 1. The value is
# consumed by the shift inside each branch, the flag by the shift at
# the bottom of the loop. Note the prefix-style matching: each pattern
# is unanchored at the end, so any flag *starting* with the given text
# matches.
while (@ARGV > 0){
    $_ = $ARGV[0];
    chomp($_);
    $_ = &trim_string($_);
    if (/^-h$/i || /^-help$/i || /^--help$/i){
        #print help content
        usage();
        exit 0;
    }
    elsif (/^-scratch_dir/i){
        $scratch_dir = $ARGV[1] or die "Bad command line arg given: expecting a scratch dir path.\n";
        shift;
    }
    elsif (/^-outdir/i){
        $outdir = $ARGV[1] or die "Bad command line arg given: expecting a directory for output data.\n";
        shift;
    }
    elsif (/^-pnor_data_dir/i){
        $pnor_data_dir = $ARGV[1] or die "Bad command line arg given: expecting a directory containing pnor data.\n";
        shift;
    }
    elsif (/^-pnor_filename/i){
        $pnor_filename = $ARGV[1] or die "Bad command line arg given: expecting a pnor filename.\n";
        shift;
    }
    elsif (/^-hb_image_dir/i){
        $hb_image_dir = $ARGV[1] or die "Bad command line arg given: expecting an hb image dir path.\n";
        shift;
    }
    elsif (/^-xml_layout_file/i){
        $xml_layout_file = $ARGV[1] or die "Bad command line arg given: expecting an xml layout file.\n";
        shift;
    }
    elsif (/^-payload/i){
        $payload = $ARGV[1] or die "Bad command line arg given: expecting a filepath to payload binary file.\n";
        shift;
    }
    elsif (/^-bootkernel/i){
        $bootkernel = $ARGV[1] or die "Bad command line arg given: expecting a filepath to bootloader kernel image.\n";
        shift;
    }
    elsif (/^-targeting_binary_filename/i){
        $targeting_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting a targeting binary filename.\n";
        shift;
    }
    elsif (/^-sbe_binary_filename/i){
        $sbe_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an sbe binary filename.\n";
        shift;
    }
    elsif (/^-sbec_binary_filename/i){
        $sbec_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an sbec binary filename.\n";
        shift;
    }
    elsif (/^-wink_binary_filename/i){
        $wink_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an wink binary filename.\n";
        shift;
    }
    elsif (/^-occ_binary_filename/i){
        $occ_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an occ binary filename.\n";
        shift;
    }
    elsif (/^-openpower_version_filename/i){
        $openpower_version_filename = $ARGV[1] or die "Bad command line arg given: expecting openpower version filename.\n";
        shift;
    }
    else {
        print "Unrecognized command line arg: $_ \n";
        print "To view all the options and help text run \'$program_name -h\' \n";
        exit 1;
    }
    shift;
}
# -outdir is the only mandatory option; everything else defaults to "".
if ($outdir eq "") {
    die "-outdir <path_to_directory_for_output_files> is a required command line variable. Please run again with this parameter.\n";
}
print "scratch_dir = $scratch_dir\n";
print "pnor_data_dir = $pnor_data_dir\n";
# Assemble the buildpnor.pl invocation: one --binFile_<PARTITION> flag
# per PNOR partition, pointing at the corresponding image file. Most
# images come pre-ECC'd (.ecc) from the scratch directory.
# NOTE(review): -sbec_binary_filename is parsed above but never used
# here, and the HBBL line is commented out - confirm both are intended.
my $build_pnor_command = "$hb_image_dir/buildpnor.pl";
$build_pnor_command .= " --pnorOutBin $pnor_filename --pnorLayout $xml_layout_file";
$build_pnor_command .= " --binFile_HBD $scratch_dir/$targeting_binary_filename";
$build_pnor_command .= " --binFile_SBE $scratch_dir/$sbe_binary_filename";
$build_pnor_command .= " --binFile_HCODE $scratch_dir/$wink_binary_filename";
$build_pnor_command .= " --binFile_HBB $scratch_dir/hostboot.header.bin.ecc";
$build_pnor_command .= " --binFile_HBI $scratch_dir/hostboot_extended.header.bin.ecc";
$build_pnor_command .= " --binFile_HBRT $scratch_dir/hostboot_runtime.header.bin.ecc";
#$build_pnor_command .= " --binFile_HBBL $scratch_dir/hbbl.bin.ecc";
$build_pnor_command .= " --binFile_HBEL $scratch_dir/hbel.bin.ecc";
$build_pnor_command .= " --binFile_GUARD $scratch_dir/guard.bin.ecc";
$build_pnor_command .= " --binFile_PAYLOAD $payload";
$build_pnor_command .= " --binFile_BOOTKERNEL $bootkernel";
$build_pnor_command .= " --binFile_NVRAM $scratch_dir/nvram.bin";
$build_pnor_command .= " --binFile_MVPD $scratch_dir/mvpd_fill.bin.ecc";
$build_pnor_command .= " --binFile_DJVPD $scratch_dir/djvpd_fill.bin.ecc";
$build_pnor_command .= " --binFile_CVPD $scratch_dir/cvpd.bin.ecc";
$build_pnor_command .= " --binFile_ATTR_TMP $scratch_dir/attr_tmp.bin.ecc";
$build_pnor_command .= " --binFile_ATTR_PERM $scratch_dir/attr_perm.bin.ecc";
$build_pnor_command .= " --binFile_OCC $occ_binary_filename.ecc";
$build_pnor_command .= " --binFile_FIRDATA $scratch_dir/firdata.bin.ecc";
$build_pnor_command .= " --binFile_CAPP $scratch_dir/cappucode.bin.ecc";
$build_pnor_command .= " --binFile_SECBOOT $scratch_dir/secboot.bin.ecc";
$build_pnor_command .= " --binFile_VERSION $openpower_version_filename";
$build_pnor_command .= " --fpartCmd \"fpart\"";
$build_pnor_command .= " --fcpCmd \"fcp\"";
print "###############################";
run_command("$build_pnor_command");
#END MAIN
#-------------------------------------------------------------------------
# Print usage/help text for this script.
# NOTE(review): the heredoc body is currently empty, so -h prints
# nothing - presumably the help text was never filled in. Filling it in
# would change program output, so it is only flagged here.
sub usage {
print <<"ENDUSAGE";
ENDUSAGE
;
}
# Stub: presumably intended to read settings from a config file instead
# of the command line; not implemented yet.
sub parse_config_file {
}
# Return a copy of the supplied string with any leading and trailing
# whitespace stripped; the caller's string is left untouched.
sub trim_string {
    my ($text) = @_;
    for ($text) {
        s/\A\s+//;
        s/\s+\z//;
    }
    return $text;
}
# Echo a shell command, execute it via system(), and abort unless it
# exits with status zero. Returns the (zero) status on success.
sub run_command {
    my ($cmd) = @_;
    print "$cmd\n";
    my $status = system($cmd);
    die "Error running command: $cmd. Nonzero return code of ($status) returned.\n"
        unless $status == 0;
    return $status;
}
| sannerd/pnor | p9Layouts/create_pnor_image.pl | Perl | apache-2.0 | 5,963 |
# Generated SOAP::WSDL data class for the AdWords v201402 "Placement"
# criterion (a content-network site placement). The inside-out attribute
# plumbing below follows the code-generator's layout; the three hashes
# passed to _factory are positional (storage, field types, XML names).
package Google::Ads::AdWords::v201402::Placement;
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace for all elements of this type.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201402' };
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
# Class used for XML attributes; none is defined for this type.
sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}
use base qw(Google::Ads::AdWords::v201402::Criterion);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out storage (Class::Std style): one hash per field, keyed by
# object identity, each with a generated read accessor.
my %id_of :ATTR(:get<id>);
my %type_of :ATTR(:get<type>);
my %Criterion__Type_of :ATTR(:get<Criterion__Type>);
my %url_of :ATTR(:get<url>);
# Register field order, storage hashes, field types and XML element
# names with the SOAP::WSDL complex-type factory.
__PACKAGE__->_factory(
    [ qw( id
        type
        Criterion__Type
        url
    ) ],
    {
        'id' => \%id_of,
        'type' => \%type_of,
        'Criterion__Type' => \%Criterion__Type_of,
        'url' => \%url_of,
    },
    {
        'id' => 'SOAP::WSDL::XSD::Typelib::Builtin::long',
        'type' => 'Google::Ads::AdWords::v201402::Criterion::Type',
        'Criterion__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'url' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
    },
    {
        'id' => 'id',
        'type' => 'type',
        'Criterion__Type' => 'Criterion.Type',
        'url' => 'url',
    }
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201402::Placement
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
Placement from the namespace https://adwords.google.com/api/adwords/cm/v201402.
A placement used for modifying bids for sites when targeting the content network. <span class="constraint AdxEnabled">This is enabled for AdX.</span>
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * url
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201402/Placement.pm | Perl | apache-2.0 | 2,009 |
#
# Copyright 2009-2013 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package MongoDBTest;
use strict;
use warnings;
use Exporter 'import';
use MongoDB;
use Test::More;
use boolean;
use version;
# Helper functions that test files may import explicitly.
our @EXPORT_OK = qw(
    build_client get_test_db server_version server_type clear_testdbs get_capped
);
# Databases created via get_test_db(); dropped by the END block below.
my @testdbs;
# Construct a MongoDB client for the test suite. The host is taken from
# an explicit 'host' argument, then the MONGOD environment variable,
# then falls back to localhost. All remaining arguments are passed
# through as client options and may override the defaults below.
sub build_client {
    my %args = @_;

    my $host = 'localhost';
    if ( exists $args{host} ) {
        $host = delete $args{host};
    }
    elsif ( exists $ENV{MONGOD} ) {
        $host = $ENV{MONGOD};
    }

    # A generous query timeout helps avoid spurious failures on heavily
    # loaded CI machines.
    my %options = (
        ssl                         => $ENV{MONGO_SSL},
        query_timeout               => 60000,
        server_selection_timeout_ms => 1000,
        %args,
    );

    return MongoDB->connect( $host, \%options );
}
# Return a handle to a randomly named scratch database on the given
# connection, remembering it so it can be dropped at process exit.
sub get_test_db {
    my ($conn) = @_;

    my $name = 'testdb' . int( rand( 2**31 ) );
    my $db = $conn->get_database($name) or die "Can't get database\n";

    push @testdbs, $db;
    return $db;
}
# Create and return a capped collection in the given database. A random
# name and a 500,000-byte size cap are used unless overridden by the
# caller ($name, size => ...).
sub get_capped {
    my ( $db, $name, %args ) = @_;

    $name ||= 'capped' . int( rand( 2**31 ) );
    $args{size} ||= 500_000;

    $db->run_command( [ create => $name, capped => true, %args ] );
    return $db->get_collection($name);
}
# XXX eventually, should move away from this and towards a fixture object instead
# Probe for a reachable mongod at module load time and skip the whole
# test file when none is available (or auth is enabled). Uses private
# _topology APIs of the driver, hence the XXX above.
BEGIN {
    eval {
        my $conn = build_client( server_selection_timeout_ms => 1000 );
        $conn->_topology->scan_all_servers;
        $conn->_topology->_dump;
        eval { $conn->_topology->get_writable_link }
            or die "couldn't connect";
        $conn->get_database("admin")->run_command({ serverStatus => 1 })
            or die "Database has auth enabled\n";
    };
    if ( $@ ) {
        # Flatten the error to one line and rewrite connection failures
        # into a friendlier skip reason.
        (my $err = $@) =~ s/\n//g;
        if ( $err =~ /couldn't connect|connection refused/i ) {
            $err = "no mongod on " . ($ENV{MONGOD} || "localhost:27017");
            $err .= ' and $ENV{MONGOD} not set' unless $ENV{MONGOD};
        }
        plan skip_all => "$err";
    }
};
# Query the server's buildInfo and return its version as a "version"
# object (e.g. v3.2.1); any pre-release/build suffix is discarded.
sub server_version {
    my ($conn) = @_;

    my $info = $conn->send_admin_command( [ buildInfo => 1 ] )->output;
    my ($numeric) = $info->{version} =~ m{^([0-9.]+)};

    return version->parse("v$numeric");
}
# Classify the connected server based on its ismaster reply:
# 'Mongos' for a sharded router, 'RSPrimary' for a replica-set primary,
# 'Standalone' for a plain mongod, or 'Unknown' otherwise.
sub server_type {
    my ($conn) = @_;

    # check database type
    my $ismaster = $conn->get_database('admin')->run_command( { ismaster => 1 } );

    if ( exists $ismaster->{msg} && $ismaster->{msg} eq 'isdbgrid' ) {
        return 'Mongos';
    }
    if ( $ismaster->{ismaster} && exists $ismaster->{setName} ) {
        return 'RSPrimary';
    }
    if ( !exists $ismaster->{setName} && !$ismaster->{isreplicaset} ) {
        return 'Standalone';
    }
    return 'Unknown';
}
sub clear_testdbs { @testdbs = () }
# cleanup test dbs: drop every scratch database created via
# get_test_db() when the process exits, unless clear_testdbs() ran.
END {
    for my $db (@testdbs) {
        $db->drop;
    }
}
1;
| kainwinterheart/mongo-perl-driver | t/lib/MongoDBTest.pm | Perl | apache-2.0 | 3,545 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::JSONServer::Tools::AlleleFrequency;
use strict;
use warnings;
use parent qw(EnsEMBL::Web::JSONServer::Tools);
sub object_type { 'AlleleFrequency' }
1;
| Ensembl/public-plugins | tools/modules/EnsEMBL/Web/JSONServer/Tools/AlleleFrequency.pm | Perl | apache-2.0 | 888 |
# PODNAME: Moose::Manual::MooseX
# ABSTRACT: Recommended Moose extensions
__END__
=pod
=encoding UTF-8
=head1 NAME
Moose::Manual::MooseX - Recommended Moose extensions
=head1 VERSION
version 2.1404
=head1 MooseX?
It's easy to extend and change Moose, and this is part of what makes
Moose so powerful. You can use the MOP API to do things your own way,
add new features, and generally customize your Moose.
Writing your own extensions does require a good understanding of the
meta-model. You can start learning about this with the
L<Moose::Manual::MOP> docs. There are also several extension recipes
in the L<Moose::Cookbook>.
Explaining how to write extensions is beyond the scope of this
manual. Fortunately, lots of people have already written extensions
and put them on CPAN for you.
This document covers a few of the ones we like best.
=head1 L<MooseX::AttributeHelpers>
The functionality of this MooseX module has been moved into Moose core.
See L<Moose::Meta::Attribute::Native>.
=head1 L<Moose::Autobox>
MooseX::AttributeHelpers, but turned inside out, Moose::Autobox provides
methods on both arrays/hashes/etc. but also references to them, using
Moose roles, allowing you do to things like:
use Moose::Autobox;
$somebody_elses_object->orders->push($order);
Lexically scoped and not to everybody's taste, but very handy for sugaring
up other people's APIs and your own code.
=head1 L<MooseX::StrictConstructor>
By default, Moose lets you pass any old junk into a class's
constructor. If you load L<MooseX::StrictConstructor>, your class will
throw an error if it sees something it doesn't recognize;
package User;
use Moose;
use MooseX::StrictConstructor;
has 'name';
has 'email';
User->new( name => 'Bob', emali => 'bob@example.com' );
With L<MooseX::StrictConstructor>, that typo ("emali") will cause a
runtime error. With plain old Moose, the "emali" attribute would be
silently ignored.
=head1 L<MooseX::Params::Validate>
We have high hopes for the future of L<MooseX::Method::Signatures> and
L<Moops>. However, these modules, while used regularly in
production by some of the more insane members of the community, are
still marked alpha just in case backwards incompatible changes need to
be made.
If you don't want to risk that, for now we recommend the decidedly more
clunky (but also faster and simpler) L<MooseX::Params::Validate>. This
module lets you apply Moose types and coercions to any method
arguments.
package User;
use Moose;
use MooseX::Params::Validate;
sub login {
my $self = shift;
my ($password)
= validated_list( \@_, password => { isa => 'Str', required => 1 } );
...
}
=head1 L<MooseX::Getopt>
This is a role which adds a C<new_with_options> method to your
class. This is a constructor that takes the command line options and
uses them to populate attributes.
This makes writing a command-line application as a module trivially
simple:
package App::Foo;
use Moose;
with 'MooseX::Getopt';
has 'input' => (
is => 'ro',
isa => 'Str',
required => 1
);
has 'output' => (
is => 'ro',
isa => 'Str',
required => 1
);
sub run { ... }
Then in the script that gets run we have:
use App::Foo;
App::Foo->new_with_options->run;
From the command line, someone can execute the script:
foo@example> foo --input /path/to/input --output /path/to/output
=head1 L<MooseX::Singleton>
To be honest, using a singleton is just a way to have a magic global
variable in languages that don't actually have global variables.
In perl, you can just as easily use a global. However, if your
colleagues are Java-infected, they might prefer a singleton. Also, if
you have an existing class that I<isn't> a singleton but should be,
using L<MooseX::Singleton> is the easiest way to convert it.
package Config;
use MooseX::Singleton; # instead of Moose
has 'cache_dir' => ( ... );
It's that simple.
=head1 EXTENSIONS TO CONSIDER
There are literally dozens of other extensions on CPAN. This is a list
of extensions that you might find useful, but we're not quite ready to
endorse just yet.
=head2 L<MooseX::Declare>
MooseX::Declare is based on L<Devel::Declare>, a giant bag of crack
originally implemented by mst with the goal of upsetting the perl core
developers so much by its very existence that they implemented proper
keyword handling in the core.
As of perl5 version 14, this goal has been achieved, and modules such
as L<Devel::CallParser>, L<Function::Parameters>, and L<Keyword::Simple> provide
mechanisms to mangle perl syntax that don't require hallucinogenic
drugs to interpret the error messages they produce.
If you want to use declarative syntax in new code, please for the love
of kittens get yourself a recent perl and look at L<Moops> instead.
=head2 L<MooseX::Types>
This extension helps you build a type library for your application. It
also lets you predeclare type names and use them as barewords.
use MooseX::Types -declare => ['PositiveInt'];
use MooseX::Types::Moose 'Int';
subtype PositiveInt,
as Int,
where { $_ > 0 },
message { "Int is not larger than 0" };
One nice feature is that those bareword names are actually namespaced
in Moose's type registry, so multiple applications can use the same
bareword names, even if the type definitions differ.
=head2 L<MooseX::Types::Structured>
This extension builds on top of L<MooseX::Types> to let you declare
complex data structure types.
use MooseX::Types -declare => [ qw( Name Color ) ];
use MooseX::Types::Moose qw(Str Int);
use MooseX::Types::Structured qw(Dict Tuple Optional);
subtype Name
=> as Dict[ first => Str, middle => Optional[Str], last => Str ];
subtype Color
=> as Tuple[ Int, Int, Int, Optional[Int] ];
Of course, you could always use objects to represent these sorts of
things too.
=head2 L<MooseX::ClassAttribute>
This extension provides class attributes for Moose classes. The
declared class attributes are introspectable just like regular Moose
attributes.
package User;
use Moose;
use MooseX::ClassAttribute;
has 'name' => ( ... );
class_has 'Cache' => ( ... );
Note however that this class attribute does I<not> inherit like a
L<Class::Data::Inheritable> or similar attribute - calling
$subclass->Cache($cache);
will set it for the superclass as well. Additionally, class data is usually
The Wrong Thing To Do in a strongly OO program since it makes testing a
lot harder - consider carefully whether you'd be better off with an object
that's passed around instead.
=head2 L<MooseX::Daemonize>
This is a role that provides a number of methods useful for creating a
daemon, including methods for starting and stopping, managing a PID
file, and signal handling.
=head2 L<MooseX::Role::Parameterized>
If you find yourself wanting a role that customizes itself for each
consumer, this is the tool for you. With this module, you can create a
role that accepts parameters and generates attributes, methods, etc. on
a customized basis for each consumer.
=head2 L<MooseX::POE>
This is a small wrapper that ties together a Moose class with
C<POE::Session>, and gives you an C<event> sugar function to declare
event handlers.
=head2 L<MooseX::FollowPBP>
Automatically names all accessors I<Perl Best Practices>-style,
"get_size" and "set_size".
=head2 L<MooseX::SemiAffordanceAccessor>
Automatically names all accessors with an explicit set and implicit
get, "size" and "set_size".
=head2 L<MooseX::NonMoose>
MooseX::NonMoose allows for easily subclassing non-Moose classes with Moose,
taking care of the annoying details connected with doing this, such as
setting up proper inheritance from Moose::Object and installing
(and inlining, at make_immutable time) a constructor that makes sure things
like BUILD methods are called.
=head1 AUTHORS
=over 4
=item *
Stevan Little <stevan.little@iinteractive.com>
=item *
Dave Rolsky <autarch@urth.org>
=item *
Jesse Luehrs <doy@tozt.net>
=item *
Shawn M Moore <code@sartak.org>
=item *
יובל קוג'מן (Yuval Kogman) <nothingmuch@woobling.org>
=item *
Karen Etheridge <ether@cpan.org>
=item *
Florian Ragwitz <rafl@debian.org>
=item *
Hans Dieter Pearcey <hdp@weftsoar.net>
=item *
Chris Prather <chris@prather.org>
=item *
Matt S Trout <mst@shadowcat.co.uk>
=back
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2006 by Infinity Interactive, Inc..
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
| ray66rus/vndrv | local/lib/perl5/x86_64-linux-thread-multi/Moose/Manual/MooseX.pod | Perl | apache-2.0 | 8,648 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::h3c::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Constructor. Declares the plugin version and maps every supported
# mode name to the class implementing it.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    my %modes = (
        'cpu' => 'network::h3c::snmp::mode::cpu',
        'hardware' => 'network::h3c::snmp::mode::hardware',
        'interfaces' => 'snmp_standard::mode::interfaces',
        'list-interfaces' => 'snmp_standard::mode::listinterfaces',
        'memory' => 'network::h3c::snmp::mode::memory',
    );
    %{$self->{modes}} = %modes;

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check h3c equipments in SNMP.
=cut
| s-duret/centreon-plugins | network/h3c/snmp/plugin.pm | Perl | apache-2.0 | 1,648 |
# Make the parent directory searchable so the shared Webmin libraries
# can be loaded from this module's subdirectory.
BEGIN { push(@INC, ".."); };
use WebminCore;
use POSIX qw(strftime);
# Standard Webmin module start-up: read this module's configuration.
init_config();
# Pull in Webmin's mount module API for filesystem mount handling.
foreign_require("mount", "mount-lib.pl");
# Per-user access control settings for this module.
my %access = &get_module_acl();
sub properties_list
#return hash of properties that can be set manually and their data type
# Keys are ZFS dataset property names; values describe how the UI should
# render the editor: 'boolean' (on/off), a comma separated list of the
# allowed values, or 'special' (free-form / handled individually).
# NOTE(review): 'pasthrough' and 'groupmaks' look like typos of
# 'passthrough'/'groupmask' - verify against zfsprops(7) before changing,
# since these strings are presented as selectable values.
{
my %list = ('atime' => 'boolean', 'canmount' => 'boolean', 'devices' => 'boolean', 'exec' => 'boolean', 'nbmand' => 'boolean', 'readonly' => 'boolean', 'setuid' => 'boolean', 'shareiscsi' => 'boolean', 'utf8only' => 'boolean', 'vscan' => 'boolean', 'zoned' => 'boolean', 'relatime' => 'boolean', 'overlay' => 'boolean',
'aclinherit' => 'discard, noallow, restricted, pasthrough, passthrough-x', 'aclmode' => 'discard, groupmaks, passthrough', 'casesensitivity' => 'sensitive, insensitive, mixed', 'checksum' => 'on, off, fletcher2, fletcher4, sha256', 'compression' => 'on, off, lzjb, lz4, gzip, gzip-1, gzip-2, gzip-3, gzip-4, gzip-5, gzip-6, gzip-7, gzip-8, gzip-9, zle', 'copies' => '1, 2, 3', 'dedup' => 'on, off, verify, sha256', 'logbias' => 'latency, throughput', 'normalization' => 'none, formC, formD, formKC, formKD', 'primarycache' => 'all, none, metadata', 'secondarycache' => 'all, none, metadata', 'snapdir' => 'hidden, visible', 'snapdev' => 'hidden, visible', 'sync' => 'standard, always, disabled', 'xattr' => 'on, off, sa', 'com.sun:auto-snapshot' => 'true, false', 'acltype' => 'noacl, posixacl', 'redundant_metadata' => 'all, most',
'mountpoint' => 'special', 'sharesmb' => 'special', 'sharenfs' => 'special', 'mounted' => 'special', 'volsize' => 'special', 'context' => 'special', 'defcontext' => 'special', 'fscontext' => 'special', 'rootcontext' => 'special');
# Dead code below: a per-type lookup that was abandoned in favour of
# always returning the full hash.
#if ($type != undef)
#{
#	return @list{$type};
#} else
#{
	return %list;
#}
}
sub pool_properties_list
# Return a hash mapping zpool property names to their value type:
# 'boolean', a comma-separated list of allowed values, or 'special'.
{
	my %types;
	$types{$_} = 'boolean' for qw(autoexpand autoreplace delegation listsnaps);
	$types{'failmode'} = 'wait, continue, panic';
	$types{"feature\@$_"} = 'enabled, disabled'
	    for qw(async_destroy empty_bpobj lz4_compress embedded_data
	           enabled_txg bookmarks hole_birth spacemap_histogram
	           extensible_dataset);
	$types{$_} = 'special' for qw(altroot bootfs cachefile comment);
	return %types;
}
sub create_opts
# Default property values offered when creating a new pool or filesystem.
{
	return (
	    'atime'       => 'on',
	    'compression' => 'off',
	    'dedup'       => 'off',
	    'readonly'    => 'off',
	    'utf8only'    => 'off',
	    'xattr'       => 'on',
	);
}
sub get_zfsmanager_config
# Parse this module's key=value configuration file (path taken from
# $config{'zfsmanager_conf'}) and return the settings as a flat hash.
# Lines with an empty key are skipped; values may themselves contain '='.
{
	my %settings;
	my $lines = &read_file_lines($config{'zfsmanager_conf'});
	foreach my $entry (@$lines) {
		my ($key, $value) = split(/=/, $entry, 2);
		$settings{$key} = $value if $key;
	}
	return %settings;
}
#determine if a property can be edited
sub can_edit
# Decide whether $property of dataset/pool $zfs may be edited in the UI.
# Returns 0 for snapshots, 1 when the property is known and the matching
# config switch (zfs_properties / pool_properties) is enabled, otherwise
# an empty value. The %conf/%zfs_props/%pool_props package globals are
# refreshed as a side effect, matching the original behaviour.
{
	my ($zfs, $property) = @_;
	%conf = get_zfsmanager_config();
	%zfs_props = properties_list();
	%pool_props = pool_properties_list();
	my %type = zfs_get($zfs, 'type');
	# snapshots are read-only
	return 0 if ($type{$zfs}{type}{value} =~ 'snapshot');
	return 1 if (($zfs_props{$property}) && ($conf{'zfs_properties'} =~ /1/));
	return 1 if (($pool_props{$property}) && ($conf{'pool_properties'} =~ /1/));
	return;
}
sub list_zpools
# Return a hash keyed by pool name; each value is a hashref with the
# size/alloc/free/frag/cap/dedup/health/altroot columns from `zpool list`.
# $pool may name a single pool, or be empty to list every pool.
{
	my ($pool) = @_;
	my %pools;
	# -H: no header, machine-parsable output
	my $out = `zpool list -o name,size,alloc,free,frag,cap,dedup,health,altroot -H $pool`;
	foreach my $row (split(/\n/, $out)) {
		my ($name, @cols) = split(' ', $row);
		my %info;
		@info{qw(size alloc free frag cap dedup health altroot)} = @cols;
		$pools{$name} = \%info;
	}
	return %pools;
}
sub list_zfs
# Return a hash of ZFS datasets keyed by name; each value is a hashref with
# the used/avail/refer/mount columns from `zfs list`.
# $zfs may name a single dataset, or be empty to list everything.
{
	my ($zfs) = @_;
	my %datasets;
	# -H: no header, machine-parsable output
	my $out = `zfs list -o name,used,avail,refer,mountpoint -H $zfs`;
	foreach my $row (split(/\n/, $out)) {
		my ($name, @cols) = split(' ', $row);
		my %info;
		@info{qw(used avail refer mount)} = @cols;
		$datasets{$name} = \%info;
	}
	return %datasets;
}
sub list_snapshots
# Return a hash of snapshots keyed by full name (dataset@snapname); each
# value is a hashref with the used/avail/refer/mount columns reported by
# `zfs list -t snapshot`. $snap may name one snapshot or be empty for all.
{
	my ($snap) = @_;
	my %snapshots;
	my $out = `zfs list -t snapshot $snap -H`;
	foreach my $row (split(/\n/, $out)) {
		my ($name, @cols) = split(' ', $row);
		my %info;
		@info{qw(used avail refer mount)} = @cols;
		$snapshots{$name} = \%info;
	}
	return %snapshots;
}
sub get_alerts
# Summarise `zpool status -x`.
# Returns the raw zpool message when all pools are healthy, otherwise an
# HTML fragment like "<b>pool 'tank' is DEGRADED with ...<br /></b>" built
# from the per-pool state/errors lines.
# BUG FIXES vs the original:
#   - removed the dead `$status = ( $value );` assignment, which set an
#     unrelated package scalar, not the %status hash
#   - removed the always-true bareword condition `if (true)`
#   - lines without a ":" no longer trigger undef-substitution warnings
{
	my $alerts = `zpool status -x`;
	return $alerts if ($alerts =~ /all pools are healthy/);
	my %status;
	my $pool = '';
	foreach my $line (split(/\n/, $alerts)) {
		my ($key, $value) = split(/:/, $line, 2);
		next unless defined $value;
		# trim surrounding whitespace from both halves
		for ($key, $value) { s/^\s*(.*?)\s*$/$1/; }
		if (($key =~ 'pool') && ($value)) {
			# start of a new per-pool section
			$pool = $value;
		} elsif ((($key =~ 'state') || ($key =~ 'errors')) && ($value)) {
			$status{$pool}{$key} = $value;
		}
	}
	my $out = "<b>";
	foreach my $name (sort(keys %status)) {
		$out .= "pool \'".$name."\' is ".$status{$name}{state}." with ".$status{$name}{errors}."<br />";
	}
	$out .= "</b>";
	return $out;
}
#zpool_status($pool) - parse the free-form `zpool status` report for one pool.
# Key 0 of the returned hash holds the pool-level fields (pool, state,
# status, action, see, scan, config, errors, plus the pool's own
# read/write/cksum counters); keys 1..N hold one hashref per vdev/device
# row of the config table, each knowing its parent vdev name.
sub zpool_status
{
my ($pool)=@_;
my $parent = "pool";
my %status = ();
my $cmd=`zpool status $pool`;
# carve the report into sections by splitting on the field labels
(undef, $cmdout) = split(/ pool: /, $cmd);
($status{0}{pool}, $cmdout) = split(/ state: /, $cmdout);
chomp $status{0}{pool};
# the "status:"/"action:"/"see:" sections only appear for unhealthy pools
if (index($cmd, "status: ") != -1) {
($status{0}{state}, $cmdout) = split("status: ", $cmdout);
($status{0}{status}, $cmdout) = split("action: ", $cmdout);
if (index($cmd, " see: ") != -1) {
($status{0}{action}, $cmdout) = split(" see: ", $cmdout);
($status{0}{see}, $cmdout) = split(" scan: ", $cmdout);
} else { ($status{0}{action}, $cmdout) = split(" scan: ", $cmdout); }
} else {
($status{0}{state}, $cmdout) = split(" scan: ", $cmdout);
}
($status{0}{scan}, $cmdout) = split("config:", $cmdout);
($status{0}{config}, $status{0}{errors}) = split("errors: ", $cmdout);
# walk the config table row by row (columns: NAME STATE READ WRITE CKSUM)
$fh= $status{0}{config};
@array = split("\n", $fh);
# NOTE(review): $cmdout, $fh, $line and $devs are package globals; $devs is
# never reset here, so vdev numbering continues across calls - confirm
# this is intended before relying on the numeric keys.
foreach $line (@array) #while (my $line =<$fh>)
{
chomp ($line);
my($name, $state, $read, $write, $cksum) = split(" ", $line);
if ($name =~ "NAME") { #header row - do nothing
} elsif (($name =~ $status{0}{pool}) && (length($name) == length($status{0}{pool}))) {
# the pool's own summary row: merge its counters into entry 0
$status{0}{name} = $name;
$status{0}{read} = $read;
$status{0}{write} = $write;
$status{0}{cksum} = $cksum;
$devs++;
#check if vdev is a log or cache vdev
} elsif (($name =~ /log/) || ($name =~ /cache/))
{
$status{$devs} = {name => $name, state => $state, read => $read, write => $write, cksum => $cksum, parent => "pool",};
$parent = $name;
$devs++;
#check if vdev is a mirror, raidz or spare group
} elsif (($name =~ /mirror/) || ($name =~ /raidz/) || ($name =~ /spare/))
{
$status{$devs} = {name => $name, state => $state, read => $read, write => $write, cksum => $cksum, parent => $parent};
$parent = $name;
$devs++;
#for all other vdevs, should be actual devices at this point
} elsif ($name)
{
$status{$devs} = {name => $name, state => $state, read => $read, write => $write, cksum => $cksum, parent => $parent,};
$devs++;
}
}
return %status;
}
#zfs_get($zfs, $property) - query dataset properties via `zfs get`.
sub zfs_get
# $zfs      - dataset name (may be empty to cover every dataset)
# $property - property name; defaults to 'all' when not supplied
# Returns a nested hash: $hash{$name}{$property} = { value =>, source => }.
{
	my ($zfs, $property) = @_;
	# BUG FIX: the old code did `if (~$property) { my $property = "all"; }`.
	# `~` is bitwise NOT (true for any input) and the inner `my` variable
	# went out of scope immediately, so the 'all' default was never applied
	# and a missing property produced a malformed `zfs get` command.
	$property = 'all' unless defined($property) && length($property);
	my %hash = ();
	my $get = `zfs get $property $zfs -H`;
	open my $fh, "<", \$get;
	# expecting NAME PROPERTY VALUE SOURCE, tab separated because of -H
	while (my $line = <$fh>) {
		chomp($line);
		my ($name, $prop, $value, $source) = split(/\t/, $line);
		$hash{$name}{$prop} = { value => $value, source => $source };
	}
	return %hash;
}
#zpool_get($pool, $property) - query pool properties via `zpool get`.
sub zpool_get
# $pool     - pool name (may be empty to cover every pool)
# $property - property name; defaults to 'all' when not supplied
# Returns a nested hash: $hash{$pool}{$property} = { value =>, source => }.
{
	my ($pool, $property) = @_;
	# BUG FIX: `if (~$property) { my $property = "all"; }` never worked -
	# `~` is bitwise NOT (always true) and the inner `my` went out of scope
	# immediately, so the default was never visible outside the block.
	$property = 'all' unless defined($property) && length($property);
	my %hash = ();
	my $get = `zpool get -H $property $pool`;
	open my $fh, "<", \$get;
	# expecting NAME PROPERTY VALUE SOURCE, tab separated
	while (my $line = <$fh>) {
		chomp($line);
		my ($name, $prop, $value, $source) = split(/\t/, $line);
		$hash{$name}{$prop} = { value => $value, source => $source };
	}
	return %hash;
}
sub zpool_imports
# List pools available for import (`zpool import`).
# $dir       - optional directory to scan for devices (passed as -d DIR)
# $destroyed - extra flags, e.g. '-D' to also show destroyed pools
# Returns %status keyed by pool name, each entry holding id/state/scan/
# status/errors fields plus a 'vdevs' hash describing the device tree
# (every vdev entry records the name of its parent vdev).
{
	my ($dir, $destroyed) = @_;
	if ($dir) { $dir = '-d '.$dir; }
	my %status = ();
	my $pool;
	my $parent;
	my @array = split("\n", `zpool import $dir $destroyed`);
	foreach my $line (@array)
	{
		chomp ($line);
		$line =~ s/^\s*(.*?)\s*$/$1/;
		my ($key, $value) = split(/:/, $line);
		$key =~ s/^\s*(.*?)\s*$/$1/ if defined $key;
		$value =~ s/^\s*(.*?)\s*$/$1/ if defined $value;
		if (($key =~ 'pool') || ($key =~ 'state') || ($key =~ 'scan') || ($key =~ 'errors') || ($key =~ 'scrub') || ($key =~ 'status') || ($key =~ 'id'))
		{
			if ($key =~ 'pool') { $pool = $value; }
			if ($key =~ 'scrub') { $key = 'scan'; }   # older zpool says "scrub:"
			$status{$pool}{$key} = $value;
		} elsif (($line =~ "config:") || ($line =~ /NAME/) || ($line =~ /action:/) || ($line =~ /see:/))
		{
			# header / advisory lines carry no device data - ignore
		} else
		{
			# device table rows: NAME STATE [STATUS]
			my ($name, $state, $vstat) = split(" ", $line);
			# BUG FIX: this comparison used the numeric operator ==, which
			# numifies both (non-numeric) names to 0 and therefore matched
			# EVERY device row, wiping the vdevs tree each time. String
			# comparison with eq is required here.
			if (defined($name) && defined($status{$pool}{pool}) && $name eq $status{$pool}{pool})
			{
				# the pool's own row starts a fresh vdev tree
				$status{$pool}{vdevs} = ();
				$parent = 'pool';
			} elsif (($name =~ /log/) || ($name =~ /cache/))
			{
				$status{$pool}{vdevs}{$name} = {name => $name, state => $state, status => $vstat, parent => "pool"};
				$parent = $name;
			} elsif (($name =~ /mirror/) || ($name =~ /raidz/) || ($name =~ /spare/))
			{
				$status{$pool}{vdevs}{$name} = {name => $name, state => $state, status => $vstat, parent => $parent};
				$parent = $name;
			} elsif ($name)
			{
				# leaf entries: actual devices under the current parent vdev
				$status{$pool}{vdevs}{$name} = {name => $name, state => $state, status => $vstat, parent => $parent};
			}
		}
	}
	return %status;
}
sub list_disk_ids
# Map stable Linux device aliases to kernel device names by reading the
# /dev/disk/by-id and /dev/disk/by-uuid symlink directories.
# Returns %hash with $hash{byid}{ALIAS} and $hash{byuuid}{UUID} set to the
# symlink target (e.g. '../../sda1').
# BUG FIXES vs the original: opendir results are now checked (missing
# directories, e.g. on non-Linux systems, are skipped instead of silently
# iterating a dead handle), lexical dir handles replace the reused bareword
# DIR, handles are closed, and %hash is lexical instead of a package global.
{
	my %hash = ();
	my %dirs = (
	    byid   => '/dev/disk/by-id',
	    byuuid => '/dev/disk/by-uuid',
	);
	foreach my $kind (sort keys %dirs) {
		my $dir = $dirs{$kind};
		opendir(my $dh, $dir) or next;
		while (my $file = readdir($dh)) {
			if (!-d $dir."/".$file) {
				$hash{$kind}{$file} = readlink($dir."/".$file);
			}
		}
		closedir($dh);
	}
	return %hash;
}
sub cmd_create_zfs
#deprecated - build (but do not run) a "zfs create" command line.
# $zfs     - dataset name to create
# $options - hashref of properties; the pseudo-options 'sparse' (adds -s)
#            and 'zvol' (adds -V <size>, turning the dataset into a volume)
#            are consumed here, everything else becomes -o property=value.
# Returns the assembled command string. Properties with a known creation
# default are skipped when their value matches /default/.
{
my ($zfs, $options) = @_;
my $opts = ();
my %createopts = create_opts();
$createopts{'volblocksize'} = '8k';
#$createopts{'sparse'} = '0';
# -s requests a sparse (thin-provisioned) volume
if (${$options}{'sparse'}) { $opts .= "-s "; }
delete ${$options}{'sparse'};
# a 'zvol' entry supplies the volume size for -V
if (${$options}{'zvol'}) {
$zfs = "-V ".${$options}{'zvol'}." ".$zfs;
delete ${$options}{'zvol'};
}
foreach $key (sort(keys %${options}))
{
$opts = (($createopts{$key}) && (${$options}{$key} =~ 'default')) ? $opts : $opts.' -o '.$key.'='.${$options}{$key};
}
my $cmd="zfs create $opts $zfs";
#my @result = ($cmd, `$cmd 2>&1`);
return $cmd;
}
sub cmd_create_zpool
#deprecated - build (but do not run) a "zpool create" command line.
# $pool     - pool name, $dev - device list string
# $options  - hashref of dataset (-O) properties
# $poolopts - hashref of pool (-o) properties
# $force    - extra flags such as '-f'
# Entries whose value matches /default/ are omitted. Returns the command.
{
	my ($pool, $dev, $options, $poolopts, $force) = @_;
	my $opts = '';
	for my $name (sort keys %{$poolopts}) {
		next if $poolopts->{$name} =~ 'default';
		$opts .= ' -o '.$name.'='.$poolopts->{$name};
	}
	for my $name (sort keys %{$options}) {
		next if $options->{$name} =~ 'default';
		$opts .= ' -O '.$name.'='.$options->{$name};
	}
	return "zpool create $force $opts $pool $dev";
}
sub ui_zpool_status
# Print a Webmin table of pools (from list_zpools) with their usage and
# health columns; each pool name links to "$action<name>". $action defaults
# to "status.cgi?pool=" when not given.
{
	my ($pool, $action) = @_;
	$action = "status.cgi?pool=" if ($action eq undef);
	my %pools = list_zpools($pool);
	print ui_columns_start([ "Pool Name", "Size", "Alloc", "Free", "Frag", "Cap", "Dedup", "Health"]);
	foreach my $name (keys %pools) {
		my $info = $pools{$name};
		print ui_columns_row([
		    "<a href='$action$name'>$name</a>",
		    $info->{size}, $info->{alloc}, $info->{free}, $info->{frag},
		    $info->{cap}, $info->{dedup}, $info->{health},
		]);
	}
	print ui_columns_end();
}
sub ui_zpool_properties
# Print a Webmin table of every property of pool $pool. Properties that are
# known - typed in pool_properties_list() or described in the
# property-list-en.pl catalogue - are rendered as links to the property
# editor (property.cgi); everything else is shown read-only.
{
my ($pool) = @_;
require './property-list-en.pl';
my %hash = zpool_get($pool, "all");
my %props = property_desc();
my %properties = pool_properties_list();
print ui_table_start("Properties", "width=100%", undef);
foreach $key (sort(keys %{$hash{$pool}}))
{
if (($properties{$key}) || ($props{$key}))
{
# known property: link to the editor page
print ui_table_row('<a href="property.cgi?pool='.$pool.'&property='.$key.'">'.$key.'</a>', $hash{$pool}{$key}{value});
} else {
# unknown property: plain value, no edit link
print ui_table_row($key, $hash{$pool}{$key}{value});
#print ui_table_row($key, $hash{$pool}{$key}{value});
}
}
print ui_table_end();
}
sub ui_zfs_list
# Print a Webmin table of ZFS datasets (from list_zfs) sorted by name; each
# name links to "$action<name>". $action defaults to "status.cgi?zfs=".
{
	my ($zfs, $action) = @_;
	$action = "status.cgi?zfs=" if ($action eq undef);
	my %datasets = list_zfs($zfs);
	print ui_columns_start([ "File System", "Used", "Avail", "Refer", "Mountpoint" ]);
	foreach my $name (sort(keys %datasets)) {
		my $info = $datasets{$name};
		print ui_columns_row([
		    "<a href='$action$name'>$name</a>",
		    $info->{used}, $info->{avail}, $info->{refer}, $info->{mount},
		]);
	}
	print ui_columns_end();
}
sub ui_zfs_properties
# Print a Webmin table of every property of dataset $zfs. Known properties
# (typed in properties_list() or described in property-list-en.pl) link to
# the property editor; the 'origin' property additionally links to the
# source snapshot's status page.
{
my ($zfs)=@_;
require './property-list-en.pl';
my %hash = zfs_get($zfs, "all");
# datasets without the auto-snapshot attribute still get a placeholder row
if (!$hash{$zfs}{'com.sun:auto-snapshot'}) { $hash{$zfs}{'com.sun:auto-snapshot'}{'value'} = '-'; }
my %props = property_desc();
my %properties = properties_list();
print ui_table_start("Properties", "width=100%", undef);
foreach $key (sort(keys %{$hash{$zfs}}))
{
if (($properties{$key}) || ($props{$key}))
{
# 'origin' values are snapshots - link the value to its status page
if ($key =~ 'origin') { print ui_table_row('<a href="property.cgi?zfs='.$zfs.'&property='.$key.'">'.$key.'</a>', "<a href='status.cgi?snap=$hash{$zfs}{$key}{value}'>$hash{$zfs}{$key}{value}</a>");
} else { print ui_table_row('<a href="property.cgi?zfs='.$zfs.'&property='.$key.'">'.$key.'</a>', $hash{$zfs}{$key}{value}); }
} else {
# unknown property: plain value, no edit link
print ui_table_row($key, $hash{$zfs}{$key}{value});
}
}
print ui_table_end();
}
sub ui_list_snapshots
# Print the snapshot table for dataset $zfs (or all snapshots when empty).
# When $admin is "1", each row gets a selection checkbox and the table is
# wrapped in a form posting to cmd.cgi with cmd=multisnap; a destroy button
# is added only when snap_destroy is enabled in the module configuration.
{
my ($zfs, $admin) = @_;
%snapshot = list_snapshots($zfs);
%conf = get_zfsmanager_config();
if ($admin =~ /1/) {
#print ui_form_start('cmd.cgi', 'get', 'cmd');
print ui_form_start('cmd.cgi', 'post');
#print ui_hidden('multisnap', 1);
print ui_hidden('cmd', 'multisnap');
}
#if ($admin =~ /1/) { print select_all_link('snap', '', "Select All"), " | ", select_invert_link('snap', '', "Invert Selection") }
print ui_columns_start([ "Snapshot", "Used", "Refer" ]);
my $num = 0;
foreach $key (sort(keys %snapshot))
{
#print ui_columns_row([ui_checkbox("snap", $key, "<a href='snapshot.cgi?snap=$key'>$key</a>"), $snapshot{$key}{used}, $snapshot{$key}{refer} ]);
if ($admin =~ /1/) {
# admin view: the checkbox value is "name;" so cmd.cgi can split on ';'
print ui_columns_row([ui_checkbox("select", $key.";", "<a href='status.cgi?snap=$key'>$key</a>"), $snapshot{$key}{used}, $snapshot{$key}{refer} ]);
$num ++;
} else {
print ui_columns_row([ "<a href='status.cgi?snap=$key'>$key</a>", $snapshot{$key}{used}, $snapshot{$key}{refer} ]);
}
#if ($zfs =~ undef) { print ui_columns_row([ui_checkbox("snap", $key, "<a href='snapshot.cgi?snap=$key'>$key</a>"), $snapshot{$key}{used}, $snapshot{$key}{refer} ]); }
#else {
#	if ($key =~ ($zfs."@")) { print ui_columns_row([ui_checkbox("snap", $key, "<a href='snapshot.cgi?snap=$key'>$key</a>"), $snapshot{$key}{used}, $snapshot{$key}{refer} ]); }
#}
}
print ui_columns_end();
# selection helpers and the optional destroy button close the admin form
if ($admin =~ /1/) { print select_all_link('select', '', "Select All"), " | ", select_invert_link('select', '', "Invert Selection") }
if (($admin =~ /1/) && ($conf{'snap_destroy'} =~ /1/)) { print " | ".ui_submit("Destroy selected snapshots"); }
if ($admin =~ /1/) { print ui_form_end(); }
}
sub ui_create_snapshot
# Return an HTML form for creating a snapshot of dataset $zfs. The snapshot
# name field defaults to a zfs_manager_YYYY-MM-DD-HHMM timestamp and the
# form posts cmd=snapshot to cmd.cgi.
{
	my ($zfs) = @_;
	my $stamp = strftime "zfs_manager_%Y-%m-%d-%H%M", localtime;
	my $form = ui_form_start('cmd.cgi', 'post')."\n"
	    . "Create new snapshot based on filesystem: ".$zfs."<br />\n"
	    . $zfs."@ ".ui_textbox('snap', $stamp, 28)."\n"
	    . ui_hidden('zfs', $zfs)."\n"
	    . ui_hidden('cmd', "snapshot")."\n"
	    . ui_submit("Create")
	    . ui_form_end();
	return $form;
}
sub ui_cmd
# Render the confirm/execute page for a shell command.
# $message - human description of the action (e.g. "destroy snapshot")
# $cmd     - the full shell command to run
# On the first pass ($in{'confirm'} unset) returns a confirmation form that
# re-posts every CGI parameter; once confirmed, runs the command and
# reports success or its captured output.
# BUG FIXES vs the original:
#   - user-visible typo "Would you lke" -> "Would you like"
#   - the output slice ran to index scalar(@result), one past the end,
#     appending a spurious empty line; now 1 .. $#result
{
	my ($message, $cmd) = @_;
	my $rv = "Attempting to $message with command... <br />\n";
	$rv .= "<i># ".$cmd."</i><br /><br />\n";
	if (!$in{'confirm'}) {
		# first pass: ask the user to confirm before anything is executed
		$rv .= ui_form_start('cmd.cgi', 'post');
		foreach my $key (keys %in) {
			$rv .= ui_hidden($key, $in{$key});
		}
		$rv .= "<h3>Would you like to continue?</h3>\n";
		$rv .= ui_submit("yes", "confirm", 0)."<br />";
		$rv .= ui_form_end();
	} else {
		# confirmed: run the command, capturing stdout and stderr
		my @result = (`$cmd 2>&1`);
		if (!$result[0])
		{
			# no output at all is treated as success
			$rv .= "Success! <br />\n";
		} else {
			$rv .= "<b>error: </b>".$result[0]."<br />\n";
			foreach my $line (@result[1 .. $#result]) {
				$rv .= $line."<br />\n";
			}
		}
	}
	return $rv;
}
sub ui_popup_link
#deprecated - return an <a> tag labelled $name that opens $url in a small
# popup window, falling back to a normal link when JavaScript is disabled.
{
	my ($name, $url) = @_;
	my $features = 'toolbar=no,menubar=no,scrollbars=yes,width=600,height=400,resizable=yes';
	return "<a onClick=\"window.open('$url', 'cmd', '$features'); return false\" href='$url'>$name</a>";
}
sub test_function
# Placeholder left over from development; intentionally empty.
{
}
| datenheim/zfsmanager | zfsmanager-lib.pl | Perl | bsd-3-clause | 18,721 |
#-----------------------------------------------------------------
# OWL::Data::OWL::DatatypeProperty
# Author: Edward Kawas <edward.kawas@gmail.com>,
# For copyright and disclaimer see below.
#
# $Id: DatatypeProperty.pm,v 1.5 2009-11-12 21:11:30 ubuntu Exp $
#-----------------------------------------------------------------
package OWL::Data::OWL::DatatypeProperty;
use base ("OWL::Base");
use strict;
# imports
use RDF::Core::Resource;
use RDF::Core::Statement;
use RDF::Core::Literal;
use RDF::Core::NodeFactory;
use OWL::RDF::Predicates::DC_PROTEGE;
use OWL::RDF::Predicates::OMG_LSID;
use OWL::RDF::Predicates::OWL;
use OWL::RDF::Predicates::RDF;
use OWL::RDF::Predicates::RDFS;
# add versioning to this module
use vars qw /$VERSION/;
$VERSION = sprintf "%d.%02d", q$Revision: 1.5 $ =~ /: (\d+)\.(\d+)/;
=head1 NAME
OWL::Data::OWL::DatatypeProperty
=head1 SYNOPSIS
use OWL::Data::OWL::DatatypeProperty;
# create an owl DatatypeProperty
my $data = OWL::Data::OWL::DatatypeProperty->new ();
=head1 DESCRIPTION
An object representing an OWL DatatypeProperty
=head1 AUTHORS
Edward Kawas (edward.kawas [at] gmail [dot] com)
=cut
#-----------------------------------------------------------------
# A list of allowed attribute names. See OWL::Base for details.
#-----------------------------------------------------------------
=head1 ACCESSIBLE ATTRIBUTES
Details are in L<OWL::Base>. Here just a list of them:
=over
=item B<value> - the value that this datatype property assumes
=item B<range> - the range of this datatype property
=item B<domain> - the domain for this datatype property
=item B<uri> - the uri of this datatype property
=back
=cut
=head1 subroutines
=cut
{
# Closure-scoped table of the attributes this class adds on top of
# OWL::Base; each entry maps an attribute name to its metadata (currently
# just its data type). Shared by the two accessor hooks below.
my %_allowed = (
value => {
type => OWL::Base->STRING,
},
range => { type => OWL::Base->STRING },
domain => { type => OWL::Base->STRING },
uri => { type => OWL::Base->STRING },
);
# Return true when $attr is a recognised attribute, either declared in
# %_allowed above or accepted by a superclass.
sub _accessible {
my ( $self, $attr ) = @_;
exists $_allowed{$attr} or $self->SUPER::_accessible($attr);
}
# Look up metadata property $prop_name (e.g. 'type') of attribute
# $attr_name, falling back to the superclass when not declared here.
sub _attr_prop {
my ( $self, $attr_name, $prop_name ) = @_;
my $attr = $_allowed{$attr_name};
return ref($attr) ? $attr->{$prop_name} : $attr if $attr;
return $self->SUPER::_attr_prop( $attr_name, $prop_name );
}
}
#-----------------------------------------------------------------
=head1 SUBROUTINES
=cut
#-----------------------------------------------------------------
# init
#-----------------------------------------------------------------
# Initialise a new instance; currently just delegates to OWL::Base::init.
sub init {
my ($self) = shift;
$self->SUPER::init();
}
1;
__END__
| gitpan/OWL2Perl | lib/OWL/Data/OWL/DatatypeProperty.pm | Perl | bsd-3-clause | 2,672 |
###########################################################################
#
# This file is partially auto-generated by the DateTime::Locale generator
# tools (v0.10). This code generator comes with the DateTime::Locale
# distribution in the tools/ directory, and is called generate-modules.
#
# This file was generated from the CLDR JSON locale data. See the LICENSE.cldr
# file included in this distribution for license details.
#
# Do not edit this file directly unless you are sure the part you are editing
# is not created by the generator.
#
###########################################################################
=pod
=encoding UTF-8
=head1 NAME
DateTime::Locale::ksb - Locale data examples for the ksb locale.
=head1 DESCRIPTION
This pod file contains examples of the locale data available for the
Shambala locale.
=head2 Days
=head3 Wide (format)
Jumaatatu
Jumaane
Jumaatano
Alhamisi
Ijumaa
Jumaamosi
Jumaapii
=head3 Abbreviated (format)
Jtt
Jmn
Jtn
Alh
Iju
Jmo
Jpi
=head3 Narrow (format)
3
4
5
A
I
1
2
=head3 Wide (stand-alone)
Jumaatatu
Jumaane
Jumaatano
Alhamisi
Ijumaa
Jumaamosi
Jumaapii
=head3 Abbreviated (stand-alone)
Jtt
Jmn
Jtn
Alh
Iju
Jmo
Jpi
=head3 Narrow (stand-alone)
3
4
5
A
I
1
2
=head2 Months
=head3 Wide (format)
Januali
Febluali
Machi
Aplili
Mei
Juni
Julai
Agosti
Septemba
Oktoba
Novemba
Desemba
=head3 Abbreviated (format)
Jan
Feb
Mac
Apr
Mei
Jun
Jul
Ago
Sep
Okt
Nov
Des
=head3 Narrow (format)
J
F
M
A
M
J
J
A
S
O
N
D
=head3 Wide (stand-alone)
Januali
Febluali
Machi
Aplili
Mei
Juni
Julai
Agosti
Septemba
Oktoba
Novemba
Desemba
=head3 Abbreviated (stand-alone)
Jan
Feb
Mac
Apr
Mei
Jun
Jul
Ago
Sep
Okt
Nov
Des
=head3 Narrow (stand-alone)
J
F
M
A
M
J
J
A
S
O
N
D
=head2 Quarters
=head3 Wide (format)
Lobo ya bosi
Lobo ya mbii
Lobo ya nnd’atu
Lobo ya nne
=head3 Abbreviated (format)
L1
L2
L3
L4
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
Lobo ya bosi
Lobo ya mbii
Lobo ya nnd’atu
Lobo ya nne
=head3 Abbreviated (stand-alone)
L1
L2
L3
L4
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide (format)
Kabla ya Klisto
Baada ya Klisto
=head3 Abbreviated (format)
KK
BK
=head3 Narrow (format)
KK
BK
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = Jumaane, 5 Febluali 2008
1995-12-22T09:05:02 = Ijumaa, 22 Desemba 1995
-0010-09-15T04:44:23 = Jumaamosi, 15 Septemba -10
=head3 Long
2008-02-05T18:30:30 = 5 Febluali 2008
1995-12-22T09:05:02 = 22 Desemba 1995
-0010-09-15T04:44:23 = 15 Septemba -10
=head3 Medium
2008-02-05T18:30:30 = 5 Feb 2008
1995-12-22T09:05:02 = 22 Des 1995
-0010-09-15T04:44:23 = 15 Sep -10
=head3 Short
2008-02-05T18:30:30 = 05/02/2008
1995-12-22T09:05:02 = 22/12/1995
-0010-09-15T04:44:23 = 15/09/-10
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Short
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = Jumaane, 5 Febluali 2008 18:30:30 UTC
1995-12-22T09:05:02 = Ijumaa, 22 Desemba 1995 09:05:02 UTC
-0010-09-15T04:44:23 = Jumaamosi, 15 Septemba -10 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 5 Febluali 2008 18:30:30 UTC
1995-12-22T09:05:02 = 22 Desemba 1995 09:05:02 UTC
-0010-09-15T04:44:23 = 15 Septemba -10 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 5 Feb 2008 18:30:30
1995-12-22T09:05:02 = 22 Des 1995 09:05:02
-0010-09-15T04:44:23 = 15 Sep -10 04:44:23
=head3 Short
2008-02-05T18:30:30 = 05/02/2008 18:30
1995-12-22T09:05:02 = 22/12/1995 09:05
-0010-09-15T04:44:23 = 15/09/-10 04:44
=head2 Available Formats
=head3 E (ccc)
2008-02-05T18:30:30 = Jmn
1995-12-22T09:05:02 = Iju
-0010-09-15T04:44:23 = Jmo
=head3 EHm (E HH:mm)
2008-02-05T18:30:30 = Jmn 18:30
1995-12-22T09:05:02 = Iju 09:05
-0010-09-15T04:44:23 = Jmo 04:44
=head3 EHms (E HH:mm:ss)
2008-02-05T18:30:30 = Jmn 18:30:30
1995-12-22T09:05:02 = Iju 09:05:02
-0010-09-15T04:44:23 = Jmo 04:44:23
=head3 Ed (d, E)
2008-02-05T18:30:30 = 5, Jmn
1995-12-22T09:05:02 = 22, Iju
-0010-09-15T04:44:23 = 15, Jmo
=head3 Ehm (E h:mm a)
2008-02-05T18:30:30 = Jmn 6:30 PM
1995-12-22T09:05:02 = Iju 9:05 AM
-0010-09-15T04:44:23 = Jmo 4:44 AM
=head3 Ehms (E h:mm:ss a)
2008-02-05T18:30:30 = Jmn 6:30:30 PM
1995-12-22T09:05:02 = Iju 9:05:02 AM
-0010-09-15T04:44:23 = Jmo 4:44:23 AM
=head3 Gy (G y)
2008-02-05T18:30:30 = BK 2008
1995-12-22T09:05:02 = BK 1995
-0010-09-15T04:44:23 = KK -10
=head3 GyMMM (G y MMM)
2008-02-05T18:30:30 = BK 2008 Feb
1995-12-22T09:05:02 = BK 1995 Des
-0010-09-15T04:44:23 = KK -10 Sep
=head3 GyMMMEd (G y MMM d, E)
2008-02-05T18:30:30 = BK 2008 Feb 5, Jmn
1995-12-22T09:05:02 = BK 1995 Des 22, Iju
-0010-09-15T04:44:23 = KK -10 Sep 15, Jmo
=head3 GyMMMd (G y MMM d)
2008-02-05T18:30:30 = BK 2008 Feb 5
1995-12-22T09:05:02 = BK 1995 Des 22
-0010-09-15T04:44:23 = KK -10 Sep 15
=head3 H (HH)
2008-02-05T18:30:30 = 18
1995-12-22T09:05:02 = 09
-0010-09-15T04:44:23 = 04
=head3 Hm (HH:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head3 Hms (HH:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Hmsv (HH:mm:ss v)
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Hmv (HH:mm v)
2008-02-05T18:30:30 = 18:30 UTC
1995-12-22T09:05:02 = 09:05 UTC
-0010-09-15T04:44:23 = 04:44 UTC
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 MEd (E, M/d)
2008-02-05T18:30:30 = Jmn, 2/5
1995-12-22T09:05:02 = Iju, 12/22
-0010-09-15T04:44:23 = Jmo, 9/15
=head3 MMM (LLL)
2008-02-05T18:30:30 = Feb
1995-12-22T09:05:02 = Des
-0010-09-15T04:44:23 = Sep
=head3 MMMEd (E, MMM d)
2008-02-05T18:30:30 = Jmn, Feb 5
1995-12-22T09:05:02 = Iju, Des 22
-0010-09-15T04:44:23 = Jmo, Sep 15
=head3 MMMMEd (E, MMMM d)
2008-02-05T18:30:30 = Jmn, Febluali 5
1995-12-22T09:05:02 = Iju, Desemba 22
-0010-09-15T04:44:23 = Jmo, Septemba 15
=head3 MMMMd (MMMM d)
2008-02-05T18:30:30 = Febluali 5
1995-12-22T09:05:02 = Desemba 22
-0010-09-15T04:44:23 = Septemba 15
=head3 MMMd (MMM d)
2008-02-05T18:30:30 = Feb 5
1995-12-22T09:05:02 = Des 22
-0010-09-15T04:44:23 = Sep 15
=head3 Md (M/d)
2008-02-05T18:30:30 = 2/5
1995-12-22T09:05:02 = 12/22
-0010-09-15T04:44:23 = 9/15
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 h (h a)
2008-02-05T18:30:30 = 6 PM
1995-12-22T09:05:02 = 9 AM
-0010-09-15T04:44:23 = 4 AM
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 PM
1995-12-22T09:05:02 = 9:05 AM
-0010-09-15T04:44:23 = 4:44 AM
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 PM
1995-12-22T09:05:02 = 9:05:02 AM
-0010-09-15T04:44:23 = 4:44:23 AM
=head3 hmsv (h:mm:ss a v)
2008-02-05T18:30:30 = 6:30:30 PM UTC
1995-12-22T09:05:02 = 9:05:02 AM UTC
-0010-09-15T04:44:23 = 4:44:23 AM UTC
=head3 hmv (h:mm a v)
2008-02-05T18:30:30 = 6:30 PM UTC
1995-12-22T09:05:02 = 9:05 AM UTC
-0010-09-15T04:44:23 = 4:44 AM UTC
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y)
2008-02-05T18:30:30 = 2008
1995-12-22T09:05:02 = 1995
-0010-09-15T04:44:23 = -10
=head3 yM (M/y)
2008-02-05T18:30:30 = 2/2008
1995-12-22T09:05:02 = 12/1995
-0010-09-15T04:44:23 = 9/-10
=head3 yMEd (E, M/d/y)
2008-02-05T18:30:30 = Jmn, 2/5/2008
1995-12-22T09:05:02 = Iju, 12/22/1995
-0010-09-15T04:44:23 = Jmo, 9/15/-10
=head3 yMMM (MMM y)
2008-02-05T18:30:30 = Feb 2008
1995-12-22T09:05:02 = Des 1995
-0010-09-15T04:44:23 = Sep -10
=head3 yMMMEd (E, MMM d, y)
2008-02-05T18:30:30 = Jmn, Feb 5, 2008
1995-12-22T09:05:02 = Iju, Des 22, 1995
-0010-09-15T04:44:23 = Jmo, Sep 15, -10
=head3 yMMMM (MMMM y)
2008-02-05T18:30:30 = Febluali 2008
1995-12-22T09:05:02 = Desemba 1995
-0010-09-15T04:44:23 = Septemba -10
=head3 yMMMd (y MMM d)
2008-02-05T18:30:30 = 2008 Feb 5
1995-12-22T09:05:02 = 1995 Des 22
-0010-09-15T04:44:23 = -10 Sep 15
=head3 yMd (y-MM-dd)
2008-02-05T18:30:30 = 2008-02-05
1995-12-22T09:05:02 = 1995-12-22
-0010-09-15T04:44:23 = -10-09-15
=head3 yQQQ (QQQ y)
2008-02-05T18:30:30 = L1 2008
1995-12-22T09:05:02 = L4 1995
-0010-09-15T04:44:23 = L3 -10
=head3 yQQQQ (QQQQ y)
2008-02-05T18:30:30 = Lobo ya bosi 2008
1995-12-22T09:05:02 = Lobo ya nne 1995
-0010-09-15T04:44:23 = Lobo ya nnd’atu -10
=head2 Miscellaneous
=head3 Prefers 24 hour time?
Yes
=head3 Local first day of the week
1 (Jumaatatu)
=head1 SUPPORT
See L<DateTime::Locale>.
=cut
| jkb78/extrajnm | local/lib/perl5/DateTime/Locale/ksb.pod | Perl | mit | 9,543 |
/* Part of XPCE --- The SWI-Prolog GUI toolkit
Author: Jan Wielemaker and Anjo Anjewierden
E-mail: J.Wielemaker@cs.vu.nl
WWW: http://www.swi-prolog.org/packages/xpce/
Copyright (c) 2009-2015, University of Amsterdam
VU University Amsterdam
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(pce_dispatch,
[ pce_dispatch/1, % +Options
pce_end_dispatch/0,
pce_call/1 % :Goal
]).
:- use_module(library(pce)).
:- meta_predicate
pce_call(0).
/** <module> Run XPCE in a separate thread
This module allows one to run XPCE in a separate thread =pce=. This is
especially nice if xpce is only used to support the SWI-Prolog
development tools because it ensures that the tools remain responsive
while the main thread executes long-running goals.
This module can be deactivated by setting the flag `xpce_threaded`:
==
:- set_prolog_flag(xpce_threaded, false).
==
*/
:- predicate_options(pce_dispatch/1, 1,
[ pass_to(system:thread_create/3, 3)
]).
%! pce_dispatch(+Options) is det.
%
% Create a new thread =pce= that takes care of the XPCE message
% loop. This predicate has no effect if dispatching is already on
% another thread than the =main=. The loop can be ended using
% pce_end_dispatch/0.
pce_dispatch(Options) :-
% serialise concurrent start attempts on a single mutex
with_mutex(pce_dispatch, pce_dispatch_(Options)).
% Clause 1: a `pce` dispatcher thread already exists; nothing to do.
pce_dispatch_(_) :-
pce_thread(pce),
!.
% Clause 2: create the dispatcher thread (alias `pce`) and block until it
% sends pce_dispatch back, i.e. until it has taken over XPCE dispatching.
pce_dispatch_(Options) :-
thread_self(Me),
thread_create(pce_dispatcher(Me), _,
[ alias(pce),
debug(false)
| Options
]),
thread_get_message(pce_dispatch).
:- dynamic
end_pce_dispatcher/1.
% Body of the `pce` thread: register itself as the XPCE thread, notify
% Origin that startup is complete, then dispatch GUI events (repeat/catch
% loop) until end/1 has asserted end_pce_dispatcher/1, finally sending an
% acknowledgement to whoever requested the shutdown.
pce_dispatcher(Origin) :-
set_pce_thread,
thread_self(Me),
retractall(pce:pce_thread(_)),
assert(pce:pce_thread(Me)),
thread_send_message(Origin, pce_dispatch),
set_prolog_flag(debug_on_error, false), % avoid the debugger
set_prolog_flag(generate_debug_info, true), % Started with false
repeat,
catch(pce_dispatch, E, true),
( var(E)
-> true
; print_message(error, E)
),
retract(end_pce_dispatcher(Sender)),
!,
thread_send_message(Sender, end_pce_dispatcher).
% Runs inside the pce thread: records who asked the dispatcher to stop,
% which makes the retract/1 in pce_dispatcher/1 succeed and end its loop.
end(Requester) :-
assert(end_pce_dispatcher(Requester)).
%! pce_end_dispatch is det.
%
% End the XPCE dispatcher loop started with pce_dispatch/1.
pce_end_dispatch :-
thread_self(Me),
% ask the dispatcher (via end/1) to stop, then wait for its acknowledgement
in_pce_thread(end(Me)),
thread_get_message(end_pce_dispatcher),
% the dispatcher has exited; reclaim XPCE for the calling thread
set_pce_thread,
thread_self(Me),
retractall(pce:pce_thread(_)),
assert(pce:pce_thread(Me)).
%! pce_call(:Goal) is det.
%
% Run Goal in the XPCE thread.
%
% @deprecated New code should used in_pce_thread/1.
% Backward-compatibility wrapper around in_pce_thread/1 (see deprecation
% note above).
pce_call(Goal) :-
in_pce_thread(Goal).
:- public start_dispatch/0.
% Load-time hook: start the dispatcher thread unless the xpce_threaded
% flag is off or pce_dispatch/1 is unavailable; always succeeds.
start_dispatch :-
( current_prolog_flag(xpce_threaded, true),
current_predicate(pce_dispatch/1)
-> pce_dispatch([])
; true
).
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/lib/swipl/xpce/prolog/lib/pce_dispatch.pl | Perl | mit | 4,440 |
#! /usr/bin/env perl
##**************************************************************
##
## Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
## University of Wisconsin-Madison, WI.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##**************************************************************
# Append job id $ARGV[0] as a new line to file $ARGV[1] by rewriting the
# file through a temporary ".new" copy and moving it into place.
my $arg = $ARGV[0];
my $basefile = $ARGV[1];
my $new = $basefile . ".new";
my $old = $basefile;
# BUG FIX: the original used unchecked 2-arg opens on bareword handles; a
# failed open silently truncated the data to an empty output file.
open(my $oldfh, '<', $old) or die "Cannot read $old: $!\n";
open(my $newfh, '>', $new) or die "Cannot write $new: $!\n";
while (my $line = <$oldfh>)
{
	# strip trailing CR/LF regardless of platform line endings
	fullchomp($line);
	print {$newfh} "$line\n";
}
# record this job at the end of the file
print {$newfh} "$arg\n";
close($oldfh);
close($newfh) or die "Cannot close $new: $!\n";
# replace the original file with the rewritten copy
system("mv $new $old");
print "Job $arg done\n";
exit(0);
sub fullchomp
# Strip all trailing newline (\n) characters, then all trailing carriage
# returns (\r), from every argument in place - @_ aliases the caller's
# variables, so they are modified directly. With no arguments, a copy of
# $_ is processed (the caller's $_ is left untouched). Returns 0.
{
	push(@_, $_) if (scalar(@_) == 0);
	for my $value (@_) {
		$value =~ s/\n+\z//;
		$value =~ s/\r+\z//;
	}
	return (0);
}
| clalancette/condor-dcloud | src/condor_tests/x_do_niceuser.pl | Perl | apache-2.0 | 1,284 |
package DDG::Spice::Twitter;
# ABSTRACT: Show a Twitter user's latest tweet.

use strict;
use warnings;
use DDG::Spice;

primary_example_queries '@duckduckgo';
secondary_example_queries "twitter yegg";
description "Shows a user's latest tweet.";
name "Twitter";
code_url "https://github.com/duckduckgo/zeroclickinfo-spice/blob/master/lib/DDG/Spice/Twitter.pm";
topics "everyday", "social";
category "time_sensitive";
attribution github  => ["https://github.com/duckduckgo/", "DuckDuckGo"],
            twitter => ["https://twitter.com/duckduckgo", "DuckDuckGo"],
            github  => ["https://github.com/ecounysis/", "Eric Christensen"],
            twitter => ["https://twitter.com/ecounysis", "Eric Christensen"],
            github  => ["https://github.com/laouji/", "Crimson Thompson"],
            twitter => ["https://twitter.com/laouji", "Crimson Thompson"];

# NOTE: the query-string parameter is "current=1"; an earlier revision had
# "&curren" HTML-entity-decoded into the literal "¤" character, producing a
# broken URL ("...{{callback}}¤t=1").
spice to => 'https://duckduckgo.com/tw.js?user=$1&callback={{callback}}&current=1';

# Match "@user", "twitter @user" or "twitter user".
triggers query => qr/^(?:twitter\s)?@([a-z0-9_]+)$|^twitter\s([a-z0-9_]+)$/i;

# Words that look like usernames but must not trigger the bare
# "twitter <word>" form (loaded from the share directory).
my $skip = join "|", share('skipwords.txt')->slurp(chomp => 1);

handle matches => sub {
    # "@user" form: always accept.
    if ($1) {
        return $1;
    }
    # "twitter user" form: accept unless the word is on the skip list.
    elsif ($2) {
        return $2 unless ($2 =~ m/^($skip)$/i);
    }
    return;
};

1;
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
show_method_status.pl - script to print "Status" of API methods
=head1 SYNOPSIS
show_method_status.pl [arguments]
Optional arguments:
--path, --root=PATH directory root path to check (use absolute
path or relative to cwd)
--exclude=LIST don't show LISTed statuses
--include=LIST only show LISTed statuses
--show_empty, --show-empty, --empty print method name even if there's no
status to report for it (only required
when using --exclude or --include)
--conffile, --conf=FILE read parameters from FILE
--logfile, --log=FILE log to FILE (default: *STDOUT)
--logpath=PATH write logfile to PATH (default: .)
--logappend, --log_append append to logfile (default: truncate)
-i, --interactive run script interactively (default: false)
-h, --help, -? print help (this message)
=head1 DESCRIPTION
This script will print the "Status" documentation for each method in each perl
module found in the directory specified by --path (recursively). Output can be
limited to certain Statuses with the --exclude or --include options.
=head1 EXAMPLES
Show all methods which are 'At Risk':
$ ./show_method_status.pl --path ../../modules/Bio/EnsEMBL --include 'at risk'
Show all methods except those that are 'Stable':
$ ./show_method_status.pl --path ../../modules/Bio/EnsEMBL --exclude 'stable'
=head1 AUTHOR
Patrick Meidl <meidl@ebi.ac.uk>, Ensembl core API team
=head1 CONTACT
Please post comments/questions to the Ensembl development list
<http://lists.ensembl.org/mailman/listinfo/dev>
=cut
use strict;
use warnings;
no warnings 'uninitialized';
use FindBin qw($Bin);
use Bio::EnsEMBL::Utils::ConfParser;
use Bio::EnsEMBL::Utils::Logger;
use File::Find qw(find);
use Cwd qw(getcwd abs_path);
# parse configuration and commandline arguments
my $conf = new Bio::EnsEMBL::Utils::ConfParser(
  -SERVERROOT => "$Bin/../../..",
  -DEFAULT_CONF => ""
);
# NOTE(review): looks like leftover debugging output — confirm and remove.
warn $Bin;
$conf->parse_options(
  'path|root=s' => 0,
  'exclude=s@' => 0,
  'include=s@' => 0,
  'show_empty|show-empty|empty' => 0,
);
# default to the current working directory when no --path was given
$conf->param('path', '.') unless $conf->param('path');
# get log filehandle and print heading and parameters to logfile
my $logger = new Bio::EnsEMBL::Utils::Logger(
  -LOGFILE => $conf->param('logfile'),
  -LOGPATH => $conf->param('logpath'),
  -LOGAPPEND => $conf->param('logappend'),
  -LOGLEVEL => $conf->param('loglevel'),
);
# initialise log
$logger->init_log($conf->list_param_values);
# recursively process all files under --path (relative paths are
# resolved against the current working directory first)
my $path = $conf->param('path');
$path = abs_path(getcwd."/$path") if ($path =~ /^\./);
find(\&parse_files, $path);
# finish logfile
$logger->finish_log;
### END main ###
# File::Find callback: scan one file for "=head2 <method>" POD blocks and
# report each method's "Status :" line, honouring --exclude/--include.
sub parse_files {
  my $file = $_;

  # only read perl modules
  return unless ($file =~ /\.pm$/);

  # Lexical filehandle with a three-argument open; the original used a
  # bareword handle with a two-argument open and never closed it.
  open(my $fh, '<', $file) or die "Unable to open $file: $!\n";

  my $pod_flag;
  my $method;
  my $status;
  my $result;

  while (my $line = <$fh>) {
    chomp $line;

    if ($line =~ /=head2 (.*)$/) {
      # start of method pod; pad the name for aligned output
      $method = sprintf("%-40s", $1);
      $pod_flag = 1;

    } elsif ($line =~ /Status\s*:\s*(.+)/) {
      # "Status : <value>" line inside the pod block
      $status = $1;

    } elsif ($line =~ /=cut/ and $pod_flag) {
      # end of method pod: decide whether to report this method.
      # set status to unknown if not found
      $status ||= 'unknown';

      # BUGFIX: the original jumped to the next line here when a status was
      # filtered out, leaving $status/$pod_flag set and mis-attributing the
      # stale status to the next method without its own Status line.  Use a
      # flag so the per-method state is always reset.
      my $skip_report = 0;

      # exclude specified statuses from output
      foreach my $pattern ($conf->param('exclude')) {
        $skip_report = 1 if ($status =~ /$pattern/i);
      }

      # only include specified statuses in output
      foreach my $pattern ($conf->param('include')) {
        $skip_report = 1 if ($pattern and !($status =~ /$pattern/i));
      }

      $result .= " $method $status\n" unless $skip_report;

      $status = undef;
      $pod_flag = 0;
    }
  }

  close($fh);

  # log result for this module (path shown relative to the search root)
  if ($result or $conf->param('show_empty')) {
    my $filepath = $File::Find::name;
    $filepath =~ s/$path\///;
    $logger->info("\n$filepath\n$result");
  }
}
| mjg17/ensembl | misc-scripts/utilities/show_method_status.pl | Perl | apache-2.0 | 4,891 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Compara::Production::DnaCollectionAdaptor
=head1 DESCRIPTION
Adpter to DnaCollection objects/tables
DnaCollection is an object to hold a super-set of DnaFragChunkSet bjects.
Used in production to encapsulate particular genome/region/chunk/group DNA set
from the others. To allow system to blast against self, and isolate different
chunk/group sets of the same genome from each other.
=head1 APPENDIX
The rest of the documentation details each of the object methods. Internal methods are usually preceded with a _
=cut
package Bio::EnsEMBL::Compara::Production::DBSQL::DnaCollectionAdaptor;
use strict;
use warnings;
use Bio::EnsEMBL::Compara::Production::DnaCollection;
use Bio::EnsEMBL::Compara::Production::DnaFragChunk;
use Bio::EnsEMBL::Compara::Production::DnaFragChunkSet;
use Bio::EnsEMBL::Hive::Utils 'stringify';
use Bio::EnsEMBL::Utils::Exception;
use Bio::EnsEMBL::Utils::Argument;
use Bio::EnsEMBL::DBSQL::BaseAdaptor;
our @ISA = qw(Bio::EnsEMBL::DBSQL::BaseAdaptor);
#
# STORE METHODS
#
################
=head2 store
Arg [1] : Bio::EnsEMBL::Compara::Production::DnaCollection
Example :
Description: stores the set of DnaFragChunk objects
Returntype : int dbID of DnaCollection
Exceptions :
Caller :
=cut
# Store a DnaCollection row, reusing an existing row when the UNIQUE
# description already exists.  Sets and returns the collection's dbID.
sub store {
  my ($self, $collection) = @_;

  # Refuse anything that is not a DnaCollection.
  unless($collection->isa('Bio::EnsEMBL::Compara::Production::DnaCollection')) {
    throw("set arg must be a [Bio::EnsEMBL::Compara::Production::DnaCollection] "
        . "not a $collection");
  }

  # BUGFIX: the original used "my $x = EXPR if COND;", whose behaviour when
  # COND is false is undefined (the lexical may retain a stale value).  A
  # plain assignment yields undef in exactly the cases the condition
  # guarded against, which is what the placeholders below expect.
  my $description = $collection->description;
  my $dump_loc    = $collection->dump_loc;

  my $masking_options;
  if ($collection->masking_options) {
    if (ref($collection->masking_options)) {
      # structure loaded from a masking_option_file: serialise it
      $masking_options = stringify($collection->masking_options);
    } else {
      $masking_options = $collection->masking_options;
    }
  }

  my $sql = "INSERT ignore INTO dna_collection (description, dump_loc, masking_options) VALUES (?, ?, ?)";
  my $sth = $self->prepare($sql);
  my $insertCount = $sth->execute($description, $dump_loc, $masking_options);

  if($insertCount>0) {
    $collection->dbID( $self->dbc->db_handle->last_insert_id(undef, undef, 'dna_collection', 'dna_collection_id') );
    $sth->finish;
  } else {
    # INSERT ignore hit the UNIQUE description constraint: fetch the
    # existing row's id with a SELECT instead.
    $sth->finish;
    my $sth2 = $self->prepare("SELECT dna_collection_id FROM dna_collection WHERE description=?");
    $sth2->execute($description);
    my($id) = $sth2->fetchrow_array();
    warn("DnaCollectionAdaptor: insert failed, but description SELECT failed too") unless($id);
    $collection->dbID($id);
    $sth2->finish;
  }

  # Return the dbID, as documented in the POD above.
  return $collection->dbID;
}
#
# FETCH METHODS
#
################
=head2 fetch_by_dbID
Arg [1] : int $id
the unique database identifier for the feature to be obtained
Example : $feat = $adaptor->fetch_by_dbID(1234);
Description: Returns the feature created from the database defined by the
the id $id.
Returntype : Bio::EnsEMBL::Compara::Production::DnaCollection
Exceptions : thrown if $id is not defined
Caller : general
=cut
# Fetch a single DnaCollection by its primary key.  Throws when no id is
# supplied; returns undef when no row matches.
sub fetch_by_dbID {
  my ($self, $id) = @_;

  throw("fetch_by_dbID must have an id") unless defined $id;

  # Build a constraint of the form "t1.table1_id = <id>" from the primary
  # table's name and alias.
  my ($table_name, $table_alias) = @{ ($self->_tables)[0] };
  my $constraint = sprintf('%s.%s_id = %s', $table_alias, $table_name, $id);

  # _generic_fetch returns a listref; we want its first (only) element.
  my ($collection) = @{ $self->_generic_fetch($constraint) };
  return $collection;
}
=head2 fetch_by_set_description
Arg [1] : string $set_description
Example :
Description:
Returntype :
Exceptions :
Caller :
=cut
# Fetch a single DnaCollection whose description matches exactly.
# Throws when no description is supplied; returns undef on no match.
sub fetch_by_set_description {
  my ($self, $set_description) = @_;

  unless(defined $set_description) {
    throw("fetch_by_set_description must have a description");
  }

  # SECURITY/BUGFIX: the description is interpolated into raw SQL, so a
  # value containing a quote would previously break (or inject into) the
  # statement.  Backslash-escape quoting characters (MySQL semantics; this
  # adaptor already relies on MySQL-specific "INSERT ignore").
  my $escaped = $set_description;
  $escaped =~ s/(['\\])/\\$1/g;

  # constraint of the form "dc.description = '...'"
  my $constraint = "dc.description = '$escaped'";

  # _generic_fetch returns a listref; take its first element.
  my ($obj) = @{$self->_generic_fetch($constraint)};
  return $obj;
}
#
# INTERNAL METHODS
#
###################
sub _tables {
    # Primary table name and the SQL alias used in generated queries.
    return ( ['dna_collection', 'dc'] );
}
sub _columns {
    # Columns selected by _generic_fetch, in the order consumed by
    # _objs_from_sth.
    return qw(
        dc.dna_collection_id
        dc.description
        dc.dump_loc
        dc.masking_options
    );
}
sub _default_where_clause {
    # This adaptor applies no implicit WHERE conditions.
    return '';
}
sub _final_clause {
    # No trailing ORDER BY / LIMIT clause is appended by this adaptor.
    return '';
}
=head2 _generic_fetch
Arg [1] : (optional) string $constraint
An SQL query constraint (i.e. part of the WHERE clause)
Arg [2] : (optional) string $logic_name
the logic_name of the analysis of the features to obtain
Example : $fts = $a->_generic_fetch('contig_id in (1234, 1235)', 'Swall');
Description: Performs a database fetch and returns feature objects in
contig coordinates.
Returntype : listref of Bio::EnsEMBL::SeqFeature in contig coordinates
Exceptions : none
Caller : BaseFeatureAdaptor, ProxyDnaAlignFeatureAdaptor::_generic_fetch
=cut
sub _generic_fetch {
  my ($self, $constraint, $join) = @_;

  my @tables = $self->_tables;
  my $columns = join(', ', $self->_columns());

  # Optional extra joins: each entry of $join is a listref of
  # [table-spec, join-condition, extra-columns].
  if ($join) {
    foreach my $single_join (@{$join}) {
      my ($tablename, $condition, $extra_columns) = @{$single_join};
      if ($tablename && $condition) {
        push @tables, $tablename;

        # AND the join condition onto any caller-supplied constraint.
        if($constraint) {
          $constraint .= " AND $condition";
        } else {
          $constraint = " $condition";
        }
      }
      if ($extra_columns) {
        $columns .= ", " . join(', ', @{$extra_columns});
      }
    }
  }

  #construct a nice table string like 'table1 t1, table2 t2'
  my $tablenames = join(', ', map({ join(' ', @$_) } @tables));

  my $sql = "SELECT $columns FROM $tablenames";

  my $default_where = $self->_default_where_clause;
  my $final_clause = $self->_final_clause;

  #append a where clause if it was defined
  if($constraint) {
    $sql .= " WHERE $constraint ";
    if($default_where) {
      $sql .= " AND $default_where ";
    }
  } elsif($default_where) {
    $sql .= " WHERE $default_where ";
  }

  #append additional clauses which may have been defined
  $sql .= " $final_clause";

  my $sth = $self->prepare($sql);
  $sth->execute;

  #print STDERR $sql,"\n";

  # Row-to-object conversion happens in _objs_from_sth, which also
  # finishes the statement handle.
  return $self->_objs_from_sth($sth);
}
# Convert fetched rows into DnaCollection objects; rows sharing a
# dna_collection_id map onto one object.  Returns a listref.
sub _objs_from_sth {
  my ($self, $sth) = @_;

  # Keyed by dna_collection_id so repeated rows reuse one object.
  my %collections_hash = ();

  while( my $row_hashref = $sth->fetchrow_hashref()) {
    my $collection = $collections_hash{$row_hashref->{'dna_collection_id'}};
    unless($collection) {
      # First row for this id: construct the object.
      $collection = Bio::EnsEMBL::Compara::Production::DnaCollection->new(
                        -dbid => $row_hashref->{'dna_collection_id'},
                        -description => $row_hashref->{'description'},
                        -dump_loc => $row_hashref->{'dump_loc'},
                        -masking_options => $row_hashref->{'masking_options'},
                        -adaptor => $self
      );
      $collections_hash{$collection->dbID} = $collection;
    }

    # NOTE(review): for the first row of each collection these setters
    # repeat what the constructor above already did; presumably they exist
    # for subsequent rows of the same id — confirm before simplifying.
    if (defined($row_hashref->{'description'})) {
      $collection->description($row_hashref->{'description'});
    }
    if (defined($row_hashref->{'dump_loc'})) {
      $collection->dump_loc($row_hashref->{'dump_loc'});
    }
    if (defined($row_hashref->{'masking_options'})) {
      $collection->masking_options($row_hashref->{'masking_options'});
    }
  }
  $sth->finish;

  my @collections = values( %collections_hash );
  return \@collections;
}
1;
| ckongEbi/ensembl-compara | modules/Bio/EnsEMBL/Compara/Production/DBSQL/DnaCollectionAdaptor.pm | Perl | apache-2.0 | 8,809 |
#!/usr/bin/env perl
# Copyright (c) 2019, Google Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# This file defines helper functions for crypto/test/abi_test.h on ppc64le. See
# that header for details on how to use this.
#
# For convenience, this file is linked into libcrypto, where consuming builds
# already support architecture-specific sources. The static linker should drop
# this code in non-test binaries. This includes a shared library build of
# libcrypto, provided --gc-sections or equivalent is used.
#
# References:
#
# ELFv2: http://openpowerfoundation.org/wp-content/uploads/resources/leabi/leabi-20170510.pdf
use strict;
my $flavour = shift;
my $output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$0 =~ m/(.*[\/\\])[^\/\\]+$/;
my $dir = $1;
my $xlate;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";
open OUT, "| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT = *OUT;
unless ($flavour =~ /linux.*64le/) {
die "This file only supports the ELFv2 ABI, used by ppc64le";
}
my $code = "";
sub load_or_store_regs {
  # $op is "l" or "st".
  # Appends assembly to $code that loads or stores every callee-saved
  # register — v20-v31 (16 bytes each), r14-r31 and f14-f31 (8 bytes
  # each) — relative to $base_offset($base_reg).  Offsets 0/192/336 match
  # the CallerState layout described in the stack-frame comment below.
  my ($op, $base_reg, $base_offset) = @_;

  # Vector registers.
  foreach (20..31) {
    my $offset = $base_offset + ($_ - 20) * 16;
    # Vector registers only support indexed register addressing.
    $code .= "\tli\tr11, $offset\n";
    $code .= "\t${op}vx\tv$_, r11, $base_reg\n";
  }

  # Save general registers.
  foreach (14..31) {
    my $offset = $base_offset + 192 + ($_ - 14) * 8;
    $code .= "\t${op}d\tr$_, $offset($base_reg)\n";
  }

  # Save floating point registers.
  foreach (14..31) {
    my $offset = $base_offset + 336 + ($_ - 14) * 8;
    $code .= "\t${op}fd\tf$_, $offset($base_reg)\n";
  }
}
sub load_regs {
    # Emit loads ("l" forms) of all callee-saved registers from the save
    # area at $offset($reg).
    my ($reg, $offset) = @_;
    load_or_store_regs('l', $reg, $offset);
}
sub store_regs {
    # Emit stores ("st" forms) of all callee-saved registers into the save
    # area at $offset($reg).
    my ($reg, $offset) = @_;
    load_or_store_regs('st', $reg, $offset);
}
my ($func, $state, $argv, $argc) = ("r3", "r4", "r5", "r6");
$code .= <<____;
.machine "any"
.text
# abi_test_trampoline loads callee-saved registers from |state|, calls |func|
# with |argv|, then saves the callee-saved registers into |state|. It returns
# the result of |func|. The |unwind| argument is unused.
# uint64_t abi_test_trampoline(void (*func)(...), CallerState *state,
# const uint64_t *argv, size_t argc,
# uint64_t unwind);
.globl abi_test_trampoline
.align 5
abi_test_trampoline:
# LR is saved into the caller's stack frame.
mflr r0
std r0, 16(r1)
# Allocate 66*8 = 528 bytes of stack frame. From the top of the stack
# to the bottom, the stack frame is:
#
# 0(r1) - Back chain pointer
# 8(r1) - CR save area
# 16(r1) - LR save area (for |func|)
# 24(r1) - TOC pointer save area
# 32(r1) - Saved copy of |state|
# 40(r1) - Padding
# 48(r1) - Vector register save area (v20-v31, 12 registers)
# 240(r1) - General register save area (r14-r31, 18 registers)
# 384(r1) - Floating point register save area (f14-f31, 18 registers)
#
# Note the layouts of the register save areas and CallerState match.
#
# In the ELFv2 ABI, the parameter save area is optional if the function
# is non-variadic and all parameters fit in registers. We only support
# such functions, so we omit it to test that |func| does not rely on it.
stdu r1, -528(r1)
mfcr r0
std r0, 8(r1) # Save CR
std r2, 24(r1) # Save TOC
std $state, 32(r1) # Save |state|
____
# Save registers to the stack.
store_regs("r1", 48);
# Load registers from the caller.
load_regs($state, 0);
$code .= <<____;
# Load CR from |state|.
ld r0, 480($state)
mtcr r0
# Move parameters into temporary registers so they are not clobbered.
addi r11, $argv, -8 # Adjust for ldu below
mr r12, $func
# Load parameters into registers.
cmpdi $argc, 0
beq .Largs_done
mtctr $argc
ldu r3, 8(r11)
bdz .Largs_done
ldu r4, 8(r11)
bdz .Largs_done
ldu r5, 8(r11)
bdz .Largs_done
ldu r6, 8(r11)
bdz .Largs_done
ldu r7, 8(r11)
bdz .Largs_done
ldu r8, 8(r11)
bdz .Largs_done
ldu r9, 8(r11)
bdz .Largs_done
ldu r10, 8(r11)
.Largs_done:
li r2, 0 # Clear TOC to test |func|'s global entry point
mtctr r12
bctrl
ld r2, 24(r1) # Restore TOC
ld $state, 32(r1) # Reload |state|
____
# Output resulting registers to the caller.
store_regs($state, 0);
# Restore registers from the stack.
load_regs("r1", 48);
$code .= <<____;
mfcr r0
std r0, 480($state) # Output CR to caller
ld r0, 8(r1)
mtcrf 0b00111000, r0 # Restore CR2-CR4
addi r1, r1, 528
ld r0, 16(r1) # Restore LR
mtlr r0
blr
.size abi_test_trampoline,.-abi_test_trampoline
____
# abi_test_clobber_* clobbers the corresponding register. These are used to test
# the ABI-testing framework.
foreach (0..31) {
# r1 is the stack pointer. r13 is the thread pointer.
next if ($_ == 1 || $_ == 13);
$code .= <<____;
.globl abi_test_clobber_r$_
.align 5
abi_test_clobber_r$_:
li r$_, 0
blr
.size abi_test_clobber_r$_,.-abi_test_clobber_r$_
____
}
foreach (0..31) {
$code .= <<____;
.globl abi_test_clobber_f$_
.align 4
abi_test_clobber_f$_:
li r0, 0
# Use the red zone.
std r0, -8(r1)
lfd f$_, -8(r1)
blr
.size abi_test_clobber_f$_,.-abi_test_clobber_f$_
____
}
foreach (0..31) {
$code .= <<____;
.globl abi_test_clobber_v$_
.align 4
abi_test_clobber_v$_:
vxor v$_, v$_, v$_
blr
.size abi_test_clobber_v$_,.-abi_test_clobber_v$_
____
}
foreach (0..7) {
# PPC orders CR fields in big-endian, so the mask is reversed from what one
# would expect.
my $mask = 1 << (7 - $_);
$code .= <<____;
.globl abi_test_clobber_cr$_
.align 4
abi_test_clobber_cr$_:
# Flip the bits on cr$_ rather than setting to zero. With a four-bit
# register, zeroing it will do nothing 1 in 16 times.
mfcr r0
not r0, r0
mtcrf $mask, r0
blr
.size abi_test_clobber_cr$_,.-abi_test_clobber_cr$_
____
}
$code .= <<____;
.globl abi_test_clobber_ctr
.align 4
abi_test_clobber_ctr:
li r0, 0
mtctr r0
blr
.size abi_test_clobber_ctr,.-abi_test_clobber_ctr
.globl abi_test_clobber_lr
.align 4
abi_test_clobber_lr:
mflr r0
mtctr r0
li r0, 0
mtlr r0
bctr
.size abi_test_clobber_lr,.-abi_test_clobber_lr
____
print $code;
close STDOUT or die "error closing STDOUT: $!";
| grpc/grpc-ios | native_src/third_party/boringssl-with-bazel/src/crypto/test/asm/trampoline-ppc.pl | Perl | apache-2.0 | 7,012 |
package AsposeBarCodeCloud::Object::BinarizationHints;

require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;

use base "AsposeBarCodeCloud::Object::BaseObject";

# NOTE: This class is auto generated by the swagger code generator
# program.  Do not edit the class manually.

# Swagger type declarations for each attribute (this model has none).
my $swagger_types = {};

# Mapping from Perl attribute names to JSON field names (none here).
my $attribute_map = {};

# Construct a new, empty BinarizationHints object; any %args are ignored
# because this model declares no attributes.
sub new {
    my ($class, %args) = @_;
    my $self = {};
    return bless $self, $class;
}

# Return the swagger type map for this class.
sub get_swagger_types {
    return $swagger_types;
}

# Return the attribute name mapping for this class.
sub get_attribute_map {
    return $attribute_map;
}

1;
| farooqsheikhpk/Aspose.BarCode-for-Cloud | SDKs/Aspose.BarCode-Cloud-SDK-for-Perl/lib/AsposeBarCodeCloud/Object/BinarizationHints.pm | Perl | mit | 772 |
package AsposePdfCloud::Object::AnnotationResponse;

require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;

use base "AsposePdfCloud::Object::BaseObject";

# NOTE: This class is auto generated by the swagger code generator
# program.  Do not edit the class manually.

# Swagger type declarations for each attribute.
my $swagger_types = {
    'Annotation' => 'Annotation',
    'Code'       => 'string',
    'Status'     => 'string',
};

# Mapping from Perl attribute names to JSON field names (identical here).
my $attribute_map = {
    'Annotation' => 'Annotation',
    'Code'       => 'Code',
    'Status'     => 'Status',
};

# Construct a new AnnotationResponse, copying the known attributes out of
# the supplied arguments.
sub new {
    my ($class, %args) = @_;
    my $self = {
        'Annotation' => $args{'Annotation'},
        'Code'       => $args{'Code'},
        'Status'     => $args{'Status'},
    };
    return bless $self, $class;
}

# Return the swagger type map for this class.
sub get_swagger_types {
    return $swagger_types;
}

# Return the attribute name mapping for this class.
sub get_attribute_map {
    return $attribute_map;
}

1;
| asposepdf/Aspose_Pdf_Cloud | SDKs/Aspose.Pdf-Cloud-SDK-for-Perl/lib/AsposePdfCloud/Object/AnnotationResponse.pm | Perl | mit | 1,056 |
=pod
=head1 NAME
SSL_CTX_set0_verify_cert_store, SSL_CTX_set1_verify_cert_store,
SSL_CTX_set0_chain_cert_store, SSL_CTX_set1_chain_cert_store,
SSL_set0_verify_cert_store, SSL_set1_verify_cert_store,
SSL_set0_chain_cert_store, SSL_set1_chain_cert_store - set certificate
verification or chain store
=head1 SYNOPSIS
#include <openssl/ssl.h>
int SSL_CTX_set0_verify_cert_store(SSL_CTX *ctx, X509_STORE *st);
int SSL_CTX_set1_verify_cert_store(SSL_CTX *ctx, X509_STORE *st);
int SSL_CTX_set0_chain_cert_store(SSL_CTX *ctx, X509_STORE *st);
int SSL_CTX_set1_chain_cert_store(SSL_CTX *ctx, X509_STORE *st);
int SSL_set0_verify_cert_store(SSL *ctx, X509_STORE *st);
int SSL_set1_verify_cert_store(SSL *ctx, X509_STORE *st);
int SSL_set0_chain_cert_store(SSL *ctx, X509_STORE *st);
int SSL_set1_chain_cert_store(SSL *ctx, X509_STORE *st);
=head1 DESCRIPTION
SSL_CTX_set0_verify_cert_store() and SSL_CTX_set1_verify_cert_store()
set the certificate store used for certificate verification to B<st>.
SSL_CTX_set0_chain_cert_store() and SSL_CTX_set1_chain_cert_store()
set the certificate store used for certificate chain building to B<st>.
SSL_set0_verify_cert_store(), SSL_set1_verify_cert_store(),
SSL_set0_chain_cert_store() and SSL_set1_chain_cert_store() are similar
except they apply to SSL structure B<ssl>.
All these functions are implemented as macros. Those containing a B<1>
increment the reference count of the supplied store so it must
be freed at some point after the operation. Those containing a B<0> do
not increment reference counts and the supplied store B<MUST NOT> be freed
after the operation.
=head1 NOTES
The stores pointers associated with an SSL_CTX structure are copied to any SSL
structures when SSL_new() is called. As a result SSL structures will not be
affected if the parent SSL_CTX store pointer is set to a new value.
The verification store is used to verify the certificate chain sent by the
peer: that is an SSL/TLS client will use the verification store to verify
the server's certificate chain and a SSL/TLS server will use it to verify
any client certificate chain.
The chain store is used to build the certificate chain.
Details of the chain building and checking process are described in
L<openssl-verification-options(1)/Certification Path Building> and
L<openssl-verification-options(1)/Certification Path Validation>.
If the mode B<SSL_MODE_NO_AUTO_CHAIN> is set, or a certificate chain is
already configured (for example using functions such as
L<SSL_CTX_add1_chain_cert(3)> or
L<SSL_CTX_add_extra_chain_cert(3)>), then
automatic chain building is disabled.
If the chain or the verification store is not set then the store associated
with the parent SSL_CTX is used instead to retain compatibility with previous
versions of OpenSSL.
=head1 RETURN VALUES
All these functions return 1 for success and 0 for failure.
=head1 SEE ALSO
L<ssl(7)>,
L<SSL_CTX_add_extra_chain_cert(3)>
L<SSL_CTX_set0_chain(3)>
L<SSL_CTX_set1_chain(3)>
L<SSL_CTX_add0_chain_cert(3)>
L<SSL_CTX_add1_chain_cert(3)>
L<SSL_set0_chain(3)>
L<SSL_set1_chain(3)>
L<SSL_add0_chain_cert(3)>
L<SSL_add1_chain_cert(3)>
L<SSL_CTX_build_cert_chain(3)>
L<SSL_build_cert_chain(3)>
=head1 HISTORY
These functions were added in OpenSSL 1.0.2.
=head1 COPYRIGHT
Copyright 2013-2021 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/SSL_CTX_set1_verify_cert_store.pod | Perl | bsd-3-clause | 3,697 |
package DDG::Spice::Parking;
# ABSTRACT: Search for parking on parkingpanda.com

use strict;
use DDG::Spice;
use Text::Trim;

triggers any => 'parking panda', 'parkingpanda', 'parking', 'to park';

spice to => 'https://www.parkingpanda.com/api/v2/Locations/?ref=duckduck&search=$1&defaultSearch=$2';
spice wrap_jsonp_callback => 1;
spice proxy_cache_valid => '200 304 30m';
spice from => '(.+?)/(.+)';

handle remainder => sub {
    return unless $_; # Guard against "no answer"

    # Strip nouns naming kinds of parking spaces ("garage", "lot", ...) so
    # only the location text remains.
    s/\b(garages?|decks?|ramps?|lots?|spots?|spaces?)\b//i;

    # Keep only what follows a location preposition
    # ("parking near downtown" -> "downtown").
    s/.*\b(in|near|for|at|by|around)\b\s*(.*)/$2/i;

    # Text::Trim::trim with no arguments in void context trims $_ in place.
    trim();

    # Confirm we still have something to search.
    return unless m/^\w+/;

    # If we have it, include the user's local timezone alongside the
    # search term, as the spice's default-search parameter.
    if ($loc && $loc->time_zone){
        return $_, $loc->time_zone;
    }
    return $_;
};

1;
| mr-karan/zeroclickinfo-spice | lib/DDG/Spice/Parking.pm | Perl | apache-2.0 | 963 |
use strict;
use warnings;
use Cwd;
use Config;
use File::Basename qw(basename dirname);
use File::Path qw(rmtree);
use PostgresNode;
use TestLib;
use Test::More tests => 106 + 13;
program_help_ok('pg_basebackup');
program_version_ok('pg_basebackup');
program_options_handling_ok('pg_basebackup');
my $tempdir = TestLib::tempdir;
my $node = get_new_node('main');
# Set umask so test directories and files are created with default permissions
umask(0077);
# Initialize node without replication settings
$node->init(extra => ['--data-checksums']);
$node->start;
my $pgdata = $node->data_dir;
$node->command_fails(['pg_basebackup', '--target-gp-dbid', '123'],
'pg_basebackup needs target directory specified');
# Some Windows ANSI code pages may reject this filename, in which case we
# quietly proceed without this bit of test coverage.
if (open my $badchars, '>>', "$tempdir/pgdata/FOO\xe0\xe0\xe0BAR")
{
print $badchars "test backup of file with non-UTF8 name\n";
close $badchars;
}
$node->set_replication_conf();
system_or_bail 'pg_ctl', '-D', $pgdata, 'reload';
# Without --target-gp-dbid, pg_basebackup must refuse to run (GPDB needs
# the target segment dbid to rewrite tablespace paths).
# Fixes typo in the test description ("specifiying" -> "specifying").
command_fails(['pg_basebackup', '-D', "$tempdir/backup" ],
    'pg_basebackup fails without specifying the target greenplum db id');
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup", '--target-gp-dbid', '123' ],
'pg_basebackup fails because of WAL configuration');
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
# Create a backup directory that is not empty so the next command will fail
# but leave the data directory behind
mkdir("$tempdir/backup")
or BAIL_OUT("unable to create $tempdir/backup");
append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data");
$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
rmtree("$tempdir/backup");
open my $conf, '>>', "$pgdata/postgresql.conf";
print $conf "max_replication_slots = 10\n";
print $conf "max_wal_senders = 10\n";
print $conf "wal_level = replica\n";
close $conf;
$node->restart;
# Write some files to test that they are not copied.
foreach my $filename (
qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp)
)
{
open my $file, '>>', "$pgdata/$filename";
print $file "DONOTCOPY";
close $file;
}
# Connect to a database to create global/pg_internal.init. If this is removed
# the test to ensure global/pg_internal.init is not copied will return a false
# positive.
$node->safe_psql('postgres', 'SELECT 1;');
# Create an unlogged table to test that forks other than init are not copied.
$node->safe_psql('postgres', 'CREATE UNLOGGED TABLE base_unlogged (id int)');
my $baseUnloggedPath = $node->safe_psql('postgres',
q{select pg_relation_filepath('base_unlogged')});
# Make sure main and init forks exist
ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
# Create files that look like temporary relations to ensure they are ignored.
my $postgresOid = $node->safe_psql('postgres',
q{select oid from pg_database where datname = 'postgres'});
my @tempRelationFiles =
qw(t_999 t_999.1 t_9999_vm t_99999_vm.1);
foreach my $filename (@tempRelationFiles)
{
append_to_file("$pgdata/base/$postgresOid/$filename", 'TEMP_RELATION');
}
# Run base backup.
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup", '-X', 'none', '--target-gp-dbid', '123', '--no-verify-checksums' ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
# Permissions on backup should be default
SKIP:
{
skip "unix-style permissions not supported on Windows", 1
if ($windows_os);
ok(check_mode_recursive("$tempdir/backup", 0700, 0600),
"check backup dir permissions");
}
# Only archive_status directory should be copied in pg_wal/.
is_deeply(
[ sort(slurp_dir("$tempdir/backup/pg_wal/")) ],
[ sort qw(. .. archive_status) ],
'no WAL files copied');
# Contents of these directories should not be copied.
foreach my $dirname (
qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans)
)
{
is_deeply(
[ sort(slurp_dir("$tempdir/backup/$dirname/")) ],
[ sort qw(. ..) ],
"contents of $dirname/ not copied");
}
# These files should not be copied.
foreach my $filename (
qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp
global/pg_internal.init))
{
ok(!-f "$tempdir/backup/$filename", "$filename not copied");
}
# Unlogged relation forks other than init should not be copied
ok(-f "$tempdir/backup/${baseUnloggedPath}_init",
'unlogged init fork in backup');
ok( !-f "$tempdir/backup/$baseUnloggedPath",
'unlogged main fork not in backup');
# Temp relations should not be copied.
foreach my $filename (@tempRelationFiles)
{
ok( !-f "$tempdir/backup/base/$postgresOid/$filename",
"base/$postgresOid/$filename not copied");
}
# Make sure existing backup_label was ignored.
isnt(slurp_file("$tempdir/backup/backup_label"),
'DONOTCOPY', 'existing backup_label not copied');
rmtree("$tempdir/backup");
$node->command_ok(
[
'pg_basebackup', '-D', "$tempdir/backup2", '--waldir',
"$tempdir/xlog2", '--target-gp-dbid', '123'
],
'separate xlog directory');
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
ok(-d "$tempdir/xlog2/", 'xlog directory was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
# Tar-format backup (-Ft) should leave a base.tar in the target directory.
# (Removed a stray empty list element -- "'123', , '-Ft'" -- from the
# argument list; harmless in Perl but clearly a typo.)
$node->command_ok(
    [ 'pg_basebackup', '-D', "$tempdir/tarbackup", '--target-gp-dbid', '123', '-Ft' ],
    'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
rmtree("$tempdir/tarbackup");
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid', '123', '-Fp', "-T=/foo" ],
'-T with empty old directory fails');
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid', '123', '-Fp', "-T/foo=" ],
'-T with empty new directory fails');
$node->command_fails(
[
'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
"-T/foo=/bar=/baz", '--target-gp-dbid', '123'
],
'-T with multiple = fails');
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid', '123', '-Fp', "-Tfoo=/bar" ],
'-T with old directory not absolute fails');
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid', '123', '-Fp', "-T/foo=bar" ],
'-T with new directory not absolute fails');
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid', '123', '-Fp', "-Tfoo" ],
'-T with invalid format fails');
# Tar format doesn't support filenames longer than 100 bytes.
my $superlongname = "superlongname_" . ("x" x 100);
my $superlongpath = "$pgdata/$superlongname";
open my $file, '>', "$superlongpath"
or die "unable to create file $superlongpath";
close $file;
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '--target-gp-dbid', '123', '-Ft' ],
'pg_basebackup tar with long name fails');
unlink "$pgdata/$superlongname";
# The following tests test symlinks. Windows doesn't have symlinks, so
# skip on Windows.
SKIP:
{
skip "symlinks not supported on Windows", 18 if ($windows_os);
# Move pg_replslot out of $pgdata and create a symlink to it.
$node->stop;
# Set umask so test directories and files are created with group permissions
umask(0027);
# Enable group permissions on PGDATA
chmod_recursive("$pgdata", 0750, 0640);
rename("$pgdata/pg_replslot", "$tempdir/pg_replslot")
or BAIL_OUT "could not move $pgdata/pg_replslot";
symlink("$tempdir/pg_replslot", "$pgdata/pg_replslot")
or BAIL_OUT "could not symlink to $pgdata/pg_replslot";
$node->start;
# Create a temporary directory in the system location and symlink it
# to our physical temp location. That way we can use shorter names
# for the tablespace directories, which hopefully won't run afoul of
# the 99 character length limit.
my $shorter_tempdir = TestLib::tempdir_short . "/tempdir";
symlink "$tempdir", $shorter_tempdir;
mkdir "$tempdir/tblspc1";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$shorter_tempdir/tblspc1';");
$node->safe_psql('postgres',
"CREATE TABLE test1 (a int) TABLESPACE tblspc1;");
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup2", '-Ft',
'--target-gp-dbid', '123'],
'tar format with tablespaces');
ok(-f "$tempdir/tarbackup2/base.tar", 'backup tar was created');
my @tblspc_tars = glob "$tempdir/tarbackup2/[0-9]*.tar";
is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
rmtree("$tempdir/tarbackup2");
# Create an unlogged table to test that forks other than init are not copied.
$node->safe_psql('postgres',
'CREATE UNLOGGED TABLE tblspc1_unlogged (id int) TABLESPACE tblspc1;'
);
my $tblspc1UnloggedPath = $node->safe_psql('postgres',
q{select pg_relation_filepath('tblspc1_unlogged')});
my $node_dbid = $node->dbid;
# Make sure main and init forks exist
ok( -f "$pgdata/${tblspc1UnloggedPath}_init",
'unlogged init fork in tablespace');
ok(-f "$pgdata/$tblspc1UnloggedPath", 'unlogged main fork in tablespace');
# Create files that look like temporary relations to ensure they are ignored
# in a tablespace.
my @tempRelationFiles = qw(t_888 t_888888_vm.1);
my $tblSpc1Id = basename(
dirname(
dirname(
$node->safe_psql(
'postgres', q{select pg_relation_filepath('test1')}))));
foreach my $filename (@tempRelationFiles)
{
append_to_file(
"$shorter_tempdir/tblspc1/$node_dbid/$tblSpc1Id/$postgresOid/$filename",
'TEMP_RELATION');
}
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/backup1", '-Fp',
'--target-gp-dbid', '-1'
],
'plain format with tablespaces fails without tablespace mapping and target-gp-dbid as the test server dbid');
$node->command_ok(
[
'pg_basebackup', '-D', "$tempdir/backup1", '-Fp',
'--target-gp-dbid', '1',
"-T$shorter_tempdir/tblspc1=$tempdir/tbackup/tblspc1"
],
'plain format with tablespaces succeeds with tablespace mapping');
ok(-d "$tempdir/tbackup/tblspc1/1", 'tablespace was relocated');
opendir(my $dh, "$pgdata/pg_tblspc") or die;
ok( ( grep {
-l "$tempdir/backup1/pg_tblspc/$_"
and readlink "$tempdir/backup1/pg_tblspc/$_" eq
"$tempdir/tbackup/tblspc1/1"
} readdir($dh)),
"tablespace symlink was updated");
closedir $dh;
# Group access should be enabled on all backup files
ok(check_mode_recursive("$tempdir/backup1", 0750, 0640),
"check backup dir permissions");
# Unlogged relation forks other than init should not be copied
my ($tblspc1UnloggedBackupPath) =
$tblspc1UnloggedPath =~ /[^\/]*\/[^\/]*\/[^\/]*$/g;
ok(-f "$tempdir/tbackup/tblspc1/1/${tblspc1UnloggedBackupPath}_init",
'unlogged init fork in tablespace backup');
ok(!-f "$tempdir/tbackup/tblspc1/1/$tblspc1UnloggedBackupPath",
'unlogged main fork not in tablespace backup');
# Temp relations should not be copied.
foreach my $filename (@tempRelationFiles)
{
ok( !-f "$tempdir/tbackup/tblspc1/1/$tblSpc1Id/$postgresOid/$filename",
"[tblspc1]/$postgresOid/$filename not copied");
# Also remove temp relation files or tablespace drop will fail.
my $filepath =
"$shorter_tempdir/tblspc1/$node_dbid/$tblSpc1Id/$postgresOid/$filename";
unlink($filepath)
or BAIL_OUT("unable to unlink $filepath");
}
ok( -d "$tempdir/backup1/pg_replslot",
'pg_replslot symlink copied as directory');
rmtree("$tempdir/backup1");
mkdir "$tempdir/tbl=spc2";
$node->safe_psql('postgres', "DROP TABLE test1;");
$node->safe_psql('postgres', "DROP TABLE tblspc1_unlogged;");
$node->safe_psql('postgres', "DROP TABLESPACE tblspc1;");
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc2 LOCATION '$shorter_tempdir/tbl=spc2';");
$node->command_ok(
[
'pg_basebackup', '-D', "$tempdir/backup3", '--target-gp-dbid', '123', '-Fp',
"-T$shorter_tempdir/tbl\\=spc2=$tempdir/tbackup/tbl\\=spc2"
],
'mapping tablespace with = sign in path');
ok(-d "$tempdir/tbackup/tbl=spc2",
'tablespace with = sign was relocated');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc2;");
rmtree("$tempdir/backup3");
mkdir "$tempdir/$superlongname";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$tempdir/$superlongname';");
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/tarbackup_l3", '--target-gp-dbid', '123', '-Ft' ],
'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
}
$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backupR", '--target-gp-dbid', '123', '-R' ],
'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
rmtree("$tempdir/backupR");
my $port = $node->port;
like(
$recovery_conf,
qr/^primary_conninfo = '.*port=$port.*'\n/m,
'postgresql.auto.conf sets primary_conninfo');
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupxd", '--target-gp-dbid', '123' ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxd");
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupxf", '--target-gp-dbid', '123', '-X', 'fetch' ],
'pg_basebackup -X fetch runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxf");
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupxs", '--target-gp-dbid', '123', '-X', 'stream' ],
'pg_basebackup -X stream runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backupxst", '--target-gp-dbid', '123', '-X', 'stream', '-Ft' ],
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
$node->command_ok(
[
'pg_basebackup', '--target-gp-dbid', '123',
'-D',
"$tempdir/backupnoslot", '-X',
'stream', '--no-slot'
],
'pg_basebackup -X stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
$node->command_fails(
[
'pg_basebackup', '--target-gp-dbid', '123',
'-D',
"$tempdir/backupxs_sl_fail", '-X',
'stream', '-S',
'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/backupxs_slot", '-C' ],
'pg_basebackup -C fails without slot name');
$node->command_fails(
[
'pg_basebackup', '--target-gp-dbid', '123',
'-D',
"$tempdir/backupxs_slot", '-C',
'-S', 'slot0',
'--no-slot'
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
is( $node->safe_psql(
'postgres',
q{SELECT slot_name FROM pg_replication_slots WHERE slot_name = 'slot0'}
),
'slot0',
'replication slot was created');
isnt(
$node->safe_psql(
'postgres',
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot0'}
),
'',
'restart LSN of new slot is not null');
$node->command_fails(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/backupxs_slot1", '-v', '-C', '-S', 'slot0' ],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->safe_psql('postgres',
q{SELECT * FROM pg_create_physical_replication_slot('slot1')});
my $lsn = $node->safe_psql('postgres',
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
);
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/fail", '-S', 'slot1', '-X', 'none' ],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
'pg_basebackup', '-D', "$tempdir/backupxs_sl", '--target-gp-dbid', '123', '-X',
'stream', '-S', 'slot1'
],
'pg_basebackup -X stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'}
);
like($lsn, qr!^0/[0-9A-Z]{7,8}$!, 'restart LSN of slot has advanced');
rmtree("$tempdir/backupxs_sl");
$node->command_ok(
[
'pg_basebackup', '--target-gp-dbid', '123',
'-D', "$tempdir/backupxs_sl_R", '-X',
'stream', '-S', 'slot1', '-R'
],
'pg_basebackup with replication slot and -R runs');
like(
slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"),
qr/^primary_slot_name = 'slot1'\n/m,
'recovery conf file sets primary_slot_name');
my $checksum = $node->safe_psql('postgres', 'SHOW data_checksums;');
is($checksum, 'on', 'checksums are enabled');
rmtree("$tempdir/backupxs_sl_R");
# create tables to corrupt and get their relfilenodes
my $file_corrupt1 = $node->safe_psql('postgres',
q{SELECT a INTO corrupt1 FROM generate_series(1,10000) AS a; ALTER TABLE corrupt1 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt1')}
);
my $file_corrupt2 = $node->safe_psql('postgres',
q{SELECT b INTO corrupt2 FROM generate_series(1,2) AS b; ALTER TABLE corrupt2 SET (autovacuum_enabled=false); SELECT pg_relation_filepath('corrupt2')}
);
# set page header and block sizes
my $pageheader_size = 24;    # sizeof(PageHeaderData); writing past it corrupts tuple data, not the page header itself
my $block_size = $node->safe_psql('postgres', 'SHOW block_size;');
# induce corruption -- stop the server first so shared buffers cannot
# rewrite the page (and its checksum) behind our back
system_or_bail 'pg_ctl', '-D', $pgdata, 'stop';
open $file, '+<', "$pgdata/$file_corrupt1";
seek($file, $pageheader_size, 0);
# zero 9 bytes just past the header so the stored checksum no longer
# matches the page contents
syswrite($file, "\0\0\0\0\0\0\0\0\0");
close $file;
system_or_bail 'pg_ctl', '-o', '-c gp_role=utility --gp_dbid=1 --gp_contentid=-1', '-D', $pgdata, 'start';
$node->command_checks_all(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/backup_corrupt" ],
1,
[qr{^$}],
[qr/^WARNING.*checksum verification failed/s],
'pg_basebackup reports checksum mismatch');
rmtree("$tempdir/backup_corrupt");
# induce further corruption in 5 more blocks
system_or_bail 'pg_ctl', '-D', $pgdata, 'stop';
open $file, '+<', "$pgdata/$file_corrupt1";
for my $i (1 .. 5)
{
my $offset = $pageheader_size + $i * $block_size;
seek($file, $offset, 0);
syswrite($file, "\0\0\0\0\0\0\0\0\0");
}
close $file;
system_or_bail 'pg_ctl', '-o', '-c gp_role=utility --gp_dbid=1 --gp_contentid=-1', '-D', $pgdata, 'start';
$node->command_checks_all(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/backup_corrupt2" ],
1,
[qr{^$}],
[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
'pg_basebackup does not report more than 5 checksum mismatches');
rmtree("$tempdir/backup_corrupt2");
# induce corruption in a second file
system_or_bail 'pg_ctl', '-D', $pgdata, 'stop';
open $file, '+<', "$pgdata/$file_corrupt2";
seek($file, $pageheader_size, 0);
syswrite($file, "\0\0\0\0\0\0\0\0\0");
close $file;
system_or_bail 'pg_ctl', '-o', '-c gp_role=utility --gp_dbid=1 --gp_contentid=-1', '-D', $pgdata, 'start';
$node->command_checks_all(
[ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/backup_corrupt3" ],
1,
[qr{^$}],
[qr/^WARNING.*7 total checksum verification failures/s],
'pg_basebackup correctly report the total number of checksum mismatches');
rmtree("$tempdir/backup_corrupt3");
# do not verify checksums, should return ok
$node->command_ok(
[
'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt4", '--no-verify-checksums'
],
'pg_basebackup with -k does not report checksum mismatch');
rmtree("$tempdir/backup_corrupt4");
$node->safe_psql('postgres', "DROP TABLE corrupt1;");
$node->safe_psql('postgres', "DROP TABLE corrupt2;");
# Some additional GPDB tests
my $twenty_characters = '11111111112222222222';
my $longer_tempdir = "$tempdir/some_long_directory_path_$twenty_characters$twenty_characters$twenty_characters$twenty_characters$twenty_characters";
my $some_backup_dir = "$tempdir/backup_dir";
my $some_other_backup_dir = "$tempdir/other_backup_dir";
mkdir "$longer_tempdir";
mkdir "$some_backup_dir";
$node->psql('postgres', "CREATE TABLESPACE too_long_tablespace LOCATION '$longer_tempdir';");
$node->command_checks_all(
[ 'pg_basebackup', '-D', "$some_backup_dir", '--target-gp-dbid', '99'],
1,
[qr{^$}],
[qr/symbolic link ".*" target is too long and will not be added to the backup/],
'basebackup with a tablespace that has a very long location should error out with target is too long.');
mkdir "$some_other_backup_dir";
$node->command_checks_all(
['pg_basebackup', '-D', "$some_other_backup_dir", '--target-gp-dbid', '99'],
1,
[qr{^$}],
[qr/The symbolic link with target ".*" is too long. Symlink targets with length greater than 100 characters would be truncated./],
'basebackup with a tablespace that has a very long location should error out link not added to the backup.');
$node->command_checks_all(
['ls', "$some_other_backup_dir/pg_tblspc/*"],
2,
[qr{^$}],
[qr/No such file/],
'tablespace directory should be empty');
$node->psql('postgres', "DROP TABLESPACE too_long_tablespace;");
#
# GPDB: Exclude some files with the --exclude-from option
#
my $exclude_tempdir = "$tempdir/backup_exclude";
my $excludelist = "$tempdir/exclude.list";
mkdir "$exclude_tempdir";
mkdir "$pgdata/exclude";
# Build the --exclude-from list.  Use lexical filehandles with 3-arg,
# error-checked opens (the original used unchecked 2-arg opens on
# bareword handles, which would silently yield an empty or missing
# exclude list on failure and invalidate the test).
open my $exclude_fh, '>', $excludelist
    or BAIL_OUT("unable to create $excludelist: $!");
# Put a large amount of non-exist patterns in the exclude-from file,
# the pattern matching is efficient enough to handle them.
for my $i (1..1000000) {
    print $exclude_fh "./exclude/non_exist.$i\n";
}
# Create some files to exclude
for my $i (1..1000) {
    print $exclude_fh "./exclude/$i\n";
    open my $fh, '>', "$pgdata/exclude/$i"
        or BAIL_OUT("unable to create $pgdata/exclude/$i: $!");
    close $fh;
}
# Below file should not be excluded
open my $keep_fh, '>', "$pgdata/exclude/keep"
    or BAIL_OUT("unable to create $pgdata/exclude/keep: $!");
close $keep_fh;
# Check close on the write handle: buffered write errors surface here.
close $exclude_fh
    or BAIL_OUT("unable to close $excludelist: $!");
$node->command_ok(
[ 'pg_basebackup',
'-D', "$exclude_tempdir",
'--target-gp-dbid', '123',
'--exclude-from', "$excludelist" ],
'pg_basebackup runs with exclude-from file');
# BUGFIX: the original asserted on ".../exclude/0", a file that is never
# created (the creation loop runs 1..1000), so the assertion passed even
# if exclusion was broken.  Check a file that actually exists in $pgdata.
ok(! -f "$exclude_tempdir/exclude/1", 'excluded files were not created');
ok(-f "$exclude_tempdir/exclude/keep", 'other files were created');
| 50wu/gpdb | src/bin/pg_basebackup/t/010_pg_basebackup.pl | Perl | apache-2.0 | 22,407 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
2153
10E7D
1245A
1245D
END
| Dokaponteam/ITF_Project | xampp/perl/lib/unicore/lib/Nv/1_3.pl | Perl | mit | 442 |
# Load this module's Webmin configuration.  init_config() and error()
# are brought into the main package by Webmin's web-lib.pl via require,
# hence the &-style calls (Webmin house convention).
&init_config();
# show_result()
# Test stub: unconditionally aborts by rendering Webmin's error() page.
# NOTE(review): "fine!!!!" looks like placeholder text -- confirm intent.
sub show_result{
&error("fine!!!!");
}
package Encode::Unicode;
use strict;
use warnings;
no warnings 'redefine';

# Version derived from the RCS revision keyword at release time.
our $VERSION = do { my @r = ( q$Revision: 2.7 $ =~ /\d+/g ); sprintf "%d." . "%02d" x $#r, @r };

# The actual transcoding (encode_xs/decode_xs) lives in the XS layer.
use XSLoader;
XSLoader::load( __PACKAGE__, $VERSION );

#
# Object Generator -- registers all 8 transcoders at once!
#
require Encode;

# Encodings whose endianness is decided by a Byte Order Mark at decode
# time; these require a per-stream clone (see renew() below).
our %BOM_Unknown = map { $_ => 1 } qw(UTF-16 UTF-32);

for my $name (
    qw(UTF-16 UTF-16BE UTF-16LE
       UTF-32 UTF-32BE UTF-32LE
       UCS-2BE UCS-2LE)
  )
{
    # (dropped unused local $mask from the original declaration)
    my ( $size, $endian, $ucs2 );
    $name =~ /^(\w+)-(\d+)(\w*)$/o;

    # "UCS-2" carries octets directly; "UTF-nn" carries bits.
    if ( $ucs2 = ( $1 eq 'UCS' ) ) {
        $size = 2;
    }
    else {
        $size = $2 / 8;
    }

    # pack/unpack template letter for the byte order: 'n' (16-bit BE),
    # 'v' (16-bit LE), uppercased to 'N'/'V' for 32-bit units, or the
    # empty string when a BOM must decide at decode time.
    $endian = ( $3 eq 'BE' ) ? 'n' : ( $3 eq 'LE' ) ? 'v' : '';
    $size == 4 and $endian = uc($endian);

    # Register the transcoder object in Encode's global registry.
    $Encode::Encoding{$name} = bless {
        Name   => $name,
        size   => $size,
        endian => $endian,
        ucs2   => $ucs2,
    } => __PACKAGE__;
}

use base qw(Encode::Encoding);

# renew()
#
# Called by Encode when per-stream state is needed.  Only the
# BOM-sensitive encodings (plain UTF-16/UTF-32) need a private clone;
# all others are stateless singletons and are returned as-is.
sub renew {
    my $self = shift;
    $BOM_Unknown{ $self->name } or return $self;
    my $clone = bless {%$self} => ref($self);
    $clone->{renewed}++;    # so the caller knows it is renewed.
    return $clone;
}

# There used to be a pure-Perl implementation of (en|de)code, but now
# that the XS version is mature, the Perl fallback was removed for
# optimal speed.
*decode = \&decode_xs;
*encode = \&encode_xs;
1;
__END__
=head1 NAME
Encode::Unicode -- Various Unicode Transformation Formats
=cut
=head1 SYNOPSIS
use Encode qw/encode decode/;
$ucs2 = encode("UCS-2BE", $utf8);
$utf8 = decode("UCS-2BE", $ucs2);
=head1 ABSTRACT
This module implements all Character Encoding Schemes of Unicode that
are officially documented by Unicode Consortium (except, of course,
for UTF-8, which is a native format in perl).
=over 4
=item L<http://www.unicode.org/glossary/> says:
I<Character Encoding Scheme> A character encoding form plus byte
serialization. There are Seven character encoding schemes in Unicode:
UTF-8, UTF-16, UTF-16BE, UTF-16LE, UTF-32 (UCS-4), UTF-32BE (UCS-4BE) and
UTF-32LE (UCS-4LE), and UTF-7.
Since UTF-7 is a 7-bit (re)encoded version of UTF-16BE, It is not part of
Unicode's Character Encoding Scheme. It is separately implemented in
Encode::Unicode::UTF7. For details see L<Encode::Unicode::UTF7>.
=item Quick Reference
Decodes from ord(N) Encodes chr(N) to...
octet/char BOM S.P d800-dfff ord > 0xffff \x{1abcd} ==
---------------+-----------------+------------------------------
UCS-2BE 2 N N is bogus Not Available
UCS-2LE 2 N N bogus Not Available
UTF-16 2/4 Y Y is S.P S.P BE/LE
UTF-16BE 2/4 N Y S.P S.P 0xd82a,0xdfcd
UTF-16LE 2/4 N Y S.P S.P 0x2ad8,0xcddf
UTF-32 4 Y - is bogus As is BE/LE
UTF-32BE 4 N - bogus As is 0x0001abcd
UTF-32LE 4 N - bogus As is 0xcdab0100
UTF-8 1-4 - - bogus >= 4 octets \xf0\x9a\af\8d
---------------+-----------------+------------------------------
=back
=head1 Size, Endianness, and BOM
You can categorize these CES by 3 criteria: size of each character,
endianness, and Byte Order Mark.
=head2 by size
UCS-2 is a fixed-length encoding with each character taking 16 bits.
It B<does not> support I<surrogate pairs>. When a surrogate pair
is encountered during decode(), its place is filled with \x{FFFD}
if I<CHECK> is 0, or the routine croaks if I<CHECK> is 1. When a
character whose ord value is larger than 0xFFFF is encountered,
its place is filled with \x{FFFD} if I<CHECK> is 0, or the routine
croaks if I<CHECK> is 1.
UTF-16 is almost the same as UCS-2 but it supports I<surrogate pairs>.
When it encounters a high surrogate (0xD800-0xDBFF), it fetches the
following low surrogate (0xDC00-0xDFFF) and C<desurrogate>s them to
form a character. Bogus surrogates result in death. When \x{10000}
or above is encountered during encode(), it C<ensurrogate>s them and
pushes the surrogate pair to the output stream.
UTF-32 (UCS-4) is a fixed-length encoding with each character taking 32 bits.
Since it is 32-bit, there is no need for I<surrogate pairs>.
=head2 by endianness
The first (and now failed) goal of Unicode was to map all character
repertoires into a fixed-length integer so that programmers are happy.
Since each character is either a I<short> or I<long> in C, you have to
pay attention to the endianness of each platform when you pass data
to one another.
Anything marked as BE is Big Endian (or network byte order) and LE is
Little Endian (aka VAX byte order). For anything not marked either
BE or LE, a character called Byte Order Mark (BOM) indicating the
endianness is prepended to the string.
CAVEAT: Though a BOM in utf8 (\xEF\xBB\xBF) is valid, it is meaningless,
and as of this writing the Encode suite just leaves it as is (\x{FeFF}).
=over 4
=item BOM as integer when fetched in network byte order
16 32 bits/char
-------------------------
BE 0xFeFF 0x0000FeFF
LE 0xFFFe 0xFFFe0000
-------------------------
=back
This modules handles the BOM as follows.
=over 4
=item *
When BE or LE is explicitly stated as the name of encoding, BOM is
simply treated as a normal character (ZERO WIDTH NO-BREAK SPACE).
=item *
When BE or LE is omitted during decode(), it checks if BOM is at the
beginning of the string; if one is found, the endianness is set to
what the BOM says. If no BOM is found, the routine dies.
=item *
When BE or LE is omitted during encode(), it returns a BE-encoded
string with BOM prepended. So when you want to encode a whole text
file, make sure you encode() the whole text at once, not line by line
or each line, not file, will have a BOM prepended.
=item *
C<UCS-2> is an exception. Unlike others, this is an alias of UCS-2BE.
UCS-2 is already registered by IANA and others that way.
=back
=head1 Surrogate Pairs
To say the least, surrogate pairs were the biggest mistake of the
Unicode Consortium. But according to the late Douglas Adams in I<The
Hitchhiker's Guide to the Galaxy> Trilogy, C<In the beginning the
Universe was created. This has made a lot of people very angry and
been widely regarded as a bad move>. Their mistake was not of this
magnitude so let's forgive them.
(I don't dare make any comparison with Unicode Consortium and the
Vogons here ;) Or, comparing Encode to Babel Fish is completely
appropriate -- if you can only stick this into your ear :)
Surrogate pairs were born when the Unicode Consortium finally
admitted that 16 bits were not big enough to hold all the world's
character repertoires. But they already made UCS-2 16-bit. What
do we do?
Back then, the range 0xD800-0xDFFF was not allocated. Let's split
that range in half and use the first half to represent the C<upper
half of a character> and the second half to represent the C<lower
half of a character>. That way, you can represent 1024 * 1024 =
1048576 more characters. Now we can store character ranges up to
\x{10ffff} even with 16-bit encodings. This pair of half-character is
now called a I<surrogate pair> and UTF-16 is the name of the encoding
that embraces them.
Here is a formula to ensurrogate a Unicode character \x{10000} and
above;
$hi = ($uni - 0x10000) / 0x400 + 0xD800;
$lo = ($uni - 0x10000) % 0x400 + 0xDC00;
And to desurrogate;
$uni = 0x10000 + ($hi - 0xD800) * 0x400 + ($lo - 0xDC00);
Note this move has made \x{D800}-\x{DFFF} into a forbidden zone but
perl does not prohibit the use of characters within this range. To perl,
every one of \x{0000_0000} up to \x{ffff_ffff} (*) is I<a character>.
(*) or \x{ffff_ffff_ffff_ffff} if your perl is compiled with 64-bit
integer support!
=head1 Error Checking
Unlike most encodings, which accept various ways to handle errors,
Unicode encodings simply croak.
% perl -MEncode -e'$_ = "\xfe\xff\xd8\xd9\xda\xdb\0\n"' \
-e'Encode::from_to($_, "utf16","shift_jis", 0); print'
UTF-16:Malformed LO surrogate d8d9 at /path/to/Encode.pm line 184.
% perl -MEncode -e'$a = "BOM missing"' \
-e' Encode::from_to($a, "utf16", "shift_jis", 0); print'
UTF-16:Unrecognised BOM 424f at /path/to/Encode.pm line 184.
Unlike other encodings where mappings are not one-to-one against
Unicode, UTFs are supposed to map 100% against one another. So Encode
is more strict on UTFs.
Consider that "division by zero" of Encode :)
=head1 SEE ALSO
L<Encode>, L<Encode::Unicode::UTF7>, L<http://www.unicode.org/glossary/>,
L<http://www.unicode.org/unicode/faq/utf_bom.html>,
RFC 2781 L<http://www.ietf.org/rfc/rfc2781.txt>,
The whole Unicode standard L<http://www.unicode.org/unicode/uni2book/u2.html>
Ch. 15, pp. 403 of C<Programming Perl (3rd Edition)>
by Larry Wall, Tom Christiansen, Jon Orwant;
O'Reilly & Associates; ISBN 0-596-00027-8
=cut
| Dokaponteam/ITF_Project | xampp/perl/lib/Encode/Unicode.pm | Perl | mit | 8,955 |
package #
Date::Manip::TZ::amguay00;
# Copyright (c) 2008-2015 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Wed Nov 25 11:33:38 EST 2015
# Data version: tzdata2015g
# Code version: tzcode2015g
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our (%Dates,%LastRule);
END {
undef %Dates;
undef %LastRule;
}
our ($VERSION);
$VERSION='6.52';
END { undef $VERSION; }
%Dates = (
1 =>
[
[ [1,1,2,0,0,0],[1,1,1,18,40,40],'-05:19:20',[-5,-19,-20],
'LMT',0,[1890,1,1,5,19,19],[1889,12,31,23,59,59],
'0001010200:00:00','0001010118:40:40','1890010105:19:19','1889123123:59:59' ],
],
1890 =>
[
[ [1890,1,1,5,19,20],[1890,1,1,0,5,20],'-05:14:00',[-5,-14,0],
'QMT',0,[1931,1,1,5,13,59],[1930,12,31,23,59,59],
'1890010105:19:20','1890010100:05:20','1931010105:13:59','1930123123:59:59' ],
],
1931 =>
[
[ [1931,1,1,5,14,0],[1931,1,1,0,14,0],'-05:00:00',[-5,0,0],
'ECT',0,[9999,12,31,0,0,0],[9999,12,30,19,0,0],
'1931010105:14:00','1931010100:14:00','9999123100:00:00','9999123019:00:00' ],
],
);
%LastRule = (
);
1;
| jkb78/extrajnm | local/lib/perl5/Date/Manip/TZ/amguay00.pm | Perl | mit | 1,531 |
#!/usr/bin/perl -w
# Align one accession's FASTQ files (paired and/or single-end) with bwa mem,
# pipe each alignment through samtools into a BAM, then merge all BAMs into
# a single <accession>.bam with Picard MergeSamFiles.
use strict;
use Getopt::Long;
use File::Which;
my $sUsage = "perl $0 -acc accession_name -r1 r1_1.fq,r1_2.fq,r1_3.fq -r2 r2_1.fq,r2_2.fq,r2_3.fq -s single1.fq,single2.fq -CPU 1\n";
die $sUsage unless @ARGV;
my ($acc, $r1, $r2, $single, $cpu);
$cpu = 1;
GetOptions(
"acc=s" => \$acc,
"r1=s" => \$r1,
"r2=s" => \$r2,
"s=s" => \$single,
"CPU=i" => \$cpu
);
# Need an accession plus either both paired-end lists or a single-end list.
die $sUsage unless $acc and ( ($r1 and $r2) or $single);
# NOTE(review): when only one input mode is supplied, the split() for the
# other mode operates on undef and emits "uninitialized value" warnings
# under -w -- harmless but noisy; confirm intended.
my @r1_fq = split /,/, $r1;
my @r2_fq = split /,/, $r2;
die "Pair-end files are not matching \n" if $#r1_fq != $#r2_fq;
my @single_fq = split /,/, $single;
my $BWA = "bwa";
my $SAMTOOLS = "samtools";
unless(check_exec($BWA)){die "bwa executable is not found in " . join(" ", $ENV{PATH}). "\n"}
unless(check_exec($SAMTOOLS)){die "samtools executable is not found in " . join(" ", $ENV{PATH}) . "\n"}
# NOTE(review): Perl's -e file test does NOT expand "~", so this check only
# succeeds if a literal directory named "~" exists in the CWD -- use
# $ENV{HOME} or glob('~/Tools/picard/') instead; confirm.
my $picard_dir = "~/Tools/picard/";
message("Will try to use picard tools from this folder \"$picard_dir\"");
message("Checking picard ..");
if(-e $picard_dir . "MergeSamFiles.jar"){message("MergeSamFiles.jar is found.")}
else{message("Error: Not found MergeSamFiles.jar in the folder $picard_dir!"); die}
# Each run_bwa() call appends its output BAM names here; both calls are
# no-ops for empty input lists.
my @bams;
run_bwa([@r1_fq], [@r2_fq]);
run_bwa([@single_fq]);
message("Finished alignment for accesssion $acc");
# merge bams
merge_bams(@bams);
message("Merged all bams. The final bam file is ${acc}.bam");
##
sub check_exec {
    # Return 1 when the named program can be located on PATH (via
    # File::Which::which), 0 otherwise.
    my ($prog) = @_;
    my $located = which($prog);
    return ($located =~ /\S/) ? 1 : 0;
}
# Align FASTQ files for this accession. Called either with two array refs
# (paired-end: R1 list, R2 list) or one array ref (single-end). Each lane
# becomes one BAM, whose name is pushed onto the global @bams.
#
# NOTE(review): `bwa mem` normally takes the reference index prefix BEFORE
# the read files (bwa mem ref.fa r1.fq [r2.fq]); these commands pass only
# FASTQ paths, so bwa would treat the first FASTQ as the index. Confirm the
# index is meant to be included in -r1/-s, or add a -ref option.
sub run_bwa {
my @arr = @_;
# Convert bwa's SAM output to BAM on the fly.
my $toBam = "$SAMTOOLS view -Sb - ";
if ($#arr == 1){ ## pair-end
my @fa = @{$arr[0]};
my @fb = @{$arr[1]};
foreach my $ind (0 .. $#fa){
my $out = $acc. "_p".$ind.".bam";
my $cmd = "$BWA mem -M -t $cpu $fa[$ind] $fb[$ind] | $toBam >$out";
message($cmd , "started");
# NOTE(review): dies with no message on failure; a descriptive die
# string would aid debugging.
die if system($cmd);
message($cmd , "finished");
push @bams, $out;
}
}
if ($#arr == 0){ ## single-end
my @fa = @{$arr[0]};
foreach my $ind(0..$#fa){
my $out = $acc . "_single${ind}.bam";
my $cmd = "$BWA mem -M -t $cpu $fa[$ind] | $toBam >$out";
message($cmd , "started");
die if system($cmd);
message($cmd , "finished");
push @bams, $out;
}
}
}
sub merge_bams{
    # Merge the per-lane BAMs into a single "<acc>.bam" with Picard
    # MergeSamFiles; the list of inputs is recorded in the CO (comment) tag.
    my @bam_files = @_;
    my $merged    = $acc . ".bam";
    my $inputs    = join(" ", map { "I=" . $_ } @bam_files);
    my $comment   = join(" ", @bam_files);
    my $cmd = "java -Xmx4G -jar ${picard_dir}MergeSamFiles.jar " . $inputs
            . " O=$merged CO=\"" . $comment . "\"";
    message($cmd, "started");
    die "Failed $cmd\n" if system($cmd);
    message("$cmd finished");
}
sub message{
    # Log a timestamped status line to STDERR.
    my $line  = join(" ", @_);
    my $stamp = localtime(time);   # scalar context -> human-readable string
    print STDERR $stamp, "\t", $line, "\n";
}
| swang8/Perl_scripts_misc | run_bwa.pl | Perl | mit | 2,661 |
###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite time locale
# generator (0.04). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate_from_cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file es_PA.xml.
# The source file version number was 1.50, generated on
# 2007/07/21 21:12:28.
#
# Do not edit this file directly.
#
###########################################################################
package DateTime::Locale::es_PA;
use strict;
# Import utf8 only on perls that have it (5.6+); the generated locale data
# may contain UTF-8 literals.
BEGIN
{
if ( $] >= 5.006 )
{
require utf8; utf8->import;
}
}
use DateTime::Locale::es;
# es_PA inherits everything from the generic Spanish locale and overrides
# only the date formats below: Panama uses month/day/year order.
@DateTime::Locale::es_PA::ISA = qw(DateTime::Locale::es);
my $date_parts_order = "mdy";
sub medium_date_format { "\%m\/\%d\/\%\{ce_year\}" }
sub short_date_format { "\%m\/\%d\/\%y" }
sub date_parts_order { $date_parts_order }
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/Locale/es_PA.pm | Perl | mit | 1,119 |
#!/usr/bin/perl
# Message screen for the game: sends a message to up to 14 other countries,
# then displays/deletes the current player's inbox.
# NOTE(review): %data, $Misc, $User, $MainPath, @CountryData etc. are
# presumably populated by the script that requires this file -- confirm.
print "Content-type: text/html\n\n";
# $B pre-fills the "Message To" box (e.g. when replying).
if ($Misc ne "") {$B = $Misc;} else {$B = "";}
if ($data{'target'} ne "none" && $data{'target'} ne "") {
# '|' is the field separator of the message store, so it is forbidden in
# message bodies.
if ($data{'message'} =~ /\|/) {$Warning = qq!You have attempted to use illegal characters in your message. Illegal characters include |. Sorry to cause any inconvenience.<BR>!;} else {
@SendArray = split (/,/, $data{'target'});
# At most 14 recipients per send.
if (scalar(@SendArray) < 15) {
foreach $Sender (@SendArray) {
# Recipient ids are numeric; strip anything else (also path-safe).
$Sender =~ s/\D//g;
if (-e "$MainPath/users/$Sender") {
open (OUT, ">>$MainPath/messages/$Sender");
flock (OUT, 2);
# Type (0 - News / 1 - Message)|Date [time()]|Sender|Content
# @CountryData[34] is a one-element slice (sender's country name,
# presumably); dirty() sanitizes the body before it is stored.
print OUT qq!1|@{[time()]}|@CountryData[34],$User|@{[&dirty($data{'message'})]}\n!;
close (OUT);
} else {
$Warning .= qq!You have attempted to send a message to a country (#$Sender) which does not exist. Please check your numbers and try again.<BR>!;
}
}
} else {
$Warning .= qq!You have attempted to send a message to too many countries. Reduce the number of countries you are sending the message to, and try again.!;
}
}
}
if (-e "$MainPath/messages/$User") {
open (IN, "$MainPath/messages/$User");
flock (IN, 1);
@Message = <IN>;
&chopper (@Message);
close (IN);
$Line = 0;
foreach $Item (@Message) {
if ($data{$Line} eq "kill") {$Warn = 1;} else {$Warn = 2;push (@NewMessages, "$Item\n");}
@Mess = split(/\|/, $Item);
if (@Mess[0] == 1 && $Warn == 2) {
$Count ++;
($Sec,$Min,$Hour,$Mday,$Mon,$Year,$Wday,$Yday,$Isdst) = localtime(@Mess[1]);
$Mon++;
$Year += 1900;
if (length($Sec) == 1) {$Sec = "0$Sec"}
if (length($Min) == 1) {$Min = "0$Min"}
if (length($Hour) == 1) {$Hour = "0$Hour"}
my ($Sender1, $Sender2) = split(/,/, @Mess[2]);
$MesData .= qq!
<tr><TD>$NewFont1 From</TD><TD>$NewFont2 $Sender1 <font size=-2>(#$Sender2)</font></TD><TD>$NewFont1 Time Sent</TD><TD>$NewFont2 $Mon/$Mday/$Year - $Hour:$Min:$Sec</TD></TR>
<TR><TD colspan=6><hr height=1 width=100%<BR>$NewFont2 @Mess[3]<hr height=1 width=100%></TD></TR>
<TR><TD colspan=2>$NewFont1 <A href="Runner2.pl?$User&$Pword&9&$Sender2" style="text-decoration:none;color:$FColorOne">Click to Reply</a></TD><TD>$NewFont1 Delete</TD><TD>$NewFont1 <input type=checkbox name=$Line value=kill></TD></TR>
<TR><TD colspan=6> </TD></TR>
<TR><TD colspan=6> </TD></TR>!;
}
$Line++;
}
unless ($Count < 1) {$MesData .= qq!<TR><TD colspan=6>$Font<input type=submit name=submits value="Delete Messages"></TD></TR>!;}
open (OUT, ">$MainPath/messages/$User");
flock (OUT, 2);
print OUT @NewMessages;
close (OUT);
}
# Emit the page: status line, link bar, the send form, and the rendered inbox.
&StatLine;
# The qq string below uses the letter 'Ñ' as its quote delimiter -- unusual
# but valid Perl; chosen because the HTML body contains '!', '/', etc.
print qqÑ
<head><Title>ASH - Message Menu</title></HEAD>
<body bgcolor=black text=white background=$BGPic alink=$FColourOne link=$FColourOne vlink=$FColourOne>$Font<BR><BR>
<a name=top>
<center>
$BannerCode
</center><table border=0 width=100% cellspacing=0>
$LinkLine
</table>
<center>
<form method=POST action="Runner2.pl?$User&$Pword&9">
<BR><BR>$Font$Warning<BR><center>
<table width=60% border=0>
<TR><TD>$NewFont1 Message To</TD><TD><input type=text name=target value="$B" size=10 maxsize=4></TD><TD>$NewFont2<input type=submit name=submit value="Send Message"> </TD></TR>
<TR><TD colspan=3>$NewFont1<textarea name=message rows=10 cols=72 wrap=virtual></textarea></TD></TR>
</Table><BR><BR>
</form>
<form method=POST action="Runner2.pl?$User&$Pword&9">
<center><table width=60% border=0 cellspacing=0>
$MesData
</table>
</form>
</body>Ñ;
sub dirty {
	# Sanitize user-supplied message text for embedding in the HTML message
	# store/page. Operates on @_ aliases, so the caller's variables are
	# modified in place; the (modified) list is also returned.
	#
	# Fix: the escaping substitutions had degenerated into identity no-ops
	# (s/</</g etc. -- the HTML entities were lost, most likely to entity
	# decoding at some point). Restored them, and moved the escaping BEFORE
	# the newline-to-<p>/<br> conversion so the markup inserted here is not
	# itself escaped away.
	foreach my $text (@_) {
		$text =~ s/\cM//g;          # strip carriage returns (CRLF input)
		$text =~ s/</&lt;/g;        # escape HTML so users cannot inject markup
		$text =~ s/>/&gt;/g;
		$text =~ s/"/&quot;/g;
		$text =~ s/\n\n/<p>/g;      # blank line -> paragraph break
		$text =~ s/\n/<br>/g;       # single newline -> line break
	}
	return @_;
}
| cpraught/arcadian-duck | Message2.pl | Perl | mit | 3,707 |
#!/usr/bin/perl
##############################################################################
# Clicks Counter: Download Counter Version 3.5 #
# Copyright 2000, MitriDAT info@mitridat.com #
# http://www.mitridat.com #
# Created 03.04.2000 Last Modified 02.03.2001 #
##############################################################################
# COPYRIGHT NOTICE #
# Copyright 2000, MitriDAT All Rights Reserved. #
# #
# Please check the ReadMe folder for full details on installation #
# and registration. #
##############################################################################
##############################################################################
# Variable Defention
#
# $filename - the name of the log file.
#
# $main_dir - the location of the log file. You must
# have the full path. If you are unsure what it is
# contact your server admin.
#
# $autoadd - decides wether or not to automatically
# add links that are not in the database.
# 1 = on 2 = off
#
# $view_log - The password you wish to use to view the
# log file online so clickscounter.cgi?view=whatever you
# chose for this string. In this script it is set-up
# to use test so you would use clickscounter.cgi?view=password
#
##############################################################################
# Start Variables
$filename = 'clicks-counter.log';
$main_dir = '.';
$autoadd=1;
$view_log='password';
# End Variable
##############################################################################
#*********************** DO NOT EDIT PAST THIS LINE *************************#
require 5;
use LWP::Simple;
# Parse CGI parameters into %input (the glob is aliased inside FormInput so
# $input/@input/%input are filled in).
&FormInput(*input);
################################
# Some Default Set Variables
################################
$addnew=0;
$totalclicks=0;
$lock = "$main_dir/clickcount_lock.lock";
####################
# Set Lock File
####################
# Counting mode mutates the log, so take the lock; the read-only log viewer
# skips it. NOTE(review): the '&' call style without parens passes the
# current @_ through -- harmless here since SetLock ignores arguments.
if ($input{'view'} ne $view_log)
{
&SetLock;
}
####################
# Read in Data File
####################
# NOTE(review): bareword handle DATA shadows Perl's __DATA__ handle, the
# open is the 2-arg form with an interpolated path, and failure is not
# checked -- a missing log silently yields an empty @lines.
open(DATA,"$main_dir/$filename");
@lines = <DATA>;
close(DATA);
#####################
# View Log
#####################
# Read-only HTML report of every tracked URL and its click count, gated by
# the ?view=<password> query parameter.
if ($input{'view'} eq $view_log)
{
$spacing = " ";
# Under the PerlIS ISAPI engine the script must emit the status line itself.
print "HTTP/1.0 200 OK\n" if $ENV{PERLXS} eq "PerlIS";
print "Content-type: text/html\n\n";
print "<html>\n";
print "<title>Clicks Counter Log Viewer</title>\n";
print "<body bgcolor=FFFFFF>\n";
print "<center>\n";
print "<h1>Clicks Counter Log Viewer</h1>\n";
print "<table border=1>\n";
print "<tr><td colspan=1 bgcolor=\"ADADAD\">$spacing<u><strong>URL/Link</strong></u>$spacing</td>\n";
print "<td colspan=1 bgcolor=\"ADADAD\">$spacing<strong><u>Clicks/Hits</strong></u> $spacing</td></tr>\n";
# One "url|count" record per log line.
foreach $line (@lines)
{
($link_url1, $link_count1) = split(/\|/,$line);
print "<tr><td bgcolor=\"E6E6E6\" align=left>$link_url1 $spacing</td>\n";
print "<td bgcolor=\"D4D4D4\" align=right>$spacing $link_count1</td></tr>\n";
$totalclicks = $totalclicks + $link_count1;
}
print "</table>\n";
print "<p>Total Clicks/Hits: $totalclicks</p>\n";
print "<hr width=\"200\">\n";
print "<p><b>UnRegistered Version!<br>Shareware limitation:</b> The unregistered version of this script <br>will pop up new window with certain site at the definite frequency. <br>Exampe: every 5-th click will be pop uped new window with MitriDAT web site.<br><b><a href=\"http://www.mitridat.com/products-ordering-information.html\">Click here to Order Now!</a><br>Read \"ReadMe\" for more information.</b></p>\n";
print "<br><em>Clicks Counter Ver. 3.5</em>\n";
print "<br>© 2000 <a href=\"http://www.mitridat.com/\">MitriDAT</a>";
print "</center>\n";
print "</body>\n";
print "</html>\n";
}
#####################
# Count Incrementing
#####################
# Normal hit: rewrite the log with the matching URL's count bumped, then
# redirect the browser to the target URL.
else
{
# NOTE(review): 2-arg open of a truncating write with no error check; a
# failed open here silently discards the whole log on the next writes.
open(DATA,">$main_dir/$filename");
foreach $line (@lines)
{
($link_url1, $link_count1) = split(/\|/,$line);
if ($input{'url'} eq $link_url1)
{
$link_count1++;
print DATA ("$link_url1|$link_count1\n");
$addnew=1;
}
else
{
print DATA $line;
}
$totalclicks = $totalclicks + $link_count1;
}
#####################
# Auto Add entry
#####################
# Unknown URL: append it with a count of 1 when $autoadd is enabled.
if ($addnew == 0 && $autoadd == 1)
{
print DATA ("$input{'url'}|1\n");
}
&EndLock;
#####################
# Close Log File
#####################
close(DATA);
#####################
# Go to URL
#####################
# Prefix a scheme when none present. NOTE(review): "m?://?" is Perl's
# one-shot match operator (matches only once per run until reset()) -- fine
# for plain CGI where each hit is a fresh process, but it would misbehave
# under a persistent interpreter such as mod_perl.
if ($input{'url'} !~ m?://?)
{
$input{'url'} = "http://" . $input{'url'};
}
print "HTTP/1.0 302 Temporary Redirection\r\n" if $ENV{PERLXS} eq "PerlIS";
print "Content-type: text/html\n\n";
# Shareware limitation: after 20 total clicks, fetch "probability|page" from
# the vendor site and pop up an ad window every $probability-th click.
# NOTE(review): remote content is interpolated into the emitted JavaScript
# unescaped, and a zero/undef $probability would cause a divide-by-zero.
if ($totalclicks > 20)
{
$AdvURL = 'http://www.mitridat.com/adv/cc.txt';
my $fullpage = get $AdvURL;
($probability, $page) = split(/\|/,$fullpage);
if ((int($totalclicks / $probability) * $probability) == $totalclicks)
{
print "<script language=\"JavaScript\">";
print "window.open(\"$page\", \"AdvWin\");";
print "</script>";
}
}
# Meta-refresh redirect to the counted URL.
print "<meta http-equiv=\"refresh\" content=\"0; URL=$input{'url'}\">";
} # Closes Else for View Log
exit;
#*************************************************************
# function: FormInput
#*************************************************************
# Parse the CGI query string (GET) or request body (POST) into the
# caller-supplied variables. Called as &FormInput(*input): the passed
# typeglob is aliased to *qs below, so $qs/@qs/%qs in this sub actually
# read and populate the caller's $input/@input/%input.
sub FormInput
{
# Dynamically alias our working glob to the caller's glob for the duration
# of this call; with no argument the package's own *qs is used.
local (*qs) = @_ if @_;
if ($ENV{'REQUEST_METHOD'} eq "GET")
{
$qs = $ENV{'QUERY_STRING'};
}
elsif ($ENV{'REQUEST_METHOD'} eq "POST")
{
read(STDIN,$qs,$ENV{'CONTENT_LENGTH'});
}
# Split into key=value pairs. NOTE(review): only '&' is honored as a pair
# separator, not ';'.
@qs = split(/&/,$qs);
foreach $i (0 .. $#qs)
{
# URL-decode: '+' -> space, then %XX -> byte (pack "c" yields the same
# byte as "C" for values 0-255).
$qs[$i] =~ s/\+/ /g;
$qs[$i] =~ s/%(..)/pack("c",hex($1))/ge;
# Split on the first '=' only, so values may contain '='.
($name,$value) = split(/=/,$qs[$i],2);
# Repeated parameters are joined into one ':'-separated string.
if($qs{$name} ne "")
{
$qs{$name} = "$qs{$name}:$value";
}
else
{
$qs{$name} = $value;
}
}
return 1;
}
#*************************************************************
# SetLock: Subroutine
#*************************************************************
sub SetLock {
    # Acquire the advisory lock by creating the file named in the global
    # $lock (set near the top of the script).
    #
    # Fix: the original tested -e and then open()ed in two separate steps,
    # so two concurrent requests could both "acquire" the lock. O_EXCL makes
    # create-if-absent a single atomic operation.
    use Fcntl qw(O_WRONLY O_CREAT O_EXCL);
    my $fh;
    my $tries = 0;
    while (!sysopen($fh, $lock, O_WRONLY | O_CREAT | O_EXCL)) {
        # Another request holds the lock; poll every 5 seconds.
        sleep(5);
        $tries++;
        # After ~25s assume the holder died and remove the stale lock file;
        # the next sysopen attempt will then succeed (same timeout policy
        # as the original implementation).
        unlink($lock) if $tries >= 5;
    }
    close($fh);
    return;
}
#*************************************************************
# EndLock: Subroutine
#*************************************************************
sub EndLock {
    # Release the advisory lock by removing the lock file named in the
    # global $lock.
    unlink $lock;
    return;
}
| RussianPenguin/samples | Perl_-_Scripts/Download Counter 3.5/clickscounter.pl | Perl | mit | 7,166 |
% Sample PrologMUD launcher: loads the demo MUD server and, unless started
% with --noworld, rewrites os_argv so the server comes up with a world and
% a telnet listener.
:- module(prologmud_sample,[]).
% Load the server code.
:- consult(prologmud_sample_games/run_mud_server).
% Default the command line to '--world --telnet' unless --noworld was given.
:- current_prolog_flag(os_argv,[Was]),
(app_argv('--noworld')->true;
set_prolog_flag(os_argv,[Was,'--world','--telnet'])).
% ensure_loaded/1 is a no-op when the file is already loaded by the consult
% above -- presumably kept for standalone loading; confirm.
:- user:ensure_loaded(prologmud_sample_games/run_mud_server).
% Start the world (baseKB:lar) or tell the user how to start it manually.
:- baseKB:lar-> true ; wdmsg("To begin again type ?- baseKB:lar. ").
| TeamSPoon/logicmoo_workspace | packs_sys/prologmud_samples/prolog/prologmud_samples.pl | Perl | mit | 344 |
# Auto-generated SOAP::WSDL type for the AdWords cm v201809 API: the
# AppConversion.AppPlatform simpleType, an xsd:string restriction. Per the
# POD below, SOAP::WSDL does not enforce the restriction, so instances
# behave exactly like the builtin string type.
package Google::Ads::AdWords::v201809::AppConversion::AppPlatform;
use strict;
use warnings;
# XML namespace this type belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809'};
# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);
1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
AppConversion.AppPlatform from the namespace https://adwords.google.com/api/adwords/cm/v201809.
App platform for the AppConversionTracker.
This class is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like its base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/AppConversion/AppPlatform.pm | Perl | apache-2.0 | 1,132 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Job::AlleleFrequency;
### Plugin that decorates an AlleleFrequency job with the path to the
### allele-frequency binary before the job is handed to the Hive dispatcher.
use strict;
use warnings;
use previous qw(prepare_to_dispatch);
sub prepare_to_dispatch {
  ## @plugin
  ## Run the previous (overridden) implementation first; bail out if it
  ## produced nothing, otherwise extend its payload with the binary path
  ## taken from the species definitions.
  my $self = shift;
  my $data = $self->PREV::prepare_to_dispatch(@_) or return;
  $data->{'AF_bin_path'} = $self->hub->species_defs->ALLELE_FREQUENCY_BIN_PATH;
  return $data;
}
1;
| Ensembl/public-plugins | tools_hive/modules/EnsEMBL/Web/Job/AlleleFrequency.pm | Perl | apache-2.0 | 1,167 |
=pod
=head1 NAME
DEFINE_STACK_OF, DEFINE_STACK_OF_CONST, DEFINE_SPECIAL_STACK_OF,
DEFINE_SPECIAL_STACK_OF_CONST,
sk_TYPE_num, sk_TYPE_value, sk_TYPE_new, sk_TYPE_new_null,
sk_TYPE_reserve, sk_TYPE_free, sk_TYPE_zero, sk_TYPE_delete,
sk_TYPE_delete_ptr, sk_TYPE_push, sk_TYPE_unshift, sk_TYPE_pop,
sk_TYPE_shift, sk_TYPE_pop_free, sk_TYPE_insert, sk_TYPE_set,
sk_TYPE_find, sk_TYPE_find_ex, sk_TYPE_find_all, sk_TYPE_sort,
sk_TYPE_is_sorted, sk_TYPE_dup, sk_TYPE_deep_copy, sk_TYPE_set_cmp_func,
sk_TYPE_new_reserve,
OPENSSL_sk_deep_copy, OPENSSL_sk_delete, OPENSSL_sk_delete_ptr,
OPENSSL_sk_dup, OPENSSL_sk_find, OPENSSL_sk_find_ex, OPENSSL_sk_find_all,
OPENSSL_sk_free, OPENSSL_sk_insert, OPENSSL_sk_is_sorted, OPENSSL_sk_new,
OPENSSL_sk_new_null, OPENSSL_sk_new_reserve, OPENSSL_sk_num, OPENSSL_sk_pop,
OPENSSL_sk_pop_free, OPENSSL_sk_push, OPENSSL_sk_reserve, OPENSSL_sk_set,
OPENSSL_sk_set_cmp_func, OPENSSL_sk_shift, OPENSSL_sk_sort,
OPENSSL_sk_unshift, OPENSSL_sk_value, OPENSSL_sk_zero
- stack container
=head1 SYNOPSIS
=for openssl generic
#include <openssl/safestack.h>
STACK_OF(TYPE)
DEFINE_STACK_OF(TYPE)
DEFINE_STACK_OF_CONST(TYPE)
DEFINE_SPECIAL_STACK_OF(FUNCTYPE, TYPE)
DEFINE_SPECIAL_STACK_OF_CONST(FUNCTYPE, TYPE)
typedef int (*sk_TYPE_compfunc)(const TYPE *const *a, const TYPE *const *b);
typedef TYPE * (*sk_TYPE_copyfunc)(const TYPE *a);
typedef void (*sk_TYPE_freefunc)(TYPE *a);
int sk_TYPE_num(const STACK_OF(TYPE) *sk);
TYPE *sk_TYPE_value(const STACK_OF(TYPE) *sk, int idx);
STACK_OF(TYPE) *sk_TYPE_new(sk_TYPE_compfunc compare);
STACK_OF(TYPE) *sk_TYPE_new_null(void);
int sk_TYPE_reserve(STACK_OF(TYPE) *sk, int n);
void sk_TYPE_free(const STACK_OF(TYPE) *sk);
void sk_TYPE_zero(const STACK_OF(TYPE) *sk);
TYPE *sk_TYPE_delete(STACK_OF(TYPE) *sk, int i);
TYPE *sk_TYPE_delete_ptr(STACK_OF(TYPE) *sk, TYPE *ptr);
int sk_TYPE_push(STACK_OF(TYPE) *sk, const TYPE *ptr);
int sk_TYPE_unshift(STACK_OF(TYPE) *sk, const TYPE *ptr);
TYPE *sk_TYPE_pop(STACK_OF(TYPE) *sk);
TYPE *sk_TYPE_shift(STACK_OF(TYPE) *sk);
void sk_TYPE_pop_free(STACK_OF(TYPE) *sk, sk_TYPE_freefunc freefunc);
int sk_TYPE_insert(STACK_OF(TYPE) *sk, TYPE *ptr, int idx);
TYPE *sk_TYPE_set(STACK_OF(TYPE) *sk, int idx, const TYPE *ptr);
int sk_TYPE_find(STACK_OF(TYPE) *sk, TYPE *ptr);
int sk_TYPE_find_ex(STACK_OF(TYPE) *sk, TYPE *ptr);
int sk_TYPE_find_all(STACK_OF(TYPE) *sk, TYPE *ptr, int *pnum);
void sk_TYPE_sort(const STACK_OF(TYPE) *sk);
int sk_TYPE_is_sorted(const STACK_OF(TYPE) *sk);
STACK_OF(TYPE) *sk_TYPE_dup(const STACK_OF(TYPE) *sk);
STACK_OF(TYPE) *sk_TYPE_deep_copy(const STACK_OF(TYPE) *sk,
sk_TYPE_copyfunc copyfunc,
sk_TYPE_freefunc freefunc);
sk_TYPE_compfunc (*sk_TYPE_set_cmp_func(STACK_OF(TYPE) *sk,
sk_TYPE_compfunc compare));
STACK_OF(TYPE) *sk_TYPE_new_reserve(sk_TYPE_compfunc compare, int n);
=head1 DESCRIPTION
Applications can create and use their own stacks by placing any of the macros
described below in a header file. These macros define typesafe inline
functions that wrap around the utility B<OPENSSL_sk_> API.
In the description here, B<I<TYPE>> is used
as a placeholder for any of the OpenSSL datatypes, such as B<X509>.
The STACK_OF() macro returns the name for a stack of the specified B<I<TYPE>>.
This is an opaque pointer to a structure declaration.
This can be used in every header file that references the stack.
There are several B<DEFINE...> macros that create static inline functions
for all of the functions described on this page.
This should normally be used in one source file, and the stack manipulation
is wrapped with application-specific functions.
DEFINE_STACK_OF() creates a set of functions for a stack of B<I<TYPE>> elements.
The type is referenced by
B<STACK_OF>(B<I<TYPE>>) and each function name begins with B<sk_I<TYPE>_>.
DEFINE_STACK_OF_CONST() is identical to DEFINE_STACK_OF() except
each element is constant.
/* DEFINE_STACK_OF(TYPE) */
TYPE *sk_TYPE_value(STACK_OF(TYPE) *sk, int idx);
/* DEFINE_STACK_OF_CONST(TYPE) */
const TYPE *sk_TYPE_value(STACK_OF(TYPE) *sk, int idx);
DEFINE_SPECIAL_STACK_OF() and DEFINE_SPECIAL_STACK_OF_CONST() are similar
except B<FUNCNAME> is used in the function names:
/* DEFINE_SPECIAL_STACK_OF(FUNCTYPE, TYPE) */
TYPE *sk_FUNCNAME_value(STACK_OF(TYPE) *sk, int idx);
/* DEFINE_SPECIAL_STACK_OF_CONST(FUNCTYPE, TYPE) */
const TYPE *sk_FUNCNAME_value(STACK_OF(TYPE) *sk, int idx);
B<sk_I<TYPE>_num>() returns the number of elements in I<sk> or -1 if I<sk> is
NULL.
B<sk_I<TYPE>_value>() returns element I<idx> in I<sk>, where I<idx> starts at
zero. If I<idx> is out of range then NULL is returned.
B<sk_I<TYPE>_new>() allocates a new empty stack using comparison function
I<compare>. If I<compare> is NULL then no comparison function is used. This
function is equivalent to B<sk_I<TYPE>_new_reserve>(I<compare>, 0).
B<sk_I<TYPE>_new_null>() allocates a new empty stack with no comparison
function. This function is equivalent to B<sk_I<TYPE>_new_reserve>(NULL, 0).
B<sk_I<TYPE>_reserve>() allocates additional memory in the I<sk> structure
such that the next I<n> calls to B<sk_I<TYPE>_insert>(), B<sk_I<TYPE>_push>()
or B<sk_I<TYPE>_unshift>() will not fail or cause memory to be allocated
or reallocated. If I<n> is zero, any excess space allocated in the
I<sk> structure is freed. On error I<sk> is unchanged.
B<sk_I<TYPE>_new_reserve>() allocates a new stack. The new stack will have
additional memory allocated to hold I<n> elements if I<n> is positive.
The next I<n> calls to B<sk_I<TYPE>_insert>(), B<sk_I<TYPE>_push>() or
B<sk_I<TYPE>_unshift>() will not fail or cause memory to be allocated or
reallocated. If I<n> is zero or less than zero, no memory is allocated.
B<sk_I<TYPE>_new_reserve>() also sets the comparison function I<compare>
to the newly created stack. If I<compare> is NULL then no comparison
function is used.
B<sk_I<TYPE>_set_cmp_func>() sets the comparison function of I<sk> to
I<compare>. The previous comparison function is returned or NULL if there
was no previous comparison function.
B<sk_I<TYPE>_free>() frees up the I<sk> structure. It does I<not> free up any
elements of I<sk>. After this call I<sk> is no longer valid.
B<sk_I<TYPE>_zero>() sets the number of elements in I<sk> to zero. It does not
free I<sk> so after this call I<sk> is still valid.
B<sk_I<TYPE>_pop_free>() frees up all elements of I<sk> and I<sk> itself. The
free function freefunc() is called on each element to free it.
B<sk_I<TYPE>_delete>() deletes element I<i> from I<sk>. It returns the deleted
element or NULL if I<i> is out of range.
B<sk_I<TYPE>_delete_ptr>() deletes element matching I<ptr> from I<sk>. It
returns the deleted element or NULL if no element matching I<ptr> was found.
B<sk_I<TYPE>_insert>() inserts I<ptr> into I<sk> at position I<idx>. Any
existing elements at or after I<idx> are moved downwards. If I<idx> is out
of range the new element is appended to I<sk>. B<sk_I<TYPE>_insert>() either
returns the number of elements in I<sk> after the new element is inserted or
zero if an error (such as memory allocation failure) occurred.
B<sk_I<TYPE>_push>() appends I<ptr> to I<sk> it is equivalent to:
sk_TYPE_insert(sk, ptr, -1);
B<sk_I<TYPE>_unshift>() inserts I<ptr> at the start of I<sk> it is equivalent
to:
sk_TYPE_insert(sk, ptr, 0);
B<sk_I<TYPE>_pop>() returns and removes the last element from I<sk>.
B<sk_I<TYPE>_shift>() returns and removes the first element from I<sk>.
B<sk_I<TYPE>_set>() sets element I<idx> of I<sk> to I<ptr> replacing the current
element. The new element value is returned or NULL if an error occurred:
this will only happen if I<sk> is NULL or I<idx> is out of range.
B<sk_I<TYPE>_find>() searches I<sk> for the element I<ptr>. In the case
where no comparison function has been specified, the function performs
a linear search for a pointer equal to I<ptr>. The index of the first
matching element is returned or B<-1> if there is no match. In the case
where a comparison function has been specified, I<sk> is sorted and
B<sk_I<TYPE>_find>() returns the index of a matching element or B<-1> if there
is no match. Note that, in this case the comparison function will usually
compare the values pointed to rather than the pointers themselves and
the order of elements in I<sk> can change. Note that because the stack may be
sorted as the result of a B<sk_I<TYPE>_find>() call, if a lock is being used to
synchronise access to the stack across multiple threads, then that lock must be
a "write" lock.
B<sk_I<TYPE>_find_ex>() operates like B<sk_I<TYPE>_find>() except when a
comparison function has been specified and no matching element is found.
Instead of returning B<-1>, B<sk_I<TYPE>_find_ex>() returns the index of the
element either before or after the location where I<ptr> would be if it were
present in I<sk>. The function also does not guarantee that the first matching
element in the sorted stack is returned.
B<sk_I<TYPE>_find_all>() operates like B<sk_I<TYPE>_find>() but it also
sets the I<*pnum> to number of matching elements in the stack. In case
no comparison function has been specified the I<*pnum> will be always set
to 1 if matching element was found, 0 otherwise.
B<sk_I<TYPE>_sort>() sorts I<sk> using the supplied comparison function.
B<sk_I<TYPE>_is_sorted>() returns B<1> if I<sk> is sorted and B<0> otherwise.
B<sk_I<TYPE>_dup>() returns a shallow copy of I<sk>
or an empty stack if the passed stack is NULL.
Note the pointers in the copy are identical to the original.
B<sk_I<TYPE>_deep_copy>() returns a new stack where each element has been
copied or an empty stack if the passed stack is NULL.
Copying is performed by the supplied copyfunc() and freeing by freefunc().
The function freefunc() is only called if an error occurs.
=head1 NOTES
Care should be taken when accessing stacks in multi-threaded environments.
Any operation which increases the size of a stack such as B<sk_I<TYPE>_insert>()
or B<sk_I<TYPE>_push>() can "grow" the size of an internal array and cause race
conditions if the same stack is accessed in a different thread. Operations such
as B<sk_I<TYPE>_find>() and B<sk_I<TYPE>_sort>() can also reorder the stack.
Any comparison function supplied should use a metric suitable
for use in a binary search operation. That is it should return zero, a
positive or negative value if I<a> is equal to, greater than
or less than I<b> respectively.
Care should be taken when checking the return values of the functions
B<sk_I<TYPE>_find>() and B<sk_I<TYPE>_find_ex>(). They return an index to the
matching element. In particular B<0> indicates a matching first element.
A failed search is indicated by a B<-1> return value.
STACK_OF(), DEFINE_STACK_OF(), DEFINE_STACK_OF_CONST(), and
DEFINE_SPECIAL_STACK_OF() are implemented as macros.
The underlying utility B<OPENSSL_sk_> API should not be used directly.
It defines these functions: OPENSSL_sk_deep_copy(),
OPENSSL_sk_delete(), OPENSSL_sk_delete_ptr(), OPENSSL_sk_dup(),
OPENSSL_sk_find(), OPENSSL_sk_find_ex(), OPENSSL_sk_find_all(),
OPENSSL_sk_free(), OPENSSL_sk_insert(), OPENSSL_sk_is_sorted(),
OPENSSL_sk_new(), OPENSSL_sk_new_null(), OPENSSL_sk_new_reserve(),
OPENSSL_sk_num(), OPENSSL_sk_pop(), OPENSSL_sk_pop_free(), OPENSSL_sk_push(),
OPENSSL_sk_reserve(), OPENSSL_sk_set(), OPENSSL_sk_set_cmp_func(),
OPENSSL_sk_shift(), OPENSSL_sk_sort(), OPENSSL_sk_unshift(),
OPENSSL_sk_value(), OPENSSL_sk_zero().
=head1 RETURN VALUES
B<sk_I<TYPE>_num>() returns the number of elements in the stack or B<-1> if the
passed stack is NULL.
B<sk_I<TYPE>_value>() returns a pointer to a stack element or NULL if the
index is out of range.
B<sk_I<TYPE>_new>(), B<sk_I<TYPE>_new_null>() and B<sk_I<TYPE>_new_reserve>()
return an empty stack or NULL if an error occurs.
B<sk_I<TYPE>_reserve>() returns B<1> on successful allocation of the required
memory or B<0> on error.
B<sk_I<TYPE>_set_cmp_func>() returns the old comparison function or NULL if
there was no old comparison function.
B<sk_I<TYPE>_free>(), B<sk_I<TYPE>_zero>(), B<sk_I<TYPE>_pop_free>() and
B<sk_I<TYPE>_sort>() do not return values.
B<sk_I<TYPE>_pop>(), B<sk_I<TYPE>_shift>(), B<sk_I<TYPE>_delete>() and
B<sk_I<TYPE>_delete_ptr>() return a pointer to the deleted element or NULL
on error.
B<sk_I<TYPE>_insert>(), B<sk_I<TYPE>_push>() and B<sk_I<TYPE>_unshift>() return
the total number of elements in the stack and 0 if an error occurred.
B<sk_I<TYPE>_push>() further returns -1 if I<sk> is NULL.
B<sk_I<TYPE>_set>() returns a pointer to the replacement element or NULL on
error.
B<sk_I<TYPE>_find>() and B<sk_I<TYPE>_find_ex>() return an index to the found
element or B<-1> on error.
B<sk_I<TYPE>_is_sorted>() returns B<1> if the stack is sorted and B<0> if it is
not.
B<sk_I<TYPE>_dup>() and B<sk_I<TYPE>_deep_copy>() return a pointer to the copy
of the stack or NULL on error.
=head1 HISTORY
Before OpenSSL 1.1.0, this was implemented via macros and not inline functions
and was not a public API.
B<sk_I<TYPE>_reserve>() and B<sk_I<TYPE>_new_reserve>() were added in OpenSSL
1.1.1.
=head1 COPYRIGHT
Copyright 2000-2021 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| openssl/openssl | doc/man3/DEFINE_STACK_OF.pod | Perl | apache-2.0 | 13,544 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::ConversionValueRuleService::MutateConversionValueRulesResponse;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Response for ConversionValueRuleService.MutateConversionValueRules:
# carries the per-operation results and, when partial failure is in effect,
# the aggregated partial-failure error.
sub new {
  my ($class, $args) = @_;
  my $self = {
    partialFailureError => $args->{partialFailureError},
    results             => $args->{results},
  };
  # Strip keys that were never assigned so the serialized JSON stays concise.
  remove_unassigned_fields($self, $args);
  return bless $self, $class;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/ConversionValueRuleService/MutateConversionValueRulesResponse.pm | Perl | apache-2.0 | 1,140 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::kemp::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Build the Kemp SNMP plugin: record its version and register the
# available modes (a mix of generic snmp_standard modes and Kemp-specific
# virtual-server / real-server / HA modes).
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '0.1';

    my %modes = (
        'cpu-detailed'    => 'snmp_standard::mode::cpudetailed',
        'interfaces'      => 'snmp_standard::mode::interfaces',
        'load'            => 'snmp_standard::mode::loadaverage',
        'list-interfaces' => 'snmp_standard::mode::listinterfaces',
        'list-vs'         => 'network::kemp::snmp::mode::listvs',
        'ha-status'       => 'network::kemp::snmp::mode::hastatus',
        'memory'          => 'snmp_standard::mode::memory',
        'rs-status'       => 'network::kemp::snmp::mode::rsstatus',
        'vs-status'       => 'network::kemp::snmp::mode::vsstatus',
    );
    # Assign in place: SUPER::new may already have installed the hashref.
    %{$self->{modes}} = %modes;

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Kemp equipments in SNMP.
=cut
| Sims24/centreon-plugins | network/kemp/snmp/plugin.pm | Perl | apache-2.0 | 1,944 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2018] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio -
=head1 SYNOPSIS
=head1 DESCRIPTION
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio;
use strict;
use warnings;
use Bio::EnsEMBL::Analysis::Runnable;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Analysis::Runnable);
# Construct the runnable and record the path to the ab initio matrix file.
sub new {
  my ($class, @args) = @_;
  my $self = $class->SUPER::new(@args);

  # Pull the -MATRIX constructor argument and stash it through the
  # matrix() accessor, which also resolves/validates the file path.
  my ($matrix_file) = rearrange(['MATRIX'], @args);
  $self->matrix($matrix_file);

  return $self;
}
#containers
=head2 matrix
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio
Arg [2] : string, matrix file
Function : container for path to matrix file, when passed a filename it
will use the find_file method to check for its existance
Returntype: string
Exceptions:
Example :
=cut
# Getter/setter for the matrix file path. When a filename is supplied it
# is first resolved through find_file(), which checks that it exists.
sub matrix {
  my ($self, $filename) = @_;
  $self->{'matrix'} = $self->find_file($filename) if $filename;
  return $self->{'matrix'};
}
=head2 peptides
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio
Arg [2] : string, peptide sequence from genscan results
Function : container for the peptides sequences from genscan results
Returntype: arrayref
Exceptions:
Example :
=cut
# Getter/setter for the peptide sequences parsed out of the genscan
# results (hashref keyed by group name).
sub peptides {
  my ($self, $peps) = @_;
  $self->{'peptides'} = $peps if $peps;
  return $self->{'peptides'};
}
=head2 exon_groups
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio
Arg [2] : string, group name
Arg [3] : Bio::EnsEMBL::PredictionExon
Function : stores exons in arrayrefs in a hash keyed on group names
Returntype: hashref
Exceptions: throws if passed an exon which isnt a
Bio::EnsEMBL::PredictionExon
Example :
=cut
# Accumulate prediction exons into groups keyed by group name. Called with
# no arguments it simply returns the (possibly empty) group hash. Throws
# if the supplied exon is not a Bio::EnsEMBL::PredictionExon.
sub exon_groups {
  my ($self, $group, $exon) = @_;

  $self->{'exon_groups'} ||= {};

  if ($group && $exon) {
    throw("must be passed an exon object ".
          "Bio::EnsEMBL::PredictionExon not an $exon ".
          "BaseAbInitio:exon_groups")
      unless $exon->isa('Bio::EnsEMBL::PredictionExon');
    # push autovivifies the per-group arrayref on first use.
    push @{ $self->{'exon_groups'}{$group} }, $exon;
  }

  return $self->{'exon_groups'};
}
#utility methods
=head2 run_analysis
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio
Arg [2] : string, program name
Function : create and open a commandline for one
of the ab initio gene finding programs
Returntype: none
Exceptions: throws if the program in not executable or the system
command fails to execute
Example :
=cut
sub run_analysis{
  my ($self, $program) = @_;
  # Fall back to the analysis' configured program when none is passed in.
  if(!$program){
    $program = $self->program;
  }
  throw($program." is not executable BaseAbInitio::run_analysis ")
     unless($program && -x $program);
  # Command shape: <program> <matrix file> <query fasta> > <results file>.
  # NOTE(review): the paths are interpolated into a shell command string;
  # they are assumed to be pipeline-controlled, not user input.
  my $command = $program." ".$self->matrix." ".$self->queryfile." > ".
    $self->resultsfile;
  print "Running analysis ".$command."\n";
  # system() returns 0 on success; any other exit status is fatal.
  system($command) == 0 or throw("FAILED to run ".$command);
}
=head2 create_transcripts
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio
Function : use the groups of exons to create prediction transcripts
Returntype: none
Exceptions: none
Example :
=cut
sub create_transcripts{
  my ($self) = @_;
  my @transcripts;
  my $ff = $self->feature_factory;
  # Each exon group (keyed by genscan group name) becomes one prediction
  # transcript on the query slice.
  my %exon_groups = %{$self->exon_groups};
  foreach my $group(keys(%exon_groups)){
    my @exons = @{$exon_groups{$group}};
    my $transcript = $ff->create_prediction_transcript(\@exons, $self->query,
                                                       $self->analysis);
    # Record the originating group: calculate_phases() later uses seqname
    # to look up the matching genscan peptide.
    $transcript->seqname($group);
    push(@transcripts, $transcript);
  }
  $self->output(\@transcripts);
}
=head2 calculate_phases
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio
Function : works out which phase to make the exons to get
a complete cds
Returntype: none
Exceptions: throws if it cant find a translation
for a transcript
Example :
=cut
sub calculate_phases{
  my ($self) = @_;
  # Try each possible start phase until the transcript's own translation
  # matches the peptide genscan predicted for that group.
  my @phases = (0, 1, 2);
  my @output;
  my $ff = $self->feature_factory;
  my $peptides = $self->peptides;
  TRANS:foreach my $trans(@{$self->output}){
    my @exons = @{$trans->get_all_Exons};
    foreach my $phase(@phases){
      # Re-phase the exons and rebuild a candidate transcript.
      my @temp_exons = @{$self->set_phases($phase, \@exons)};
      my $new = $ff->create_prediction_transcript(\@temp_exons,
                                                  $self->query,
                                                  $self->analysis);
      my $pep = $new->translate->seq;
      my $peptide = $peptides->{$trans->seqname};
      # Normalise unknown residues in both peptides, then strip a single
      # leading/trailing X before comparing.
      my ($ensembl, $genscan) = $self->subsitute_x_codes($pep,
                                                         $peptide);
      $ensembl =~ s/^x//i;
      $ensembl =~ s/x$//i;
      $genscan =~ s/^x//i;
      $genscan =~ s/x$//i;
      # NOTE(review): $genscan is interpolated into the pattern unescaped;
      # any residual '*' would act as a regex quantifier — consider
      # /\Q$genscan\E/ if stop markers can survive to this point.
      if ($ensembl =~ /$genscan/){
        push(@output, $new);
        next TRANS;
      }
    }
    # No phase produced a matching translation: hard failure.
    throw("Failed to find translation for ".$trans." ".$exons[0]->seqname)
  }
  # Replace the phase-less transcripts with the phased ones.
  $self->clean_output;
  $self->output(\@output);
}
=head2 set_phases
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Genscan
Arg [2] : number, start phase,
Arg [3] : arrayref of Bio::EnsEMBL::PredictionExons
Function : starting with the given phase sets of the phase of
each exon on the basis of the end phase of the last. This is done
after ordering them on the basis of there strand
Returntype: arrayref of
Exceptions:
Example :
=cut
# Order the exons 5'->3' along the transcript (by strand) and assign
# phases so each exon's phase follows on from the end phase of the
# previous one, starting from $phase. Returns the (re-ordered) arrayref.
sub set_phases {
  my ($self, $phase, $exons) = @_;

  throw("Can't set phases if have no exons ".$exons." ".@$exons)
    if @$exons == 0;

  my @ordered = $exons->[0]->strand == 1
    ? sort { $a->start <=> $b->start } @$exons
    : sort { $b->start <=> $a->start } @$exons;
  @$exons = @ordered;

  for my $exon (@$exons) {
    $exon->phase($phase);
    # End phase of this exon becomes the start phase of the next.
    $phase = ($exon->phase + $exon->length) % 3;
  }
  return $exons;
}
=head2 subsitute_x_codes
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Genscan
Arg [2] : string, ensembl produced peptides
Arg [3] : string, genscan predicted peptide
Function : makes sure x's and length's of peps are the same
to allow fair comparison
Returntype: string, string
Exceptions:
Example :
=cut
# Mirror 'X' (unknown residue) codes between the Ensembl-translated
# peptide and the genscan-predicted peptide, after stripping a trailing
# stop marker and trimming a single-residue length difference, so the two
# strings can be compared fairly. Returns ($ensembl_pep, $genscan_pep).
sub subsitute_x_codes {
  my ($self, $ensembl_pep, $genscan_pep) = @_;

  # Genscan peptides may carry a trailing stop-codon marker.
  $genscan_pep =~ s/\*$//;

  # If one peptide is exactly one residue longer, drop its last residue.
  chop($ensembl_pep) if length($ensembl_pep) == length($genscan_pep) + 1;
  chop($genscan_pep) if length($genscan_pep) == length($ensembl_pep) + 1;

  # Copy X positions across, but only into the string that is at least as
  # long (substr must not write past the end of the shorter string).
  if (length($genscan_pep) >= length($ensembl_pep)) {
    my $pos = -1;
    while (($pos = index($ensembl_pep, 'X', $pos + 1)) >= 0) {
      substr($genscan_pep, $pos, 1) = 'X';
    }
  }
  if (length($ensembl_pep) >= length($genscan_pep)) {
    my $pos = -1;
    while (($pos = index($genscan_pep, 'X', $pos + 1)) >= 0) {
      substr($ensembl_pep, $pos, 1) = 'X';
    }
  }

  return $ensembl_pep, $genscan_pep;
}
1;
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Runnable/BaseAbInitio.pm | Perl | apache-2.0 | 8,141 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::powershell::wsus::computersstatus;
use strict;
use warnings;
use centreon::common::powershell::functions;
# Build the PowerShell script that queries a WSUS server for computer
# status counters and prints them to stdout as a single JSON object.
# Expected %options: wsus_server, use_ssl, wsus_port, not_updated_since,
# plus whatever the shared escape/convert helpers consume.
sub get_powershell {
    my (%options) = @_;

    # Pin an en-us culture so output formatting is locale-independent.
    my $ps = '
$culture = new-object "System.Globalization.CultureInfo" "en-us"
[System.Threading.Thread]::CurrentThread.CurrentUICulture = $culture
';
    # Inject the shared JSON helpers (these define ConvertTo-JSON-20,
    # used below for older PowerShell versions).
    $ps .= centreon::common::powershell::functions::escape_jsonstring(%options);
    $ps .= centreon::common::powershell::functions::convert_to_json(%options);
    # NOTE(review): option values are interpolated directly into the
    # PowerShell source — assumed to come from validated plugin options.
    $ps .= '
$wsusServer = "' . $options{wsus_server} . '"
$useSsl = ' . $options{use_ssl} . '
$wsusPort = ' . $options{wsus_port} . '
$notUpdatedSince = ' . $options{not_updated_since} . '
Try {
    [void][reflection.assembly]::LoadWithPartialName("Microsoft.UpdateServices.Administration")
} Catch {
    Write-Host $Error[0].Exception
    exit 1
}
$ProgressPreference = "SilentlyContinue"
Try {
    $ErrorActionPreference = "Stop"
    $wsusObject = [Microsoft.UpdateServices.Administration.AdminProxy]::getUpdateServer($wsusServer, $useSsl, $wsusPort)
    $wsusStatus = $wsusObject.GetStatus()
    $notUpdatedSinceTimespan = new-object TimeSpan($notUpdatedSince, 0, 0, 0)
    $computersNotContactedSinceCount = $wsusObject.GetComputersNotContactedSinceCount([DateTime]::UtcNow.Subtract($notUpdatedSinceTimespan))
    $computerTargetScope = new-object Microsoft.UpdateServices.Administration.ComputerTargetScope
    $unassignedComputersCount = $wsusObject.GetComputerTargetGroup([Microsoft.UpdateServices.Administration.ComputerTargetGroupId]::UnassignedComputers).GetComputerTargets().Count
    $item = @{
        ComputerTargetsNeedingUpdatesCount = $wsusStatus.ComputerTargetsNeedingUpdatesCount;
        ComputerTargetsWithUpdateErrorsCount = $wsusStatus.ComputerTargetsWithUpdateErrorsCount;
        ComputersUpToDateCount = $wsusStatus.ComputersUpToDateCount;
        ComputersNotContactedSinceCount = $computersNotContactedSinceCount;
        UnassignedComputersCount = $unassignedComputersCount
    }
    $jsonString = $item | ConvertTo-JSON-20
    Write-Host $jsonString
} Catch {
    Write-Host $Error[0].Exception
    exit 1
}
exit 0
';

    return $ps;
}
1;
__END__
=head1 DESCRIPTION
Method to get WSUS computers informations.
=cut
| Tpo76/centreon-plugins | centreon/common/powershell/wsus/computersstatus.pm | Perl | apache-2.0 | 3,021 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::sensors::hwgste::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Build the HWg-STE SNMP plugin: set the version string and register the
# single 'sensors' mode.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '0.9';
    # Assign in place: SUPER::new may already have installed the hashref.
    %{$self->{modes}} = ('sensors' => 'hardware::sensors::hwgste::snmp::mode::sensors');

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check HWg-STE sensors in SNMP.
=cut
| Sims24/centreon-plugins | hardware/sensors/hwgste/snmp/plugin.pm | Perl | apache-2.0 | 1,286 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::microsoft::dhcp::snmp::mode::listsubnets;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;
    # This mode takes no mode-specific command-line options.
    $options{options}->add_options(arguments => {
    });
    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    # Generic mode initialisation only; nothing extra to validate here.
    $self->SUPER::init(%options);
}
# Column layout of the Microsoft DHCP scope table: one row per declared
# subnet, indexed by the subnet address.
my $mapping = {
    address => { oid => '.1.3.6.1.4.1.311.1.3.2.1.1.1' }, # subnetAdd
    used => { oid => '.1.3.6.1.4.1.311.1.3.2.1.1.2' }, # noAddInUse
    free => { oid => '.1.3.6.1.4.1.311.1.3.2.1.1.3' }, # noAddFree
    pending_offers => { oid => '.1.3.6.1.4.1.311.1.3.2.1.1.4' } # noPendingOffers
};
# Base OID of the scope table; walked once in manage_selection().
my $oid_scope_table = '.1.3.6.1.4.1.311.1.3.2.1.1';
# Walk the DHCP scope table once and return a hashref keyed by SNMP
# instance: { instance => { address => ..., status => enabled|disabled } }.
# A subnet with zero used, free and pending addresses is reported as
# 'disabled'.
sub manage_selection {
    my ($self, %options) = @_;

    my $snmp_result = $options{snmp}->get_table(oid => $oid_scope_table);

    my $results = {};
    foreach my $oid (keys %$snmp_result) {
        # Anchor only on the subnetAdd column. \Q..\E escapes the OID so
        # its dots are matched literally instead of as regex wildcards
        # (previously unescaped, which could mis-match other columns).
        next if ($oid !~ /^\Q$mapping->{address}->{oid}\E\.(.*)$/);
        my $instance = $1;

        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);

        my $status = ($result->{free} == 0 && $result->{used} == 0 && $result->{pending_offers} == 0)
            ? 'disabled' : 'enabled';

        $results->{$instance} = { address => $result->{address}, status => $status };
    }

    return $results;
}
# Mode entry point: emit one "[address: ...][status: ...]" long-output
# line per discovered subnet, then exit with an OK status.
sub run {
    my ($self, %options) = @_;
    my $results = $self->manage_selection(snmp => $options{snmp});
    foreach (sort keys %$results) {
        $self->{output}->output_add(long_msg =>
            sprintf(
                '[address: %s][status: %s]',
                $results->{$_}->{address},
                $results->{$_}->{status}
            )
        );
    }
    $self->{output}->output_add(
        severity => 'OK',
        short_msg => 'List subnets:'
    );
    # Listing modes never produce perfdata; force the long output through.
    $self->{output}->display(nolabel => 1, force_ignore_perfdata => 1, force_long_output => 1);
    $self->{output}->exit();
}
# Declare the attributes exposed per subnet in discovery (XML) output.
sub disco_format {
    my ($self, %options) = @_;
    $self->{output}->add_disco_format(elements => ['address', 'status']);
}
# Emit one discovery entry per subnet found by manage_selection().
sub disco_show {
    my ($self, %options) = @_;
    my $results = $self->manage_selection(snmp => $options{snmp});
    foreach (sort keys %$results) {
        $self->{output}->add_disco_entry(%{$results->{$_}});
    }
}
1;
__END__
=head1 MODE
List dhcp subnets.
=over 8
=back
=cut
| Tpo76/centreon-plugins | apps/microsoft/dhcp/snmp/mode/listsubnets.pm | Perl | apache-2.0 | 3,284 |
package Connection::RiakAdapter;
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
use utf8;
use Carp qw(cluck confess);
use UI::Utils;
use Net::Riak;
use Data::Dumper;
use Mojo::UserAgent;
use JSON;
use IO::Socket::SSL qw();
use File::Slurp;
# This Perl Module was needed to better support SSL for the 'Vault'
use LWP::UserAgent qw();
use LWP::ConnCache;
use constant RIAK_ROOT_URI => "riak";
# The purpose of this class is to provide for an easy method
# to 'mock' Riak calls.
my $ua;
my $conn_cache;
my $riak_server;
my $username;
my $password;
# Construct an adapter with the given credentials.
# NOTE(review): $username, $password and $ua are file-scoped, not
# per-object — constructing a second adapter silently overwrites the
# credentials used by every existing instance. Confirm single-instance
# usage before relying on multiple adapters.
sub new {
    my $class = shift;
    $username = shift;
    $password = shift;
    my $self = bless {
        c => $class,
        riak_server => $riak_server,
        username => $username,
        password => $password,
    }, $class;
    # Shared user agent: 20s timeout, TLS verification disabled
    # (internal service with self-signed certificates).
    $ua = LWP::UserAgent->new();
    $ua->timeout(20);
    $ua->ssl_opts( verify_hostname => 0, SSL_verify_mode => 0x00 );
    # Reuse one connection cache across adapters to keep sockets warm.
    if (!defined $conn_cache) {
        $conn_cache = LWP::ConnCache->new( { total_capacity => 4096 } );
    }
    $ua->conn_cache($conn_cache);
    return $self;
}
# Record the Riak server host[:port]; also updates the file-scoped copy
# used by get_url() for every instance.
sub set_server {
    my $self = shift;
    $riak_server = shift;
    $self->{'riak_server'} = $riak_server;
}
# Build the relative Riak key URI: /riak/<bucket>/<key>.
# NOTE(review): File::Spec is never use'd in this file — it appears to be
# loaded transitively by another module; consider an explicit
# "use File::Spec;" to make the dependency robust.
sub get_key_uri {
    my $self = shift;
    my $bucket = shift || confess("Supply a bucket");
    my $key = shift || confess("Supply a key");
    my @uri = ( RIAK_ROOT_URI, $bucket, $key );
    return File::Spec->join( "/", @uri );
}
# Liveness probe against Riak's /ping endpoint; returns the HTTP::Response.
sub ping {
    my $self = shift;
    my $fqdn = $self->get_url("/ping");
    return $ua->get($fqdn);
}
# Fetch Riak's /stats endpoint; returns the HTTP::Response.
sub stats {
    my $self = shift;
    my $fqdn = $self->get_url("/stats");
    return $ua->get($fqdn);
}
# Store $value under bucket/key. Content type defaults to
# 'application/json'. Returns the HTTP::Response from the PUT.
# NOTE(review): a falsy payload ("0", "") trips the confess guard —
# confirm callers never need to store such values.
sub put {
    my $self = shift;
    my $bucket = shift || confess("Supply a bucket");
    my $key = shift || confess("Supply a key");
    my $value = shift || confess("Supply a value");
    my $content_type = shift || 'application/json';
    my $key_uri = $self->get_key_uri( $bucket, $key );
    my $fqdn = $self->get_url($key_uri);
    return $ua->put( $fqdn, Content => $value, 'Content-Type'=> $content_type );
}
# Delete the value stored under bucket/key. Returns the HTTP::Response.
sub delete {
    my $self = shift;
    my $bucket = shift || confess("Supply a bucket");
    my $key = shift || confess("Supply a key");
    my $key_uri = $self->get_key_uri( $bucket, $key );
    # BUG FIX: the key URI was previously passed through get_url() twice,
    # prepending the scheme/host/credentials a second time and producing
    # a malformed request URL. Build the full URL exactly once.
    return $ua->delete( $self->get_url($key_uri) );
}
# Fetch the value stored under bucket/key. Returns the HTTP::Response.
sub get {
    my $self = shift;
    my $bucket = shift || confess("Supply a bucket");
    my $key = shift || confess("Supply a key");
    # Build the relative key URI, then the fully-qualified URL, in one go.
    return $ua->get( $self->get_url( $self->get_key_uri( $bucket, $key ) ) );
}
# Run a Solr-style search against the named index; the caller-supplied
# query string is appended verbatim to the search URL. Returns the
# HTTP::Response.
sub search {
    my $self          = shift;
    my $index         = shift || confess("Supply a search index");
    my $search_string = shift || confess("Supply a search string");
    my $query_uri = "/search/query/$index?wt=json&" . $search_string;
    return $ua->get( $self->get_url($query_uri) );
}
#MOJOPlugins/Riak
# Build a fully-qualified Riak URL for the given URI path, embedding the
# basic-auth credentials in the authority part of the URL.
# NOTE(review): credentials embedded in URLs leak into logs more easily
# than an Authorization header — confirm this is acceptable here.
sub get_url {
    my $self = shift;
    my $uri = shift || "";
    # (Removed an unused "my $url;" local.)
    my $base_url = "https://$username:$password@";
    # Normalise to a leading slash so host and path join cleanly.
    if ( $uri !~ m/^\// ) {
        $uri = "/" . $uri;
    }
    return $base_url . $riak_server . $uri;
}
1;
| knutsel/traffic_control-1 | traffic_ops/app/lib/Connection/RiakAdapter.pm | Perl | apache-2.0 | 3,534 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::RunnableDB::TRF -
=head1 SYNOPSIS
my $runnable = Bio::EnsEMBL::Analysis::RunnableDB::TRF->
new(
-input_id => 'contig::AL805961.22.1.166258:1:166258:1',
-db => $db,
-analysis => $analysis,
);
$runnable->fetch_input;
$runnable->run;
$runnable->write_output;
=head1 DESCRIPTION
This module provides an interface between the ensembl database and
the Runnable TRF which wraps the program TRF
This module can fetch appropriate input from the database
pass it to the runnable then write the results back to the database
in the repeat_feature and repeat_consensus tables
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveAssemblyLoading::HiveTRF;
use strict;
use warnings;
use feature 'say';
use Bio::EnsEMBL::Analysis::Runnable::TRF;
use Bio::EnsEMBL::Variation::Utils::FastaSequence qw(setup_fasta);
use parent('Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBaseRunnableDB');
=head2 fetch_input
Arg [1] : Bio::EnsEMBL::Analysis::RunnableDB::TRF
Function : fetch data out of database and create runnable
Returntype: 1
Exceptions: none
Example :
=cut
sub fetch_input{
  my ($self) = @_;
  my $dba = $self->hrdb_get_dba($self->param('target_db'));
  my $rfa = $dba->get_RepeatFeatureAdaptor();
  # Cache the repeat feature adaptor for write_output.
  $self->get_adaptor($rfa);
  # Three ways to obtain genomic sequence, in order of preference:
  # an indexed genome flatfile, an attached dna db, or dna present in
  # the target db itself.
  if($self->param('use_genome_flatfile')) {
    say "Ingoring dna table and using fasta file for sequence fetching";
    unless($self->param_required('genome_file') && -e $self->param('genome_file')) {
      $self->throw("You selected to use a flatfile to fetch the genome seq, but did not find the flatfile. Path provided:\n".$self->param('genome_file'));
    }
    # Redirects all sequence fetching to the indexed fasta file.
    setup_fasta(
      -FASTA => $self->param_required('genome_file'),
    );
  } elsif($self->param('dna_db')) {
    say "Attaching dna db to target";
    my $dna_dba = $self->hrdb_get_dba($self->param('dna_db'));
    $dba->dnadb($dna_dba);
  } else {
    say "Assuming the target db has dna";
  }
  # if($self->param_is_defined('dna_db')) {
  #   say "Attaching dna_db to output db adaptor";
  #   my $dna_dba = $self->hrdb_get_dba($self->param('dna_db'));
  #   $dba->dnadb($dna_dba);
  # } else {
  #   say "No dna_db param defined, so assuming target_db has dna";
  # }
  $self->hrdb_set_con($dba,'target_db');
  # The input id is expected to be an arrayref of slice names.
  my $slice_array = $self->param('iid');
  unless(ref($slice_array) eq "ARRAY") {
    $self->throw("Expected an input id to be an array reference. Type found: ".ref($slice_array));
  }
  # Build a transient Analysis describing this TRF run; binary path and
  # command line come from pipeline parameters.
  my $analysis = Bio::EnsEMBL::Analysis->new(
                                              -logic_name => $self->param('logic_name'),
                                              -module => $self->param('module'),
                                              -program_file => $self->param('trf_path'),
                                              -parameters => $self->param('commandline_params'),
                                            );
  $self->analysis($analysis);
  my %parameters;
  if($self->parameters_hash){
    %parameters = %{$self->parameters_hash};
  }
  # One TRF runnable per input slice.
  foreach my $slice_name (@{$slice_array}) {
    my $slice = $self->fetch_sequence($slice_name,$dba);
    my $runnable = Bio::EnsEMBL::Analysis::Runnable::TRF->new
      (
        -query => $slice,
        -program => $analysis->program_file,
        -analysis => $analysis,
        %parameters,
      );
    $self->runnable($runnable);
  }
  return 1;
}
=head2 get_adaptor
Arg [1] : Bio::EnsEMBL::Analysis::RunnableDB::TRF
Arg [2] : Bio::EnsEMBL::DBSQL::RepeatFeatureAdaptor
Function : get/set repeatfeature adaptor
Returntype: Bio::EnsEMBL::DBSQL::RepeatFeatureAdaptor
Exceptions: none
Example :
=cut
sub get_adaptor {
  my ($self,$rfa) = @_;
  # Cache the RepeatFeatureAdaptor in a pipeline param so later stages
  # can retrieve it without re-fetching the database adaptor.
  if($rfa) {
    $self->param('_rfa',$rfa);
  }
  return($self->param('_rfa'));
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/RunnableDB/HiveAssemblyLoading/HiveTRF.pm | Perl | apache-2.0 | 4,839 |
package MGRAST::Analysis;
use strict;
use warnings;
no warnings('once');
use Auth;
use Conf;
use List::Util qw(max min sum first);
use List::MoreUtils qw(natatime);
use DBI;
use JSON;
use Data::Dumper;
use MIME::Base64;
use LWP::UserAgent;
use Cache::Memcached;
use File::Temp qw/ tempfile tempdir /;
1;
# Construct an Analysis object around the analysis db handle ($dbh), the
# job-cache handle ($job_dbh) and an m5nr annotation version. Connects to
# the analysis database itself when no handle is supplied. Returns undef
# when the job-cache handle is missing or the database connect fails.
sub new {
  my ($class, $job_dbh, $dbh, $version) = @_;
  # get ach object if have lib
  # my $ach = undef;
  # eval {
  #     require Babel::lib::Babel;
  #     Babel::lib::Babel->import();
  #     $ach = new Babel::lib::Babel;
  # };
  # get memcache object — failure to load the module is non-fatal and
  # leaves $memd undef (caching simply disabled).
  my $memd = undef;
  eval {
      require Cache::Memcached;
      Cache::Memcached->import();
      $memd = new Cache::Memcached {'servers' => $Conf::web_memcache, 'debug' => 0, 'compress_threshold' => 10_000};
  };
  # connect to database
  unless ($dbh) {
    eval {
      my $host     = $Conf::mgrast_dbhost;
      my $database = $Conf::mgrast_db;
      my $user     = $Conf::mgrast_dbuser;
      my $password = $Conf::mgrast_dbpass;
      $dbh = DBI->connect("DBI:Pg:dbname=$database;host=$host", $user, $password,
			  { RaiseError => 1, AutoCommit => 0, PrintError => 0 }) ||
			    die "database connect error.";
    };
    if ($@) {
      warn "Unable to connect to metagenomics database: $@\n";
      return undef;
    }
  }
  unless ($job_dbh && ref($job_dbh)) {
    warn "Unable to connect to job_cache database\n";
    return undef;
  }
  $dbh->{pg_expand_array} = 1;
  # set sources: cache the sources table and id<->name lookup maps.
  my $srcs = $dbh->selectall_hashref("SELECT * FROM sources", "name");
  my %idsrc = map { $srcs->{$_}{_id}, $_ } keys %$srcs;
  my %srcid = map { $_, $srcs->{$_}{_id} } keys %$srcs;
  # get mgrast token
  #my $mgrast_token = undef;
  #if ($Conf::mgrast_oauth_name && $Conf::mgrast_oauth_pswd) {
  #    my $key = encode_base64($Conf::mgrast_oauth_name.':'.$Conf::mgrast_oauth_pswd);
  #    my $rep = Auth::globus_token($key);
  #    $mgrast_token = $rep ? $rep->{access_token} : undef;
  #}
  #### changed because globus has hard time handeling multiple tokens
  # NOTE(review): '.' binds tighter than '||', so when the config token is
  # undef this yields the truthy string "mgrast " rather than undef —
  # likely not the intent; confirm before changing.
  my $mgrast_token = "mgrast ".$Conf::mgrast_oauth_token || undef;
  # set json handle (raw utf8, unlimited size, non-ref values allowed)
  my $agent = LWP::UserAgent->new;
  my $json = JSON->new;
  $json = $json->utf8();
  $json->max_size(0);
  $json->allow_nonref;
  # create object
  my $self = { dbh      => $dbh,     # job data db_handle
               # ach    => $ach,     # ach/babel object
               api      => $Conf::api_url || "http://api.metagenomics.anl.gov",
               jcache   => $job_dbh,  # job cache db_handle
               agent    => $agent,    # LWP agent handle
               memd     => $memd,     # memcached handle
               json     => $json,     # json handle
               chunk    => 2500,      # max # md5s to query at once
               jobs     => [],        # array: job_id
               job_map  => {},        # hash: mg_id => job_id
               mg_map   => {},        # hash: job_id => mg_id
               name_map => {},        # hash: mg_id => job_name
               type_map => {},        # hash: mg_id => seq_type
               stat_map => {},        # hash: mg_id => statistics
               sources  => $srcs,     # hash: source_name => { col => value }
               id_src   => \%idsrc,   # hash: source_id => source_name
               src_id   => \%srcid,   # hash: source_name => source_id
               expire   => $Conf::web_memcache_expire || 172800, # use config or 48 hours
               version  => $version || $Conf::m5nr_annotation_version || 1,
               mgrast_token => $mgrast_token,
               jtbl => { md5      => 'job_md5s',
                         ontology => 'job_ontologies',
                         function => 'job_functions',
                         organism => 'job_organisms',
                         lca      => 'job_lcas' },
               atbl => { md5      => 'md5s',
                         ontology => 'ontologies',
                         function => 'functions',
                         organism => 'organisms_ncbi' }
             };
  bless $self, $class;
  return $self;
}
# Close database handles on destruction; the memcached handle needs no
# explicit teardown.
sub DESTROY {
  my ($self) = @_;
  if ($self->{dbh})    { $self->{dbh}->disconnect; }
  #if ($self->{ach})    { $self->{ach}->DESTROY; }
  if ($self->{jcache}) { $self->{jcache}->disconnect; }
}
#### Internal accessors #################################################
# Thin read-only getters over the fields initialised in new(); the _q*
# variants return ready-to-embed SQL fragments.

sub _dbh {
  my ($self) = @_;
  return $self->{dbh};
}
#sub ach {
#  my ($self) = @_;
#  return $self->{ach};
#}
sub _api {
  my ($self) = @_;
  return $self->{api};
}
sub _jcache {
  my ($self) = @_;
  return $self->{jcache};
}
sub _agent {
  my ($self) = @_;
  return $self->{agent};
}
sub _memd {
  my ($self) = @_;
  return $self->{memd};
}
sub _json {
  my ($self) = @_;
  return $self->{json};
}
sub _chunk {
  my ($self) = @_;
  return $self->{chunk};
}
sub _jobs {
  my ($self) = @_;
  return $self->{jobs};
}
# SQL fragment restricting a query to the currently loaded jobs.
sub _qjobs {
  my ($self) = @_;
  return "job IN (".join(',', @{$self->{jobs}}).")";
}
sub _job_map {
  my ($self) = @_;
  return $self->{job_map};
}
sub _mg_map {
  my ($self) = @_;
  return $self->{mg_map};
}
sub _name_map {
  my ($self) = @_;
  return $self->{name_map};
}
sub _type_map {
  my ($self) = @_;
  return $self->{type_map};
}
sub _id_src {
  my ($self) = @_;
  return $self->{id_src};
}
sub _src_id {
  my ($self) = @_;
  return $self->{src_id};
}
sub _sources {
  my ($self) = @_;
  return $self->{sources};
}
sub _expire {
  my ($self) = @_;
  return $self->{expire};
}
sub _version {
  my ($self) = @_;
  return $self->{version};
}
sub _mgrast_token {
  my ($self) = @_;
  return $self->{mgrast_token};
}
# SQL fragment restricting a query to the current annotation version.
sub _qver {
  my ($self) = @_;
  return "version = ".$self->{version};
}
sub _jtbl {
  my ($self) = @_;
  return $self->{jtbl};
}
sub _atbl {
  my ($self) = @_;
  return $self->{atbl};
}
# True when the given metagenome id is already part of the job set.
sub _has_job {
  my ($self, $mgid) = @_;
  return exists($self->_job_map->{$mgid}) ? 1 : 0;
}
# add values to $self->{jobs} based on metagenome_id list
# Add the given metagenome ids to the current job set, skipping any that
# are already loaded, then refresh the derived job/mg lookup maps.
sub add_jobs {
  my ($self, $mgids) = @_;
  if ($mgids && scalar(@$mgids)) {
    my @new_mg  = grep { ! $self->_has_job($_) } @$mgids;
    my $new_map = $self->_get_jobid_map(\@new_mg);
    %{ $self->{job_map} } = ( %{$self->{job_map}}, %$new_map );
    $self->_set_data();
  }
}
# set values for $self->{jobs} and $self->{jtbl} based on metagenome_id list
# Reset the job set from a list of ids. When $jids is defined, $mgids is
# treated as a list of job ids rather than metagenome ids. The name and
# type caches are cleared either way.
sub set_jobs {
  my ($self, $mgids, $jids) = @_;
  $self->{name_map} = {};
  $self->{type_map} = {};
  if (defined($jids)) {
    $self->{job_map} = $self->_get_jobid_map($mgids, 1);
  } else {
    $self->{job_map} = $self->_get_jobid_map($mgids);
  }
  $self->_set_data();
}
# populate obj with all public jobs
# Load every public, viewable job into the object.
sub set_public_jobs {
  my ($self) = @_;
  my $mgids = $self->_jcache->selectcol_arrayref("SELECT metagenome_id FROM Job WHERE public = 1 AND viewable = 1");
  if ($mgids && (@$mgids > 0)) {
    $self->set_jobs($mgids);
  }
}
# Rebuild the derived lookups (mg_map, jobs list) from job_map.
sub _set_data {
  my ($self) = @_;
  my %rev = reverse %{$self->{job_map}};
  $self->{mg_map} = \%rev;
  @{$self->{jobs}} = values %{$self->{job_map}};
}
# Look up viewable jobs in the job-cache DB and return a hashref of
# metagenome_id => job_id.  When $jids is true, $mgids are interpreted
# as job ids instead of metagenome ids.  Unless $no_names is set, also
# populates $self->{name_map} and $self->{type_map} keyed by
# metagenome_id as a side effect.
sub _get_jobid_map {
    my ($self, $mgids, $jids, $no_names) = @_;
    unless ($mgids && scalar(@$mgids)) {
        return {};
    }
    my $hash = {};
    # ids are single-quoted here; assumes they contain no quote chars
    my $list = join(",", map {"'$_'"} @$mgids);
    my $rows;
    if ($jids) {
        $rows = $self->_jcache->selectall_arrayref("SELECT metagenome_id, job_id, name, sequence_type FROM Job WHERE job_id IN ($list) AND viewable = 1");
    } else {
        $rows = $self->_jcache->selectall_arrayref("SELECT metagenome_id, job_id, name, sequence_type FROM Job WHERE metagenome_id IN ($list) AND viewable = 1");
    }
    unless ($no_names) {
        map { $self->{name_map}->{$_->[0]} = $_->[2] } @$rows;
        map { $self->{type_map}->{$_->[0]} = $_->[3] } @$rows;
    }
    if ($rows && (@$rows > 0)) {
        %$hash = map { $_->[0], $_->[1] } @$rows;
    }
    return $hash;
    # metagenome_id => job_id
}
# Raw (pre-QC) sequence count for one metagenome, read from the
# JobStatistics table of the job-cache DB.  Returns 0 when absent.
sub _get_seq_count {
    my ($self, $mgid) = @_;
    # NOTE(review): $mgid is interpolated into the SQL string unescaped;
    # callers must pass a trusted metagenome id.
    my $sql = "SELECT js.value FROM JobStatistics js, Job j WHERE j._id = js.job AND js.tag = 'sequence_count_raw' AND j.metagenome_id = '$mgid'";
    my $rows = $self->_jcache->selectcol_arrayref($sql);
    return ($rows && (@$rows > 0)) ? $rows->[0] : 0;
}
# Return an arrayref of every job id that is fully loaded for the
# current schema version.
sub get_all_job_ids {
    my ($self) = @_;
    my $data = [];
    my $sth = $self->_dbh->prepare("SELECT DISTINCT job FROM job_info WHERE loaded IS TRUE AND ".$self->_qver);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @$data, $row[0];
    }
    $sth->finish;
    # commit releases the read transaction (AutoCommit is off)
    $self->_dbh->commit;
    return $data;
}
####################
# misc
####################
# Build a SQL WHERE clause from a list of condition strings.
# Undef/blank entries are dropped; returns "" when nothing remains,
# otherwise " WHERE cond1 AND cond2 ...".
sub _get_where_str {
    my ($self, $conds) = @_;
    return "" unless ($conds && (@$conds > 0));
    my @keep = grep { $_ && ($_ =~ /\S/) } @$conds;
    return (@keep > 0) ? " WHERE " . join(" AND ", @keep) : "";
}
# Run FragGeneScan on a DNA fasta string and return the predicted
# protein fasta text ("" when the tool is not configured or produced
# no output).  Temp files are cleaned up before returning.
sub _run_fraggenescan {
    my ($self, $fasta) = @_;
    unless ($Conf::run_fraggenescan) {
        return "";
    }
    # tempfile is presumably File::Temp::tempfile imported at file top -- TODO confirm
    my ($infile_hdl, $infile_name) = tempfile("fgs_in_XXXXXXX", DIR => $Conf::temp, SUFFIX => '.fna');
    print $infile_hdl $fasta;
    close $infile_hdl;
    my $fgs_cmd = $Conf::run_fraggenescan." -genome=$infile_name -out=$infile_name.fgs -complete=0 -train=454_30";
    # backtick output is discarded; results are read from the .fgs.faa file
    `$fgs_cmd`;
    my $output = "";
    if (open(FH, "<".$infile_name.".fgs.faa")) {
        while (<FH>) {
            $output .= $_;
        }
        close FH;
    }
    unlink($infile_name, $infile_name.".fgs.faa", $infile_name.".fgs.ffn", $infile_name.".fgs.out");
    return $output;
}
# Return the column names of a table by querying the PostgreSQL system
# catalogs (pg_attribute / pg_class).  Postgres-specific.
sub _get_table_cols {
    my ($self, $table) = @_;
    my $data = [];
    my $sth = $self->_dbh->prepare("SELECT a.attname FROM pg_attribute a, pg_class c WHERE c.oid = a.attrelid AND a.attnum > 0 AND c.relname = ?");
    $sth->execute($table) or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @$data, $row[0];
    }
    $sth->finish;
    $self->_dbh->commit;
    return $data;
}
####################
# data / statistics from shock
####################
# Memoized access to the statistics object of a metagenome.  Fetches
# from Shock on first use and caches the result (including an empty
# hashref on failure) in $self->{stat_map}.
sub _mg_stats {
    my ($self, $mgid) = @_;
    unless (exists $self->{stat_map}{$mgid}) {
        my $mgstats = $self->_get_mg_stats($mgid);
        if (! %$mgstats) {
            print STDERR "no statistics available for $mgid\n";
        }
        $self->{stat_map}{$mgid} = $mgstats;
    }
    return $self->{stat_map}{$mgid};
}
# Download and JSON-decode the statistics node of a metagenome from
# Shock.  Returns {} when the node is missing, the download fails, or
# the decoded payload carries an error field.
sub _get_mg_stats {
    my ($self, $mgid) = @_;
    # get node
    my $stat_node = $self->stat_node($mgid);
    unless ($stat_node && exists($stat_node->{id})) {
        return {};
    }
    # get content
    my $stats = {};
    eval {
        my @args = ('Authorization', $self->_mgrast_token);
        my $get = $self->_agent->get($Conf::shock_url.'/node/'.$stat_node->{id}.'?download', @args);
        $stats = $self->_json->decode( $get->content );
    };
    if ($@ || (! $stats) || (exists($stats->{error}) && $stats->{error})) {
        return {};
    }
    return $stats;
}
# Shock node holding the statistics object of a metagenome.
sub stat_node {
    my ($self, $metagenome_id) = @_;
    return $self->_get_mg_node($metagenome_id, 'data_type=statistics');
}
# Shock node holding the filtered similarity file of a metagenome.
sub sims_node {
    my ($self, $metagenome_id) = @_;
    return $self->_get_mg_node($metagenome_id, 'data_type=similarity&stage_name=filter.sims');
}
# Query Shock for the single node of the given $type attached to a
# metagenome.  Returns the first matching node record, or {} on any
# error / empty result.
sub _get_mg_node {
    my ($self, $mgid, $type) = @_;
    my $response = undef;
    my $query = '?query&limit=1&type=metagenome&'.$type.'&id=mgm'.$mgid;
    eval {
        my @args = ('Authorization', $self->_mgrast_token);
        my $get = $self->_agent->get($Conf::shock_url.'/node'.$query, @args);
        $response = $self->_json->decode( $get->content );
    };
    if ( $@ || (! ref($response)) ||
         (exists($response->{error}) && $response->{error}) ||
         (! $response->{data}) ||
         (scalar(@{$response->{data}}) == 0) ) {
        return {};
    }
    return $response->{data}[0];
}
# Fetch a byte range (seek/length) of a similarity file from Shock.
# Returns the raw text, or '' on missing arguments or download failure.
sub _get_sim_record {
    my ($self, $node_id, $seek, $length) = @_;
    unless ($node_id && defined($seek) && defined($length)) {
        return '';
    }
    my $data = '';
    eval {
        my @args = ('Authorization', $self->_mgrast_token);
        my $url = $Conf::shock_url.'/node/'.$node_id.'?download&seek='.$seek.'&length='.$length;
        my $get = $self->_agent->get($url, @args);
        $data = $get->content;
    };
    if ($@ || (! $data)) {
        return '';
    }
    return $data;
}
# Per-source hit statistics for one metagenome.
# source => type => [#, #, #, #, #]
sub get_source_stats {
    my ($self, $mgid) = @_;
    my $st = $self->_mg_stats($mgid);
    return (exists $st->{source}) ? $st->{source} : {};
}
# [ name, abundance ] pairs for one taxonomic level of one metagenome,
# or [] when the statistics object lacks that level.
sub get_taxa_stats {
    my ($self, $mgid, $taxa) = @_;
    my $st = $self->_mg_stats($mgid);
    return (exists($st->{taxonomy}) && exists($st->{taxonomy}{$taxa}))
         ? $st->{taxonomy}{$taxa}
         : [];
}
# [ top level name, abundance ] pairs for one ontology source of one
# metagenome, or [] when missing.
sub get_ontology_stats {
    my ($self, $mgid, $source) = @_;
    my $st = $self->_mg_stats($mgid);
    return (exists($st->{ontology}) && exists($st->{ontology}{$source}))
         ? $st->{ontology}{$source}
         : [];
}
# Precomputed rarefaction curve [ x, y ] pairs from the statistics
# object, or [] when missing.
sub get_rarefaction_coords {
    my ($self, $mgid) = @_;
    my $st = $self->_mg_stats($mgid);
    return (exists $st->{rarefaction}) ? $st->{rarefaction} : [];
}
# QC statistics of the requested type for one metagenome, or {} when
# the statistics object lacks it.
sub get_qc_stats {
    my ($self, $mgid, $type) = @_;
    my $st = $self->_mg_stats($mgid);
    return (exists($st->{qc}) && exists($st->{qc}{$type}))
         ? $st->{qc}{$type}
         : {};
}
# Histogram [ value, count ] pairs for one metagenome.
# $type is 'len' (read length) or 'gc' (GC content); $stage 'qc' selects
# the post-QC histogram, anything else the upload histogram.
sub get_histogram_nums {
    my ($self, $mgid, $type, $stage) = @_;
    my $stats = $self->_mg_stats($mgid);
    $stage = ($stage && ($stage eq 'qc')) ? 'post_qc' : 'upload';
    my $field = ($type eq 'len') ? 'length_histogram'
              : ($type eq 'gc')  ? 'gc_histogram'
              :                    '';
    if ($field && exists($stats->{$field}) && exists($stats->{$field}{$stage})) {
        return $stats->{$field}{$stage};
    }
    return [];
}
# Fetch raw similarity lines for a set of md5s of one job.
# $md5_seeks is [ [md5, seek, length], ... ]; byte ranges are read from
# the job's similarity node in Shock, sorted by seek for sequential IO.
sub get_md5_sims {
    # $md5_seeks = [md5, seek, length]
    my ($self, $jobid, $md5_seeks) = @_;
    my $sims = {};
    # _mg_map translates the job id back to its metagenome id
    my $sim_node = $self->sims_node($self->_mg_map->{$jobid});
    if ($md5_seeks && (@$md5_seeks > 0) && $sim_node && exists($sim_node->{id})) {
        @$md5_seeks = sort { $a->[1] <=> $b->[1] } @$md5_seeks;
        foreach my $set (@$md5_seeks) {
            my ($md5, $seek, $length) = @$set;
            my $rec = $self->_get_sim_record($sim_node->{id}, $seek, $length);
            chomp $rec;
            $sims->{$md5} = [ split(/\n/, $rec) ];
        }
    }
    return $sims;
    # md5 => [sim lines]
}
####################
# Math Functions
####################
# log of N choose R
# Natural log of the binomial coefficient C(n, r).
# Small cases are computed exactly; large cases fall back on the Stirling
# approximation in _gammaln().
# NOTE: _gammaln($x) approximates ln(x!) (i.e. ln Gamma(x+1)), so the
# factorial identity ln C(n,r) = ln(n!) - ln(r!) - ln((n-r)!) maps to
# _gammaln($n) - _gammaln($r) - _gammaln($n - $r).
# Fix: the previous code called _gammaln($n+1) - _gammaln($r+1) -
# _gammaln($n-$r), which over-counts by ln((n+1)/(r+1)); e.g. for r = 0
# the result must be 0 but was approximately ln(n+1).
sub _nCr2ln {
    my ($self, $n, $r) = @_;
    my $c = 1;
    if ($r > $n) {
        # kept from the original: callers treat this as a negligible term
        return $c;
    }
    if (($r < 50) && ($n < 50)) {
        # exact running product in normal space, then take the log
        map { $c = ($c * ($n - $_)) / ($_ + 1) } (0..($r-1));
        return log($c);
    }
    # Stirling branch (r <= n is guaranteed here, so the old unreachable
    # "else { $c = -1000 }" arm has been dropped)
    return $self->_gammaln($n) - $self->_gammaln($r) - $self->_gammaln($n - $r);
}
# This is Stirling's formula for gammaln, used for calculating nCr
# Stirling-type approximation used by _nCr2ln.
# Note this form, (x + 1/2) ln x - x + ln(2*pi)/2, approximates ln(x!)
# (that is, ln Gamma(x+1)), not ln Gamma(x).  Returns 0 for x <= 0.
# The constant 3.14159265458 differs slightly from pi in the 10th
# decimal; it is kept unchanged for bit-for-bit compatibility.
sub _gammaln {
    my ($self, $x) = @_;
    return 0 unless ($x > 0);
    my $lnx = log($x);
    return log(2 * 3.14159265458) / 2 + $x * $lnx + $lnx / 2 - $x;
}
####################
# All functions conducted on annotation tables
####################
# Build the full hierarchy map for a data type ('organism' or
# 'ontology'): end_node => [ names of its ancestor levels ].
# $get_ids keys the result by internal _id instead of name; $use_taxid
# keys organisms by ncbi_tax_id; $max_lvl truncates the hierarchy at the
# given level (with special-casing for the 3-level N/COG ontologies).
sub get_hierarchy {
    my ($self, $type, $src, $use_taxid, $get_ids, $max_lvl) = @_;
    my $tbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    my $col = $self->_get_table_cols($tbl);
    unless ($tbl && @$col) { return {}; }
    unless ($max_lvl) { $max_lvl = ''; }
    if (($max_lvl eq 'level4') && ($src =~ /^[NC]OG$/)) { $max_lvl = 'level3'; }
    my @cols = ();
    my $hier = {};
    my $key  = $get_ids ? '_id' : 'name';
    # hierarchy columns are 'level*' for ontology, 'tax_*' for organism
    my $pref = ($type eq 'ontology') ? 'level' : 'tax_';
    foreach my $c ( grep {$_ =~ /^$pref/} @$col ) {
        next if ($c eq 'tax_kingdom'); # ncbi hack
        next if (($c eq 'level4') && ($src =~ /^[NC]OG$/)); # n|cog hack
        if ($c ne $max_lvl) {
            push @cols, $c;
        }
        else {
            # truncate here: the max level itself becomes the result key
            push @cols, $c;
            $key = $c;
            last;
        }
    }
    if (($type eq 'organism') && $use_taxid && (! $max_lvl)) {
        $key = 'ncbi_tax_id';
    }
    # the key column is selected last so it can be popped off each row
    push @cols, $key;
    my $sql = "SELECT DISTINCT ".join(", ", @cols)." FROM ".$self->_atbl->{$type};
    if (($type eq 'ontology') && $src) {
        $sql .= " WHERE source = ".$self->_src_id->{$src};
    }
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        my $id = pop @row;
        next unless ($id && ($id =~ /\S/));
        # replace empty levels with a placeholder dash
        map { $_ = $_ ? $_ : "-" } @row;
        $hier->{$id} = \@row;
    }
    $sth->finish;
    $self->_dbh->commit;
    return $hier;
    # { end_node => [ hierarchy of node ] }
}
# Return the distinct names one level below $parent_name in the
# hierarchy of $type; e.g. the classes inside a given phylum.
# Returns [] for unknown levels and for the top levels (which have no
# parent).  $source filters ontology hierarchies.
sub get_hierarchy_slice {
    my ($self, $type, $source, $parent_name, $child_level) = @_;
    my $tbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    my $col = $self->_get_table_cols($tbl);
    unless ($tbl && @$col && $parent_name && $child_level && grep(/^$child_level$/, @$col)) {
        return [];
    }
    # first() is presumably List::Util::first imported at file top -- TODO confirm
    my $child_index = first { $col->[$_] eq $child_level } 0..$#{$col};
    # level does not exist
    # (a falsy index also covers position 0, where no parent column exists)
    unless ($child_index) {
        return [];
    }
    # no parent available
    if (($child_level eq 'tax_domain') || ($child_level eq 'level1')) {
        return [];
    }
    my $parent_index = $child_index - 1;
    # ncbi hack
    # (tax_kingdom is skipped, so phylum's parent is two columns back)
    if ($child_level eq 'tax_phylum') {
        $parent_index -= 1;
    }
    my $sql = "SELECT DISTINCT ".$col->[$child_index]." FROM ".$self->_atbl->{$type}." WHERE ".$col->[$parent_index]." = ".$self->_dbh->quote($parent_name);
    if (($type eq 'ontology') && $source) {
        $sql .= " AND source = ".$self->_src_id->{$source};
    }
    my $data = [];
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @$data, $row[0];
    }
    $sth->finish;
    $self->_dbh->commit;
    return $data;
}
# Translate annotation texts (or md5 checksums when $type is 'md5')
# into their internal _id values for the given annotation table.
# Processes the input in chunks to bound SQL statement size.
sub _get_annotation_map {
    my ($self, $type, $anns, $src) = @_;
    my $tbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    unless ($tbl && $anns && @$anns) { return {}; }
    my $col = ($type eq 'md5') ? 'md5' : 'name';
    my $amap = {};
    # natatime is presumably List::MoreUtils::natatime imported at file top -- TODO confirm
    my $iter = natatime $self->_chunk, @$anns;
    while (my @curr = $iter->()) {
        my $sql = "SELECT _id, $col FROM $tbl WHERE $col IN (".join(",", map {$self->_dbh->quote($_)} @curr).")";
        if ($src && ($type eq 'ontology')) {
            $sql .= " AND source = ".$self->_src_id->{$src};
        }
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            $amap->{$row[0]} = $row[1];
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    return $amap;
    # _id => name
}
# Map every annotation row to its value at one hierarchy level,
# e.g. organism _id => tax_phylum name.  Keys are _ids when $get_ids is
# true, names otherwise; $src filters ontology rows by source.
# Returns {} when the level is not a column of the annotation table.
sub _get_annotations4level {
    my ($self, $type, $level, $src, $get_ids) = @_;
    my $tbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    unless ($tbl && $level) { return {}; }
    my $key  = $get_ids ? '_id' : 'name';
    my $anns = {};
    my $qsrc = ($src && ($type eq 'ontology')) ? "source = ".$self->_src_id->{$src} : "";
    # verify $level is a real column before interpolating it into SQL
    my @cols = grep { $_ eq $level } @{ $self->_get_table_cols($tbl) };
    if (@cols == 1) {
        my $where = $self->_get_where_str([$qsrc, "$level IS NOT NULL"]);
        my $sth = $self->_dbh->prepare("SELECT DISTINCT $key, $level FROM ".$tbl.$where);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            if ($row[1] && ($row[1] =~ /\S/)) {
                $anns->{$row[0]} = $row[1];
            }
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    return $anns;
    # (_id || name) => annot
}
# Case-insensitive regex search (Postgres ~*) of annotation names over
# the loaded jobs.  Per-metagenome results are served from / written to
# memcache when a handle is available.
sub _search_annotations {
    my ($self, $type, $text) = @_;
    unless (exists($self->_jtbl->{$type}) && exists($self->_atbl->{$type})) { return {}; }
    my $cache_key = $type."_search_".quotemeta($text);
    my $data = {};
    my $jobs = [];
    # only query jobs whose result is not already cached
    while ( my ($mg, $j) = each %{$self->_job_map} ) {
        my $cdata = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
        if ($cdata) { $data->{$mg} = $cdata; }
        else { push @$jobs, $j; }
    }
    unless (@$jobs) { return $data; }
    my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", "j.id = a._id", "a.name ~* ".$self->_dbh->quote($text)]);
    my $sql = "SELECT DISTINCT j.job, j.source, a.name, j.abundance FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @{ $data->{ $self->_mg_map->{$row[0]} } }, [ $self->_id_src->{$row[1]}, @row[2,3] ];
    }
    $sth->finish;
    if ($self->_memd) {
        foreach my $mg (keys %$data) {
            $self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
        }
    }
    $self->_dbh->commit;
    return $data;
    # mgid => [ source, organism, abundance ]
}
# Resolve md5 ids to their full annotations (function and organism per
# source) via the md5_annotation join table.  An 'M5NR'-only source list
# means no source filter.  $taxid additionally selects ncbi_tax_id.
sub annotation_for_md5s {
    my ($self, $md5s, $srcs, $taxid) = @_;
    unless ($md5s && @$md5s) { return []; }
    my $data = [];
    my $qsrc = ($srcs && @$srcs && !(@$srcs == 1 && $srcs->[0] eq 'M5NR')) ? " AND a.source IN (".join(",", map { $self->_src_id->{$_} } @$srcs).")" : '';
    my $tid = $taxid ? ", o.ncbi_tax_id" : "";
    # deduplicate input and query in chunks
    my %umd5 = map {$_, 1} @$md5s;
    my $iter = natatime $self->_chunk, keys %umd5;
    while (my @curr = $iter->()) {
        my $sql = "SELECT DISTINCT a.md5, a.id, m.md5, f.name, o.name, a.source$tid FROM md5_annotation a ".
                  "INNER JOIN md5s m ON a.md5 = m._id ".
                  "LEFT OUTER JOIN functions f ON a.function = f._id ".
                  "LEFT OUTER JOIN organisms_ncbi o ON a.organism = o._id ".
                  "WHERE a.md5 IN (".join(",", @curr).")".$qsrc;
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            # translate numeric source id back to its name
            $row[5] = $self->_id_src->{$row[5]};
            push @$data, \@row;
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    # [ md5_id, id, md5, function, organism, source, (tax_id) ]
    return $data;
}
# Translate internal _ids back to their display value: md5 checksum for
# type 'md5', annotation name otherwise.  Chunked to bound SQL size.
sub decode_annotation {
    my ($self, $type, $ids) = @_;
    unless ($ids && @$ids) { return {}; }
    my $data = {};
    my $col = ($type eq 'md5') ? 'md5' : 'name';
    my %uids = map {$_, 1} @$ids;
    my $iter = natatime $self->_chunk, keys %uids;
    while (my @curr = $iter->()) {
        my $sql = "SELECT _id, $col FROM ".$self->_atbl->{$type}." WHERE _id IN (".join(',', @curr).")";
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            $data->{$row[0]} = $row[1];
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    return $data;
    # id => name
}
# Classify md5s as 'protein' or 'rna' from the is_protein flag.
# With $get_id set, keys are internal _ids and the value carries the
# md5 checksum as well; otherwise keys are the md5 checksums.
sub type_for_md5s {
    my ($self, $md5s, $get_id) = @_;
    unless ($md5s && @$md5s) { return {}; }
    my $data = {};
    my %umd5 = map {$_, 1} @$md5s;
    my $iter = natatime $self->_chunk, keys %umd5;
    while (my @curr = $iter->()) {
        my $sql = "SELECT _id, md5, is_protein FROM md5s WHERE _id IN (".join(',', @curr).")";
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            if ($get_id) {
                $data->{$row[0]} = [ $row[1], $row[2] ? 'protein' : 'rna' ];
            } else {
                $data->{$row[1]} = $row[2] ? 'protein' : 'rna';
            }
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    return $data;
    # md5 => 'protein'|'rna' OR _id => [md5, 'protein'|'rna']
}
# Map NCBI taxonomy ids to organism records: org _id => org name.
# Non-numeric entries are silently filtered out before building the SQL.
sub organisms_for_taxids {
    my ($self, $tax_ids) = @_;
    unless ($tax_ids && (@$tax_ids > 0)) { return {}; }
    my $data = {};
    my $list = join(",", grep {$_ =~ /^\d+$/} @$tax_ids);
    my $sth = $self->_dbh->prepare("SELECT _id, name FROM ".$self->_atbl->{organism}." WHERE ncbi_tax_id in ($list)");
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        $data->{$row[0]} = $row[1];
    }
    $sth->finish;
    $self->_dbh->commit;
    return $data;
    # org_id => org_name
}
# All [ name, description ] pairs of annotation sources of one type.
sub sources_for_type {
    my ($self, $type) = @_;
    my @matched = grep { $_->{type} eq $type } values %{ $self->_sources };
    my @pairs   = map  { [ $_->{name}, $_->{description} ] } @matched;
    return \@pairs;
}
# External URL for an annotation source, or '' when none is recorded.
sub link_for_source {
    my ($self, $src) = @_;
    my $info = $self->_sources->{$src};
    return ($info && $info->{link}) ? $info->{link} : '';
}
####################
# All functions conducted on individual job
####################
# Delete one job's rows from job_info and every per-job annotation
# table, using a dedicated write connection.  Returns 1 on success,
# 0 on connect or delete failure.
# Fix: the original always committed after the delete eval and only then
# inspected $@, so a DELETE that died mid-transaction still had its
# partial work committed.  The commit now happens inside the eval and a
# failure triggers a rollback instead.
sub delete_job {
    my ($self, $job) = @_;
    # NOTE(review): $job is interpolated into SQL unquoted -- callers
    # must pass a trusted numeric job id.
    my $delete_hdl;
    eval {
        my $host     = $Conf::mgrast_write_dbhost;
        my $database = $Conf::mgrast_db;
        my $user     = $Conf::mgrast_dbuser;
        my $password = $Conf::mgrast_dbpass;
        $delete_hdl = DBI->connect("DBI:Pg:dbname=$database;host=$host", $user, $password,
                      { RaiseError => 1, AutoCommit => 0, PrintError => 0 }) ||
                      die "database connect error.";
    };
    if ($@) {
        warn "Unable to connect to metagenomics database: $@\n";
        return 0;
    }
    # every schema version this job was loaded under
    my $all = $delete_hdl->selectcol_arrayref("SELECT DISTINCT version FROM job_info WHERE job = ".$job);
    eval {
        $delete_hdl->do("DELETE FROM job_info WHERE job = ".$job);
        foreach my $tbl (values %{$self->_jtbl}) {
            $delete_hdl->do("DELETE FROM $tbl WHERE version IN (".join(",", @$all).") AND job = ".$job);
        }
        # commit inside the eval so a failed DELETE is never persisted
        $delete_hdl->commit;
    };
    if ($@) {
        eval { $delete_hdl->rollback; };
        $delete_hdl->disconnect;
        return 0;
    }
    $delete_hdl->disconnect;
    return 1;
}
# Source names with data for one metagenome.  With $type set, only that
# per-job table is consulted; otherwise all tables except md5/lca are
# merged.  Side effect: resets the loaded job set to just $mgid.
sub get_sources {
    my ($self, $mgid, $type) = @_;
    $self->set_jobs([$mgid]);
    my $where = $self->_get_where_str([$self->_qver, "job = ".$self->_job_map->{$mgid}]);
    if ($type && exists($self->_jtbl->{$type})) {
        my $srcs = $self->_dbh->selectcol_arrayref("SELECT DISTINCT source FROM ".$self->_jtbl->{$type}.$where);
        @$srcs = sort map { $self->_id_src->{$_} } @$srcs;
        $self->_dbh->commit;
        return $srcs;
    } else {
        my $total = {};
        # NOTE: the loop variable $type shadows the (empty) parameter here
        while ( my ($type, $name) = each %{$self->_jtbl} ) {
            next if ($type =~ /^(md5|lca)$/);
            my $srcs = $self->_dbh->selectcol_arrayref("SELECT DISTINCT source FROM ".$name.$where);
            map { $total->{ $self->_id_src->{$_} } = 1 } @$srcs;
        }
        $self->_dbh->commit;
        return [ sort keys %$total ];
    }
    # [ source ]
}
# For one metagenome, map annotation texts to their member md5s and each
# md5's abundance: annotation_text => md5_integer => abundance.
# An empty/undef $anns list means all annotations of $type; $srcs
# filters by annotation source.  Side effect: resets the loaded job set.
sub md5_abundance_for_annotations {
    my ($self, $mgid, $type, $srcs, $anns) = @_;
    $self->set_jobs([$mgid]);
    my $job = $self->_job_map->{$mgid};
    my $tbl = exists($self->_jtbl->{$type}) ? $self->_jtbl->{$type} : '';
    unless ($tbl) { return {}; }
    my $amap = {};
    if ($anns && @$anns) {
        # translate requested annotation names to internal ids
        $amap = $self->_get_annotation_map($type, $anns);
        unless (scalar(keys %$amap)) { return {}; }
    }
    my $mdata = [];
    my $md5s = {};
    my $data = {};
    my $qsrc = ($srcs && @$srcs) ? "source IN (".join(",", map { $self->_src_id->{$_} } @$srcs).")" : '';
    my $qids = (scalar(keys %$amap) > 0) ? "id IN (".join(",", keys %$amap).")" : '';
    my $where = $self->_get_where_str([$self->_qver, "job = $job", $qsrc, $qids]);
    my $sth = $self->_dbh->prepare("SELECT id, md5s FROM ".$tbl.$where);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @$mdata, \@row;
    }
    $sth->finish;
    unless ($mdata && (@$mdata > 0)) {
        return $data;
    }
    # collect the union of md5s and pre-seed result entries with 0
    my %unique_md5s = ();
    foreach my $m (@$mdata) {
        map { $unique_md5s{$_} = 1 } @{$m->[1]};
        map { $data->{ $amap->{$m->[0]} }->{$_} = 0 } @{$m->[1]};
    }
    # fetch abundance per md5 in one query, then fill the result in
    $where = $self->_get_where_str([$self->_qver, "job = $job", "md5 IN (".join(",", keys %unique_md5s).")"]);
    $sth = $self->_dbh->prepare("SELECT md5, abundance FROM ".$self->_jtbl->{md5}.$where);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        $md5s->{$row[0]} = $row[1];
    }
    $sth->finish;
    foreach my $ann (keys %$data) {
        map { $data->{$ann}{$_} = $md5s->{$_} } grep { exists $md5s->{$_} } keys %{$data->{$ann}};
    }
    $self->_dbh->commit;
    # annotation_text => md5_integer => abundance
    return $data;
}
# Reads that hit the given md5s, as md5 => [ seq list ].  $type 'dna'
# returns the raw reads; 'protein' runs them through FragGeneScan first.
# With $return_read_id_flag == 1 each entry is [ "mgm"+read_id, seq ].
# Side effect: resets the loaded job set to just $mgid.
sub sequences_for_md5s {
    my ($self, $mgid, $type, $md5s, $return_read_id_flag) = @_;
    $self->set_jobs([$mgid]);
    my $data = {};
    my $seqs = $self->md5s_to_read_sequences($md5s);
    unless ($seqs && @$seqs) { return {}; }
    if ($type eq 'dna') {
        foreach my $set (@$seqs) {
            if($return_read_id_flag == 1) {
                push @{ $data->{$set->{md5}} }, ["mgm".$set->{id}, $set->{sequence}];
            } else {
                push @{ $data->{$set->{md5}} }, $set->{sequence};
            }
        }
    } elsif ($type eq 'protein') {
        # fasta headers encode md5 and read id so they survive FragGeneScan
        my $fna = '';
        map { $fna .= ">".$_->{md5}."|".$_->{id}."\n".$_->{sequence}."\n" } @$seqs;
        my $faa = $self->_run_fraggenescan($fna);
        unless ($faa) { return {}; }
        my @seqs = split(/\n/, $faa);
        # assumes strictly alternating header/sequence lines in the output
        for (my $i=0; $i<@seqs; $i += 2) {
            if ($seqs[$i] =~ /^>(\S+)/) {
                my $id = $1;
                my $seq = $seqs[$i+1];
                # NOTE(review): match success is not checked before using $1/$2
                $id =~ /^(\w+)?\|(.*)/;
                my $md5 = $1;
                my $read_id = $2;
                if ($return_read_id_flag == 1) {
                    push @{ $data->{$md5} }, ["mgm".$read_id, $seq];
                } else {
                    push @{ $data->{$md5} }, $seq;
                }
            }
        }
    } else {
        return {};
    }
    # md5 => [ seq list ]
    return $data;
}
# Sequences grouped by annotation: resolve annotations to md5s, fetch
# the reads for those md5s, then regroup them per annotation.
# Returns ann => [ seq list ].
sub sequences_for_annotation {
    my ($self, $mgid, $seq_type, $ann_type, $srcs, $anns) = @_;
    my $data = {};
    my $md5s = {};
    my $ann = $self->md5_abundance_for_annotations($mgid, $ann_type, $srcs, $anns); # annotation_text => md5_integer => abundance
    foreach my $a (keys %$ann) {
        map { $md5s->{$_} = 1; } keys %{$ann->{$a}};
    }
    unless (scalar(keys(%$md5s))) {
        return $data;
    }
    my $seqs = $self->sequences_for_md5s($mgid, $seq_type, [keys %$md5s], 1);
    # sequences_for_md5s keys by md5 checksum; translate our integer ids
    my $md5_ints_to_strings = $self->_get_annotation_map('md5', [keys %$seqs]);
    foreach my $a (keys %$ann) {
        foreach my $m (keys %{$ann->{$a}}) {
            next unless (exists $seqs->{$md5_ints_to_strings->{$m}});
            map { push @{$data->{$a}}, $_ } @{$seqs->{$md5_ints_to_strings->{$m}}};
        }
    }
    # ann => [ seq list ]
    return $data;
}
# Find metagenome ids whose annotations of $type match $ann (exact
# string match when $exact, case-insensitive regex otherwise),
# optionally restricted to the given sources.  Searches ALL jobs, not
# just the loaded set.
sub metagenome_search {
    my ($self, $type, $srcs, $ann, $exact) = @_;
    my $jtbl = exists($self->_jtbl->{$type}) ? $self->_jtbl->{$type} : '';
    my $atbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    unless ($jtbl && $atbl) { return []; }
    my $jobs = [];
    my $qsrc = ($srcs && @$srcs) ? "j.source IN (".join(",", map { $self->_src_id->{$_} } @$srcs).")" : "";
    my $qann = "a.name ".($exact ? '= ' : '~* ').$self->_dbh->quote($ann);
    my $where = $self->_get_where_str(['j.'.$self->_qver, "j.id = a._id", $qsrc, $qann]);
    my $sth = $self->_dbh->prepare("SELECT DISTINCT j.job FROM $jtbl j, $atbl a".$where);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @$jobs, $row[0];
    }
    $sth->finish;
    unless ($jobs && (@$jobs > 0)) {
        return [];
    }
    # [ mgid ]
    # translate job ids to metagenome ids without touching name/type maps
    return [ keys %{$self->_get_jobid_map($jobs, 1, 1)} ];
}
####################
# All functions conducted on jobs list
####################
=pod
=item * B<md5s_to_read_sequences> (I<md5s>, I<eval>, I<ident>)
Retrieve the [ {id , sequence} ] from the metagenome job directory for I<md5s> with I<eval>.
=cut
# Fetch the reads that hit the given md5s across all loaded jobs,
# reading seek/length byte ranges out of each job's similarity file in
# Shock.  Optional integer cutoffs: $eval (negated exponent), $ident
# (min % identity), $alen (min alignment length).
# Returns [ { md5, id ("mgid|read"), sequence }, ... ].
sub md5s_to_read_sequences {
    my ($self, $md5s, $eval, $ident, $alen) = @_;
    # integer-only cutoffs; anything else disables the filter
    $eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "exp_avg <= " . ($eval * -1) : "";
    $ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "ident_avg >= $ident" : "";
    $alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "len_avg >= $alen" : "";
    # one sims node lookup per loaded metagenome
    my %mg_sims = map { $_, $self->sims_node($_) } keys %{$self->_job_map};
    my $seqs = [];
    my %umd5 = map {$_, 1} @$md5s;
    my $iter = natatime $self->_chunk, keys %umd5;
    while (my @curr = $iter->()) {
        my $w_md5s = "md5 IN (".join(",", map {"'$_'"} @curr).")";
        my $where = $self->_get_where_str([$self->_qver, $self->_qjobs, $eval, $ident, $alen, $w_md5s, "seek IS NOT NULL", "length IS NOT NULL"]);
        my $data = {};
        # ordered by seek for sequential reads from the sims file
        my $sql = "SELECT job, seek, length FROM ".$self->_jtbl->{md5}.$where." ORDER BY job, seek";
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            push @{ $data->{$self->_mg_map->{$row[0]}} }, [$row[1], $row[2]];
        }
        $sth->finish;
        while ( my ($m, $info) = each %$data ) {
            next unless (exists($mg_sims{$m}) && $mg_sims{$m} && exists($mg_sims{$m}{id}));
            foreach my $set (@$info) {
                my ($seek, $len) = @$set;
                my $rec = $self->_get_sim_record($mg_sims{$m}{id}, $seek, $len);
                chomp $rec;
                foreach my $line ( split(/\n/, $rec) ) {
                    # 13-column sim lines carry the read sequence in the last field
                    my @tabs = split(/\t/, $line);
                    if (@tabs == 13) {
                        push @$seqs, { md5 => $tabs[1], id => $m."|".$tabs[0], sequence => $tabs[12] };
                    }
                }
            }
        }
    }
    $self->_dbh->commit;
    return $seqs;
}
# Total abundance of one organism in one source, summed across the
# loaded jobs.  Returns 0 when there are no rows.
sub get_abundance_for_organism_source {
    my ($self, $org, $src) = @_;
    my $qorg = "a.name = ".$self->_dbh->quote($org);
    my $qsrc = "j.source = ".$self->_src_id->{$src};
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "j.id = a._id", $qorg, $qsrc]);
    my $sql = "SELECT SUM(j.abundance) FROM ".$self->_jtbl->{organism}." j, ".$self->_atbl->{organism}." a".$where;
    my $sum = $self->_dbh->selectcol_arrayref($sql);
    $self->_dbh->commit;
    return ($sum && (@$sum > 0)) ? $sum->[0] : 0;
}
# Per-organism abundance for one source across the loaded jobs,
# as org_id => abundance.
sub get_organism_abundance_for_source {
    my ($self, $src) = @_;
    my $data = {};
    my $where = $self->_get_where_str([$self->_qver, $self->_qjobs, "source = ".$self->_src_id->{$src}]);
    my $sql = "SELECT id, abundance FROM ".$self->_jtbl->{organism}.$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        $data->{$row[0]} = $row[1];
    }
    $sth->finish;
    $self->_dbh->commit;
    # org_id => abund
    return $data;
}
# sub get_organisms_with_contig_for_source {
# my ($self, $src, $num, $len) = @_;
#
# if ($self->ach) {
# my $job_orgs = $self->get_organism_abundance_for_source($src);
# my @job_ctgs = map { [$_->[0], $_->[1], $job_orgs->{$_->[0]}] }
# grep { exists $job_orgs->{$_->[0]} }
# @{ $self->ach->get_organism_with_contig_list($num, $len) };
# # [ org_id, org_name, abundance ]
# return \@job_ctgs;
# } else {
# return [];
# }
# }
# E-value histograms for every md5 belonging to one organism/source,
# summed element-wise across the loaded jobs: md5 => [ eval bins ].
sub get_md5_evals_for_organism_source {
    my ($self, $org, $src) = @_;
    my $data = {};
    my $umd5 = {};
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "a.name=".$self->_dbh->quote($org), "j.source=".$self->_src_id->{$src}, "j.id = a._id"]);
    my $sql = "SELECT j.md5s FROM ".$self->_jtbl->{organism}." j, ".$self->_atbl->{organism}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        # md5s column is an array; collect the union of md5 ids
        foreach my $md5_id (@{$row[0]}) {
            $umd5->{$md5_id} = 1;
        }
    }
    $sth->finish;
    if (scalar(keys %$umd5) == 0) {
        return $data;
    }
    my $iter = natatime $self->_chunk, keys %$umd5;
    while (my @curr = $iter->()) {
        $where = $self->_get_where_str([$self->_qver, $self->_qjobs, "md5 IN (".join(",", @curr).")"]);
        $sql = "SELECT md5, evals FROM ".$self->_jtbl->{md5}.$where;
        $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            # element-wise accumulation of the eval histogram
            for (my $i=0; $i<@{$row[1]}; $i++) {
                $data->{$row[0]}->[$i] += $row[1][$i];
            }
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    # md5 => [ eval ]
    return $data;
}
# Full per-md5 statistics rows for one organism/source across the
# loaded jobs (only rows that carry seek/length file coordinates).
# The unused $eval parameter is kept for interface compatibility.
sub get_md5_data_for_organism_source {
    my ($self, $org, $src, $eval) = @_;
    my $data = [];
    my $umd5 = {};
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "a.name=".$self->_dbh->quote($org), "j.source=".$self->_src_id->{$src}, "j.id = a._id"]);
    my $sql = "SELECT j.md5s FROM ".$self->_jtbl->{organism}." j, ".$self->_atbl->{organism}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        foreach my $md5_id (@{$row[0]}) {
            $umd5->{$md5_id} = 1;
        }
    }
    $sth->finish;
    if (scalar(keys %$umd5) == 0) {
        return $data;
    }
    my $iter = natatime $self->_chunk, keys %$umd5;
    while (my @curr = $iter->()) {
        $where = $self->_get_where_str([$self->_qver, $self->_qjobs, "md5 IN (".join(",", @curr).")", "seek IS NOT NULL", "length IS NOT NULL"]);
        $sql = "SELECT DISTINCT job,md5,abundance,exp_avg,exp_stdv,ident_avg,ident_stdv,len_avg,len_stdv,seek,length FROM ".$self->_jtbl->{md5}.$where;
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            # translate job id to metagenome id in the output rows
            push @$data, [ $self->_mg_map->{$row[0]}, @row[1..10] ];
        }
        $sth->finish;
    }
    $self->_dbh->commit;
    return $data;
    # [ mgid, md5, abund, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, seek, length ]
}
# Rarefaction data for all loaded metagenomes at one taxon level
# (default species), restricted to the given annotation sources.
# With $get_alpha set, returns mgid => Shannon-based alpha diversity;
# otherwise mgid => [ [x, y], ... ] rarefaction coordinates.
# Results are cached in memcache when a handle is available.
# Fix: two cache writes ($mg_alpha zero case and the empty-curve case)
# called $self->_memd->set without checking $self->_memd, unlike every
# other memcache access in this file -- a missing memcache handle made
# those paths die.  They are now guarded.
sub get_rarefaction_curve {
    my ($self, $srcs, $get_alpha, $level) = @_;
    unless ($srcs && @$srcs) { $srcs = []; }
    unless ($level) { $level = 'species'; }
    my $raw_data = {}; # mgid => tax level => abundance
    my $mg_alpha = {}; # mgid => alpha diversity
    my $mg_rare  = {}; # mgid => [ rare-x, rare-y ]
    my $mg_abund = $self->get_abundance_for_tax_level('tax_'.$level, undef, $srcs); # [mgid, tax level, abundance]
    my $cache_key = 'rarefaction'.$level.join(':', @$srcs);
    map { $raw_data->{$_->[0]}->{$_->[1]} = $_->[2] } @$mg_abund;
    # calculate alpha diversity
    if ($get_alpha) {
        foreach my $mg (keys %$raw_data) {
            my $cdata = $self->_memd ? $self->_memd->get($mg.$cache_key."alpha") : undef;
            unless ($cdata) {
                my $h1  = 0;
                # sum() is List::Util::sum, imported at file top
                my $sum = sum values %{$raw_data->{$mg}};
                unless ($sum) {
                    $mg_alpha->{$mg} = 0;
                    if ($self->_memd) {
                        $self->_memd->set($mg.$cache_key."alpha", 0, $self->_expire);
                    }
                    next;
                }
                # Shannon entropy in bits, then diversity = 2 ** H
                foreach my $num (values %{$raw_data->{$mg}}) {
                    my $p = $num / $sum;
                    if ($p > 0) { $h1 += ($p * log(1/$p)) / log(2); }
                }
                $mg_alpha->{$mg} = 2 ** $h1;
                if ($self->_memd) {
                    $self->_memd->set($mg.$cache_key."alpha", $mg_alpha->{$mg}, $self->_expire);
                }
            } else {
                $mg_alpha->{$mg} = $cdata;
            }
        }
        # mgid => alpha-diver
        return $mg_alpha;
    }
    # calculate rarefaction (x, y)
    foreach my $mg (keys %$raw_data) {
        my $cdata = $self->_memd ? $self->_memd->get($mg.$cache_key."curve") : undef;
        unless ($cdata) {
            my @nums = sort {$a <=> $b} values %{$raw_data->{$mg}};
            my $k    = scalar @nums;
            my $nseq = $self->_get_seq_count($mg);
            # sample at most ~1000 points along the curve
            my $size = ($nseq > 1000) ? int($nseq / 1000) : 1;
            unless ($nseq) {
                $mg_rare->{$mg} = [];
                if ($self->_memd) {
                    $self->_memd->set($mg.$cache_key."curve", [], $self->_expire);
                }
                next;
            }
            for (my $n = 0; $n < $nseq; $n += $size) {
                my $coeff = $self->_nCr2ln($nseq, $n);
                my $curr  = 0;
                map { $curr += exp( $self->_nCr2ln($nseq - $_, $n) - $coeff ) } @nums;
                push @{ $mg_rare->{$mg} }, [ $n, $k - $curr ];
            }
            if ($self->_memd) {
                $self->_memd->set($mg.$cache_key."curve", $mg_rare->{$mg}, $self->_expire);
            }
        } else {
            $mg_rare->{$mg} = $cdata;
        }
    }
    # mgid => [ x, y ]
    return $mg_rare;
}
# Abundance per taxon at one tax level: [ mgid, taxa_name, abundance ].
# The $names parameter is currently unused (kept for interface
# compatibility).
sub get_abundance_for_tax_level {
    my ($self, $level, $names, $srcs, $value, $md5s, $eval, $ident, $alen) = @_;
    my $level_names = $self->_get_annotations4level("organism", $level, undef, 1);
    my $src_key = ($srcs && (@$srcs > 0)) ? join("", @$srcs) : '';
    return $self->_get_abundance_for_hierarchy($level_names, "organism", $level.$src_key, $srcs, $value, $md5s, $eval, $ident, $alen);
}
# Abundance per ontology category at one level for a single source.
# The $names parameter is currently unused (kept for interface
# compatibility).
sub get_abundance_for_ontol_level {
    my ($self, $level, $names, $src, $value, $md5s, $eval, $ident, $alen) = @_;
    my $level_names = $self->_get_annotations4level("ontology", $level, $src, 1);
    return $self->_get_abundance_for_hierarchy($level_names, "ontology", $level.$src, [$src], $value, $md5s, $eval, $ident, $alen);
}
# Core aggregation: roll per-md5 values up to hierarchy names.
# $name_map maps annotation ids to names at the chosen level; $value is
# the md5 column to aggregate (default "abundance"; other columns are
# averaged rather than summed).  Optional md5 whitelist and
# eval/ident/alen cutoffs.  Rows stream in ordered by job; each job is
# flushed (and cached) when the job id changes, with a final flush after
# the loop.  Returns [ mgid, name, value ].
sub _get_abundance_for_hierarchy {
    my ($self, $name_map, $type, $key, $srcs, $value, $md5s, $eval, $ident, $alen) = @_;
    unless ($value) { $value = "abundance"; }
    my $cache_key = $value.$type.$key;
    $cache_key .= defined($eval) ? $eval : ":";
    $cache_key .= defined($ident) ? $ident : ":";
    $cache_key .= defined($alen) ? $alen : ":";
    my $data = [];
    my $jobs = [];
    my $qmd5s = ($md5s && (@$md5s > 0)) ? 1 : 0;
    if ($qmd5s) {
        # md5-filtered results are not cached; query all loaded jobs
        $jobs = $self->_jobs;
    } else {
        while ( my ($mg, $j) = each %{$self->_job_map} ) {
            my $cdata = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
            if ($cdata) { push @$data, @$cdata; }
            else { push @$jobs, $j; }
        }
    }
    unless (@$jobs) { return $data; }
    # integer-only cutoffs; anything else disables the filter
    $eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "exp_avg <= " . ($eval * -1) : "";
    $ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "ident_avg >= $ident" : "";
    $alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "len_avg >= $alen" : "";
    # get for jobs
    my ($job, $id, $md5);
    my %md5_set = $qmd5s ? map {$_, 1} @$md5s : ();
    my $hier = {};
    my $curr = 0;
    my $qsrcs = ($srcs && (@$srcs > 0)) ? "source IN (".join(",", map { $self->_src_id->{$_} } @$srcs).")" : "";
    my $where = $self->_get_where_str([$self->_qver, "job IN (".join(",", @$jobs).")", $qsrcs, $eval, $ident, $alen]);
    my $sql = "SELECT DISTINCT job, id, md5s FROM ".$self->_jtbl->{$type}.$where." ORDER BY job";
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        ($job, $id, $md5) = @row;
        next unless(exists $name_map->{$id});
        unless ($curr) { $curr = $job; }
        if ($curr != $job) {
            # job boundary: aggregate md5 values for the finished job
            my $cdata = [];
            my %md5s = ();
            my $ssth = $self->_dbh->prepare("SELECT md5, $value FROM ".$self->_jtbl->{md5}." WHERE ".$self->_qver." AND job=".$curr);
            $ssth->execute() or die "Couldn't execute statement: " . $ssth->errstr;
            while (my @srow = $ssth->fetchrow_array()) {
                $md5s{$srow[0]} = $srow[1];
            }
            $ssth->finish;
            foreach my $h (sort keys %$hier) {
                my $num = 0;
                my $count = 0;
                map { $num += $md5s{$_}; $count += 1; } grep { exists $md5s{$_} } keys %{ $hier->{$h} };
                # non-abundance values are averaged over contributing md5s
                if (($value ne "abundance") && ($count > 0)) {
                    $num = ($num * 1.0) / $count;
                }
                push @$data, [ $self->_mg_map->{$curr}, $h, $num ];
                push @$cdata, [ $self->_mg_map->{$curr}, $h, $num ];
            }
            if ($self->_memd) {
                $self->_memd->set($self->_mg_map->{$curr}.$cache_key, $cdata, $self->_expire);
            }
            # reset
            $hier = {};
            $curr = $job;
        }
        if ($qmd5s) {
            map { $hier->{$name_map->{$id}}{$_} = 1 } grep { exists $md5_set{$_} } @$md5;
        } else {
            map { $hier->{$name_map->{$id}}{$_} = 1 } @$md5;
        }
    }
    $sth->finish;
    # get last job
    if (scalar(keys %$hier) > 0) {
        my $cdata = [];
        my %md5s = ();
        my $ssth = $self->_dbh->prepare("SELECT md5, $value FROM ".$self->_jtbl->{md5}." WHERE ".$self->_qver." AND job=".$job);
        $ssth->execute() or die "Couldn't execute statement: " . $ssth->errstr;
        while (my @srow = $ssth->fetchrow_array()) {
            $md5s{$srow[0]} = $srow[1];
        }
        $ssth->finish;
        foreach my $h (sort keys %$hier) {
            my $num = 0;
            my $count = 0;
            map { $num += $md5s{$_}; $count += 1; } grep { exists $md5s{$_} } keys %{ $hier->{$h} };
            if (($value ne "abundance") && ($count > 0)) {
                $num = ($num * 1.0) / $count;
            }
            push @$data, [ $self->_mg_map->{$job}, $h, $num ];
            push @$cdata, [ $self->_mg_map->{$job}, $h, $num ];
        }
        if ($self->_memd) {
            $self->_memd->set($self->_mg_map->{$job}.$cache_key, $cdata, $self->_expire);
        }
    }
    $self->_dbh->commit;
    return $data;
    # [ mgid, taxa_name, abundance ]
}
# Abundance of a fixed set of annotation names per metagenome:
# mgid => annotation => abundance.  Every requested name is pre-seeded
# (so missing names report 0 via the max over [0]); when a name appears
# with several abundances the maximum is kept.
sub get_abundance_for_set {
    my ($self, $set, $type, $srcs) = @_;
    unless ($set && (@$set > 0) && exists($self->_jtbl->{$type})) { return {}; }
    my $data = {};
    foreach my $mg (keys %{$self->_job_map}) {
        map { $data->{$mg}{$_} = [ 0 ] } @$set;
    }
    my $qterm = "a.name IN (".join(", ", map { $self->_dbh->quote($_) } @$set).")";
    my $qsrcs = (@$srcs > 0) ? "j.source IN (".join(",", map { $self->_src_id->{$_} } @$srcs).")" : "";
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "j.id = a._id", $qsrcs, $qterm]);
    my $sql = "SELECT DISTINCT j.job, a.name, j.abundance FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @{ $data->{ $self->_mg_map->{$row[0]} }{$row[1]} }, $row[2];
    }
    $sth->finish;
    my $results = {};
    foreach my $mg (keys %$data) {
        # max() is presumably List::Util::max imported at file top -- TODO confirm
        map { $results->{$mg}{$_} = max @{ $data->{$mg}{$_} } } keys %{$data->{$mg}};
    }
    $self->_dbh->commit;
    return $results;
    # mgid => annotation => abundance
}
# Top-$limit annotations per metagenome, ranked by abundance (ties
# broken alphabetically): mgid => [ [annotation, abundance], ... ].
sub get_rank_abundance {
    my ($self, $limit, $type, $srcs) = @_;
    unless ($limit && exists($self->_jtbl->{$type})) { return []; }
    my $data = {};
    my $qsrcs = (@$srcs > 0) ? "j.source IN (" . join(",", map { $self->_src_id->{$_} } @$srcs) . ")" : "";
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "j.id = a._id", $qsrcs]);
    my $sql = "SELECT DISTINCT j.job, a.name, j.abundance FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @{ $data->{ $self->_mg_map->{$row[0]} }{$row[1]} }, $row[2];
    }
    $sth->finish;
    my $results = {};
    foreach my $mg (keys %$data) {
        my @ranked = map { [ $_, max @{$data->{$mg}{$_}} ] } keys %{$data->{$mg}};
        @ranked = sort { ($b->[1] <=> $a->[1]) || ($a->[0] cmp $b->[0]) } @ranked;
        # NOTE(review): when fewer than $limit annotations exist, this
        # slice pads the result with undef entries
        $results->{$mg} = [ @ranked[0..($limit-1)] ];
    }
    $self->_dbh->commit;
    return $results;
    # mgid => [ annotation, abundance ]
}
# get_set_rank_abundance
# Rank annotations of the given type by the number of distinct jobs they
# occur in, optionally across ALL jobs in the database ($all true) rather
# than just the current job set.
#
# Parameters:
#   $limit - per-source cap on the number of rows returned
#   $type  - annotation type; must be a key of the job-table map (_jtbl)
#   $srcs  - arrayref of source names used to restrict the query (may be empty)
#   $all   - when true, do not restrict to the current job set
#
# Returns: arrayref of [ annotation, job_count ] rows, most frequent first.
sub get_set_rank_abundance {
    my ($self, $limit, $type, $srcs, $all) = @_;
    unless ($limit && exists($self->_jtbl->{$type})) { return []; }
    my $data = [];
    my $qsrcs = (@$srcs > 0) ? "j.source IN (" . join(",", map { $self->_src_id->{$_} } @$srcs) . ")" : "";
    my $qjobs = $all ? '' : 'j.'.$self->_qjobs;
    my $where = $self->_get_where_str(['j.'.$self->_qver, $qjobs, "j.id = a._id", $qsrcs]);
    # Cap the result set at $limit entries per requested source. Fall back
    # to one source's worth when no source filter was given (the original
    # $limit * 0 would have produced "LIMIT 0", returning nothing).
    my $qlim = "LIMIT ".($limit * (scalar(@$srcs) || 1));
    # Bug fix: the original query appended the bare number $limit after
    # "DESC" (invalid SQL, $qlim was built but never used), grouped by
    # j.job while selecting a.name, and used SUM(j.job) although the
    # documented result is a job count. Count distinct jobs per annotation
    # and group by the annotation name instead.
    my $sql = "SELECT a.name, COUNT(DISTINCT j.job) FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where." GROUP BY a.name ORDER BY COUNT(DISTINCT j.job) DESC ".$qlim;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        push @$data, \@row;
    }
    $sth->finish;
    $self->_dbh->commit;
    return $data;
    # [ annotation, job_count ]
}
# get_global_rank_abundance
# Database-wide (all jobs) annotation ranking for a single source; simply
# delegates to get_set_rank_abundance with the all-jobs flag set.
sub get_global_rank_abundance {
    my $self = shift;
    my ($limit, $type, $src) = @_;
    return $self->get_set_rank_abundance($limit, $type, [$src], 1);
}
# search_organisms
# Full-text search across organism annotation names.
sub search_organisms {
    my $self = shift;
    my ($text) = @_;
    return $self->_search_annotations('organism', $text);
}
# get_organisms_unique_for_source
# Aggregate per-md5 statistics into per-organism statistics, using each
# md5's representative organism for the given source, then attach the full
# taxonomy lineage for every organism found.
#
# Parameters:
#   $source     - source name whose representative-organism mapping is used
#   $eval, $ident, $alen - cutoff values passed through to get_md5_data
#   $with_taxid - when true, only organisms with an NCBI taxonomy id are
#                 returned, and the id is appended to each row
#
# Returns: arrayref of rows:
#   mgid, tax_domain..tax_species, name, abundance, exp_avg, exp_stdv,
#   ident_avg, ident_stdv, len_avg, len_stdv, md5s [, ncbi_tax_id]
sub get_organisms_unique_for_source {
my ($self, $source, $eval, $ident, $alen, $with_taxid) = @_;
my $all_orgs = {};
my $mg_org_data = {};
# mgid => org => [ count_md5s, abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, [md5s] ]
my $mg_md5_data = $self->get_md5_data(undef, $eval, $ident, $alen, 1, $source);
# [ mgid, md5, abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, rep_org_id ]
foreach my $row (@$mg_md5_data) {
my $org = $row->[9];
$all_orgs->{$org} = 1;
if (exists $mg_org_data->{$row->[0]}{$org}) {
# accumulate sums per organism; averaged by the md5 count further down
$mg_org_data->{$row->[0]}{$org}[0] += 1;
$mg_org_data->{$row->[0]}{$org}[1] += $row->[2];
$mg_org_data->{$row->[0]}{$org}[2] += $row->[3];
$mg_org_data->{$row->[0]}{$org}[3] += $row->[4];
$mg_org_data->{$row->[0]}{$org}[4] += $row->[5];
$mg_org_data->{$row->[0]}{$org}[5] += $row->[6];
$mg_org_data->{$row->[0]}{$org}[6] += $row->[7];
$mg_org_data->{$row->[0]}{$org}[7] += $row->[8];
push @{ $mg_org_data->{$row->[0]}{$org}[8] }, $row->[1];
} else {
# first md5 for this (metagenome, organism) pair
$mg_org_data->{$row->[0]}{$org} = [ 1, @$row[2..8], [$row->[1]] ];
}
}
if (scalar(keys %$all_orgs) == 0) {
return [];
}
my $ctax = $with_taxid ? ',ncbi_tax_id' : '';
my $qtax = $with_taxid ? " AND ncbi_tax_id IS NOT NULL" : '';
my $tax = {};
my $tid = {};
# fetch the full lineage (unassigned ranks normalized) for all organisms seen
my $sql = "SELECT _id,COALESCE(tax_domain,'unassigned') AS txd,COALESCE(tax_phylum,'unassigned') AS txp,COALESCE(tax_class,'unassigned') AS txc,".
"COALESCE(tax_order,'unassigned') AS txo,COALESCE(tax_family,'unassigned') AS txf,COALESCE(tax_genus,'unassigned') AS txg,".
"COALESCE(tax_species,'unassigned') AS txs,name$ctax FROM ".$self->_atbl->{organism}." WHERE _id IN (".join(',', keys %$all_orgs).")$qtax";
my $sth = $self->_dbh->prepare($sql);
$sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
while (my @row = $sth->fetchrow_array()) {
my $oid = shift @row;
if ($with_taxid) {
# tax id is the last selected column when requested
my $t = pop @row;
$tid->{$oid} = $t;
}
$tax->{$oid} = \@row;
}
$sth->finish;
my $result = [];
foreach my $mgid (keys %$mg_org_data) {
foreach my $oid (keys %{$mg_org_data->{$mgid}}) {
my $stats = $mg_org_data->{$mgid}{$oid};
my $total = $stats->[0];
my $abund = $stats->[1];
my $md5s = $stats->[8];
# convert the per-metric sums into averages over the md5 count
my ($ea, $es, $ia, $is, $la, $ls) = (($stats->[2] / $total),($stats->[3] / $total),($stats->[4] / $total),($stats->[5] / $total),($stats->[6] / $total),($stats->[7] / $total));
if (exists $tax->{$oid}) {
my $data = [ $mgid, @{$tax->{$oid}}, $abund, $ea, $es, $ia, $is, $la, $ls, $md5s ];
if ($with_taxid) {
if (exists $tid->{$oid}) {
push @$data, $tid->{$oid};
} else {
# organism without a taxonomy id: dropped when ids were requested
next;
}
}
push @$result, $data;
}
}
}
$self->_dbh->commit;
return $result;
# mgid, tax_domain, tax_phylum, tax_class, tax_order, tax_family, tax_genus, tax_species, name, abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, md5s
}
# get_organisms_for_sources
# Organism data for whole sources (no md5 filtering); delegates to
# get_organisms_for_md5s with an empty md5 list.
sub get_organisms_for_sources {
    my $self = shift;
    my ($sources, $eval, $ident, $alen, $with_taxid) = @_;
    return $self->get_organisms_for_md5s([], $sources, $eval, $ident, $alen, $with_taxid);
}
# get_organisms_for_md5s
# Organism annotation rows for the given sources, optionally restricted to
# a set of md5 checksums, honoring e-value / identity / alignment-length
# cutoffs. Per-metagenome results are cached in memcache, but only when no
# md5 filter is applied.
#
# Returns a two-element list:
#   - hashref of mgid => md5 => abundance (for the md5s involved)
#   - arrayref of rows (see trailing comment for the column layout)
sub get_organisms_for_md5s {
my ($self, $md5s, $sources, $eval, $ident, $alen, $with_taxid) = @_;
my $cache_key = "org".($with_taxid ? 'tid' : '');
$cache_key .= defined($eval) ? $eval : ":";
$cache_key .= defined($ident) ? $ident : ":";
$cache_key .= defined($alen) ? $alen : ":";
$cache_key .= defined($sources) ? join(";", @$sources) : ":";
my $data = {};
my $jobs = [];
my %mdata = ();
my $qmd5s = ($md5s && (@$md5s > 0)) ? 1 : 0;
if ($qmd5s) {
# md5 filtering bypasses the cache: query every job
$jobs = $self->_jobs;
} else {
# use cached rows per metagenome where available; query only the misses
while ( my ($mg, $j) = each %{$self->_job_map} ) {
my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
my $m = $self->_memd ? $self->_memd->get($mg.$cache_key."md5s") : undef;
if ($c && $m) {
$data->{$mg} = $c;
$mdata{$mg} = $m;
} else {
push @$jobs, $j;
}
}
}
unless (@$jobs) { return (\%mdata, [ map { @$_ } values %$data ]); }
my %md5_set = ($md5s && (@$md5s > 0)) ? map {$_, 1} @$md5s : ();
my $mg_md5_abund = $self->get_md5_abundance($eval, $ident, $alen);
# from here on the cutoff parameters are reused as SQL condition strings
$eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "j.exp_avg <= " . ($eval * -1) : "";
$ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "j.ident_avg >= $ident" : "";
$alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "j.len_avg >= $alen" : "";
my $ctax = $with_taxid ? ',a.ncbi_tax_id' : '';
my $qtax = $with_taxid ? "a.ncbi_tax_id IS NOT NULL" : '';
my $qsrcs = ($sources && (@$sources > 0)) ? "j.source IN (" . join(",", map { $self->_src_id->{$_} } @$sources) . ")" : "";
my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", "j.id = a._id", $qsrcs, $eval, $ident, $alen, $qtax]);
my $tax = "COALESCE(a.tax_domain,'unassigned') AS txd,COALESCE(a.tax_phylum,'unassigned') AS txp,COALESCE(a.tax_class,'unassigned') AS txc,".
"COALESCE(a.tax_order,'unassigned') AS txo,COALESCE(a.tax_family,'unassigned') AS txf,COALESCE(a.tax_genus,'unassigned') AS txg,".
"COALESCE(a.tax_species,'unassigned') AS txs,a.name";
my $sql = "SELECT DISTINCT j.job,j.source,$tax,j.abundance,j.exp_avg,j.exp_stdv,j.ident_avg,j.ident_stdv,j.len_avg,j.len_stdv,j.md5s$ctax FROM ".
$self->_jtbl->{organism}." j, ".$self->_atbl->{organism}." a".$where;
my $sth = $self->_dbh->prepare($sql);
$sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
while (my @row = $sth->fetchrow_array()) {
my $sub_abund = 0;
my $mg = $self->_mg_map->{$row[0]};
if ($qmd5s) {
# sub-abundance: sum the abundances of only the requested md5s backing
# this row; rows without any requested md5 are dropped entirely
my @has_md5 = grep { exists $md5_set{$_} } @{$row[17]};
next unless ((@has_md5 > 0) && exists($mg_md5_abund->{$mg}));
map { $sub_abund += $mg_md5_abund->{$mg}{$_} } grep { exists($mg_md5_abund->{$mg}{$_}) } @has_md5;
} else {
# unfiltered: sub-abundance equals the row abundance
$sub_abund = $row[10];
}
my $drow = [ $mg, $self->_id_src->{$row[1]}, @row[2..10], $sub_abund, @row[11..16], join(";", @{$row[17]}) ];
if ($with_taxid) { push @$drow, $row[18]; }
push @{$data->{$mg}}, $drow;
map { $mdata{$mg}{$_} = $mg_md5_abund->{$mg}{$_} } grep { exists $mg_md5_abund->{$mg}{$_} } @{$row[17]};
}
$sth->finish;
# cache only unfiltered results, so filtered subsets never poison the cache
if ((! $qmd5s) && $self->_memd) {
foreach my $mg (keys %$data) {
$self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
$self->_memd->set($mg.$cache_key."md5s", $mdata{$mg}, $self->_expire);
}
}
$self->_dbh->commit;
return (\%mdata, [ map { @$_ } values %$data ]);
# mgid => md5 => abundance
# mgid, source, tax_domain, tax_phylum, tax_class, tax_order, tax_family, tax_genus, tax_species, name, abundance, sub_abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, md5s
}
# search_ontology
# Full-text search across ontology annotation names.
sub search_ontology {
    my $self = shift;
    my ($text) = @_;
    return $self->_search_annotations('ontology', $text);
}
# get_ontology_for_source
# Ontology data for a whole source (no md5 filtering); delegates to
# get_ontology_for_md5s with an empty md5 list.
sub get_ontology_for_source {
    my $self = shift;
    my ($source, $eval, $ident, $alen) = @_;
    return $self->get_ontology_for_md5s([], $source, $eval, $ident, $alen);
}
# get_ontology_for_md5s
# Ontology annotation rows for a single source, optionally restricted to a
# set of md5 checksums, honoring e-value / identity / alignment-length
# cutoffs. Per-metagenome results are cached in memcache, but only when no
# md5 filter is applied.
#
# Returns a two-element list:
#   - hashref of mgid => md5 => abundance (for the md5s involved)
#   - arrayref of rows (see trailing comment for the column layout)
sub get_ontology_for_md5s {
my ($self, $md5s, $source, $eval, $ident, $alen) = @_;
unless ($source) {
return ({}, [])
}
my $cache_key = "ontol".$source;
$cache_key .= defined($eval) ? $eval : ":";
$cache_key .= defined($ident) ? $ident : ":";
$cache_key .= defined($alen) ? $alen : ":";
my $data = {};
my $jobs = [];
my %mdata = ();
my $qmd5s = ($md5s && (@$md5s > 0)) ? 1 : 0;
if ($qmd5s) {
# md5 filtering bypasses the cache: query every job
$jobs = $self->_jobs;
} else {
# use cached rows per metagenome where available; query only the misses
while ( my ($mg, $j) = each %{$self->_job_map} ) {
my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
my $m = $self->_memd ? $self->_memd->get($mg.$cache_key."md5s") : undef;
if ($c && $m) {
$data->{$mg} = $c;
$mdata{$mg} = $m;
} else {
push @$jobs, $j;
}
}
}
unless (@$jobs) { return (\%mdata, [ map { @$_ } values %$data ]); }
my %md5_set = $qmd5s ? map {$_, 1} @$md5s : ();
my $mg_md5_abund = $self->get_md5_abundance($eval, $ident, $alen);
# from here on the cutoff parameters are reused as SQL condition strings
$eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "j.exp_avg <= " . ($eval * -1) : "";
$ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "j.ident_avg >= $ident" : "";
$alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "j.len_avg >= $alen" : "";
# prefer the most specific ontology level available as the annotation text
my $level = "COALESCE(a.level4, a.level3) as annotation";
my $qsrcs = "a.source = ".$self->_src_id->{$source};
my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", "j.id = a._id", $qsrcs, $eval, $ident, $alen]);
my $sql = "SELECT DISTINCT j.job,a.name,$level,j.abundance,j.exp_avg,j.exp_stdv,j.ident_avg,j.ident_stdv,j.len_avg,j.len_stdv,j.md5s FROM ".
$self->_jtbl->{ontology}." j, ".$self->_atbl->{ontology}." a".$where;
my $sth = $self->_dbh->prepare($sql);
$sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
while (my @row = $sth->fetchrow_array()) {
my $sub_abund = 0;
my $mg = $self->_mg_map->{$row[0]};
if ($qmd5s) {
# sub-abundance: sum the abundances of only the requested md5s backing
# this row; rows without any requested md5 are dropped entirely
my @has_md5 = grep { exists $md5_set{$_} } @{$row[10]};
next unless ((@has_md5 > 0) && exists($mg_md5_abund->{$mg}));
map { $sub_abund += $mg_md5_abund->{$mg}{$_} } grep { exists($mg_md5_abund->{$mg}{$_}) } @has_md5;
} else {
# unfiltered: sub-abundance equals the row abundance
$sub_abund = $row[3];
}
push @{$data->{$mg}}, [ $mg, @row[1..3], $sub_abund, @row[4..9], join(";", @{$row[10]}) ];
map { $mdata{$mg}{$_} = $mg_md5_abund->{$mg}{$_} } grep { exists $mg_md5_abund->{$mg}{$_} } @{$row[10]};
}
$sth->finish;
# cache only unfiltered results, so filtered subsets never poison the cache
if ((! $qmd5s) && $self->_memd) {
foreach my $mg (keys %$data) {
$self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
$self->_memd->set($mg.$cache_key."md5s", $mdata{$mg}, $self->_expire);
}
}
$self->_dbh->commit;
return (\%mdata, [ map { @$_ } values %$data ]);
# mgid => md5 => abundance
# mgid, id, annotation, abundance, sub_abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, md5s
}
# search_functions
# Full-text search across function annotation names.
sub search_functions {
    my $self = shift;
    my ($text) = @_;
    return $self->_search_annotations('function', $text);
}
# get_functions_for_sources
# Function data for whole sources (no md5 filtering); delegates to
# get_functions_for_md5s with an empty md5 list.
sub get_functions_for_sources {
    my $self = shift;
    my ($sources, $eval, $ident, $alen) = @_;
    return $self->get_functions_for_md5s([], $sources, $eval, $ident, $alen);
}
# get_functions_for_md5s
# Function annotation rows for the given sources, optionally restricted to
# a set of md5 checksums, honoring e-value / identity / alignment-length
# cutoffs. Per-metagenome results are cached in memcache, but only when no
# md5 filter is applied. Unlike the organism/ontology variants, only the
# row list is returned (no md5 => abundance map).
#
# Returns: arrayref of rows (see trailing comment for the column layout)
sub get_functions_for_md5s {
my ($self, $md5s, $sources, $eval, $ident, $alen) = @_;
unless ($sources && (@$sources > 0)) { $sources = []; }
my $cache_key = "func";
$cache_key .= defined($eval) ? $eval : ":";
$cache_key .= defined($ident) ? $ident : ":";
$cache_key .= defined($alen) ? $alen : ":";
$cache_key .= defined($sources) ? join(";", @$sources) : ":";
my $data = {};
my $jobs = [];
my $qmd5s = ($md5s && (@$md5s > 0)) ? 1 : 0;
if ($qmd5s) {
# md5 filtering bypasses the cache: query every job
$jobs = $self->_jobs;
} else {
# use cached rows per metagenome where available; query only the misses
while ( my ($mg, $j) = each %{$self->_job_map} ) {
my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
if ($c) { $data->{$mg} = $c; }
else { push @$jobs, $j; }
}
}
unless (@$jobs) { return [ map { @$_ } values %$data ]; }
my %md5_set = $qmd5s ? map {$_, 1} @$md5s : ();
my $mg_md5_abund = $qmd5s ? $self->get_md5_abundance($eval, $ident, $alen) : {};
# from here on the cutoff parameters are reused as SQL condition strings
$eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "j.exp_avg <= " . ($eval * -1) : "";
$ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "j.ident_avg >= $ident" : "";
$alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "j.len_avg >= $alen" : "";
my $qsrcs = (@$sources > 0) ? "j.source in (" . join(",", map { $self->_src_id->{$_} } @$sources) . ")" : "";
my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", "j.id = a._id", $qsrcs, $eval, $ident, $alen]);
my $sql = "SELECT DISTINCT j.job,j.source,a.name,j.abundance,j.exp_avg,j.exp_stdv,j.ident_avg,j.ident_stdv,j.len_avg,j.len_stdv,j.md5s FROM ".
$self->_jtbl->{function}." j, ".$self->_atbl->{function}." a".$where;
my $sth = $self->_dbh->prepare($sql);
$sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
while (my @row = $sth->fetchrow_array()) {
my $sub_abund = 0;
my $mg = $self->_mg_map->{$row[0]};
if ($qmd5s) {
# sub-abundance: sum the abundances of only the requested md5s backing
# this row; rows without any requested md5 are dropped entirely
my @has_md5 = grep { exists $md5_set{$_} } @{$row[10]};
next unless ((@has_md5 > 0) && exists($mg_md5_abund->{$mg}));
map { $sub_abund += $mg_md5_abund->{$mg}{$_} } grep { exists($mg_md5_abund->{$mg}{$_}) } @has_md5;
} else {
# unfiltered: sub-abundance equals the row abundance
$sub_abund = $row[3];
}
push @{$data->{$mg}}, [ $mg, $self->_id_src->{$row[1]}, @row[2,3], $sub_abund, @row[4..9], join(";", @{$row[10]}) ];
}
$sth->finish;
# cache only unfiltered results, so filtered subsets never poison the cache
if ((! $qmd5s) && $self->_memd) {
foreach my $mg (keys %$data) {
$self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
}
}
$self->_dbh->commit;
return [ map { @$_ } values %$data ];
# mgid, source, function, abundance, sub_abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, md5s
}
# get_lca_data
# Lowest-common-ancestor rows for every metagenome in the current job set,
# honoring e-value / identity / alignment-length cutoffs. The semicolon-
# separated lca string from the DB is expanded into up to eight taxonomy
# slots, missing ranks padded with '-'. Results cached per metagenome.
#
# Returns: arrayref of rows (see trailing comment for the column layout)
sub get_lca_data {
my ($self, $eval, $ident, $alen) = @_;
my $cache_key = "lca";
$cache_key .= defined($eval) ? $eval : ":";
$cache_key .= defined($ident) ? $ident : ":";
$cache_key .= defined($alen) ? $alen : ":";
my $data = {};
my $jobs = [];
# use cached rows per metagenome where available; query only the misses
while ( my ($mg, $j) = each %{$self->_job_map} ) {
my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
if ($c) { $data->{$mg} = $c; }
else { push @$jobs, $j; }
}
unless (@$jobs) { return [ map { @$_ } values %$data ]; }
# the cutoff parameters are reused as SQL condition strings
$eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "exp_avg <= " . ($eval * -1) : "";
$ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "ident_avg >= $ident" : "";
$alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "len_avg >= $alen" : "";
my $where = $self->_get_where_str([$self->_qver, "job IN (".join(",", @$jobs).")", $eval, $ident, $alen]);
my $sql = "SELECT DISTINCT job,lca,abundance,exp_avg,exp_stdv,ident_avg,ident_stdv,len_avg,len_stdv FROM ".$self->_jtbl->{lca}.$where;
my $sth = $self->_dbh->prepare($sql);
$sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
while (my @row = $sth->fetchrow_array()) {
my $mg = $self->_mg_map->{$row[0]};
# expand "dom;phy;...;name" into fixed-width taxonomy slots
my @tax = ('-','-','-','-','-','-','-','-');
my @lca = split(/;/, $row[1]);
for (my $i=0; $i<@lca; $i++) {
$tax[$i] = $lca[$i];
}
push @{$data->{$mg}}, [ $mg, @tax, @row[2..8] ];
}
$sth->finish;
if ($self->_memd) {
foreach my $mg (keys %$data) {
$self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
}
}
$self->_dbh->commit;
return [ map { @$_ } values %$data ];
# mgid, tax_domain, tax_phylum, tax_class, tax_order, tax_family, tax_genus, tax_species, name, abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv
}
# get_md5_data
# Per-md5 statistics rows for every metagenome in the current job set,
# optionally restricted to a set of md5s, honoring e-value / identity /
# alignment-length cutoffs. Optionally joins the representative-organism
# table for a given source, and optionally requires/returns seek+length
# offsets into the similarity files.
#
# Parameters:
#   $md5s        - optional arrayref of md5 values to restrict to
#   $eval, $ident, $alen - numeric cutoffs (ignored unless plain integers)
#   $ignore_sk   - when true, skip the seek/length columns and their filter
#   $rep_org_src - when set, join md5_organism_unique for this source and
#                  append the representative organism id to each row
#
# Returns: arrayref of rows (see trailing comment for the column layout)
#
# NOTE(review): $ignore_sk is not part of the cache key although it changes
# the row layout -- confirm callers never mix both forms with otherwise
# identical parameters.
sub get_md5_data {
    my ($self, $md5s, $eval, $ident, $alen, $ignore_sk, $rep_org_src) = @_;
    my $cache_key = "md5data";
    $cache_key .= defined($eval) ? $eval : ":";
    $cache_key .= defined($ident) ? $ident : ":";
    $cache_key .= defined($alen) ? $alen : ":";
    $cache_key .= $rep_org_src ? $rep_org_src : ":";
    my $data = {};
    my $jobs = [];
    my $qfilter = ($md5s && (@$md5s > 0)) ? 1 : 0;
    if ($qfilter) {
        # md5 filtering bypasses the cache: query every job
        $jobs = $self->_jobs;
    } else {
        # use cached rows per metagenome where available; query only the misses
        while ( my ($mg, $j) = each %{$self->_job_map} ) {
            my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
            if ($c) { $data->{$mg} = $c; }
            else { push @$jobs, $j; }
        }
    }
    unless (@$jobs) { return [ map { @$_ } values %$data ]; }
    # the cutoff parameters are reused as SQL condition strings
    $eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "j.exp_avg <= " . ($eval * -1) : "";
    $ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "j.ident_avg >= $ident" : "";
    $alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "j.len_avg >= $alen" : "";
    my %umd5s = $qfilter ? map {$_, 1} @$md5s : ();
    my $qmd5s = $qfilter ? "j.md5 IN (" . join(",", keys %umd5s) . ")" : "";
    my $qseek = $ignore_sk ? "" : "j.seek IS NOT NULL AND j.length IS NOT NULL";
    my $qrep = $rep_org_src ? "j.md5=r.md5 AND r.source=".$self->_src_id->{$rep_org_src} : "";
    my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", $qrep, $qmd5s, $eval, $ident, $alen, $qseek]);
    my $cseek = $ignore_sk ? "" : ",j.seek,j.length";
    my $crep = $rep_org_src ? ",r.organism" : "";
    my $sql = "SELECT DISTINCT j.job,j.md5,j.abundance,j.exp_avg,j.exp_stdv,j.ident_avg,j.ident_stdv,j.len_avg,j.len_stdv${cseek}${crep} FROM ".
        $self->_jtbl->{md5}." j".($rep_org_src ? ", md5_organism_unique r" : "").$where.($ignore_sk ? "" : " ORDER BY job, seek");
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        my $j = shift @row;
        my $mg = $self->_mg_map->{$j};
        push @{ $data->{$mg} }, [ $mg, @row ];
    }
    $sth->finish;
    # Bug fix: only cache when no md5 filter was applied. Previously the
    # md5-filtered subset was stored under the unfiltered cache key,
    # poisoning subsequent unfiltered lookups (the sibling get_*_for_md5s
    # methods already guard their cache writes this way).
    if ((! $qfilter) && $self->_memd) {
        foreach my $mg (keys %$data) {
            $self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
        }
    }
    $self->_dbh->commit;
    return [ map { @$_ } values %$data ];
    # mgid, md5, abundance, exp_avg, exp_stdv, ident_avg, ident_stdv, len_avg, len_stdv, (seek, length || rep_org_id)
}
# get_md5_abundance
# Map of md5 => abundance per metagenome, honoring e-value / identity /
# alignment-length cutoffs. When an md5 list is given, the query is chunked
# with natatime to keep the IN clause bounded.
#
# Returns: hashref of mgid => md5 => abundance
sub get_md5_abundance {
    my ($self, $eval, $ident, $alen, $md5s) = @_;
    my $cache_key = "md5abund";
    $cache_key .= defined($eval) ? $eval : ":";
    $cache_key .= defined($ident) ? $ident : ":";
    $cache_key .= defined($alen) ? $alen : ":";
    my $data = {};
    my $jobs = [];
    my $qfilter = ($md5s && (@$md5s > 0)) ? 1 : 0;
    if ($qfilter) {
        # md5 filtering bypasses the cache: query every job
        $jobs = $self->_jobs;
    } else {
        # use cached maps per metagenome where available; query only the misses
        while ( my ($mg, $j) = each %{$self->_job_map} ) {
            my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
            if ($c) { $data->{$mg} = $c; }
            else { push @$jobs, $j; }
        }
    }
    unless (@$jobs) { return $data; }
    # the cutoff parameters are reused as SQL condition strings
    $eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "exp_avg <= " . ($eval * -1) : "";
    $ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "ident_avg >= $ident" : "";
    $alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "len_avg >= $alen" : "";
    if ($qfilter) {
        my %umd5 = map {$_, 1} @$md5s;
        # query in chunks of _chunk md5s to keep the IN () list bounded
        my $iter = natatime $self->_chunk, keys %umd5;
        while (my @curr = $iter->()) {
            my $qmd5s = "md5 IN (".join(",", map {"'$_'"} @curr).")";
            my $where = $self->_get_where_str([$self->_qver, "job IN (".join(",", @$jobs).")", $qmd5s, $eval, $ident, $alen]);
            my $sql = "SELECT DISTINCT job, md5, abundance FROM ".$self->_jtbl->{md5}.$where;
            my $sth = $self->_dbh->prepare($sql);
            $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
            while (my @row = $sth->fetchrow_array()) {
                $data->{ $self->_mg_map->{$row[0]} }{$row[1]} = $row[2];
            }
            $sth->finish;
        }
    } else {
        my $where = $self->_get_where_str([$self->_qver, "job IN (".join(",", @$jobs).")", $eval, $ident, $alen]);
        my $sql = "SELECT DISTINCT job, md5, abundance FROM ".$self->_jtbl->{md5}.$where;
        my $sth = $self->_dbh->prepare($sql);
        $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
        while (my @row = $sth->fetchrow_array()) {
            $data->{ $self->_mg_map->{$row[0]} }{$row[1]} = $row[2];
        }
        $sth->finish;
    }
    # Bug fix: only cache when no md5 filter was applied. Previously the
    # md5-filtered subset was stored under a cache key that does not encode
    # the md5 list, so later unfiltered callers (e.g. get_*_for_md5s) could
    # read back an incomplete map.
    if ((! $qfilter) && $self->_memd) {
        foreach my $mg (keys %$data) {
            $self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
        }
    }
    $self->_dbh->commit;
    return $data;
    # mgid => md5 => abundance
}
# Thin wrappers around _get_annotation_md5, one per annotation type.
# All return: mgid => annotation/id => { md5 => 1 }

# Organism md5 map; optionally keyed by internal id or NCBI taxonomy id.
sub get_org_md5 {
    my $self = shift;
    my ($eval, $ident, $alen, $sources, $use_id, $use_taxid) = @_;
    return $self->_get_annotation_md5('organism', $eval, $ident, $alen, $sources, $use_id, $use_taxid);
}

# Ontology md5 map; a single source, passed down as a one-element list.
sub get_ontol_md5 {
    my $self = shift;
    my ($eval, $ident, $alen, $source, $use_id) = @_;
    return $self->_get_annotation_md5('ontology', $eval, $ident, $alen, [$source], $use_id);
}

# Function md5 map.
sub get_func_md5 {
    my $self = shift;
    my ($eval, $ident, $alen, $sources, $use_id) = @_;
    return $self->_get_annotation_md5('function', $eval, $ident, $alen, $sources, $use_id);
}
# _get_annotation_md5
# Shared implementation for get_org_md5 / get_ontol_md5 / get_func_md5.
# Builds, per metagenome, a map of annotation key (name, internal id, or
# NCBI taxonomy id) to the set of md5 checksums supporting it, honoring
# e-value / identity / alignment-length cutoffs. Per-metagenome results
# are cached in memcache when available.
#
# Returns: hashref of mgid => annotation/id => { md5 => 1 }
sub _get_annotation_md5 {
    my ($self, $type, $eval, $ident, $alen, $sources, $use_id, $use_taxid) = @_;
    my $cache_key = $type."md5";
    $cache_key .= defined($eval) ? $eval : ":";
    $cache_key .= defined($ident) ? $ident : ":";
    $cache_key .= defined($alen) ? $alen : ":";
    $cache_key .= defined($sources) ? join(";", @$sources) : ":";
    # Bug fix: the key must reflect the *truthiness* of $use_id/$use_taxid,
    # because that is what selects the SQL variant below. The old
    # defined() test meant a defined-but-false value (e.g. 0) produced the
    # ':1' cache key while running the ':0' query, poisoning the cache.
    $cache_key .= $use_id ? ':1' : ":0";
    $cache_key .= $use_taxid ? ':1' : ":0";
    my $data = {};
    my $jobs = [];
    # use cached maps per metagenome where available; query only the misses
    while ( my ($mg, $j) = each %{$self->_job_map} ) {
        my $c = $self->_memd ? $self->_memd->get($mg.$cache_key) : undef;
        if ($c) { $data->{$mg} = $c; }
        else { push @$jobs, $j; }
    }
    unless (@$jobs) { return $data; }
    # the cutoff parameters are reused as SQL condition strings
    $eval = (defined($eval) && ($eval =~ /^\d+$/)) ? "j.exp_avg <= " . ($eval * -1) : "";
    $ident = (defined($ident) && ($ident =~ /^\d+$/)) ? "j.ident_avg >= $ident" : "";
    $alen = (defined($alen) && ($alen =~ /^\d+$/)) ? "j.len_avg >= $alen" : "";
    my $key = $use_id ? 'j.id' : 'a.name';
    my $tid = '';
    if (($type eq 'organism') && $use_taxid) {
        $key = 'a.ncbi_tax_id';
        $tid = 'a.ncbi_tax_id IS NOT NULL';
    }
    my $qsrcs = ($sources && (@$sources > 0)) ? "j.source IN (" . join(",", map { $self->_src_id->{$_} } @$sources) . ")" : "";
    my $sql;
    if ($use_id && (! $use_taxid)) {
        # keyed by internal id: no need to join the annotation table
        my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", $qsrcs, $eval, $ident, $alen]);
        $sql = "SELECT DISTINCT j.job,$key,j.md5s FROM ".$self->_jtbl->{$type}." j".$where;
    } else {
        my $where = $self->_get_where_str(['j.'.$self->_qver, "j.job IN (".join(",", @$jobs).")", "j.id = a._id", $qsrcs, $eval, $ident, $alen, $tid]);
        $sql = "SELECT DISTINCT j.job,$key,j.md5s FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where;
    }
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        my $mg = $self->_mg_map->{$row[0]};
        # the md5s column is an array ref; record each checksum as a set member
        map { $data->{$mg}{$row[1]}{$_} = 1 } @{ $row[2] };
    }
    $sth->finish;
    if ($self->_memd) {
        foreach my $mg (keys %$data) {
            $self->_memd->set($mg.$cache_key, $data->{$mg}, $self->_expire);
        }
    }
    $self->_dbh->commit;
    return $data;
    # mgid => annotation/id => { md5 }
}
# Md5 checksums for all annotations at a taxonomy level, optionally
# restricted to given level names and/or a single source.
sub get_md5s_for_tax_level {
    my $self = shift;
    my ($level, $names, $source) = @_;
    return $self->_get_md5s_for_annotation_level('organism', $level, $names, $source);
}

# Same, but for ontology hierarchy levels (note the source-first signature).
sub get_md5s_for_ontol_level {
    my $self = shift;
    my ($source, $level, $names) = @_;
    return $self->_get_md5s_for_annotation_level('ontology', $level, $names, $source);
}
# _get_md5s_for_annotation_level
# Collect the distinct md5 checksums attached to annotations at a given
# hierarchy level column (e.g. tax_phylum, level3), optionally restricted
# to specific level names and/or a single data source.
#
# Returns: arrayref of md5 checksums.
sub _get_md5s_for_annotation_level {
    my ($self, $type, $level, $names, $src) = @_;
    my $md5s = {};
    my $tbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    # the requested level must be a real column of the annotation table
    my @cols = grep { $_ eq $level } @{ $self->_get_table_cols($tbl) };
    # Bug fix: the error path returned {} although the success path returns
    # an arrayref; callers dereferencing the result as an array would die.
    unless ($tbl && $level && (@cols == 1)) { return []; }
    my $qsrc = ($src) ? "j.source=".$self->_src_id->{$src} : "";
    # with no names given, match every non-empty value at that level
    my $qlvl = ($names && (@$names > 0)) ? "a.$level IN (".join(",", map {$self->_dbh->quote($_)} @$names).")" : "a.$level IS NOT NULL AND a.$level != ''";
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "j.id = a._id", $qsrc, $qlvl]);
    my $sql = "SELECT j.md5s FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        # md5s column is an array ref; de-duplicate via hash keys
        map { $md5s->{$_} = 1 } @{$row[0]};
    }
    $sth->finish;
    $self->_dbh->commit;
    return [ keys %$md5s ];
    # [ md5 ]
}
# Name-based md5 lookups, one per annotation type; each delegates to
# _get_md5s_for_annotation and returns an arrayref of md5 checksums.
sub get_md5s_for_organism {
    my $self = shift;
    my ($names, $source) = @_;
    return $self->_get_md5s_for_annotation('organism', $names, $source);
}

sub get_md5s_for_ontology {
    my $self = shift;
    my ($names, $source) = @_;
    return $self->_get_md5s_for_annotation('ontology', $names, $source);
}

sub get_md5s_for_function {
    my $self = shift;
    my ($names, $source) = @_;
    return $self->_get_md5s_for_annotation('function', $names, $source);
}
# _get_md5s_for_annotation
# Collect the distinct md5 checksums attached to the given annotation
# names of a type, optionally restricted to a single data source.
#
# Returns: arrayref of md5 checksums.
sub _get_md5s_for_annotation {
    my ($self, $type, $names, $src) = @_;
    my $md5s = {};
    my $tbl = exists($self->_atbl->{$type}) ? $self->_atbl->{$type} : '';
    # Bug fix: the error path returned {} although the success path returns
    # an arrayref; callers dereferencing the result as an array would die.
    unless ($tbl && $names && @$names) { return []; }
    # names are user-supplied text, so they go through the driver's quote()
    my $qname = "a.name IN (".join(",", map {$self->_dbh->quote($_)} @$names).")";
    my $qsrc = ($src) ? "j.source=".$self->_src_id->{$src} : "";
    my $where = $self->_get_where_str(['j.'.$self->_qver, 'j.'.$self->_qjobs, "j.id = a._id", $qname, $qsrc]);
    my $sql = "SELECT j.md5s FROM ".$self->_jtbl->{$type}." j, ".$self->_atbl->{$type}." a".$where;
    my $sth = $self->_dbh->prepare($sql);
    $sth->execute() or die "Couldn't execute statement: " . $sth->errstr;
    while (my @row = $sth->fetchrow_array()) {
        # md5s column is an array ref; de-duplicate via hash keys
        map { $md5s->{$_} = 1 } @{$row[0]};
    }
    $sth->finish;
    $self->_dbh->commit;
    return [ keys %$md5s ];
    # [ md5 ]
}
# get_m5nr_sequences_from_md5s
# Fetch the protein sequences for the given md5 checksums from the m5nr
# API, returned as FASTA-formatted text. Returns "" on empty input,
# transport failure, an empty body, or an API error response.
sub get_m5nr_sequences_from_md5s {
    my ($self, $md5s) = @_;
    return "" unless $md5s && (@$md5s > 0);
    my $endpoint = $self->_api."/m5nr/md5";
    my $payload  = $self->_json->encode({
        version  => $self->_version,
        sequence => 1,
        format   => 'fasta',
        data     => $md5s
    });
    my $body = "";
    # network errors are swallowed; any failure falls through to ""
    eval {
        my $reply = $self->_agent->post($endpoint, Content => $payload);
        $body = $reply->content;
    };
    return "" if $@ || (! $body) || $body =~ /ERROR/;
    return $body;
}
# get_taxa_to_level
# Query the m5nr API for the taxonomy hierarchy down to the given rank
# ($taxa, e.g. 'genus') and build a reverse map from each taxon name at
# that rank to its ancestor lineage.
#
# Returns: hashref of taxa_name => [ hierarchy from domain down to one
# rank above $taxa ]; missing intermediate ranks become "".
sub get_taxa_to_level {
my ($self, $taxa) = @_;
my $data = {};
my $response = undef;
my $url = $self->_api."/m5nr/taxonomy?version=".$self->_version."&min_level=".$taxa;
# network / JSON errors are swallowed; $response stays undef and an empty
# hash is returned
eval {
my $get = $self->_agent->get($url);
$response = $self->_json->decode($get->content);
};
if ($response && $response->{data} && @{$response->{data}}) {
foreach my $set ( @{$response->{data}} ) {
unless (exists $set->{$taxa}) {
next;
}
$data->{$set->{$taxa}} = [];
# walk the ranks from domain downward, stopping before $taxa itself
foreach my $name (('domain', 'phylum', 'class', 'order', 'family', 'genus', 'species')) {
if ($name eq $taxa) {
last;
}
if (exists $set->{$name}) {
push @{ $data->{$set->{$taxa}} }, $set->{$name};
} else {
push @{ $data->{$set->{$taxa}} }, "";
}
}
}
}
# taxa => [ hierarchy from domain to one higher than taxa ]
return $data;
}
| paczian/MG-RAST | src/MGRAST/lib/Analysis.pm | Perl | bsd-2-clause | 78,445 |
#!/usr/bin/perl
# burner-lib.pl
# Common functions for managing the CD burning profiles
BEGIN { push(@INC, ".."); };
use WebminCore;
&init_config();
%access = &get_module_acl();
&foreign_require("fdisk", "fdisk-lib.pl");
# list_profiles()
# Returns a list of all burn profiles available for use.
# Each profile can be for an ISO, a list of directory mappings, or a list of
# audio track files
sub list_profiles
{
my @rv;
# Lexical dirhandle plus an error check: a missing or unreadable module
# config directory simply means there are no profiles yet. The old code
# used an unchecked bareword handle and a package-global loop variable.
opendir(my $dir, $module_config_directory) || return ( );
foreach my $f (sort { $a cmp $b } readdir($dir)) {
	next if ($f !~ /^(\S+)\.burn$/);
	push(@rv, &get_profile($1));
	}
closedir($dir);
return @rv;
}
# get_profile(id)
# Reads the burn profile with the given ID from the module config directory
# and returns it as a hash ref, with the 'id' and 'file' keys filled in.
sub get_profile
{
my ($id) = @_;
my %burn;
&read_file("$module_config_directory/$id.burn", \%burn);
$burn{'id'} = $id;
$burn{'file'} = "$module_config_directory/$id.burn";
return \%burn;
}
# save_profile(&profile)
# Writes a burn profile back to its config file, assigning a new
# timestamp-based ID first if the profile does not have one yet.
sub save_profile
{
my ($profile) = @_;
$profile->{'id'} ||= time();
&write_file("$module_config_directory/$profile->{'id'}.burn", $profile);
}
# delete_profile(&profile)
# Removes a burn profile's config file from the module directory.
sub delete_profile
{
my ($profile) = @_;
unlink("$module_config_directory/$profile->{'id'}.burn");
}
# list_cdrecord_devices()
# Returns a list of all possible CD burner devices
# Each element is a hash ref with 'dev', 'name' and 'type' keys, built from
# "cdrecord -scanbus" output plus any CD-ROM devices known to the fdisk
# module on 2.6+ kernels.
# NOTE(review): the piped 2-arg open interpolates $config{'cdrecord'} and
# $config{'extra'} into a shell command. These come from the module config,
# not remote users, but a list-form open would be safer -- confirm before
# tightening, since $config{'extra'} may hold multiple shell words.
sub list_cdrecord_devices
{
my (@rv, %done);
# First get from CDrecord
open(SCAN, "$config{'cdrecord'} $config{'extra'} -scanbus 2>/dev/null |");
while(<SCAN>) {
	if (/^\s+(\S+)\s+\d+\)\s+'(.*)'\s+'(.*)'\s+'(.*)'\s+(.*)/) {
		push(@rv, { 'dev' => $1,
			    'name' => "$2$3$4",
			    'type' => $5 });
		$done{$1}++;
		}
	}
close(SCAN);
# Then add all cdrom devices not already found by the scan. Media type
# detection needs a 2.6 or later kernel.
# Bug fix: the old check (/^2\.(\d+)\./ && $1 >= 6) only matched 2.x
# kernels and silently skipped 3.x and newer.
my $uname = `uname -r 2>&1`;
if ($uname =~ /^(\d+)\.(\d+)/ && ($1 > 2 || ($1 == 2 && $2 >= 6))) {
	foreach my $disk (&fdisk::list_disks_partitions(1)) {
		if ($disk->{'media'} eq "cdrom" &&
		    !$done{$disk->{'device'}}) {
			push(@rv, { 'dev' => $disk->{'device'},
				    'name' => $disk->{'model'},
				    'type' => uc($disk->{'media'}) });
			}
		}
	}
return @rv;
}
# Known CD writer driver names offered in the UI -- presumably values for
# cdrdao's --driver option, given the generic-mmc/cdd2600 naming; confirm
# where this list is consumed.
@cdr_drivers = ( 'cdd2600', 'plextor', 'plextor-scan', 'generic-mmc',
'generic-mmc-raw', 'ricoh-mp6200', 'yamaha-cdr10x',
'teac-cdr55', 'sony-cdu920', 'sony-cdu948', 'taiyo-yuden',
'toshiba' );
# can_use_profile(&profile)
# ACL check: returns 1 when the current user's 'profiles' ACL is the '*'
# wildcard or contains this profile's ID in its space-separated list.
sub can_use_profile
{
my ($profile) = @_;
return 1 if ($access{'profiles'} eq '*');
my %allowed = map { $_ => 1 } split(/\s+/, $access{'profiles'});
return $allowed{$profile->{'id'}};
}
# can_directory(file)
# Returns 1 if some file is in an allowed directory
# ACL check against the space-separated 'dirs' list; symlinks are resolved
# on both the candidate path and each allowed directory before comparing.
sub can_directory
{
local @dirs = split(/\s+/, $access{'dirs'});
# a first entry of "/" means everything is permitted
return 1 if ($dirs[0] eq "/");
local $path = &resolve_links($_[0]);
local $d;
foreach $d (@dirs) {
return 1 if (&is_under_directory(&resolve_links($d), $path));
}
return 0;
}
1;
| xtso520ok/webmin | burner/burner-lib.pl | Perl | bsd-3-clause | 2,641 |
#
# Mail::SPF::SenderIPAddrMech
# Abstract base class for SPF record mechanisms that operate on the SMTP
# sender's IP address.
#
# (C) 2005-2008 Julian Mehnle <julian@mehnle.net>
# $Id: SenderIPAddrMech.pm 50 2008-08-17 21:28:15Z Julian Mehnle $
#
##############################################################################
package Mail::SPF::SenderIPAddrMech;
=head1 NAME
Mail::SPF::SenderIPAddrMech - Abstract base class for SPF record mechanisms
that operate on the SMTP sender's IP address
=cut
use warnings;
use strict;
use base 'Mail::SPF::Mech';
# Canonical boolean constants used throughout Mail::SPF.
use constant TRUE => (0 == 0);
use constant FALSE => not TRUE;
# Per-result-code explanation templates. Starts from the generic templates
# declared in Mail::SPF::Mech (via SUPER) and overrides the wording for
# results produced by mechanisms that test the SMTP sender's IP address
# (the %{c} macro expands to the client IP).
use constant explanation_templates_by_result_code => {
%{__PACKAGE__->SUPER::explanation_templates_by_result_code},
pass => "%{c} is authorized to use '%{s}' in '%{_scope}' identity",
fail => "%{c} is not authorized to use '%{s}' in '%{_scope}' identity",
softfail => "%{c} is not authorized to use '%{s}' in '%{_scope}' identity, however domain is not currently prepared for false failures",
neutral => "Domain does not state whether %{c} is authorized to use '%{s}' in '%{_scope}' identity"
};
=head1 DESCRIPTION
B<Mail::SPF::SenderIPAddrMech> is an abstract base class for SPF record
mechanisms that operate on the SMTP sender's IP address. It cannot be
instantiated directly. Create an instance of a concrete sub-class instead.
=head2 Constructors
See L<Mail::SPF::Mech/Constructors>.
=head2 Class methods
See L<Mail::SPF::Mech/Class methods>.
=head2 Instance methods
See L<Mail::SPF::Mech/Instance methods>.
=head1 SEE ALSO
L<Mail::SPF>, L<Mail::SPF::Record>, L<Mail::SPF::Mech>
L<Mail::SPF::Mech::IP4>,
L<Mail::SPF::Mech::IP6>,
L<Mail::SPF::Mech::A>,
L<Mail::SPF::Mech::MX>,
L<Mail::SPF::Mech::PTR>
L<http://www.ietf.org/rfc/rfc4408.txt>
For availability, support, and license information, see the README file
included with Mail::SPF.
=head1 AUTHORS
Julian Mehnle <julian@mehnle.net>
=cut
TRUE;
| memememomo/Mail-SPF | lib/Mail/SPF/SenderIPAddrMech.pm | Perl | bsd-3-clause | 1,992 |
#!/usr/bin/perl
#
# Copyright 2005-2019 The Mumble Developers. All rights reserved.
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file at the root of the
# Mumble source tree or at <https://www.mumble.info/LICENSE>.
#
# Generate a Mac OS X Finder .DS_Store file for use on the Mumble disk image.
#
#
# Notes:
#
# The use of binary plists inside the DS_Store seems to be new for the Finder in Mac OS X 10.6 (Snow Leopard),
# but we'll go with this for now. It falls back somewhat gracefully for older versions OS X, since the icons
# will still be placed in their correct positions.
#
use strict;
use warnings;

use Data::Plist::BinaryWriter;
use Mac::Finder::DSStore::BuddyAllocator;
use Mac::Finder::DSStore;
use IO::File;

my $plist = Data::Plist::BinaryWriter->new(serialize => 0);

# Finder window settings ('bwsp'): hide all window chrome so the disk image
# opens as a bare icon view at a fixed position/size.
my $bwsp = $plist->write([dict => {
    "ShowPathbar" => [ false => 0 ],
    "ShowSidebar" => [ false => 0 ],
    "ShowStatusBar" => [ false => 0 ],
    "ShowToolbar" => [ false => 0 ],
    "SidebarWidth" => [ integer => 192 ],
    "WindowBounds" => [ string => "{{388, 166}, {530, 525}}" ],
}]);

# Icon view settings ('icvp'): white background, 80px icons on a fixed grid.
my $icvp = $plist->write([ dict => {
    "arrangeBy" => [ string => "grid" ],
    # these are floats [0, 1]
    "backgroundColorBlue" => [ integer => 1 ],
    "backgroundColorGreen" => [ integer => 1 ],
    "backgroundColorRed" => [ integer => 1 ],
    # a mac os alias record or the background image.
    # fixme(mkrautz): add our pretty background. :)
    "backgroundImageAlias" => [ data => "" ],
    # background type. 1=color, 2=image
    "backgroundType" => [ integer => 1 ],
    "gridOffsetX" => [ integer => 0 ],
    "gridOffsetY" => [ integer => 0 ],
    "gridSpacing" => [ integer => 100 ],
    "iconSize" => [ integer => 80 ],
    "labelOnBottom" => [ true => 1 ],
    "showIconPreview" => [ true => 1 ],
    "showItemInfo" => [ false => 0 ],
    "textSize" => [ integer => 12 ],
    "viewOptionsVersion" => [ integer => 0 ],
}]);

my @entries = ();

# styling: attach the two view-settings plists to the root directory entry "."
my %styles = (
    'bwsp' => $bwsp,
    'icvp' => $icvp,
);
for my $key (keys %styles) {
    my $e = Mac::Finder::DSStore::Entry->new(".", $key);
    $e->value($styles{$key});
    push(@entries, $e);
}

# file info: 'Iloc' records give each top-level item its icon position (x, y).
my %iconpos = (
    "Mumble.app" => [ 54, 40 ],
    "Applications" => [ 466, 40 ],
    "ReadMe.txt" => [ 54, 416 ],
    "Changes.txt" => [ 157, 416 ],
    "Murmur Extras" => [ 363, 416 ],
    "Licenses" => [ 466, 416 ],
);
for my $key (keys %iconpos) {
    my $e = Mac::Finder::DSStore::Entry->new($key, 'Iloc');
    # NOTE(review): 'n' is a 16-bit field, so the 65536 values wrap to 0 and
    # the trailing 0 exceeds the six-element template (pack ignores it).
    # Kept byte-identical to the original output — confirm against the
    # DSStore Iloc record layout before changing.
    $e->value(pack('NNnnnn', $iconpos{$key}[0], $iconpos{$key}[1], 65536, 65536, 65536, 65536, 0));
    push(@entries, $e);
}

# DSStore entries must be written in sorted order.
@entries = sort { $a->cmp($b) } @entries;

# Open the output file explicitly (direct-call syntax instead of the old
# indirect-object "new IO::File ..." form) and fail loudly if it cannot be
# created; the original would have passed undef through to the allocator.
my $out_fh = IO::File->new("DS_Store", '>')
    or die "Cannot open DS_Store for writing: $!";
my $store = Mac::Finder::DSStore::BuddyAllocator->new($out_fh);
&Mac::Finder::DSStore::putDSDBEntries($store, \@entries);
$store->writeMetaData;
$store->close;
| Lartza/mumble | macx/scripts/gendmg.pl | Perl | bsd-3-clause | 2,764 |
#!/usr/bin/env perl
#
#-------------------------------------------------------------------------------
# Copyright (c) 2014-2019 René Just, Darioush Jalali, and Defects4J contributors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
=pod
=head1 NAME
run_coverage.pl -- code coverage analysis for generated test suites.
=head1 SYNOPSIS
run_coverage.pl -p project_id -d suite_dir -o out_dir [-f include_file_pattern] [-v version_id] [-t tmp_dir] [-D] [-A | -i instrument_classes] [-I]
=head1 OPTIONS
=over 4
=item -p C<project_id>
The id of the project for which the generated test suites are analyzed.
See L<Project|Project/"Available Project IDs"> module for available project IDs.
=item -d F<suite_dir>
The directory that contains the test suite archives.
See L<Test suites|/"Test suites">.
=item -o F<out_dir>
The output directory for the results and log files.
=item -f C<include_file_pattern>
The pattern of the file names of the test classes that should be included (optional).
Per default all files (*.java) are included.
=item -v C<version_id>
Only analyze test suites for this version id (optional). Per default all
test suites for the given project id are analyzed.
=item -t F<tmp_dir>
The temporary root directory to be used to check out program versions (optional).
The default is F</tmp>.
=item -i F<instrument_classes>
Measure code coverage for all classes listed in F<instrument_classes> (optional). By
default, code coverage is measured only for the classes modified by the bug fix. The file
F<instrument_classes> must contain fully-qualified class names -- one class per line.
=item -D
Debug: Display execution environment and do not delete the temporary check-out directory
(optional).
=item -A
All relevant classes: Measure code coverage for all relevant classes (i.e., all
classes touched by the triggering tests). By default code coverage is measured
only for classes modified by the bug fix.
=item -I
Ignore failing tests; report all coverage data.
=back
=head1 DESCRIPTION
Measures code coverage for each provided test suite (i.e., each test suite archive in
F<suite_dir>) on the program version for which that test suite was generated.
The results of the analysis are stored in the database table
F<out_dir/L<TAB_COVERAGE|DB>>. The corresponding log files are stored in
F<out_dir/L<TAB_COVERAGE|DB>_log>.
=cut
use warnings;
use strict;
use FindBin;
use File::Basename;
use Cwd qw(abs_path);
use Getopt::Std;
use Pod::Usage;
use lib abs_path("$FindBin::Bin/../core");
use Constants;
use Coverage;
use Project;
use Utils;
use Log;
use DB;
#
# Process arguments and issue usage message if necessary.
#
my %cmd_opts;
getopts('p:d:v:t:o:f:i:DAI', \%cmd_opts) or pod2usage(1);

pod2usage(1) unless defined $cmd_opts{p} and defined $cmd_opts{d} and defined $cmd_opts{o};

# Ensure that directory of test suites exists
-d $cmd_opts{d} or die "Test suite directory $cmd_opts{d} does not exist!";

my $PID = $cmd_opts{p};
my $SUITE_DIR = abs_path($cmd_opts{d});
# BUGFIX: "my $x = EXPR if COND" is documented as undefined behavior in
# perlsyn (the conditionally-skipped declaration can retain a stale value).
# Declare unconditionally instead; $VID is simply undef when -v was not
# given, which is exactly what the later "defined $VID" checks expect.
my $VID = $cmd_opts{v};
my $INCL = $cmd_opts{f} // "*.java";

# Enable debugging if flag is set
$DEBUG = 1 if defined $cmd_opts{D};
if ($DEBUG) {
    Utils::print_env();
}

# Directory of class lists used for instrumentation step
my $CLASSES = defined $cmd_opts{A} ? "loaded_classes" : "modified_classes";
my $TARGET_CLASSES_DIR = "$SCRIPT_DIR/projects/$PID/$CLASSES";
# Same fix as for $VID above: unconditional declaration, undef when -i absent.
my $INSTRUMENT_CLASSES = $cmd_opts{i};

# -A (all loaded classes) and -i (explicit class list) are mutually exclusive.
if (defined $cmd_opts{A} && defined $cmd_opts{i}) {
    pod2usage( { -verbose => 1, -input => __FILE__} );
}
# Set up project
my $project = Project::create_project($PID);

# Check format of target version id
if (defined $VID) {
    # Verify that the provided version id is valid
    Utils::check_vid($VID);
    $project->contains_version_id($VID) or die "Version id ($VID) does not exist in project: $PID";
}

# Output directory for results
# NOTE(review): return value of system("mkdir -p ...") is not checked here;
# a failure surfaces later when abs_path/DB writes fail.
system("mkdir -p $cmd_opts{o}");
my $OUT_DIR = abs_path($cmd_opts{o});

# Temporary directory for execution (checkouts happen below this root)
my $TMP_DIR = Utils::get_tmp_dir($cmd_opts{t});
system("mkdir -p $TMP_DIR");
=pod
=head2 Logging
By default, the script logs all errors and warnings to run_coverage.pl.log in
the temporary project root.
Upon success, the log file of this script and the detailed coverage results for
each executed test suite are copied to:
F<out_dir/L<TAB_COVERAGE|DB>_log/project_id>.
=cut
# Log directory and file: per-project directory under the output dir,
# log file named after this script.
my $LOG_DIR = "$OUT_DIR/${TAB_COVERAGE}_log/$PID";
my $LOG_FILE = "$LOG_DIR/" . basename($0) . ".log";
system("mkdir -p $LOG_DIR");

# Open temporary log file; it is appended to $LOG_FILE at the end of the run.
my $LOG = Log::create_log("$TMP_DIR/". basename($0) . ".log");
$LOG->log_time("Start code coverage analysis");
=pod
=head2 Test suites
To be considered for the analysis, a test suite has to be provided as an archive in
F<suite_dir>. Format of the archive file name:
C<project_id-version_id-test_suite_src(\.test_id)?\.tar\.bz2>
Note that C<test_id> is optional, the default is 1.
Examples:
=over 4
=item * F<Lang-11f-randoop.1.tar.bz2 (equal to Lang-1-randoop.tar.bz2)>
=item * F<Lang-11b-randoop.2.tar.bz2>
=item * F<Lang-12b-evosuite-weakmutation.1.tar.bz2>
=item * F<Lang-12f-evosuite-branch.1.tar.bz2>
=back
=cut
# Get all test suite archives that match the given project id and version id
my $test_suites = Utils::get_all_test_suites($SUITE_DIR, $PID, $VID);

# Get database handle for result table
my $dbh_out = DB::get_db_handle($TAB_COVERAGE, $OUT_DIR);
# Prepared once, reused inside the loop to detect already-analyzed suites.
my $sth = $dbh_out->prepare("SELECT * FROM $TAB_COVERAGE WHERE $PROJECT=? AND $TEST_SUITE=? AND $ID=? AND $TEST_ID=?")
    or die $dbh_out->errstr;

# Iterate over all version ids
foreach my $vid (keys %{$test_suites}) {
    # Iterate over all test suite sources (test data generation tools)
    foreach my $suite_src (keys %{$test_suites->{$vid}}) {
        `mkdir -p $LOG_DIR/$suite_src`;
        # Iterate over all test suites for this source
        foreach my $test_id (keys %{$test_suites->{$vid}->{$suite_src}}) {
            my $archive = $test_suites->{$vid}->{$suite_src}->{$test_id};
            my $test_dir = "$TMP_DIR/$suite_src";
            # Skip existing entries
            # NOTE(review): DBI's rows() is not guaranteed meaningful for
            # SELECT statements on all drivers — works for DBD::SQLite used
            # here, presumably; confirm if the backend ever changes.
            $sth->execute($PID, $suite_src, $vid, $test_id);
            if ($sth->rows !=0) {
                $LOG->log_msg(" - Skipping $archive since results already exist in database!");
                next;
            }
            $LOG->log_msg(" - Executing test suite: $archive");
            printf ("Executing test suite: $archive\n");
            # Extract generated tests into temp directory
            Utils::extract_test_suite("$SUITE_DIR/$archive", $test_dir)
                or die "Cannot extract test suite!";
            #
            # Run the actual code coverage analysis
            #
            # TODO: Avoid re-compilation/instrumentation of classes for the same
            # version id. Only checkout and instrument every version once --
            # reset coverage results prior to each run, though!
            #
            _run_coverage($vid, $suite_src, $test_id, $test_dir);
        }
    }
}

# Log current time
$LOG->log_time("End code coverage analysis");
$LOG->close();

# Copy log file and clean up temporary directory (kept when -D was given)
system("cat $LOG->{file_name} >> $LOG_FILE") == 0 or die "Cannot copy log file";
system("rm -rf $TMP_DIR") unless $DEBUG;
#
# Run code coverage analysis on the program version for which the tests were created.
#
# Args: ($vid, $suite_src, $test_id, $test_dir)
#   $vid       - version id (e.g. "11f"), validated via Utils::check_vid
#   $suite_src - test generation tool name (e.g. "randoop")
#   $test_id   - numeric suite id within that source
#   $test_dir  - directory the suite archive was extracted into
#
# Uses the file-level globals $test_suites, $project, $TMP_DIR, $LOG,
# $LOG_DIR, $INSTRUMENT_CLASSES, $TARGET_CLASSES_DIR, $INCL, $OUT_DIR and
# the column-name constants. Dies on checkout/compile failures; otherwise
# inserts one row into the coverage table.
sub _run_coverage {
    my ($vid, $suite_src, $test_id, $test_dir) = @_;

    # Get archive name for current test suite (for log messages only)
    my $archive = $test_suites->{$vid}->{$suite_src}->{$test_id};

    my $result = Utils::check_vid($vid);
    my $bid = $result->{bid};
    my $type = $result->{type};

    # Checkout program version into a per-vid subdirectory of the temp root
    my $root = "$TMP_DIR/${vid}";
    $project->{prog_root} = "$root";
    $project->checkout_vid($vid) or die "Checkout failed";

    # Compile the program version
    $project->compile() or die "Compilation failed";

    # Compile generated tests
    $project->compile_ext_tests($test_dir) or die "Tests do not compile!";

    my $src_dir = $project->src_dir($vid);
    # Truncate/create the per-run test log via shell redirection
    my $test_log = "$TMP_DIR/.coverage.log"; `>$test_log`;
    my $cov_info;
    # -i overrides the default per-bug class list from the project directory
    if (defined $INSTRUMENT_CLASSES) {
        $cov_info = Coverage::coverage_ext($project, "$INSTRUMENT_CLASSES", $src_dir, $test_dir, $INCL, $test_log);
    } else {
        $cov_info = Coverage::coverage_ext($project, "$TARGET_CLASSES_DIR/$bid.src", $src_dir, $test_dir, $INCL, $test_log);
    }

    if (Utils::has_failing_tests($test_log)) {
        $LOG->log_msg(" - Broken test suite: $archive");
        printf ("Broken test suite: $archive\n");
        # randoop generated tests often have asserts; we may still want the coverage data
        # (-I keeps the coverage numbers; otherwise discard them for broken suites)
        if (! defined $cmd_opts{I}) {
            $cov_info = {};
        }
        system("cp $test_log $LOG_DIR/$suite_src/$vid.$test_id.failing.log") == 0
            or die "Cannot copy stack traces from failing tests";
    }

    # Add information about test suite to hash that holds the coverage information
    $cov_info->{$PROJECT} = $PID;
    $cov_info->{$ID} = $vid;
    $cov_info->{$TEST_SUITE} = $suite_src;
    $cov_info->{$TEST_ID} = $test_id;

    # Insert results into database and copy log files
    Coverage::insert_row($cov_info, $OUT_DIR);
    Coverage::copy_coverage_logs($project, $vid, $suite_src, $test_id, $LOG_DIR);
}
| jose/defects4j | framework/bin/run_coverage.pl | Perl | mit | 10,460 |
=encoding utf-8
=head1 NAME
ngx_http_userid_module - Module ngx_http_userid_module
=head1
The C<ngx_http_userid_module> module sets cookies
suitable for client identification.
Received and set cookies can be logged using the embedded variables
$uid_got and
$uid_set.
This module is compatible with the
L<mod_uid|http://www.lexa.ru/programs/mod-uid-eng.html>
module for Apache.
=head1 Example Configuration
userid on;
userid_name uid;
userid_domain example.com;
userid_path /;
userid_expires 365d;
userid_p3p 'policyref="/w3c/p3p.xml", CP="CUR ADM OUR NOR STA NID"';
=head1 Directives
=head2 userid
B<syntax:> userid I<
C<on> E<verbar>
C<v1> E<verbar>
C<log> E<verbar>
C<off>>
B<default:> I<off>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
Enables or disables setting cookies and logging the received cookies:
=over
=item C<on>
enables the setting of version 2 cookies
and logging of the received cookies;
=item C<v1>
enables the setting of version 1 cookies
and logging of the received cookies;
=item C<log>
disables the setting of cookies,
but enables logging of the received cookies;
=item C<off>
disables the setting of cookies and logging of the received cookies.
=back
=head2 userid_domain
B<syntax:> userid_domain I<I<C<name>> E<verbar> C<none>>
B<default:> I<none>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
Defines a domain for which the cookie is set.
The C<none> parameter disables setting of a domain for the
cookie.
=head2 userid_expires
B<syntax:> userid_expires I<I<C<time>> E<verbar> C<max> E<verbar>
C<off>>
B<default:> I<off>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
Sets a time during which a browser should keep the cookie.
The parameter C<max> will cause the cookie to expire on
“C<31 Dec 2037 23:55:55 GMT>”.
The parameter C<off> will cause the cookie to expire at
the end of a browser session.
=head2 userid_mark
B<syntax:> userid_mark I<
I<C<letter>> E<verbar> I<C<digit>> E<verbar>
C<=> E<verbar>
C<off>>
B<default:> I<off>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
If the parameter is not C<off>, enables the cookie marking
mechanism and sets the character used as a mark.
This mechanism is used to add or change
L</userid_p3p> andE<sol>or a cookie expiration time while
preserving the client identifier.
A mark can be any letter of the English alphabet (case-sensitive),
digit, or the “C<=>” character.
If the mark is set, it is compared with the first padding symbol
in the base64 representation of the client identifier passed in a cookie.
If they do not match, the cookie is resent with the specified mark,
expiration time, and C<P3P> header.
=head2 userid_name
B<syntax:> userid_name I<I<C<name>>>
B<default:> I<uid>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
Sets the cookie name.
=head2 userid_p3p
B<syntax:> userid_p3p I<I<C<string>> E<verbar> C<none>>
B<default:> I<none>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
Sets a value for the C<P3P> header field that will be
sent along with the cookie.
If the directive is set to the special value C<none>,
the C<P3P> header will not be sent in a response.
=head2 userid_path
B<syntax:> userid_path I<I<C<path>>>
B<default:> I<E<sol>>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
Defines a path for which the cookie is set.
=head2 userid_service
B<syntax:> userid_service I<I<C<number>>>
B<default:> I<IP address of the server>
B<context:> I<http>
B<context:> I<server>
B<context:> I<location>
If identifiers are issued by multiple servers (services),
each service should be assigned its own I<C<number>>
to ensure that client identifiers are unique.
For version 1 cookies, the default value is zero.
For version 2 cookies, the default value is the number composed from the last
four octets of the server’s IP address.
=head1 Embedded Variables
The C<ngx_http_userid_module> module
supports the following embedded variables:
=over
=item C<$uid_got>
The cookie name and received client identifier.
=item C<$uid_reset>
If the variable is set to a non-empty string that is not “C<0>”,
the client identifiers are reset.
The special value “C<log>” additionally leads to the output of
messages about the reset identifiers to the
L<ngx_core_module>.
=item C<$uid_set>
The cookie name and sent client identifier.
=back
| LomoX-Offical/nginx-openresty-windows | src/pod/nginx/ngx_http_userid_module.pod | Perl | bsd-2-clause | 4,723 |
package #   split across two lines to hide this module from the PAUSE/CPAN indexer
Date::Manip::TZ::etgmtm01;
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.

# This file was automatically generated.  Any changes to this file will
# be lost the next time 'tzdata' is run.
#    Generated on: Fri Nov 21 10:41:41 EST 2014
#    Data version: tzdata2014j
#    Code version: tzcode2014j

# This module contains data from the zoneinfo time zone database.  The original
# data was obtained from the URL:
#    ftp://ftp.iana.org/tz

use strict;
use warnings;
require 5.010000;

our (%Dates,%LastRule);
# Free the (potentially large) data tables at interpreter shutdown.
END {
   undef %Dates;
   undef %LastRule;
}

our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }

# Fixed-offset zone data: a single period covering all time.
# NOTE(review): row layout appears to be
#   [start UTC], [start local], offset string, [offset h/m/s],
#   abbreviation, isdst, [end UTC], [end local], string-form timestamps
# — generated by 'tzdata'; do not hand-edit values.
%Dates = (
   1    =>
     [
        [ [1,1,2,0,0,0],[1,1,1,14,0,0],'-10:00:00',[-10,0,0],
          'GMT-10',0,[9999,12,31,0,0,0],[9999,12,30,14,0,0],
          '0001010200:00:00','0001010114:00:00','9999123100:00:00','9999123014:00:00' ],
     ],
);

# No ongoing DST rules for a fixed-offset zone.
%LastRule      = (
);

1;
| nriley/Pester | Source/Manip/TZ/etgmtm01.pm | Perl | bsd-2-clause | 1,040 |
#!/usr/bin/perl
#
# Set up the on-disk layout for an Arepa installation: creates the data,
# repository, upload-queue and log directories, the package and web-session
# databases, the reprepro configuration stub and the GPG home directory, and
# assigns ownership/permissions to the arepa and web-server users.
# Must be run with enough privileges to chown the created paths.

use strict;
use warnings;

use lib qw(lib);
use File::Path;
use File::Basename;
use File::chmod qw(symchmod);
use File::Spec;

use Arepa::Config;
use Arepa::PackageDb;

$File::chmod::UMASK = 0;

my $arepa_user  = "arepa-master";
my $arepa_group = "arepa";
my $web_user    = "www-data";
my $web_group   = "www-data";

my $config = Arepa::Config->new("/etc/arepa/config.yml");

# BUGFIX: user ids must come from the passwd database (getpwnam); the
# original code used getgrnam, which queries the *group* database and
# returns a gid — wrong for 'arepa-master' (and only accidentally close
# for 'www-data'). Group ids below correctly keep using getgrnam.
my $uid = getpwnam($arepa_user);
if (!defined $uid) {
    print STDERR "ERROR: User '$arepa_user' doesn't exist\n";
    exit 1;
}
my $gid = getgrnam($arepa_group);
if (!defined $gid) {
    print STDERR "ERROR: Group '$arepa_group' doesn't exist\n";
    exit 1;
}
my $web_uid = getpwnam($web_user);
if (!defined $web_uid) {
    print STDERR "ERROR: User '$web_user' doesn't exist\n";
    exit 1;
}
my $web_gid = getgrnam($web_group);
if (!defined $web_gid) {
    print STDERR "ERROR: Group '$web_group' doesn't exist\n";
    exit 1;
}

my $package_db_path = $config->get_key("package_db");
# Create every directory Arepa needs, owned by the arepa user, group-writable.
foreach my $path (dirname($package_db_path),
                  $config->get_key("repository:path"),
                  File::Spec->catfile($config->get_key("repository:path"),
                                      "conf"),
                  $config->get_key("upload_queue:path"),
                  $config->get_key("dir:build_logs")) {
    print "Creating directory $path\n";
    mkpath($path);
    chown($uid, $gid, $path);
    symchmod("g+w", $path);
}

my $builder_dir = "/etc/arepa/builders";
print "Creating builder configuration directory $builder_dir\n";
mkpath($builder_dir);
chown($uid, $gid, $builder_dir);
symchmod("g+w", $builder_dir);

print "Creating package DB in $package_db_path\n";
my $package_db = Arepa::PackageDb->new($package_db_path);
chown($uid, $gid, $package_db_path);
symchmod("g+w", $package_db_path);
# SQLite needs a writable containing directory to create its journal files.
my $db_dir = dirname($package_db_path);
print "Fixing permissions for database directory $db_dir\n";
chown($uid, $gid, $db_dir);
symchmod("g+w", $db_dir);

my $session_db_path = $config->get_key("web_ui:session_db");
if (! -r $session_db_path) {
    print "Creating web UI session DB in $session_db_path\n";
    # Touch the file so it exists before sqlite3 runs (3-arg open with a
    # lexical handle and an error check, instead of the old bareword form).
    open my $touch_fh, '>>', $session_db_path
        or die "Cannot create $session_db_path: $!";
    close $touch_fh;
    my $sqlite_cmd = <<EOC;
echo "CREATE TABLE session (sid VARCHAR(40) PRIMARY KEY, data TEXT, expires INTEGER UNSIGNED NOT NULL, UNIQUE(sid));" | sqlite3 '$session_db_path'
EOC
    print "Creating session DB schema with:\n$sqlite_cmd";
    system($sqlite_cmd);
    # The session DB belongs to the web server, not the arepa user.
    chown($web_uid, $web_gid, $session_db_path);
    symchmod("g+w", $session_db_path);
}

my $repo_dists_conf = File::Spec->catfile($config->get_key("repository:path"),
                                          "conf",
                                          "distributions");
print "Creating repo configuration file in $repo_dists_conf\n";
open my $dists_fh, '>>', $repo_dists_conf
    or die "Cannot create $repo_dists_conf: $!";
close $dists_fh;
chown($uid, $gid, $repo_dists_conf);
symchmod("g+w", $repo_dists_conf);

my $gpg_dir = $config->get_key("web_ui:gpg_homedir");
print "Creating GPG directory in $gpg_dir\n";
mkpath($gpg_dir);
chown($web_uid, $web_gid, $gpg_dir);
# GnuPG refuses to use a home directory that is not 0700.
chmod(0700, $gpg_dir);
my $gpg_options = File::Spec->catfile($config->get_key("web_ui:gpg_homedir"),
                                      "options");
if (! -f $gpg_options) {
    print "Creating options file $gpg_options\n";
    my $keyrings_dir =
        File::Spec->catfile(dirname($config->get_key("web_ui:gpg_homedir")),
                            "keyrings");
    mkpath($keyrings_dir);
    chown($uid, $gid, $keyrings_dir);
    symchmod("g+w", $keyrings_dir);

    open my $options_fh, '>', $gpg_options
        or die "Cannot write $gpg_options: $!";
    print {$options_fh} "keyring $keyrings_dir/uploaders.gpg\n";
    close $options_fh or die "Cannot close $gpg_options: $!";
    chown($uid, $gid, $gpg_options);
    symchmod("g+w", $gpg_options);
}
| gitpan/Arepa | install_arepa.pl | Perl | bsd-3-clause | 3,676 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.2.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
0000 0241
0250 036F
0374 0375
037A
037E
0384 038A
038C
038E 03A1
03A3 03CE
03D0 0486
0488 04CE
04D0 04F9
0500 050F
0531 0556
0559 055F
0561 0587
0589 058A
0591 05B9
05BB 05C7
05D0 05EA
05F0 05F4
0600 0603
060B 0615
061B
061E 061F
0621 063A
0640 065E
0660 070D
070F 074A
074D 076D
0780 07B1
0901 0939
093C 094D
0950 0954
0958 0970
097D
0981 0983
0985 098C
098F 0990
0993 09A8
09AA 09B0
09B2
09B6 09B9
09BC 09C4
09C7 09C8
09CB 09CE
09D7
09DC 09DD
09DF 09E3
09E6 09FA
0A01 0A03
0A05 0A0A
0A0F 0A10
0A13 0A28
0A2A 0A30
0A32 0A33
0A35 0A36
0A38 0A39
0A3C
0A3E 0A42
0A47 0A48
0A4B 0A4D
0A59 0A5C
0A5E
0A66 0A74
0A81 0A83
0A85 0A8D
0A8F 0A91
0A93 0AA8
0AAA 0AB0
0AB2 0AB3
0AB5 0AB9
0ABC 0AC5
0AC7 0AC9
0ACB 0ACD
0AD0
0AE0 0AE3
0AE6 0AEF
0AF1
0B01 0B03
0B05 0B0C
0B0F 0B10
0B13 0B28
0B2A 0B30
0B32 0B33
0B35 0B39
0B3C 0B43
0B47 0B48
0B4B 0B4D
0B56 0B57
0B5C 0B5D
0B5F 0B61
0B66 0B71
0B82 0B83
0B85 0B8A
0B8E 0B90
0B92 0B95
0B99 0B9A
0B9C
0B9E 0B9F
0BA3 0BA4
0BA8 0BAA
0BAE 0BB9
0BBE 0BC2
0BC6 0BC8
0BCA 0BCD
0BD7
0BE6 0BFA
0C01 0C03
0C05 0C0C
0C0E 0C10
0C12 0C28
0C2A 0C33
0C35 0C39
0C3E 0C44
0C46 0C48
0C4A 0C4D
0C55 0C56
0C60 0C61
0C66 0C6F
0C82 0C83
0C85 0C8C
0C8E 0C90
0C92 0CA8
0CAA 0CB3
0CB5 0CB9
0CBC 0CC4
0CC6 0CC8
0CCA 0CCD
0CD5 0CD6
0CDE
0CE0 0CE1
0CE6 0CEF
0D02 0D03
0D05 0D0C
0D0E 0D10
0D12 0D28
0D2A 0D39
0D3E 0D43
0D46 0D48
0D4A 0D4D
0D57
0D60 0D61
0D66 0D6F
0D82 0D83
0D85 0D96
0D9A 0DB1
0DB3 0DBB
0DBD
0DC0 0DC6
0DCA
0DCF 0DD4
0DD6
0DD8 0DDF
0DF2 0DF4
0E01 0E3A
0E3F 0E5B
0E81 0E82
0E84
0E87 0E88
0E8A
0E8D
0E94 0E97
0E99 0E9F
0EA1 0EA3
0EA5
0EA7
0EAA 0EAB
0EAD 0EB9
0EBB 0EBD
0EC0 0EC4
0EC6
0EC8 0ECD
0ED0 0ED9
0EDC 0EDD
0F00 0F47
0F49 0F6A
0F71 0F8B
0F90 0F97
0F99 0FBC
0FBE 0FCC
0FCF 0FD1
1000 1021
1023 1027
1029 102A
102C 1032
1036 1039
1040 1059
10A0 10C5
10D0 10FC
1100 1159
115F 11A2
11A8 11F9
1200 1248
124A 124D
1250 1256
1258
125A 125D
1260 1288
128A 128D
1290 12B0
12B2 12B5
12B8 12BE
12C0
12C2 12C5
12C8 12D6
12D8 1310
1312 1315
1318 135A
135F 137C
1380 1399
13A0 13F4
1401 1676
1680 169C
16A0 16F0
1700 170C
170E 1714
1720 1736
1740 1753
1760 176C
176E 1770
1772 1773
1780 17DD
17E0 17E9
17F0 17F9
1800 180E
1810 1819
1820 1877
1880 18A9
1900 191C
1920 192B
1930 193B
1940
1944 196D
1970 1974
1980 19A9
19B0 19C9
19D0 19D9
19DE 1A1B
1A1E 1A1F
1D00 1DC3
1E00 1E9B
1EA0 1EF9
1F00 1F15
1F18 1F1D
1F20 1F45
1F48 1F4D
1F50 1F57
1F59
1F5B
1F5D
1F5F 1F7D
1F80 1FB4
1FB6 1FC4
1FC6 1FD3
1FD6 1FDB
1FDD 1FEF
1FF2 1FF4
1FF6 1FFE
2000 2063
206A 2071
2074 208E
2090 2094
20A0 20B5
20D0 20EB
2100 214C
2153 2183
2190 23DB
2400 2426
2440 244A
2460 269C
26A0 26B1
2701 2704
2706 2709
270C 2727
2729 274B
274D
274F 2752
2756
2758 275E
2761 2794
2798 27AF
27B1 27BE
27C0 27C6
27D0 27EB
27F0 2B13
2C00 2C2E
2C30 2C5E
2C80 2CEA
2CF9 2D25
2D30 2D65
2D6F
2D80 2D96
2DA0 2DA6
2DA8 2DAE
2DB0 2DB6
2DB8 2DBE
2DC0 2DC6
2DC8 2DCE
2DD0 2DD6
2DD8 2DDE
2E00 2E17
2E1C 2E1D
2E80 2E99
2E9B 2EF3
2F00 2FD5
2FF0 2FFB
3000 303F
3041 3096
3099 30FF
3105 312C
3131 318E
3190 31B7
31C0 31CF
31F0 321E
3220 3243
3250 32FE
3300 4DB5
4DC0 9FBB
A000 A48C
A490 A4C6
A700 A716
A800 A82B
AC00 D7A3
D800 FA2D
FA30 FA6A
FA70 FAD9
FB00 FB06
FB13 FB17
FB1D FB36
FB38 FB3C
FB3E
FB40 FB41
FB43 FB44
FB46 FBB1
FBD3 FD3F
FD50 FD8F
FD92 FDC7
FDD0 FDFD
FE00 FE19
FE20 FE23
FE30 FE52
FE54 FE66
FE68 FE6B
FE70 FE74
FE76 FEFC
FEFF
FF01 FFBE
FFC2 FFC7
FFCA FFCF
FFD2 FFD7
FFDA FFDC
FFE0 FFE6
FFE8 FFEE
FFF9 1000B
1000D 10026
10028 1003A
1003C 1003D
1003F 1004D
10050 1005D
10080 100FA
10100 10102
10107 10133
10137 1018A
10300 1031E
10320 10323
10330 1034A
10380 1039D
1039F 103C3
103C8 103D5
10400 1049D
104A0 104A9
10800 10805
10808
1080A 10835
10837 10838
1083C
1083F
10A00 10A03
10A05 10A06
10A0C 10A13
10A15 10A17
10A19 10A33
10A38 10A3A
10A3F 10A47
10A50 10A58
1D000 1D0F5
1D100 1D126
1D12A 1D1DD
1D200 1D245
1D300 1D356
1D400 1D454
1D456 1D49C
1D49E 1D49F
1D4A2
1D4A5 1D4A6
1D4A9 1D4AC
1D4AE 1D4B9
1D4BB
1D4BD 1D4C3
1D4C5 1D505
1D507 1D50A
1D50D 1D514
1D516 1D51C
1D51E 1D539
1D53B 1D53E
1D540 1D544
1D546
1D54A 1D550
1D552 1D6A5
1D6A8 1D7C9
1D7CE 1D7FF
1FFFE 2A6D6
2F800 2FA1D
2FFFE 2FFFF
3FFFE 3FFFF
4FFFE 4FFFF
5FFFE 5FFFF
6FFFE 6FFFF
7FFFE 7FFFF
8FFFE 8FFFF
9FFFE 9FFFF
AFFFE AFFFF
BFFFE BFFFF
CFFFE CFFFF
DFFFE DFFFF
E0001
E0020 E007F
E0100 E01EF
EFFFE 10FFFF
END
| Bjay1435/capstone | rootfs/usr/share/perl/5.18.2/unicore/lib/In/4_1.pl | Perl | mit | 4,742 |
#!perl
# Integration tests for the mget (multi-get) API.
# Runs under an external harness that provides $es (the Elasticsearch
# client) and — presumably — imports Test::More helpers plus throws_ok
# (Test::Exception is not loaded here; TODO confirm the harness does).
# Assumes the harness has indexed docs 1, 2 and 5 (3 and 4 missing) in
# index 'es_test_1', type 'type_1'.

use Test::More;
use strict;
use warnings;

our $es;    # client injected by the test harness

my $r;

# Basic mget by ids: one result slot per requested id, missing ids flagged.
ok $r= $es->mget(
    index => 'es_test_1',
    type  => 'type_1',
    ids   => [ 1, 2, 3, 4, 5 ]
    ),
    'mget';

is scalar @$r, 5, ' - 5 results';
is $r->[0]{_id}, 1, ' - first id ok';
is $r->[3]{exists}, 0, "id 3 doesn't exist";

# filter_missing drops the slots for non-existent ids.
ok $r= $es->mget(
    index          => 'es_test_1',
    type           => 'type_1',
    ids            => [ 1, 2, 3, 4, 5 ],
    filter_missing => 1
    ),
    ' - filter missing';

is scalar @$r, 2, ' - missing filtered';

# docs form: index/type given per document instead of at the top level.
ok $r= $es->mget(
    docs => [
        { _index => 'es_test_1', _type => 'type_1', _id => 1 },
        { _index => 'es_test_1', _type => 'type_1', _id => 5 }
    ]
    ),
    ' - docs';

# fields: top-level default, overridable per document.
ok $r= $es->mget(
    fields => [ 'num', 'date' ],
    docs   => [
        { _index => 'es_test_1', _type => 'type_1', _id => 1 },
        {   _index => 'es_test_1',
            _type  => 'type_1',
            _id    => 5,
            fields => ['text']
        }
    ]
    ),
    ' - fields';
ok keys %{ $r->[0]{fields} } == 2
    && $r->[0]{fields}{num}
    && $r->[0]{fields}{date}, ' - default';
ok keys %{ $r->[1]{fields} } == 1 && $r->[1]{fields}{text}, ' - specific';

# Edge cases: empty doc lists and invalid parameter combinations.
is_deeply $r = $es->mget( docs => [] ), [], ' - no docs';
is $r = $es->mget( docs => [], as_json => 1 ), "[]", ' - no docs json';

throws_ok { $es->mget( type => 'foo' ) } qr/Cannot specify a type for mget/,
    ' - type without index';
throws_ok { $es->mget( ids => [] ) } qr/Use of the ids param with mget/,
    ' - ids no index';
throws_ok { $es->mget( index => 'es_type_1', ids => [], docs => [] ) }
qr/Cannot specify both ids and docs/, ' - ids and docs';

1
| gitpan/Search-Elasticsearch-Compat | t/request_tests/mget.pl | Perl | apache-2.0 | 1,659 |
package BinaryStatic::Controller::Page;
use Mojo::Base 'Mojolicious::Controller';
use BinaryStatic::Consts;
# Render one of the legacy Template-Toolkit ("tt") pages.
#
# The request path — stripped of leading/trailing slashes, '/' for the
# root — is looked up in a static map of
#     path => [ template, layout, http_status ]
# Unknown paths log a "[FIX]" marker to STDERR and render the 404 page.
# The 'l' and 'menu' app helpers are re-exposed in the stash, and a few
# extra stash values (javascript, css_files, request) emulate the
# environment the old TT2 templates expect.
sub toolkit {
    my $c = shift;

    my $curr_path = $c->req->url->path->to_string;
    $curr_path =~ s/^\/|\/$//g;
    $curr_path = '/' if $curr_path eq '';

    # template, layout, code
    my %url_map = (
        'user/open_account' => ['account/open_account', 'default'],
        'affiliate/signup' => ['affiliates/main', 'default'],
        'resources/pricing_table' => ['resources/pricing_table_form', 'default'],
        'charting/application' => ['charting/chart_application', 'default'],
        'charting/livechart' => ['charting/livechart', 'default'],
        'resources/rise_fall_table' => ['resources/rise_fall_table', 'default'],
        'terms-and-conditions' => ['legal/tac', 'default'],
    );

    my $m = $url_map{$curr_path};
    unless ($m) {
        # No TT template registered for this path — developer-facing marker.
        print STDERR "[FIX] Can't find related tt for $curr_path\n";
        return $c->render(
            template => 'not_found',
            handler => 'haml',
            status => 404
        );
    }

    ## get all render helpers and register them
    # my $helpers = $c->app->renderer->helpers;
    # foreach my $helper (keys %$helpers) {
    foreach my $helper ('l', 'menu') {
        $c->stash($helper => sub {
            $c->app->$helper(@_);
        });
    }

    $c->render(
        template => $m->[0],
        # layout: undef entry => renderer default; '' => no layout;
        # otherwise resolved through the layout helper.
        defined $m->[1] ? (layout => $m->[1] ? $c->layout($m->[1]) : '') : (),
        $m->[2] ? (status => $m->[2]) : (),
        handler => 'tt',
        ## fix subs for TT2 call
        javascript => $c->app->js_configs,
        css_files => $c->app->css->files,
        request => {
            url_for => sub { $c->app->url_for(@_); } # use the helper
        },
    );
}
# Render one of the HAML pages.
#
# Mirrors toolkit() but for the HAML handler: the normalized request path
# is looked up in a static map of
#     path => [ template, layout, http_status, (unused 4th flag) ]
# falling back to the 404 entry for unknown paths. A couple of paths get
# extra stash values (ticker rows, login id) before rendering.
sub haml {
    my $c = shift;

    my $curr_path = $c->req->url->path->to_string;
    $curr_path =~ s/^\/|\/$//g;
    $curr_path = '/' if $curr_path eq '';

    # template, layout, code
    my %url_map = (
        '/' => ['home/index', 'full_width', '', 1],
        'home' => ['home/index', 'full_width', '', 1],
        'home5' => ['home5/index', 'full_width', '', 1],
        'ticker' => ['home/ticker', ''],
        'why-us' => ['static/why_us', 'full_width'],
        'tour' => ['static/tour', 'full_width'],
        'responsible-trading' => ['static/responsible_trading', 'full_width'],
        'careers' => ['static/careers', 'full_width'],
        'group-history' => ['static/group_history', 'full_width'],
        'smart-indices' => ['static/smart_indices', 'full_width'],
        'open-source-projects' => ['static/open_source_projects', 'full_width'],
        'contact' => ['static/contact', 'full_width'],
        'resources' => ['resources/index', $c->layout],
        'charting' => ['charting/index', $c->layout],
        'about-us' => ['about/index', $c->layout],
        'styles' => ['home/styles', 'full_width', '', 1],
        'get-started' => ['get_started/index', 'get_started'],
        'get-started/what-is-binary-trading' => ['get_started/what_is_binary_trading', 'get_started'],
        'get-started/binary-options-basics' => ['get_started/binary_options_basics', 'get_started'],
        'get-started/benefits-of-trading-binaries' => ['get_started/benefits_of_trading_binaries', 'get_started'],
        'get-started/how-to-trade-binaries' => ['get_started/how_to_trade_binaries', 'get_started'],
        'get-started/types-of-trades' => ['get_started/types_of_trades', 'get_started'],
        'get-started/beginners-faq' => ['get_started/beginners_faq', 'get_started'],
        'get-started/glossary' => ['get_started/glossary', 'get_started'],
        'get-started/random-markets' => ['get_started/random_markets', 'full_width'],
        'login' => ['home/login', $c->layout, '', 1],
        'not_found' => ['not_found', '', 404],
        'exception' => ['exception', 'exception', 500]
    );

    my $m = $url_map{$curr_path} || $url_map{'not_found'};

    ## page vars
    my @extra_stash;
    if ($curr_path eq 'ticker') {
        push @extra_stash, (rows => [ BinaryStatic::Consts::ticker() ]);
    } elsif ($curr_path eq 'login') {
        push @extra_stash, (loginid => $c->param('loginid')) if $c->param('loginid');
    }

    $c->render(
        template => $m->[0],
        handler => 'haml',
        # layout: undef => renderer default; '' => no layout; else via helper.
        defined $m->[1] ? (layout => $m->[1] ? $c->layout($m->[1]) : '') : (),
        $m->[2] ? (status => $m->[2]) : (),
        @extra_stash
    );
}
# Return the current server epoch time as JSON: {"timestamp": <epoch seconds>}.
sub timestamp {
    my ($c) = @_;
    my %payload = (timestamp => time);
    return $c->render(json => \%payload);
}
# Return a country/IP payload as JSON with an explicit 200 status.
# The values are hard-coded stubs — no real geo lookup happens here.
sub country {
    my ($c) = @_;
    my $geo = {
        country => 'cn',
        ip      => '60.180.68.53',
    };
    return $c->render(json => $geo, status => 200);
}
# Serve a catch-all robots.txt as plain text.
sub robots_txt {
    my ($c) = @_;
    my $body = "User-agent: *\nDisallow";
    return $c->render(data => $body, format => 'txt');
}
# Render a localized "server unreachable" notice (HTTP 200 so the client
# offline page can still display it).
sub offline {
    my ($c) = @_;
    my $message = $c->l('Unable to contact the server');
    return $c->render(
        status => 200,
        text   => qq{<div class="center">$message</div>},
    );
}
# Handle a login form POST.
#
# Validates the loginid format (2-6 letters followed by 3+ digits) and the
# presence of a password, flashing field-specific error messages on failure.
# On success sets a 30-day session cookie plus a loginid cookie and
# redirects to /user/my_account; otherwise redirects back to the login page.
# Always responds with a 302.
#
# NOTE(review): authentication is stubbed — only DEMO123/demo succeeds and
# the session cookie value is a fixed placeholder; the cookie is stored
# under the literal name 'cookie_name' (marked FIX below).
sub login {
    my $self = shift;

    my ($loginid, $password);
    my $redirect = 'login';
    my $redirect_params = {};
    if (not($self->param('loginid') =~ /^\s*[A-Z]{2,6}\d{3,}\s*$/i)) {
        $self->flash(error => {loginid => $self->l('Login ID not given.')});
    } elsif (not $self->param('password')) {
        $self->flash(error => {password => $self->l('Password not given.')});
    } else {
        # Normalize: uppercase and strip surrounding whitespace.
        ($loginid, $password) = (uc $self->param('loginid'), $self->param('password'));
        $loginid =~ s/^\s+|\s+$//g;
        $self->stash(loginid => $loginid);

        # Stubbed auth response shaped like a real backend reply.
        my $response = ($loginid eq 'DEMO123' and $password eq 'demo') ?
            { success => 1, session_cookie => Mojo::Cookie->new(value => 'abcdefghijklmn')} : {};

        if ($response->{success}) {
            $redirect = '/user/my_account';
            $redirect_params->{login} = 'true';

            # 30-day cookie lifetime.
            my $options = $self->cookie_options;
            $options->{expires} = time + 86400 * 30;

            my $cookie = $response->{session_cookie};
            my $cookie_value = $cookie->value // '';
            $self->cookie(
                'cookie_name' => $cookie_value, # FIX the cookie_name
                $options
            );
            $self->cookie(
                'loginid' => $loginid,
                $options
            );
        } else {
            my $msg = $response->{error}->{description} || 'Invalid login ID and password combination.';
            $self->flash(error => {password => $self->l($msg)});
        }
    }

    # Carry the loginid through the redirect so the form can be re-filled.
    $redirect_params->{loginid} = $loginid if $loginid;
    $self->res->headers->location($self->url_for($redirect, $redirect_params)->to_abs);
    return $self->rendered(302);
}
# Log the user out: expire every session-related cookie (empty value with
# an expiry in the past — epoch second 1) and redirect to the home page.
sub logout {
    my ($c) = @_;

    my $opts = $c->cookie_options;
    $opts->{expires} = 1;

    # 'cookie_name' is the session cookie placeholder — FIX cookie_name.
    for my $name ('cookie_name', 'loginid', 'settings_cookie') {
        $c->cookie($name => '', $opts);
    }

    return $c->redirect_to('/');
}
# Render the 404 page.
sub not_found {
    my ($c) = @_;
    return $c->render(status => 404, template => 'not_found');
}
# Render the 500 error page with its dedicated layout.
sub exception {
    my $self = shift;
    return $self->render(
        status   => 500,
        template => 'exception',
        layout   => 'exception',
    );
}
1;
| massihx/binary-static | mojo/lib/BinaryStatic/Controller/Page.pm | Perl | apache-2.0 | 7,490 |
#!/usr/bin/env perl
# Check or update the .sha1 / LICENSE / NOTICE bookkeeping for every jar
# bundled in a distribution zip or build directory.
use strict;
use warnings;
use 5.010_000;
use FindBin qw($RealBin);
use lib "$RealBin/lib";
use File::Spec();
use File::Temp 0.2304 ();
use File::Find();
use File::Basename qw(basename);
use Archive::Extract();
use Digest::SHA();
# Prefer external unzip/tar binaries over pure-Perl extraction (faster).
$Archive::Extract::PREFER_BIN = 1;
my $mode = shift(@ARGV) || "";
die usage() unless $mode =~ /^--(check|update)$/;
my $License_Dir = shift(@ARGV) || die usage();
my $Source = shift(@ARGV) || die usage();
my $Ignore = shift(@ARGV);
# Jars whose basename starts with the ignore prefix (default "elasticsearch")
# are the project's own artifacts and need no third-party license entries.
my $ignore
= $Ignore
? qr/${Ignore}[^\/]*$/
: qr/elasticsearch[^\/]*$/;
$License_Dir = File::Spec->rel2abs($License_Dir) . '/';
$Source = File::Spec->rel2abs($Source);
say "LICENSE DIR: $License_Dir";
say "SOURCE: $Source";
die "License dir is not a directory: $License_Dir\n" . usage()
unless -d $License_Dir;
# Map of "<jar basename>.sha1" => hex digest for every bundled jar.
my %shas
= -f $Source ? jars_from_zip( $Source, $ignore )
: -d $Source ? jars_from_dir( $Source, $ignore )
: die "Source is neither a directory nor a zip file: $Source" . usage();
# Exit status is the number of problems found (0 == success).
$mode eq '--check'
? exit check_shas_and_licenses(%shas)
: exit write_shas(%shas);
#===================================
sub check_shas_and_licenses {
#===================================
# Verify mode: every jar must have a matching, up-to-date .sha1 file plus a
# LICENSE-* (and ideally NOTICE-*) file whose name prefixes the jar name.
# Returns the number of problems found (0 == all good).
my %new = @_;
my %old = get_sha_files();
my %licenses = get_files_with('LICENSE');
my %notices = get_files_with('NOTICE');
my $error = 0;
my $sha_error = 0;
for my $jar ( sort keys %new ) {
my $old_sha = delete $old{$jar};
unless ($old_sha) {
say STDERR "$jar: SHA is missing";
$error++;
$sha_error++;
next;
}
unless ( $old_sha eq $new{$jar} ) {
say STDERR
"$jar: SHA has changed, expected $old_sha but found $new{$jar}";
$error++;
$sha_error++;
next;
}
my $license_found;
my $notice_found;
# Strip trailing "-<component>" pieces one at a time until the remaining
# prefix matches a LICENSE file name.
my $prefix = $jar;
$prefix =~ s/\.sha1//;
while ( $prefix =~ s/-[^\-]+$// ) {
if ( exists $licenses{$prefix} ) {
$license_found = 1;
# mark all licenses with the same prefix as used
for ( keys %licenses ) {
$licenses{$_}++ if index( $prefix, $_ ) == 0;
}
# A NOTICE file only counts when it shares the LICENSE's prefix.
if ( exists $notices{$prefix} ) {
$notices{$prefix}++;
$notice_found = 1;
}
last;
}
}
unless ($license_found) {
say STDERR "$jar: LICENSE is missing";
$error++;
$sha_error++;
}
unless ($notice_found) {
say STDERR "$jar: NOTICE is missing";
$error++;
}
}
# Anything left in %old has no corresponding jar any more.
if ( keys %old ) {
say STDERR "Extra SHA files present for: " . join ", ", sort keys %old;
$error++;
}
my @unused_licenses = grep { !$licenses{$_} } keys %licenses;
if (@unused_licenses) {
$error++;
say STDERR "Extra LICENCE file present: " . join ", ",
sort @unused_licenses;
}
my @unused_notices = grep { !$notices{$_} } keys %notices;
if (@unused_notices) {
$error++;
say STDERR "Extra NOTICE file present: " . join ", ",
sort @unused_notices;
}
if ($sha_error) {
say STDERR <<"SHAS"
You can update the SHA files by running:
$0 --update $License_Dir $Source
SHAS
}
say("All SHAs and licenses OK") unless $error;
return $error;
}
#===================================
sub write_shas {
#===================================
# Update mode: write or refresh a .sha1 file for every jar and delete stale
# ones.  Always returns 0 (success exit status).
my %new = @_;
my %old = get_sha_files();
for my $jar ( sort keys %new ) {
if ( $old{$jar} ) {
next if $old{$jar} eq $new{$jar};
say "Updating $jar";
}
else {
say "Adding $jar";
}
open my $fh, '>', $License_Dir . $jar or die $!;
say $fh $new{$jar} or die $!;
close $fh or die $!;
}
# The continue block also runs when the body does "next", so every jar we
# processed is removed from %old, leaving only the stale entries behind.
continue {
delete $old{$jar};
}
for my $jar ( sort keys %old ) {
say "Deleting $jar";
unlink $License_Dir . $jar or die $!;
}
say "SHAs updated";
return 0;
}
#===================================
sub get_files_with {
#===================================
    # Collect the license-dir files matching *$pattern* and return a hash
    # mapping each file's jar-name prefix (the part before "-$pattern") to 0,
    # a use-count that callers increment when the file is matched to a jar.
    my ($pattern) = @_;
    my %found;
    foreach my $path ( glob("$License_Dir/*$pattern*") ) {
        next unless -f $path;
        my ($prefix) = $path =~ m{([^/]+)-${pattern}.*$};
        $found{$prefix} = 0;
    }
    return %found;
}
#===================================
sub get_sha_files {
#===================================
# Read every *.sha1 file in the license dir.  Returns basename => digest
# (first line of the file, chomped; empty string if the file is empty).
my %shas;
die "Missing directory: $License_Dir\n"
unless -d $License_Dir;
for my $file ( grep {-f} glob("$License_Dir/*.sha1") ) {
my ($jar) = ( $file =~ m{([^/]+)$} );
open my $fh, '<', $file or die $!;
my $sha = <$fh>;
$sha ||= '';
chomp $sha;
$shas{$jar} = $sha;
}
return %shas;
}
#===================================
sub jars_from_zip {
#===================================
# Extract the zip into a temp dir (removed automatically when $temp_dir
# goes out of scope) and SHA-1 every bundled jar whose name doesn't match
# the ignore pattern.
my ( $source, $ignore ) = @_;
my $temp_dir = File::Temp->newdir;
my $dir_name = $temp_dir->dirname;
my $archive = Archive::Extract->new( archive => $source, type => 'zip' );
$archive->extract( to => $dir_name ) || die $archive->error;
my @jars = map { File::Spec->rel2abs( $_, $dir_name ) }
grep { /\.jar$/ && !/$ignore/ } @{ $archive->files };
return calculate_shas(@jars);
}
#===================================
sub jars_from_dir {
#===================================
    # Recursively gather every *.jar under $source (skipping names matching
    # $ignore) and return their SHA-1 digests keyed by "<basename>.sha1".
    my ( $source, $ignore ) = @_;
    my @found;
    my $wanted = sub {
        return unless /\.jar$/ && !/$ignore/;
        push @found, File::Spec->rel2abs( $_, $source );
    };
    File::Find::find( { wanted => $wanted, no_chdir => 1 }, $source );
    return calculate_shas(@found);
}
#===================================
sub calculate_shas {
#===================================
    # Compute the SHA-1 of each file passed in @_ and return a hash mapping
    # "<basename>.sha1" to the hex digest.  Dies if a file cannot be read.
    my %shas;
    while ( my $file = shift() ) {
        my $digest = eval { Digest::SHA->new(1)->addfile($file) }
            # BUG FIX: on eval failure the error text is in $@, not $!
            # (which may be stale or empty at this point).
            or die "Error calculating SHA1 for <$file>: $@\n";
        $shas{ basename($file) . ".sha1" } = $digest->hexdigest;
    }
    return %shas;
}
#===================================
sub usage {
#===================================
# Usage text is returned (not printed) so callers can append it to die
# messages.  $0 interpolates the script name.
return <<"USAGE";
USAGE:
# check the sha1 and LICENSE files for each jar in the zip or directory
$0 --check path/to/licenses/ path/to/package.zip [prefix_to_ignore]
$0 --check path/to/licenses/ path/to/dir/ [prefix_to_ignore]
# updates the sha1s for each jar in the zip or directory
$0 --update path/to/licenses/ path/to/package.zip [prefix_to_ignore]
$0 --update path/to/licenses/ path/to/dir/ [prefix_to_ignore]
The optional prefix_to_ignore parameter defaults to "elasticsearch".
USAGE
}
| Liziyao/elasticsearch | dev-tools/src/main/resources/license-check/check_license_and_sha.pl | Perl | apache-2.0 | 6,891 |
=pod
=head1 NAME
EVP_rc4,
EVP_rc4_40,
EVP_rc4_hmac_md5
- EVP RC4 stream cipher
=head1 SYNOPSIS
#include <openssl/evp.h>
const EVP_CIPHER *EVP_rc4(void);
const EVP_CIPHER *EVP_rc4_40(void);
const EVP_CIPHER *EVP_rc4_hmac_md5(void);
=head1 DESCRIPTION
The RC4 stream cipher for EVP.
=over 4
=item EVP_rc4()
RC4 stream cipher. This is a variable key length cipher with a default key
length of 128 bits.
=item EVP_rc4_40()
RC4 stream cipher with 40 bit key length.
WARNING: this function is obsolete. Its usage should be replaced with the
EVP_rc4() and the EVP_CIPHER_CTX_set_key_length() functions.
=item EVP_rc4_hmac_md5()
Authenticated encryption with the RC4 stream cipher with MD5 as HMAC.
WARNING: this is not intended for usage outside of TLS and requires calling of
some undocumented ctrl functions. These ciphers do not conform to the EVP AEAD
interface.
=back
=head1 RETURN VALUES
These functions return an B<EVP_CIPHER> structure that contains the
implementation of the symmetric cipher. See L<EVP_CIPHER_meth_new(3)> for
details of the B<EVP_CIPHER> structure.
=head1 SEE ALSO
L<evp(7)>,
L<EVP_EncryptInit(3)>,
L<EVP_CIPHER_meth_new(3)>
=head1 COPYRIGHT
Copyright 2017-2020 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/EVP_rc4.pod | Perl | bsd-3-clause | 1,511 |
#! /usr/bin/env perl
# Convert whitespace-tokenized text on STDIN into VW multiclass examples:
# each token (from position $context onward) becomes one example labelled by
# its frequency rank (the $numlabels most frequent tokens get labels
# $numlabels down to 1; everything else gets $numlabels+1), with the
# preceding $context tokens as namespaced features.  Output is approximately
# shuffled through a fixed-size reservoir of $shufbufsize slots.
#
# Usage: gendata.pl <freqfile> <numlabels> <shufbufsize> < corpus
use warnings;
use strict;
use IO::File;
my $freqfile = shift @ARGV or die;
my $numlabels = shift @ARGV or die;
my $shufbufsize = shift @ARGV or die;
# Direct method call instead of indirect-object "new IO::File ..." syntax.
my $freqfh = IO::File->new( $freqfile, "r" ) or die "$freqfile: $!";
my %dict;
my $curlabels = $numlabels;
# The frequency file is assumed sorted; the first $numlabels tokens receive
# labels $numlabels, $numlabels-1, ..., 1.
while (defined ($_ = <$freqfh>))
{
chomp;
my ($key, undef) = split /\s+/, $_;
$dict{$key} = $curlabels;
--$curlabels;
last unless $curlabels > 0;
}
srand 69; # fixed seed: deterministic shuffle across runs
my $context = 6;
my @shufbuf;
my @charpos = split //, "abcdefghijklmnop"; # namespace letters per offset
$\="\n"; # every print gets a trailing newline
while (defined ($_ = <STDIN>))
{
chomp;
s/\|/_/g; s/:/_/g; # VW special characters
my @tokens = split /\s+/, $_;
foreach my $pos ($context ... $#tokens)
{
my $label = $dict{$tokens[$pos]} || $numlabels+1;
# Reservoir shuffle: evict (print) whatever occupies a random slot,
# then store the new example there.
my $index = int (rand ($shufbufsize));
print $shufbuf[$index] if length ($shufbuf[$index]);
$shufbuf[$index] = join " ", $label, map { join "", " |", $charpos[$_], " ", $tokens[$pos - $_] } (1 .. $context);
}
}
# Flush whatever is left in the reservoir.
# BUG FIX: the loop previously ran 0 .. $shufbufsize, one index past the
# last valid slot (rand() only ever fills 0 .. $shufbufsize-1).
foreach my $index (0 .. $#shufbuf)
{
print $shufbuf[$index] if length ($shufbuf[$index]);
}
| jon-morra-zefr/vowpal_wabbit | demo/recall_tree/ltcb/gendata.pl | Perl | bsd-3-clause | 1,150 |
#!/usr/bin/perl -w
# Perl utility to run or verify FIPS 140-2 CMVP algorithm tests based on the
# pathnames of input algorithm test files actually present (the unqualified
# file names are consistent but the pathnames are not).
#
# FIPS test definitions
# List of all the unqualified file names we expect and command lines to run
# DSA tests
# Each entry is [ unqualified-file-name, command-to-run ]; bare strings are
# group banners printed when the group starts.  Slots [2] and [3] (the .req
# and .rsp pathnames) are appended later by find_files().
my @fips_dsa_test_list = (
"DSA",
[ "PQGGen", "fips_dssvs pqg" ],
[ "KeyPair", "fips_dssvs keypair" ],
[ "SigGen", "fips_dssvs siggen" ],
[ "SigVer", "fips_dssvs sigver" ]
);
# RSA tests
my @fips_rsa_test_list = (
"RSA",
[ "SigGen15", "fips_rsastest" ],
[ "SigVer15", "fips_rsavtest" ],
[ "SigVerRSA", "fips_rsavtest -x931" ],
[ "KeyGenRSA", "fips_rsagtest" ],
[ "SigGenRSA", "fips_rsastest -x931" ]
);
# Special cases for PSS. The filename itself is
# not sufficient to determine the test. Additionally we
# need to examine the file contents to determine the salt length
# In these cases the test filename has (saltlen) appended.
# RSA PSS salt length 0 tests
my @fips_rsa_pss0_test_list = (
[ "SigGenPSS(0)", "fips_rsastest -saltlen 0" ],
[ "SigVerPSS(0)", "fips_rsavtest -saltlen 0" ]
);
# RSA PSS salt length 62 tests
my @fips_rsa_pss62_test_list = (
[ "SigGenPSS(62)", "fips_rsastest -saltlen 62" ],
[ "SigVerPSS(62)", "fips_rsavtest -saltlen 62" ]
);
# SHA tests
my @fips_sha_test_list = (
"SHA",
[ "SHA1LongMsg", "fips_shatest" ],
[ "SHA1Monte", "fips_shatest" ],
[ "SHA1ShortMsg", "fips_shatest" ],
[ "SHA224LongMsg", "fips_shatest" ],
[ "SHA224Monte", "fips_shatest" ],
[ "SHA224ShortMsg", "fips_shatest" ],
[ "SHA256LongMsg", "fips_shatest" ],
[ "SHA256Monte", "fips_shatest" ],
[ "SHA256ShortMsg", "fips_shatest" ],
[ "SHA384LongMsg", "fips_shatest" ],
[ "SHA384Monte", "fips_shatest" ],
[ "SHA384ShortMsg", "fips_shatest" ],
[ "SHA512LongMsg", "fips_shatest" ],
[ "SHA512Monte", "fips_shatest" ],
[ "SHA512ShortMsg", "fips_shatest" ]
);
# HMAC
my @fips_hmac_test_list = (
"HMAC",
[ "HMAC", "fips_hmactest" ]
);
# RAND tests, AES version
my @fips_rand_aes_test_list = (
"RAND (AES)",
[ "ANSI931_AES128MCT", "fips_rngvs mct" ],
[ "ANSI931_AES192MCT", "fips_rngvs mct" ],
[ "ANSI931_AES256MCT", "fips_rngvs mct" ],
[ "ANSI931_AES128VST", "fips_rngvs vst" ],
[ "ANSI931_AES192VST", "fips_rngvs vst" ],
[ "ANSI931_AES256VST", "fips_rngvs vst" ]
);
# RAND tests, DES2 version
my @fips_rand_des2_test_list = (
"RAND (DES2)",
[ "ANSI931_TDES2MCT", "fips_rngvs mct" ],
[ "ANSI931_TDES2VST", "fips_rngvs vst" ]
);
# AES tests
my @fips_aes_test_list = (
"AES",
[ "CBCGFSbox128", "fips_aesavs -f" ],
[ "CBCGFSbox192", "fips_aesavs -f" ],
[ "CBCGFSbox256", "fips_aesavs -f" ],
[ "CBCKeySbox128", "fips_aesavs -f" ],
[ "CBCKeySbox192", "fips_aesavs -f" ],
[ "CBCKeySbox256", "fips_aesavs -f" ],
[ "CBCMCT128", "fips_aesavs -f" ],
[ "CBCMCT192", "fips_aesavs -f" ],
[ "CBCMCT256", "fips_aesavs -f" ],
[ "CBCMMT128", "fips_aesavs -f" ],
[ "CBCMMT192", "fips_aesavs -f" ],
[ "CBCMMT256", "fips_aesavs -f" ],
[ "CBCVarKey128", "fips_aesavs -f" ],
[ "CBCVarKey192", "fips_aesavs -f" ],
[ "CBCVarKey256", "fips_aesavs -f" ],
[ "CBCVarTxt128", "fips_aesavs -f" ],
[ "CBCVarTxt192", "fips_aesavs -f" ],
[ "CBCVarTxt256", "fips_aesavs -f" ],
[ "CFB128GFSbox128", "fips_aesavs -f" ],
[ "CFB128GFSbox192", "fips_aesavs -f" ],
[ "CFB128GFSbox256", "fips_aesavs -f" ],
[ "CFB128KeySbox128", "fips_aesavs -f" ],
[ "CFB128KeySbox192", "fips_aesavs -f" ],
[ "CFB128KeySbox256", "fips_aesavs -f" ],
[ "CFB128MCT128", "fips_aesavs -f" ],
[ "CFB128MCT192", "fips_aesavs -f" ],
[ "CFB128MCT256", "fips_aesavs -f" ],
[ "CFB128MMT128", "fips_aesavs -f" ],
[ "CFB128MMT192", "fips_aesavs -f" ],
[ "CFB128MMT256", "fips_aesavs -f" ],
[ "CFB128VarKey128", "fips_aesavs -f" ],
[ "CFB128VarKey192", "fips_aesavs -f" ],
[ "CFB128VarKey256", "fips_aesavs -f" ],
[ "CFB128VarTxt128", "fips_aesavs -f" ],
[ "CFB128VarTxt192", "fips_aesavs -f" ],
[ "CFB128VarTxt256", "fips_aesavs -f" ],
[ "CFB8GFSbox128", "fips_aesavs -f" ],
[ "CFB8GFSbox192", "fips_aesavs -f" ],
[ "CFB8GFSbox256", "fips_aesavs -f" ],
[ "CFB8KeySbox128", "fips_aesavs -f" ],
[ "CFB8KeySbox192", "fips_aesavs -f" ],
[ "CFB8KeySbox256", "fips_aesavs -f" ],
[ "CFB8MCT128", "fips_aesavs -f" ],
[ "CFB8MCT192", "fips_aesavs -f" ],
[ "CFB8MCT256", "fips_aesavs -f" ],
[ "CFB8MMT128", "fips_aesavs -f" ],
[ "CFB8MMT192", "fips_aesavs -f" ],
[ "CFB8MMT256", "fips_aesavs -f" ],
[ "CFB8VarKey128", "fips_aesavs -f" ],
[ "CFB8VarKey192", "fips_aesavs -f" ],
[ "CFB8VarKey256", "fips_aesavs -f" ],
[ "CFB8VarTxt128", "fips_aesavs -f" ],
[ "CFB8VarTxt192", "fips_aesavs -f" ],
[ "CFB8VarTxt256", "fips_aesavs -f" ],
[ "ECBGFSbox128", "fips_aesavs -f" ],
[ "ECBGFSbox192", "fips_aesavs -f" ],
[ "ECBGFSbox256", "fips_aesavs -f" ],
[ "ECBKeySbox128", "fips_aesavs -f" ],
[ "ECBKeySbox192", "fips_aesavs -f" ],
[ "ECBKeySbox256", "fips_aesavs -f" ],
[ "ECBMCT128", "fips_aesavs -f" ],
[ "ECBMCT192", "fips_aesavs -f" ],
[ "ECBMCT256", "fips_aesavs -f" ],
[ "ECBMMT128", "fips_aesavs -f" ],
[ "ECBMMT192", "fips_aesavs -f" ],
[ "ECBMMT256", "fips_aesavs -f" ],
[ "ECBVarKey128", "fips_aesavs -f" ],
[ "ECBVarKey192", "fips_aesavs -f" ],
[ "ECBVarKey256", "fips_aesavs -f" ],
[ "ECBVarTxt128", "fips_aesavs -f" ],
[ "ECBVarTxt192", "fips_aesavs -f" ],
[ "ECBVarTxt256", "fips_aesavs -f" ],
[ "OFBGFSbox128", "fips_aesavs -f" ],
[ "OFBGFSbox192", "fips_aesavs -f" ],
[ "OFBGFSbox256", "fips_aesavs -f" ],
[ "OFBKeySbox128", "fips_aesavs -f" ],
[ "OFBKeySbox192", "fips_aesavs -f" ],
[ "OFBKeySbox256", "fips_aesavs -f" ],
[ "OFBMCT128", "fips_aesavs -f" ],
[ "OFBMCT192", "fips_aesavs -f" ],
[ "OFBMCT256", "fips_aesavs -f" ],
[ "OFBMMT128", "fips_aesavs -f" ],
[ "OFBMMT192", "fips_aesavs -f" ],
[ "OFBMMT256", "fips_aesavs -f" ],
[ "OFBVarKey128", "fips_aesavs -f" ],
[ "OFBVarKey192", "fips_aesavs -f" ],
[ "OFBVarKey256", "fips_aesavs -f" ],
[ "OFBVarTxt128", "fips_aesavs -f" ],
[ "OFBVarTxt192", "fips_aesavs -f" ],
[ "OFBVarTxt256", "fips_aesavs -f" ]
);
# AES CFB1 tests (kept separate because they are disabled by default; note
# this list has no leading group-banner string).
my @fips_aes_cfb1_test_list = (
# AES CFB1 tests
[ "CFB1GFSbox128", "fips_aesavs -f" ],
[ "CFB1GFSbox192", "fips_aesavs -f" ],
[ "CFB1GFSbox256", "fips_aesavs -f" ],
[ "CFB1KeySbox128", "fips_aesavs -f" ],
[ "CFB1KeySbox192", "fips_aesavs -f" ],
[ "CFB1KeySbox256", "fips_aesavs -f" ],
[ "CFB1MCT128", "fips_aesavs -f" ],
[ "CFB1MCT192", "fips_aesavs -f" ],
[ "CFB1MCT256", "fips_aesavs -f" ],
[ "CFB1MMT128", "fips_aesavs -f" ],
[ "CFB1MMT192", "fips_aesavs -f" ],
[ "CFB1MMT256", "fips_aesavs -f" ],
[ "CFB1VarKey128", "fips_aesavs -f" ],
[ "CFB1VarKey192", "fips_aesavs -f" ],
[ "CFB1VarKey256", "fips_aesavs -f" ],
[ "CFB1VarTxt128", "fips_aesavs -f" ],
[ "CFB1VarTxt192", "fips_aesavs -f" ],
[ "CFB1VarTxt256", "fips_aesavs -f" ]
);
# Triple DES tests
my @fips_des3_test_list = (
"Triple DES",
[ "TCBCinvperm", "fips_desmovs -f" ],
[ "TCBCMMT1", "fips_desmovs -f" ],
[ "TCBCMMT2", "fips_desmovs -f" ],
[ "TCBCMMT3", "fips_desmovs -f" ],
[ "TCBCMonte1", "fips_desmovs -f" ],
[ "TCBCMonte2", "fips_desmovs -f" ],
[ "TCBCMonte3", "fips_desmovs -f" ],
[ "TCBCpermop", "fips_desmovs -f" ],
[ "TCBCsubtab", "fips_desmovs -f" ],
[ "TCBCvarkey", "fips_desmovs -f" ],
[ "TCBCvartext", "fips_desmovs -f" ],
[ "TCFB64invperm", "fips_desmovs -f" ],
[ "TCFB64MMT1", "fips_desmovs -f" ],
[ "TCFB64MMT2", "fips_desmovs -f" ],
[ "TCFB64MMT3", "fips_desmovs -f" ],
[ "TCFB64Monte1", "fips_desmovs -f" ],
[ "TCFB64Monte2", "fips_desmovs -f" ],
[ "TCFB64Monte3", "fips_desmovs -f" ],
[ "TCFB64permop", "fips_desmovs -f" ],
[ "TCFB64subtab", "fips_desmovs -f" ],
[ "TCFB64varkey", "fips_desmovs -f" ],
[ "TCFB64vartext", "fips_desmovs -f" ],
[ "TCFB8invperm", "fips_desmovs -f" ],
[ "TCFB8MMT1", "fips_desmovs -f" ],
[ "TCFB8MMT2", "fips_desmovs -f" ],
[ "TCFB8MMT3", "fips_desmovs -f" ],
[ "TCFB8Monte1", "fips_desmovs -f" ],
[ "TCFB8Monte2", "fips_desmovs -f" ],
[ "TCFB8Monte3", "fips_desmovs -f" ],
[ "TCFB8permop", "fips_desmovs -f" ],
[ "TCFB8subtab", "fips_desmovs -f" ],
[ "TCFB8varkey", "fips_desmovs -f" ],
[ "TCFB8vartext", "fips_desmovs -f" ],
[ "TECBinvperm", "fips_desmovs -f" ],
[ "TECBMMT1", "fips_desmovs -f" ],
[ "TECBMMT2", "fips_desmovs -f" ],
[ "TECBMMT3", "fips_desmovs -f" ],
[ "TECBMonte1", "fips_desmovs -f" ],
[ "TECBMonte2", "fips_desmovs -f" ],
[ "TECBMonte3", "fips_desmovs -f" ],
[ "TECBpermop", "fips_desmovs -f" ],
[ "TECBsubtab", "fips_desmovs -f" ],
[ "TECBvarkey", "fips_desmovs -f" ],
[ "TECBvartext", "fips_desmovs -f" ],
[ "TOFBinvperm", "fips_desmovs -f" ],
[ "TOFBMMT1", "fips_desmovs -f" ],
[ "TOFBMMT2", "fips_desmovs -f" ],
[ "TOFBMMT3", "fips_desmovs -f" ],
[ "TOFBMonte1", "fips_desmovs -f" ],
[ "TOFBMonte2", "fips_desmovs -f" ],
[ "TOFBMonte3", "fips_desmovs -f" ],
[ "TOFBpermop", "fips_desmovs -f" ],
[ "TOFBsubtab", "fips_desmovs -f" ],
[ "TOFBvarkey", "fips_desmovs -f" ],
[ "TOFBvartext", "fips_desmovs -f" ]
);
# Verification special cases.
# In most cases the output of a test is deterministic and
# it can be compared to a known good result. A few involve
# the generation and use of random keys and the output will
# be different each time. In those cases we perform special tests
# to simply check their consistency. For example signature generation
# output will be run through signature verification to see if all outputs
# show as valid.
#
# Maps test name => sanity-check command run over the generated output.
my %verify_special = (
"PQGGen" => "fips_dssvs pqgver",
"KeyPair" => "fips_dssvs keyver",
"SigGen" => "fips_dssvs sigver",
"SigGen15" => "fips_rsavtest",
"SigGenRSA" => "fips_rsavtest -x931",
"SigGenPSS(0)" => "fips_rsavtest -saltlen 0",
"SigGenPSS(62)" => "fips_rsavtest -saltlen 62",
);
# Global options, overridden by the command-line loop below.
my $win32 = $^O =~ m/mswin/i; # auto-detect Windows
my $onedir = 0; # all executables live in the current directory
my $filter = "";
my $tvdir; # root of the test-vector tree
my $tprefix; # path prefix for the fips_* test programs
my $shwrap_prefix; # path prefix for shlib_wrap.sh
my $debug = 0;
my $quiet = 0;
my $notest = 0; # stop after the file sanity checks
my $verify = 1; # verify existing .rsp files (vs. generate them)
my $rspdir = "rsp"; # subdirectory name for generated .rsp files
my $ignore_missing = 0;
my $ignore_bogus = 0;
my $bufout = '';
my $list_tests = 0;
# Algorithm groups enabled by default; toggled with --enable-X/--disable-X.
my %fips_enabled = (
dsa => 1,
rsa => 1,
"rsa-pss0" => 0,
"rsa-pss62" => 1,
sha => 1,
hmac => 1,
"rand-aes" => 1,
"rand-des2" => 0,
aes => 1,
"aes-cfb1" => 0,
des3 => 1
);
# Parse command-line options (see Help() for the common ones).
foreach (@ARGV) {
if ( $_ eq "--win32" ) {
$win32 = 1;
}
elsif ( $_ eq "--onedir" ) {
$onedir = 1;
}
elsif ( $_ eq "--debug" ) {
$debug = 1;
}
elsif ( $_ eq "--ignore-missing" ) {
$ignore_missing = 1;
}
elsif ( $_ eq "--ignore-bogus" ) {
$ignore_bogus = 1;
}
elsif ( $_ eq "--generate" ) {
$verify = 0;
}
elsif ( $_ eq "--notest" ) {
$notest = 1;
}
elsif ( $_ eq "--quiet" ) {
$quiet = 1;
}
elsif (/--dir=(.*)$/) {
$tvdir = $1;
}
elsif (/--rspdir=(.*)$/) {
$rspdir = $1;
}
elsif (/--tprefix=(.*)$/) {
$tprefix = $1;
}
elsif (/--shwrap_prefix=(.*)$/) {
$shwrap_prefix = $1;
}
elsif (/^--(enable|disable)-(.*)$/) {
if ( !exists $fips_enabled{$2} ) {
# NOTE(review): warns but still falls through and creates the key.
print STDERR "Unknown test $2\n";
}
if ( $1 eq "enable" ) {
$fips_enabled{$2} = 1;
}
else {
$fips_enabled{$2} = 0;
}
}
elsif (/--filter=(.*)$/) {
$filter = $1;
}
elsif (/^--list-tests$/) {
$list_tests = 1;
}
else {
# Any unrecognized argument prints usage and aborts.
Help();
exit(1);
}
}
# Assemble the master test list from the enabled algorithm groups.
my @fips_test_list;
push @fips_test_list, @fips_dsa_test_list if $fips_enabled{"dsa"};
push @fips_test_list, @fips_rsa_test_list if $fips_enabled{"rsa"};
push @fips_test_list, @fips_rsa_pss0_test_list if $fips_enabled{"rsa-pss0"};
push @fips_test_list, @fips_rsa_pss62_test_list if $fips_enabled{"rsa-pss62"};
push @fips_test_list, @fips_sha_test_list if $fips_enabled{"sha"};
push @fips_test_list, @fips_hmac_test_list if $fips_enabled{"hmac"};
push @fips_test_list, @fips_rand_aes_test_list if $fips_enabled{"rand-aes"};
push @fips_test_list, @fips_rand_des2_test_list if $fips_enabled{"rand-des2"};
push @fips_test_list, @fips_aes_test_list if $fips_enabled{"aes"};
push @fips_test_list, @fips_aes_cfb1_test_list if $fips_enabled{"aes-cfb1"};
push @fips_test_list, @fips_des3_test_list if $fips_enabled{"des3"};
# --list-tests: report which groups are enabled, then exit.
if ($list_tests) {
my ( $test, $en );
print "=====TEST LIST=====\n";
foreach $test ( sort keys %fips_enabled ) {
$en = $fips_enabled{$test};
$test =~ tr/[a-z]/[A-Z]/;
printf "%-10s %s\n", $test, $en ? "enabled" : "disabled";
}
exit(0);
}
# Index tests by name; slots [2]/[3] are initialized empty here and later
# filled with the .req/.rsp paths by find_files().  (%fips_tests is an
# implicit package global — this script does not "use strict".)
foreach (@fips_test_list) {
next unless ref($_);
my $nm = $_->[0];
$_->[2] = "";
$_->[3] = "";
print STDERR "Duplicate test $nm\n" if exists $fips_tests{$nm};
$fips_tests{$nm} = $_;
}
# Default locations: test vectors under ".", binaries under ../test (Unix)
# or ..\out32dll (Windows), unless --onedir or explicit prefixes were given.
$tvdir = "." unless defined $tvdir;
if ($win32) {
if ( !defined $tprefix ) {
if ($onedir) {
$tprefix = ".\\";
}
else {
$tprefix = "..\\out32dll\\";
}
}
}
else {
if ($onedir) {
$tprefix = "./" unless defined $tprefix;
$shwrap_prefix = "./" unless defined $shwrap_prefix;
}
else {
$tprefix = "../test/" unless defined $tprefix;
$shwrap_prefix = "../util/" unless defined $shwrap_prefix;
}
}
sanity_check_exe( $win32, $tprefix, $shwrap_prefix );
# On Unix the test programs run via shlib_wrap.sh so they pick up the
# freshly built shared libraries.
my $cmd_prefix = $win32 ? "" : "${shwrap_prefix}shlib_wrap.sh ";
find_files( $filter, $tvdir );
sanity_check_files();
# Result counters, incremented inside run_tests().
my ( $runerr, $cmperr, $cmpok, $scheckrunerr, $scheckerr, $scheckok, $skipcnt )
= ( 0, 0, 0, 0, 0, 0, 0 );
exit(0) if $notest;
run_tests( $verify, $win32, $tprefix, $filter, $tvdir );
# Final summary; in verify mode any failure category marks the run failed.
if ($verify) {
print "ALGORITHM TEST VERIFY SUMMARY REPORT:\n";
print "Tests skipped due to missing files: $skipcnt\n";
print "Algorithm test program execution failures: $runerr\n";
print "Test comparisons successful: $cmpok\n";
print "Test comparisons failed: $cmperr\n";
print "Test sanity checks successful: $scheckok\n";
print "Test sanity checks failed: $scheckerr\n";
print "Sanity check program execution failures: $scheckrunerr\n";
if ( $runerr || $cmperr || $scheckrunerr || $scheckerr ) {
print "***TEST FAILURE***\n";
}
else {
print "***ALL TESTS SUCCESSFUL***\n";
}
}
else {
print "ALGORITHM TEST SUMMARY REPORT:\n";
print "Tests skipped due to missing files: $skipcnt\n";
print "Algorithm test program execution failures: $runerr\n";
if ($runerr) {
print "***TEST FAILURE***\n";
}
else {
print "***ALL TESTS SUCCESSFUL***\n";
}
}
#--------------------------------
# Print the command-line usage summary to STDOUT.
sub Help {
( my $cmd ) = ( $0 =~ m#([^/]+)$# );
# BUG FIX: the help text claimed the --rspdir default is "resp"; the
# actual default (set above) is "rsp".
print <<EOF;
$cmd: generate run CMVP algorithm tests
--debug Enable debug output
--dir=<dirname> Optional root for *.req file search
--filter=<regexp>
--onedir <dirname> Assume all components in current directory
--rspdir=<dirname> Name of subdirectories containing *.rsp files, default "rsp"
--shwrap_prefix=<prefix>
--tprefix=<prefix>
--ignore-bogus Ignore duplicate or bogus files
--ignore-missing Ignore missing test files
--quiet Shhh....
--generate Generate algorithm test output
--win32 Win32 environment
EOF
}
# Sanity check to see if all necessary executables exist
# Collects the unique program names from every enabled test's command and
# dies if any of them (or shlib_wrap.sh on Unix) is not present on disk.
sub sanity_check_exe {
my ( $win32, $tprefix, $shwrap_prefix ) = @_;
my %exe_list;
my $bad = 0;
$exe_list{ $shwrap_prefix . "shlib_wrap.sh" } = 1 unless $win32;
foreach (@fips_test_list) {
next unless ref($_);
my $cmd = $_->[1];
$cmd =~ s/ .*$//; # program name = first word of the command
$cmd = $tprefix . $cmd;
$cmd .= ".exe" if $win32;
$exe_list{$cmd} = 1;
}
foreach ( sort keys %exe_list ) {
if ( !-f $_ ) {
print STDERR "ERROR: can't find executable $_\n";
$bad = 1;
}
}
if ($bad) {
print STDERR "FATAL ERROR: executables missing\n";
exit(1);
}
elsif ($debug) {
print STDERR "Executable sanity check passed OK\n";
}
}
# Search for all request and response files
# Recursive directory walk: each *.rsp path goes into slot [3] and each
# *.req path (matching $filter) into slot [2] of its test's entry in
# %fips_tests.  Unknown or duplicate files bump the global $nbogus counter.
sub find_files {
my ( $filter, $dir ) = @_;
my ( $dirh, $testname );
opendir( $dirh, $dir );
while ( $_ = readdir($dirh) ) {
next if ( $_ eq "." || $_ eq ".." );
$_ = "$dir/$_"; # work with the full relative path from here on
if ( -f "$_" ) {
if (/\/([^\/]*)\.rsp$/) {
$testname = fix_pss( $1, $_ );
if ( exists $fips_tests{$testname} ) {
if ( $fips_tests{$testname}->[3] eq "" ) {
$fips_tests{$testname}->[3] = $_;
}
else {
print STDERR
"WARNING: duplicate response file $_ for test $testname\n";
$nbogus++;
}
}
else {
print STDERR "WARNING: bogus file $_\n";
$nbogus++;
}
}
next unless /$filter.*\.req$/i;
if (/\/([^\/]*)\.req$/) {
$testname = fix_pss( $1, $_ );
if ( exists $fips_tests{$testname} ) {
if ( $fips_tests{$testname}->[2] eq "" ) {
$fips_tests{$testname}->[2] = $_;
}
else {
print STDERR
"WARNING: duplicate request file $_ for test $testname\n";
$nbogus++;
}
}
# SHAmix.req is explicitly excluded from the unrecognized-file warning.
elsif ( !/SHAmix\.req$/ ) {
print STDERR "WARNING: unrecognized filename $_\n";
$nbogus++;
}
}
}
elsif ( -d "$_" ) {
find_files( $filter, $_ );
}
}
closedir($dirh);
}
# RSA-PSS request/response file names do not encode the salt length, so read
# it from a "# salt len: N" comment inside the file and append it to the
# test name as "TEST(N)".  Non-PSS test names pass through unchanged, as do
# PSS names whose file lacks a salt-length comment (after a warning).
sub fix_pss {
    my ( $test, $path ) = @_;
    my $sl = "";
    local $_;
    if ( $test =~ /PSS/ ) {
        # BUG FIX: replaced the bareword filehandle IN and 2-arg open with a
        # lexical filehandle and 3-arg open (2-arg open misparses leading
        # "<"/">" or trailing "|" in $path).
        open( my $in, '<', $path ) || die "Can't Open File $path";
        while (<$in>) {
            if (/^\s*#\s*salt\s+len:\s+(\d+)\s*$/i) {
                $sl = $1;
                last;
            }
        }
        close $in;
        if ( $sl eq "" ) {
            print STDERR "WARNING: No Salt length detected for file $path\n";
        }
        else {
            return $test . "($sl)";
        }
    }
    return $test;
}
# Check that every enabled test has a request file and, in verify mode, a
# response file; exit unless the --ignore-* flags permit the problems.
sub sanity_check_files {
my $bad = 0;
foreach (@fips_test_list) {
next unless ref($_);
my ( $tst, $cmd, $req, $resp ) = @$_;
#print STDERR "FILES $tst, $cmd, $req, $resp\n";
if ( $req eq "" ) {
print STDERR "WARNING: missing request file for $tst\n";
$bad = 1;
next;
}
if ( $verify && $resp eq "" ) {
print STDERR "WARNING: no response file for test $tst\n";
$bad = 1;
}
elsif ( !$verify && $resp ne "" ) {
print STDERR "WARNING: response file $resp will be overwritten\n";
}
}
if ($bad) {
print STDERR "ERROR: test vector file set not complete\n";
exit(1) unless $ignore_missing;
}
if ($nbogus) {
print STDERR
"ERROR: $nbogus bogus or duplicate request and response files\n";
exit(1) unless $ignore_bogus;
}
if ( $debug && !$nbogus && !$bad ) {
print STDERR "test vector file set complete\n";
}
}
# Run (generate mode) or verify every enabled test.  In generate mode the
# .rsp files are (re)created under $rspdir; in verify mode output goes to a
# temporary .tst file that is checked against the known-good .rsp, either
# by direct comparison or (for randomized tests) via %verify_special.
# Updates the global result counters declared in the main driver.
sub run_tests {
my ( $verify, $win32, $tprefix, $filter, $tvdir ) = @_;
my ( $tname, $tref ); # NOTE(review): unused — shadowed by the 'my' below
my $bad = 0; # NOTE(review): unused
foreach (@fips_test_list) {
# Bare strings are group banners, not tests.
if ( !ref($_) ) {
print "Running $_ tests\n" unless $quiet;
next;
}
my ( $tname, $tcmd, $req, $rsp ) = @$_;
my $out = $rsp;
if ($verify) {
$out =~ s/\.rsp$/.tst/;
}
if ( $req eq "" ) {
print STDERR
"WARNING: Request file for $tname missing: test skipped\n";
$skipcnt++;
next;
}
if ( $verify && $rsp eq "" ) {
print STDERR
"WARNING: Response file for $tname missing: test skipped\n";
$skipcnt++;
next;
}
elsif ( !$verify ) {
if ( $rsp ne "" ) {
print STDERR "WARNING: Response file for $tname deleted\n";
unlink $rsp;
}
# Derive the output path by swapping /req/ for /$rspdir/ and make
# sure that directory exists.
$out = $req;
$out =~ s|/req/(\S+)\.req|/$rspdir/$1.rsp|;
my $outdir = $out;
$outdir =~ s|/[^/]*$||;
if ( !-d $outdir ) {
print STDERR "DEBUG: Creating directory $outdir\n" if $debug;
mkdir($outdir) || die "Can't create directory $outdir";
}
}
# Commands ending in -f take input/output paths as arguments; all
# others read stdin and write stdout.
my $cmd = "$cmd_prefix$tprefix$tcmd ";
if ( $tcmd =~ /-f$/ ) {
$cmd .= "$req $out";
}
else {
$cmd .= "<$req >$out";
}
print STDERR "DEBUG: running test $tname\n" if ( $debug && !$verify );
system($cmd);
if ( $? != 0 ) {
print STDERR
"WARNING: error executing test $tname for command: $cmd\n";
$runerr++;
next;
}
if ($verify) {
if ( exists $verify_special{$tname} ) {
# Randomized output: run the matching verify program over it
# and count the Result = P/F lines it reports.
my $vout = $rsp;
$vout =~ s/\.rsp$/.ver/;
$tcmd = $verify_special{$tname};
$cmd = "$cmd_prefix$tprefix$tcmd ";
$cmd .= "<$out >$vout";
system($cmd);
if ( $? != 0 ) {
print STDERR
"WARNING: error executing verify test $tname $cmd\n";
$scheckrunerr++;
next;
}
my ( $fcount, $pcount ) = ( 0, 0 );
open VER, "$vout";
while (<VER>) {
if (/^Result\s*=\s*(\S*)\s*$/i)
{
if ( $1 eq "F" ) {
$fcount++;
}
else {
$pcount++;
}
}
}
close VER;
unlink $vout;
if ( $fcount || $debug ) {
print STDERR "DEBUG: $tname, Pass=$pcount, Fail=$fcount\n";
}
# Zero passes is also a failure (e.g. empty output file).
if ( $fcount || !$pcount ) {
$scheckerr++;
}
else {
$scheckok++;
}
}
elsif ( !cmp_file( $tname, $rsp, $out ) ) {
$cmperr++;
}
else {
$cmpok++;
}
unlink $out;
}
}
}
# Compare the known-good response file ($rsp) against freshly generated test
# output ($tst), ignoring comments, blank lines and whitespace differences
# (normalization is done by next_line()).  Returns 1 on match, 0 otherwise.
sub cmp_file {
    my ( $tname, $rsp, $tst ) = @_;
    my ( $rspf, $tstf );
    my ( $rspline, $tstline );
    # 3-arg open (the old 2-arg form misparses special characters in paths).
    if ( !open( $rspf, '<', $rsp ) ) {
        print STDERR "ERROR: can't open request file $rsp\n";
        return 0;
    }
    if ( !open( $tstf, '<', $tst ) ) {
        print STDERR "ERROR: can't open output file $tst\n";
        close $rspf;
        return 0;
    }
    my $matched = 1;
    for ( ; ; ) {
        $rspline = next_line($rspf);
        $tstline = next_line($tstf);
        if ( !defined($rspline) && !defined($tstline) ) {
            print STDERR "DEBUG: $tname file comparison OK\n" if $debug;
            last;
        }
        if ( !defined($rspline) ) {
            # BUG FIX: the old messages interpolated the filehandles
            # ($rspf/$tstf), printing GLOB refs instead of the file names.
            print STDERR "ERROR: $tname EOF on $rsp\n";
            $matched = 0;
            last;
        }
        if ( !defined($tstline) ) {
            print STDERR "ERROR: $tname EOF on $tst\n";
            $matched = 0;
            last;
        }
        # Workaround for bug in RAND des2 test output */
        if ( $tstline =~ /^Key2 =/ && $rspline =~ /^Key1 =/ ) {
            $rspline =~ s/^Key1/Key2/;
        }
        if ( $tstline ne $rspline ) {
            print STDERR "ERROR: $tname mismatch:\n";
            print STDERR "\t $tstline != $rspline\n";
            $matched = 0;
            last;
        }
    }
    # BUG FIX: both handles are now closed on every exit path.
    close $rspf;
    close $tstf;
    return $matched;
}
# Return the next significant line from $in: comments ("#...") are stripped,
# blank lines are skipped, and runs of whitespace are collapsed to single
# spaces.  Returns undef at EOF.
sub next_line {
    my ($in) = @_;
    while ( my $line = <$in> ) {
        chomp $line;
        $line =~ s/#.*$//;           # delete comments
        next if $line =~ /^\s*$/;    # ignore blank lines
        $line =~ s/\s+/ /g;          # translate multiple space into one
        return $line;
    }
    return undef;
}
| hvdieren/parsec-swan | pkgs/libs/ssl/src/fips/fipsalgtest.pl | Perl | bsd-3-clause | 25,680 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
0000 01F5
01FA 0217
0250 02A8
02B0 02DE
02E0 02E9
0300 0345
0360 0361
0374 0375
037A
037E
0384 038A
038C
038E 03A1
03A3 03CE
03D0 03D6
03DA
03DC
03DE
03E0
03E2 03F3
0401 040C
040E 044F
0451 045C
045E 0486
0490 04C4
04C7 04C8
04CB 04CC
04D0 04EB
04EE 04F5
04F8 04F9
0531 0556
0559 055F
0561 0587
0589
0591 05A1
05A3 05B9
05BB 05C4
05D0 05EA
05F0 05F4
060C
061B
061F
0621 063A
0640 0652
0660 066D
0670 06B7
06BA 06BE
06C0 06CE
06D0 06ED
06F0 06F9
0901 0903
0905 0939
093C 094D
0950 0954
0958 0970
0981 0983
0985 098C
098F 0990
0993 09A8
09AA 09B0
09B2
09B6 09B9
09BC
09BE 09C4
09C7 09C8
09CB 09CD
09D7
09DC 09DD
09DF 09E3
09E6 09FA
0A02
0A05 0A0A
0A0F 0A10
0A13 0A28
0A2A 0A30
0A32 0A33
0A35 0A36
0A38 0A39
0A3C
0A3E 0A42
0A47 0A48
0A4B 0A4D
0A59 0A5C
0A5E
0A66 0A74
0A81 0A83
0A85 0A8B
0A8D
0A8F 0A91
0A93 0AA8
0AAA 0AB0
0AB2 0AB3
0AB5 0AB9
0ABC 0AC5
0AC7 0AC9
0ACB 0ACD
0AD0
0AE0
0AE6 0AEF
0B01 0B03
0B05 0B0C
0B0F 0B10
0B13 0B28
0B2A 0B30
0B32 0B33
0B36 0B39
0B3C 0B43
0B47 0B48
0B4B 0B4D
0B56 0B57
0B5C 0B5D
0B5F 0B61
0B66 0B70
0B82 0B83
0B85 0B8A
0B8E 0B90
0B92 0B95
0B99 0B9A
0B9C
0B9E 0B9F
0BA3 0BA4
0BA8 0BAA
0BAE 0BB5
0BB7 0BB9
0BBE 0BC2
0BC6 0BC8
0BCA 0BCD
0BD7
0BE7 0BF2
0C01 0C03
0C05 0C0C
0C0E 0C10
0C12 0C28
0C2A 0C33
0C35 0C39
0C3E 0C44
0C46 0C48
0C4A 0C4D
0C55 0C56
0C60 0C61
0C66 0C6F
0C82 0C83
0C85 0C8C
0C8E 0C90
0C92 0CA8
0CAA 0CB3
0CB5 0CB9
0CBE 0CC4
0CC6 0CC8
0CCA 0CCD
0CD5 0CD6
0CDE
0CE0 0CE1
0CE6 0CEF
0D02 0D03
0D05 0D0C
0D0E 0D10
0D12 0D28
0D2A 0D39
0D3E 0D43
0D46 0D48
0D4A 0D4D
0D57
0D60 0D61
0D66 0D6F
0E01 0E3A
0E3F 0E5B
0E81 0E82
0E84
0E87 0E88
0E8A
0E8D
0E94 0E97
0E99 0E9F
0EA1 0EA3
0EA5
0EA7
0EAA 0EAB
0EAD 0EB9
0EBB 0EBD
0EC0 0EC4
0EC6
0EC8 0ECD
0ED0 0ED9
0EDC 0EDD
0F00 0F47
0F49 0F69
0F71 0F8B
0F90 0F95
0F97
0F99 0FAD
0FB1 0FB7
0FB9
10A0 10C5
10D0 10F6
10FB
1100 1159
115F 11A2
11A8 11F9
1E00 1E9B
1EA0 1EF9
1F00 1F15
1F18 1F1D
1F20 1F45
1F48 1F4D
1F50 1F57
1F59
1F5B
1F5D
1F5F 1F7D
1F80 1FB4
1FB6 1FC4
1FC6 1FD3
1FD6 1FDB
1FDD 1FEF
1FF2 1FF4
1FF6 1FFE
2000 202E
2030 2046
206A 2070
2074 208E
20A0 20AC
20D0 20E1
2100 2138
2153 2182
2190 21EA
2200 22F1
2300
2302 237A
2400 2424
2440 244A
2460 24EA
2500 2595
25A0 25EF
2600 2613
261A 266F
2701 2704
2706 2709
270C 2727
2729 274B
274D
274F 2752
2756
2758 275E
2761 2767
2776 2794
2798 27AF
27B1 27BE
3000 3037
303F
3041 3094
3099 309E
30A1 30FE
3105 312C
3131 318E
3190 319F
3200 321C
3220 3243
3260 327B
327F 32B0
32C0 32CB
32D0 32FE
3300 3376
337B 33DD
33E0 33FE
4E00 9FA5
AC00 D7A3
D800 FA2D
FB00 FB06
FB13 FB17
FB1E FB36
FB38 FB3C
FB3E
FB40 FB41
FB43 FB44
FB46 FBB1
FBD3 FD3F
FD50 FD8F
FD92 FDC7
FDF0 FDFB
FE20 FE23
FE30 FE44
FE49 FE52
FE54 FE66
FE68 FE6B
FE70 FE72
FE74
FE76 FEFC
FEFF
FF01 FF5E
FF61 FFBE
FFC2 FFC7
FFCA FFCF
FFD2 FFD7
FFDA FFDC
FFE0 FFE6
FFE8 FFEE
FFFC FFFF
1FFFE 1FFFF
2FFFE 2FFFF
3FFFE 3FFFF
4FFFE 4FFFF
5FFFE 5FFFF
6FFFE 6FFFF
7FFFE 7FFFF
8FFFE 8FFFF
9FFFE 9FFFF
AFFFE AFFFF
BFFFE BFFFF
CFFFE CFFFF
DFFFE DFFFF
EFFFE 10FFFF
END
| liuyangning/WX_web | xampp/perl/lib/unicore/lib/In/2_1.pl | Perl | mit | 3,431 |
use File::Find;
# File::Find callback: rewrites every *.htm file it visits, in place.
# Two transformations are applied line-by-line:
#   * a line matching /analytics/ is dropped, together with the line
#     immediately following it (the analytics <script> pair), and
#   * a line matching /underscore/ is replaced with a <script> tag that
#     points at the local minified underscore build.
# The rewrite goes through a temporary "<name>l" file which is then
# renamed over the original, so a failure mid-way never truncates the
# source file.
sub proc {
    if ($File::Find::name =~ /\.htm$/) {
        my $name = $_;    # find() has chdir'd into the containing directory
        print "$File::Find::name\n";
        open(my $r, '<', $name) or die "Can't open file $name: $!";
        open(my $w, '>', $name . "l") or die "Can't create file ${name}l: $!";
        my $skip_next = 0;    # true => previous line matched /analytics/
        while (my $line = <$r>) {
            chomp $line;
            if ($skip_next == 1) {
                $skip_next = 0;
                next;
            }
            if ($line =~ /analytics/) {
                $skip_next = 1;
                next;
            }
            if ($line =~ /underscore/) {
                $line = '<script src="../underscore/underscore-min.js"></script>';
            }
            print {$w} $line . "\n";
        }
        # close on a write handle can surface buffered I/O errors; check it
        # before destroying the original file.
        close($w) or die "Can't write ${name}l: $!";
        close($r);
        unlink($name) or die "Can't remove $name: $!";
        rename($name . "l", $name) or die "Can't rename ${name}l to $name: $!";
    }
}
find(\&proc, ("."));
| GlukKazan/Dagaz | utils/tree/tree.pl | Perl | mit | 847 |
#!/usr/bin/env perl
# Read a FASTA file and print its records to STDOUT sorted by sequence
# length, shortest first.
use strict;
use warnings;
use Bio::SeqIO;

die "Usage: $0 <fasta>\n" unless @ARGV == 1;
my $fasta_file = shift;

# Pull every record into memory so they can be ordered.
my $reader = Bio::SeqIO->new(-file => $fasta_file, -format => 'fasta');
my @records;
while (my $record = $reader->next_seq) {
    push @records, $record;
}

# Emit records from shortest to longest sequence.
for my $record (sort { $a->length <=> $b->length } @records) {
    print '>', $record->id, "\n", $record->seq, "\n";
}
| yunfeiguo/bioinfo_toolbox | utilities/seq_related/sort_fa_bylen.pl | Perl | mit | 367 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/Q713JNUf8G/asia. Olson data version 2016a
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Asia::Riyadh;
$DateTime::TimeZone::Asia::Riyadh::VERSION = '1.95';
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Asia::Riyadh::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
# Observance spans for this zone.  Each row is:
#   [ utc_start, utc_end, local_start, local_end,
#     offset_in_seconds, is_dst, abbreviation ]
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY, # utc_start
61416046388, # utc_end 1947-03-13 20:53:08 (Thu)
DateTime::TimeZone::NEG_INFINITY, # local_start
61416057600, # local_end 1947-03-14 00:00:00 (Fri)
11212, # offset in seconds (+03:06:52, local mean time)
0, # is_dst
'LMT',
],
[
61416046388, # utc_start 1947-03-13 20:53:08 (Thu)
DateTime::TimeZone::INFINITY, # utc_end
61416057188, # local_start 1947-03-13 23:53:08 (Thu)
DateTime::TimeZone::INFINITY, # local_end
10800, # offset in seconds (+03:00)
0, # is_dst
'AST',
],
];
# Olson (tz) database release this table was generated from.
sub olson_version {'2016a'}
# Riyadh has never observed daylight saving time.
sub has_dst_changes {0}
# Horizon year baked in by the generator for this data set.
sub _max_year {2026}
# Class::Singleton hook: construct the singleton with the span table above.
sub _new_instance {
return shift->_init( @_, spans => $spans );
}
1;
| jkb78/extrajnm | local/lib/perl5/DateTime/TimeZone/Asia/Riyadh.pm | Perl | mit | 1,213 |
package PatchReader;
use strict;
=head1 NAME
PatchReader - Utilities to read and manipulate patches and CVS
=head1 SYNOPSIS
# script that reads in a patch (in any known format), and prints out some
# information about it. Other common operations are outputting the patch
# in a raw unified diff format, outputting the patch information to
# Template::Toolkit templates, adding context to a patch from CVS, and
# narrowing the patch down to apply only to a single file or set of files.
use PatchReader::Raw;
use PatchReader::PatchInfoGrabber;
my $filename = 'filename.patch';
# Create the reader that parses the patch and the object that extracts info
# from the reader's datastream
my $reader = new PatchReader::Raw();
my $patch_info_grabber = new PatchReader::PatchInfoGrabber();
$reader->sends_data_to($patch_info_grabber);
# Iterate over the file
$reader->iterate_file($filename);
# Print the output
my $patch_info = $patch_info_grabber->patch_info();
print "Summary of Changed Files:\n";
while (my ($file, $info) = each %{$patch_info->{files}}) {
print "$file: +$info->{plus_lines} -$info->{minus_lines}\n";
}
=head1 ABSTRACT
This perl library allows you to manipulate patches programmatically by
chaining together a variety of objects that read, manipulate, and output
patch information:
PatchReader::Raw
- parse a patch in any format known to this author (unified, normal, cvs diff,
among others)
PatchReader::PatchInfoGrabber
- grab summary info for sections of a patch in a nice hash (per-file counts
of added and removed lines of a patch, for example)
PatchReader::AddCVSContext
- add context to the patch by grabbing the original files from CVS
PatchReader::NarrowPatch
- narrow a patch down to only apply to a specific set of files
PatchReader::DiffPrinter::raw
- output the parsed patch in raw unified diff format
PatchReader::DiffPrinter::template
- output the parsed patch to Template::Toolkit templates (can be used to make
HTML output or anything else you please)
Additionally, it is designed so that you can plug in your own objects that
read the parsed data while it is being parsed (no need for the performance or
memory problems that can come from reading in the entire patch all at once).
You can do this by mimicking one of the existing readers (such as
PatchInfoGrabber) and overriding the methods start_patch, start_file, section,
end_file and end_patch.
=cut
# Distribution version; fully qualified for compatibility with old perls.
$PatchReader::VERSION = '0.9.5';
# A module must end with a true value.
1
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/PatchReader.pm | Perl | mit | 2,514 |
#!/usr/bin/perl
# Smoke test for Noosphere's information-retrieval indexer: fetch the
# current revision of one encyclopedia object and feed it to irIndex().
use strict;
use warnings;

use DBI;
use lib '/var/www/lib';
use Noosphere qw($dbh $DEBUG);
use Noosphere::IR;
use Noosphere::Config;
use Noosphere::DB;

# $dbh is imported from Noosphere and shared with the library routines.
$dbh = Noosphere::dbConnect();

my $table    = Noosphere::getConfig('en_tbl');   # encyclopedia table name
my $objectid = '1051';                           # object to (re)index

# "revid is null" selects the live revision rather than a historical one.
my ($rv, $sth) = Noosphere::dbSelect(
    $dbh,
    { WHAT => '*', FROM => $table, WHERE => "uid=$objectid and revid is null" }
);
my $row = $sth->fetchrow_hashref();
$sth->finish();

# Fail loudly instead of handing an undef row to the indexer.
die "no current revision found for uid=$objectid in $table\n"
    unless defined $row;

Noosphere::irIndex($table, $row);
| holtzermann17/Noosphere | bin/test/testirindex.pl | Perl | mit | 427 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.