code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/perl -w
package Metrics::metric_access_authorization;
use strict;
use LWP::Simple;
use JSON::Parse 'parse_json';
use CGI;
use lib '../';
use FAIRMetrics::TesterHelper;
require Exporter;
use vars ('@ISA', '@EXPORT');
@ISA = qw(Exporter);
@EXPORT = qw(execute_metric_test);
# JSON schema for the POSTed test input: each key maps to [type, description].
my %schemas = ('allows' => ['string', "Does the Resource being tested require authentication or authorization (answer TRUE or FALSE)"],
'evidence' => ['string', "The URL of the description of how to gain the credentials necessary to authenticate or authorize"],
'subject' => ['string', "the GUID being tested"]);
# Static metadata for this metric test; the helper uses it to generate the
# Swagger (OpenAPI) description and the evaluation-response envelope.
my $helper = FAIRMetrics::TesterHelper->new(
title => "FAIR Metrics - Access authentication",
description => "Metric to test if the access protocol allows authentication",
tests_metric => 'https://purl.org/fair-metrics/FM_A1.2',
applies_to_principle => "A1.2",
organization => 'FAIR Metrics Authoring Group',
org_url => 'http://fairmetrics.org',
responsible_developer => "Mark D Wilkinson",
email => 'markw@illuminae.com',
developer_ORCiD => '0000-0001-6960-357X',
host => 'linkeddata.systems',
basePath => '/cgi-bin',
# NOTE(review): 'path' names metric_access_protocol but this package is
# metric_access_authorization -- looks like a copy/paste slip from a sibling
# metric; confirm against the deployed CGI path before changing.
path => '/fair_metrics/Metrics/metric_access_protocol',
response_description => 'The response is a binary (1/0), success or failure',
schemas => \%schemas,
fairsharing_key_location => '../fairsharing.key'
);
# Dispatch at load time: a GET (or no request method, e.g. command line)
# returns the Swagger description; any other method falls through so that the
# 'require' in the dispatcher gets a true value and can later invoke
# execute_metric_test() with the POSTed body.
my $cgi = CGI->new();
if (!$cgi->request_method() || $cgi->request_method() eq "GET") {
print "Content-type: application/openapi+yaml;version=3.0\n\n";
print $helper->getSwagger();
} else {
return 1; # this is returning 1 for the "require" statement in fair_metrics!!!!
}
# execute_metric_test($self, $body)
#
# Entry point called by the dispatcher with the raw JSON request body.
# Extracts 'allows', 'evidence' and 'subject' from the body, evaluates the
# metric via check_metric(), prints a JSON evaluation response to STDOUT,
# and terminates the CGI process.  Never returns to the caller.
sub execute_metric_test {
my ($self, $body) = @_;
my $json = parse_json($body);
my $check = $json->{'allows'};
my $evidence = $json->{'evidence'};
my $IRI = $json->{'subject'};
my $valid = check_metric($check, $evidence, $IRI);
my $value;
if($valid) {
# "1" = pass, "0" = fail, per the response_description above.
$value = "1";
$helper->addComment("All OK!");
} else {
$value = "0";
$helper->addComment("The URI $evidence did not return a valid response");
}
my $response = $helper->createEvaluationResponse($IRI, $value);
print "Content-type: application/json\n\n";
print $response;
# NOTE(review): exit(1) is a non-zero (failure) exit status even on success;
# presumably harmless under CGI, but confirm nothing inspects $?.
exit 1;
}
# check_metric($check, $evidence, $subject)
#
# Evaluates the metric.  If the resource declares that it does NOT require
# authentication/authorization ($check is "FALSE", case-insensitive) the test
# passes immediately.  Otherwise the $evidence URL (describing how to obtain
# credentials) must be resolvable.  Returns 1 on success, 0 on failure.
# $subject (the tested GUID) is accepted for interface symmetry but unused.
sub check_metric {
    my ($check, $evidence, $subject) = @_;
    # BUG FIX: the original used the numeric comparison '==' against the
    # string 'false'; both operands evaluate to 0 numerically, so this guard
    # was always true and the metric passed regardless of input.  String
    # equality ('eq') is required here.
    return 1 if lc($check) eq 'false';
    # Auth is required: the evidence URL must return some content.
    my $result = get($evidence);   # LWP::Simple::get; undef on failure
    return 1 if $result;
    return 0;
}
1; #
| GOFAIR/MetricsGroup | MetricsEvaluatorCode/Perl/Metrics/metric_access_authorization.pm | Perl | mit | 2,536 |
package Alatar::PostgreSQL::Extractors::PgViewExtractor;
use strict;
use String::Util qw(trim);
use Data::Dumper;
use Alatar::Model::SqlView;
use Alatar::Model::SqlRequest;
use Alatar::PostgreSQL::Extractors::PgExtractor;
our @ISA = qw(Alatar::PostgreSQL::Extractors::PgExtractor);
# Constructor: delegates to the PgExtractor superclass constructor with the
# owner object and raw SQL code, then re-blesses the result into this class.
# NOTE(review): the explicit bless() is presumably redundant if SUPER::new
# already blesses into $class -- harmless either way; confirm in PgExtractor.
sub new {
my ($class,$owner,$code) = @_;
my $this = $class->SUPER::new($owner,$code);
bless($this,$class);
return $this;
}
# actions
# --------------------------------------------------
# _extractObject($view)
#
# Parses a PostgreSQL view definition of the form '"name" AS <select ...>':
# captures the (optionally double-quoted) view name and the SQL body after
# 'AS', builds a SqlView entity owned by $this->{owner}, wraps the body in a
# SqlRequest named '<name>_R' (with a trailing ';'), and attaches that
# request to the view entity.
# FIX: the original closing line of this sub was garbled with pipe-separated
# metadata; restored to a clean block terminator.
sub _extractObject {
    my ($this, $view) = @_;
    # Leading/trailing '"' around the name are optional in pg_get_viewdef output.
    my ($name, $code) = $view =~ /\"?(.*?)\"?\sAS\s(.*)/gi;
    $this->{entity} = Alatar::Model::SqlView->new($this->{owner}, $name);
    my $reqSql = Alatar::Model::SqlRequest->new($this->{entity}, ($name . '_R', (trim($code) . ';')));
    $this->{entity}->setSqlRequest($reqSql);
}
#!/usr/bin/perl
use strict;
# CUDA CUarray_format codes used as texture-format arguments to sgemm.exe
# (values match the CUDA driver API enum).
my $CU_AD_FORMAT_UNSIGNED_INT8 = 0x01;
my $CU_AD_FORMAT_UNSIGNED_INT16 = 0x02;
my $CU_AD_FORMAT_FLOAT = 0x20;
# Rebuild the 128-thread kernel when the source .sass is newer than the
# preprocessed output ((stat)[9] is mtime): preprocess, assemble into the
# cubin, then disassemble the final kernel for inspection.
if (!-f 'sgemm_pre_128.sass' || (stat 'sgemm128.sass')[9] > (stat 'sgemm_pre_128.sass')[9])
{
print `maxas.pl -p sgemm128.sass sgemm_pre_128.sass`;
exit if $?;
print `maxas.pl -i sgemm128.sass sgemm.cubin`;
exit if $?;
print `maxas.pl -e -k sgemm_kernel_128 sgemm.cubin sgemm_final_128.sass`;
}
# Same rebuild cycle for the 64-thread variant.
if (!-f 'sgemm_pre_64.sass' || (stat 'sgemm64.sass')[9] > (stat 'sgemm_pre_64.sass')[9])
{
print `maxas.pl -p sgemm64.sass sgemm_pre_64.sass`;
exit if $?;
print `maxas.pl -i sgemm64.sass sgemm.cubin`;
exit if $?;
print `maxas.pl -e -k sgemm_kernel_64 sgemm.cubin sgemm_final_64.sass`;
}
#print `Release\\sgemm.exe $_ 20` foreach (80,60,40,30,20,10,9,8,7,6,5,4,3,2);
# Warm-up run (output discarded), then one reported benchmark run.
`Release\\sgemm.exe 64 5 $CU_AD_FORMAT_FLOAT`;
print `Release\\sgemm.exe 64 20 $CU_AD_FORMAT_UNSIGNED_INT8`;
# NOTE(review): everything below this exit is unreachable -- a parameter
# sweep over matrix sizes kept for reference/manual use.
exit;
my %data;
foreach my $thread128 (4 .. 64)
{
my $N = $thread128 * 128;
# Scale iteration count inversely with problem size (O(N^3) work), capped.
my $iterations = int(20 * (64 * 128)**3 / $N**3);
$iterations = 10000 if $iterations > 10000;
print "$N $iterations\n";
my $data = `Release\\sgemm.exe $thread128 $iterations $CU_AD_FORMAT_UNSIGNED_INT16`;
# Collect the GFLOPS figure from each benchmark line of the tool's output.
foreach my $bench (split "\n", $data)
{
if ($bench =~ /^(\w+)\s+GFLOPS: ([0-9.]+) /)
{
push @{$data{$N}}, $2;
print "$1 $2\n";
}
}
}
print join("\t", qw(size Max64 Max128 Cub64 Cub128)), "\n";
# NOTE(review): the header above includes 'size' but the rows printed below
# only contain the four GFLOPS columns, not $N -- verify before relying on
# the table layout.
foreach my $N (sort { $a <=> $b } keys %data)
{
print join("\t", @{$data{$N}}), "\n";
}
#print $data;
__END__
64 * 128 * 16 * 1.620 * .931 / 520
Max64 GFLOPS: 1377.38 (size: 256, iterations: 2000)
Max128 GFLOPS: 973.70 (size: 256, iterations: 2000)
Cub64 GFLOPS: 1272.42 (size: 256, iterations: 2000)
Cub128 GFLOPS: 948.15 (size: 256, iterations: 2000)
my @data = grep /\S/, split "\n", $data;
my $min;
my %smData;
my @sdata;
foreach (@data)
{
next if /GFLOPS/;
my ($sm, $clock, $by, $bx) = split /\s+/;
$smData{$sm} = $clock if !$smData{$sm} || $clock < $smData{$sm};
$min = $clock if !$min || $clock < $min;
push @sdata, [$sm, $clock, $by, $bx];
}
foreach (@sdata)
{
$_->[1] -= $smData{$_->[0]};
}
foreach (sort {$a->[1] <=> $b->[1] || $a->[0] <=> $b->[0]} @sdata)
{
printf "%02d %8u by: %2d bx: %2d\n", @$_;
}
| NervanaSystems/maxas | sgemm/sgemm.pl | Perl | mit | 2,388 |
/*************************************************************************
name: modelCheckerTestSuiteDRT.pl
version: July 10, 2001
description: Test Suite for DRT model checkers
authors: Patrick Blackburn & Johan Bos
*************************************************************************/
:- module(modelCheckerTestSuiteDRT,[test/4]).
:- ensure_loaded(comsemOperators).
/*========================================================================
Check given DRS in given model with given assignment.
Correct answer recorded as fourth argument.
========================================================================*/
% test(+DRS, +ModelIndex, +Assignment, ?Expected)
%
% Each fact pairs a Discourse Representation Structure with the index of a
% model (defined elsewhere in the test setup), a variable assignment (list
% of g(Var,Entity) terms), and the expected evaluation result:
%   pos   - the DRS is satisfied in the model,
%   neg   - the DRS is falsified,
%   undef - the truth value cannot be established (e.g. the model's
%           interpretation lacks the needed vocabulary).
test(drs([X],[robber(X)]),1,[],pos).
test(drs([],[equal(yolanda,honey_bunny)]),2,[],pos).
test(drs([],[equal(mia,honey_bunny)]),2,[],neg).
test(drs([],[~ drs([],[equal(yolanda,honey_bunny)])]),2,[],neg).
test(drs([],[~ drs([],[equal(mia,honey_bunny)])]),2,[],pos).
test(drs([X,Y],[man(X),woman(Y)]),4,[],pos).
% merge/2 combines the universes and conditions of two DRSs.
test(merge(drs([X],[man(X)]),drs([X],[woman(X)])),4,[],pos).
test(drs([],[~ drs([X],[woman(X)])]),4,[],neg).
test(drs([X],[tasty(X),burger(X)]),4,[],undef).
test(drs([],[~ drs([X],[tasty(X),burger(X)])]),4,[],undef).
test(drs([X],[~ drs([Y],[woman(Y)]),man(X)]),4,[],neg).
test(drs([X],[man(X), (~ drs([Y],[woman(Y)]))]),4,[],neg).
test(drs([X],[man(X), (~ drs([X],[woman(X)]))]),4,[],neg).
test(drs([X],[woman(X), (~ drs([X],[customer(X)]))]),5,[],undef).
% '>' is DRS implication, 'v' is disjunction (operators from comsemOperators).
test(drs([],[drs([X],[]) > drs([],[drs([],[robber(X)]) v drs([],[customer(X)])])]),2,[],pos).
test(drs([],[~ drs([],[drs([X],[]) > drs([],[drs([],[robber(X)]) v drs([],[customer(X)])])])]),2,[],neg).
% Free variable X: undefined with the empty assignment, but positive once
% the assignment binds X to entity d3.
test(drs([],[drs([],[robber(X)]) v drs([],[customer(X)])]),2,[],undef).
test(drs([],[drs([],[robber(X)]) v drs([],[customer(X)])]),2,[g(X,d3)],pos).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/CURT/bb0/modelCheckerTestSuiteDRT.pl | Perl | mit | 1,795 |
# Localisation stub for the Tablize Movable Type plugin: all behaviour is
# inherited from MT::Plugin::L10N.  Presumably per-language lexicons live in
# Tablize::L10N::<lang> submodules, per the usual MT plugin layout -- confirm
# against the plugin distribution.
package Tablize::L10N;
use strict;
use base 'MT::Plugin::L10N';
1;
| usualoma/movable-type-plugin-tablize | lib/Tablize/L10N.pm | Perl | mit | 68 |
/* Part of ClioPatria
Author: Jan Wielemaker
E-mail: J.Wielemaker@cs.vu.nl
WWW: http://www.swi-prolog.org
Copyright (C): 2012 VU University Amsterdam
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
As a special exception, if you link this library with other files,
compiled with a Free Software compiler, to produce an executable, this
library does not by itself cause the resulting executable to be covered
by the GNU General Public License. This exception does not however
invalidate any other reasons why the executable file might be covered by
the GNU General Public License.
*/
:- module(flint, []).
:- use_module(library(semweb/rdf_db)).
:- use_module(library(http/http_dispatch)).
:- use_module(library(http/http_files)).
:- use_module(library(http/html_write)).
:- use_module(library(http/html_head)).
:- use_module(library(http/json)).
:- use_module(cliopatria(hooks)).
:- http_handler(flint('index.html'), sparql_editor, []).
:- http_handler(flint('config.js'), flint_config, []).
:- http_handler(flint(.), http_reply_from_files(flint(.), []), [prefix]).
:- html_resource(flint('config.js'),
[ requires([ flint('flint-editor.js'),
flint('css/sparqlcolors.css'),
flint('css/docs.css')
])
]).
:- html_resource(flint('lib/codemirror.js'),
[ requires([ flint('jquery-1.5.2.min.js'),
flint('lib/codemirror.css')
])
]).
:- html_resource(flint('flint-editor.js'),
[ requires([ flint('lib/codemirror.js'),
flint('sparql10querymode_ll1.js'),
flint('sparql11querymode_ll1.js'),
flint('sparql11updatemode_ll1.js')
])
]).
%%	sparql_editor(+Request)
%
%	HTTP handler that presents the flint SPARQL editor.
%	The first clause fires when the Flint sources are not installed
%	(flint-editor.js unreadable) and shows installation instructions;
%	otherwise the second clause serves the editor page.
sparql_editor(_Request) :-
\+ absolute_file_name(flint('flint-editor.js'), _,
[ access(read),
file_errors(fail)
]), !,
reply_html_page(cliopatria(default),
title('No Flint installed'),
\no_flint).
sparql_editor(_Request) :-
reply_html_page(
cliopatria(plain),
title('Flint SPARQL Editor'),
\flint_page).
% flint_page// emits the container div the Flint JS attaches to, pulling in
% the editor resources declared by the html_resource/2 facts above.
flint_page -->
html_requires(flint('config.js')),
html(div(id(flint), [])).
%%	flint_config(+Request)
%
%	HTTP handler that serves the flint SPARQL editor configuration
%	and initialization as a JavaScript snippet run on DOM ready.
flint_config(_Request) :-
config(Config),
format('Content-type: text/javascript~n~n'),
format('$(document).ready(function() {~n'),
write_config(Config),
write_init,
format('});~n').
% Emits "var flintConfig = <JSON>;" built from config/1.
write_config(Config) :-
format(' var flintConfig = '),
json_write(current_output, Config, [width(72)]),
format(';').
% Emits the FlintEditor constructor call.
% NOTE(review): the JS variable is spelled 'fint' (sic) in the emitted code;
% harmless since it is never referenced, but worth confirming.
write_init :-
format(' var fint = new FlintEditor("flint", "images", flintConfig);~n').
%%	config(-Config) is det.
%
%	Produce a JSON document holding the FlintEditor configuration:
%	UI options, the registered RDF prefixes, endpoint parameters, the
%	query/update endpoints of this server, and the supported modes.
config(json([ interface = json([ toolbar=true,
menu=true
]),
namespaces = NameSpaces,
defaultEndpointParameters = EndpointParameters,
endpoints = EndPoints,
defaultModes = Modes
])) :-
namespaces(NameSpaces),
endpoint_parameters(EndpointParameters),
endpoints(EndPoints),
modes(Modes).
% All registered prefix/URI pairs as Flint namespace objects.
% NOTE(review): setof/3 fails when there are no solutions, which would make
% config/1 (and hence flint_config/1) fail if no prefixes are registered --
% in practice rdf_db always registers the standard prefixes.
namespaces(NameSpaces) :-
setof(NameSpace, namespace(NameSpace), NameSpaces).
% One namespace object per registered RDF prefix (name and prefix are the
% same string, as Flint expects).
namespace(json([ name(Prefix),
prefix(Prefix),
uri(URI)
])) :-
rdf_current_ns(Prefix, URI).
% Compatibility shim: older SWI-Prolog exported rdf_current_ns/2; newer
% versions renamed it to rdf_current_prefix/2.
:- if(\+current_predicate(rdf_current_ns/2)).
rdf_current_ns(Prefix, URI) :- rdf_current_prefix(Prefix, URI).
:- endif.
% endpoint_parameters(-JSON) is det.
%
% Static table: the HTTP parameter names Flint must use when talking to the
% endpoints, and the result formats (with MIME types) offered for SELECT
% and CONSTRUCT queries.
endpoint_parameters(
json([ queryParameters =
json([ format(output),
query(query),
update(update)
]),
selectFormats =
[ json([ name('Plain text'),
format(text),
type('text/plain')
]),
json([ name('SPARQL-XML'),
format(sparql),
type('application/sparql-results+xml')
]),
json([ name('JSON'),
format(json),
type('application/sparql-results+json')
])
],
constructFormats =
[ json([ name('Plain text'),
format(text),
type('text/plain')
]),
json([ name('RDF/XML'),
format(rdfxml),
type('application/rdf+xml')
]),
json([ name('Turtle'),
format(turtle),
type('application/turtle')
])
]
])).
% endpoints(-JSON) is det.
%
% The two endpoints of this ClioPatria server: the query endpoint (SPARQL
% 1.0/1.1 query) and the update endpoint.  URLs are resolved from the
% registered HTTP handler ids, so they follow any path re-configuration.
endpoints([ json([ name('ClioPatria'),
uri(EndPoint),
modes([ sparql11query, sparql10 ])
]),
json([ name('Update'),
uri(UpdateEndPoint),
modes([ sparql11update ])
])
]) :-
http_link_to_id(sparql_query, [], EndPoint),
http_link_to_id(sparql_update, [], UpdateEndPoint).
% modes(-JSON) is det.
%
% Static table of the editor modes Flint may offer.
modes([ json([ name('SPARQL 1.1 Query'),
mode(sparql11query)
]),
json([ name('SPARQL 1.1 Update'),
mode(sparql11update)
]),
json([ name('SPARQL 1.0'),
mode(sparql10)
])
]).
%%	no_flint//
%
%	Display a message indicating the user how to install Flint.
%	Resolves the ClioPatria installation directory so the instructions
%	can show the exact path in which to run the git command.
no_flint -->
{ absolute_file_name(cliopatria(.), CD0,
[ file_type(directory),
access(read)
]),
prolog_to_os_filename(CD0, ClioHome)
},
html_requires(pldoc),
html([ h1('Flint SPARQL Editor is not installed'),
p([ 'Please run the following command in the ClioPatria ',
'installation directory "~w" to install Flint.'-[ClioHome]
]),
pre(class(code),
[ 'git submodule update --init web/FlintSparqlEditor'
])
]).
/*******************************
* REGISTER WITH MENU *
*******************************/
% Adds the editor to ClioPatria's Query menu (150 = position within menu).
cliopatria:menu_item(150=query/sparql_editor, 'Flint SPARQL Editor').
| TeamSPoon/logicmoo_workspace | packs_web/ClioPatria/applications/flint.pl | Perl | mit | 6,221 |
:- module(
cli_help,
[
cli_help/3 % +Name, +Usages, +Specs
]
).
/** <module> Command-line tools: help messages
*/
:- use_module(library(apply)).
:- use_module(library(clpfd)).
:- use_module(library(dcg)).
:- use_module(library(dict)).
:- use_module(library(pair_ext)).
:- use_module(library(string_ext)).
%! cli_help(+Name:atom, +Usages:list(atom), +Specs:list(dict)) is det.
%
% Prints a complete help message for command-line tool Name to user_output:
% a "Usage:" section generated from Usages (each a list of positional
% argument names) followed by an "Options:" section generated from the
% option-specification dicts in Specs.
cli_help(Name, Usages, Specs) :-
usages_message(Name, Usages, String1),
flags_message(Specs, String2),
format(user_output, "Usage:\n~s\nOptions:\n~s", [String1,String2]).
%! flags_message(+Specs:list(dict), -Message:string) is det.
%
% Builds the options section of the help text.  First renders all short and
% long flags so the column widths can be computed from the longest entries,
% then formats each option into an aligned line and concatenates them.
flags_message(Specs, Message) :-
maplist(pp_short_flags, Specs, ShortStrings),
max_string_length(ShortStrings, ShortWidth),
maplist(pp_long_flags, Specs, LongStringss),
flatten(LongStringss, LongStrings0),
max_string_length(LongStrings0, LongWidth),
% yall lambda capturing only the two column widths.
% NOTE(review): the lambda parameter LongStrings0 shadows the flattened
% list of the same name bound above -- correct, because yall copies the
% lambda and only the {}-listed variables share bindings, but confusingly
% named; worth renaming if this is ever touched.
maplist(
{ShortWidth,LongWidth}/
[ShortString0,LongStrings0,Dict0,Line0]>>
format_option(
ShortWidth-ShortString0,
LongWidth-LongStrings0,
Dict0,
Line0
),
ShortStrings,
LongStringss,
Specs,
Lines
),
string_list_concat(Lines, Message).
%! pp_long_flag(+Flag, -String:string) is det.
%
% Renders a long option flag as "--Flag".
pp_long_flag(Flag, Rendered) :-
string_concat("--", Flag, Rendered).
%! pp_long_flags(+Spec:dict, -Strings:list(string)) is det.
%
% All long-flag renderings declared by an option spec; the empty list when
% the spec declares no longflags key.
pp_long_flags(Spec, Renderings) :-
dict_get(longflags, Spec, [], Flags),
maplist(pp_long_flag, Flags, Renderings).
%! pp_short_flag(+Flag, -String:string) is det.
%
% Renders a short option flag as "-Flag".
pp_short_flag(Flag, Rendered) :-
string_concat("-", Flag, Rendered).
%! pp_short_flags(+Spec:dict, -String:string) is det.
%
% Comma-separated rendering of all short flags declared by an option spec;
% the empty string when the spec declares none.
pp_short_flags(Spec, Joined) :-
dict_get(shortflags, Spec, [], Flags),
maplist(pp_short_flag, Flags, Renderings),
string_list_concat(Renderings, ',', Joined).
%! format_option(+ShortFlags:pair(nonneg,string),
%!               +LongFlags:pair(nonneg,list(string)),
%!               +OptionSpec:dict,
%!               -Line:string) is det.
%
% Formats one option into an aligned, newline-terminated help line:
% long flags column, short flags column, then the (possibly multi-line)
% help text.  Each Pair carries the column width computed over all options.
format_option(ShortWidth1-ShortString, LongWidth1-LongStrings1, Dict, Line) :-
% Destructure the spec; requires a 'help' key (library(dict) partial match).
optionSpec{help: Message} :< Dict,
words_lines(LongStrings1, LongWidth1, ", ", LongStrings2),
% Make room for a comma and a space.
LongWidth2 #= LongWidth1 + 2,
ShortWidth2 #= ShortWidth1 + 2,
string_list_concat(LongStrings2, ",\n", LongsString),
% Continuation lines of the help text are indented past both flag columns.
Indent #= ShortWidth2 + LongWidth2 + 4,
format_lines(Message, Indent, Lines),
format(
string(Line),
"~s~t~*+~s~t~*+~s\n",
[LongsString,LongWidth2,ShortString,ShortWidth2,Lines]
).
%! format_lines(+Message:or([string,list(string)]),
%!              +Indent:nonneg,
%!              -Formatted:string) is det.
%
% Formats an option's help message for display at the given indentation.
% A plain string is broken into lines algorithmically; a list of strings is
% taken as pre-split lines supplied by the option specification and is only
% indented.
% Line splitting determined algorithmically.
format_lines(Message1, Indent, Message2) :-
string(Message1), !,
MinWidth = 40,
LineWidth = 80,
MaxWidth #= max(MinWidth, LineWidth - Indent),
insert_line_breaks(Message1, MaxWidth, Indent, Message2).
% Line splitting determined by the option specification.
% BUG FIX: the original clause head was format_lines(Message, Indent, Message),
% unifying the input list of lines with the output string, so the clause could
% never succeed for list input.  Input and output must be distinct variables.
format_lines(Lines, Indent, Message) :-
indent_lines(Lines, Indent, Message).
%! insert_line_breaks(+Message:string,
%!                    +LineLength:positive_integer,
%!                    +Indent:nonneg,
%!                    -Text:string) is det.
%
% Word-wraps Message at LineLength columns, then joins the resulting lines
% with Indent-column continuation indentation.
insert_line_breaks(Message, LineLength, Indent, TextLines) :-
message_lines(Message, LineLength, Lines),
indent_lines(Lines, Indent, TextLines).
%! indent_lines(+Lines:list(string), +Indent:nonneg, -Message:string) is det.
%
% Joins Lines into one string, separating them with a newline followed by
% Indent columns (~*| column format directive), so continuation lines align
% under the first one.
indent_lines(Lines, Indent, Message) :-
format(string(Sep), "~n~*|", [Indent]),
string_list_concat(Lines, Sep, Message).
%! usages_message(+Name:atom,
%!                +Usages:list(list(atom)),
%!                -Message:string) is det.
%
% One indented usage line per entry in Usages; each entry lists the
% positional argument names for that invocation form.
usages_message(Name, Usages, Msg) :-
maplist(usage_line(Name), Usages, Lines),
string_list_concat(Lines, Msg).
% Renders one usage line through the usage_line//2 grammar.
usage_line(Name, Usage, Line) :-
string_phrase(usage_line(Name, Usage), Line).
% "  name {arg1} {arg2} [options]\n"
usage_line(Name, PosArgs) -->
" ",
atom(Name),
pos_args(PosArgs),
" [options]\n".
% Each positional argument is rendered as " {arg}".
pos_args([]) --> !, "".
pos_args([H|T]) -->
" {",
atom(H),
"}",
pos_args(T).
| wouterbeek/Prolog_Library_Collection | prolog/cli_help.pl | Perl | mit | 3,915 |
# Copyrights 2001-2008 by Mark Overmeer.
# For other contributors see ChangeLog.
# See the manual pages for details on the licensing terms.
# Pod stripped from pm file by OODoc 1.04.
use strict;
use warnings;
package Mail::Message::Field::Fast;
use vars '$VERSION';
$VERSION = '2.082';
use base 'Mail::Message::Field';
#------------------------------------------
#
# The DATA is stored as: [ NAME, FOLDED-BODY ]
# The body is kept in a folded fashion, where each line starts with
# a single blank.
# new(LINE) or new(NAME, BODY, [ATTRS])
#
# Constructs a field as the two-element array [NAME, FOLDED-BODY] described
# above.  consume() (inherited) splits a raw header line, or normalises an
# explicit NAME/BODY pair; returns the empty list when no valid body could
# be derived.  A single trailing argument is taken as a comment line;
# remaining arguments are treated as attribute key/value pairs.
sub new($;$@)
{ my $class = shift;
my ($name, $body) = $class->consume(@_==1 ? (shift) : (shift, shift));
return () unless defined $body;
my $self = bless [$name, $body], $class;
# Attributes
$self->comment(shift) if @_==1; # one attribute line
$self->attribute(shift, shift) while @_ > 1; # attribute pairs
$self;
}
#------------------------------------------
# Returns a shallow copy of this field: a fresh array reference holding the
# same [NAME, FOLDED-BODY] pair, blessed into the same class as the original.
sub clone()
{   my $self = shift;
    my @copy = @$self;
    return bless \@copy, ref($self);
}
#------------------------------------------
# Total byte length of the field as it appears in the header: name + ':'
# + folded body.
# NOTE(review): the inner length() calls resolve to the CORE builtin, not
# this sub -- user subs do not override builtins unless imported -- so this
# is not accidental recursion.
sub length()
{ my $self = shift;
length($self->[0]) + 1 + length($self->[1]);
}
#------------------------------------------
# name() -- the field name normalised to lower case (for case-insensitive
# header lookups).
sub name()
{   my $self = shift;
    return lc $self->[0];
}
#------------------------------------------
# Name() -- the field name exactly as parsed, with its original casing.
sub Name()
{   my $self = shift;
    return $self->[0];
}
#------------------------------------------
# The complete folded field.  In scalar context, one string "Name:Body"
# (body retains its internal fold newlines).  In list context, the folded
# lines with the name prepended to the first one.
sub folded()
{ my $self = shift;
return $self->[0].':'.$self->[1]
unless wantarray;
my @lines = $self->foldedBody;
my $first = $self->[0]. ':'. shift @lines;
($first, @lines);
}
#------------------------------------------
# unfoldedBody([BODY, OPTIONS])
#
# Accessor for the body as a single unfolded line.  With arguments, re-folds
# the supplied body under this field's name and stores it first; always
# returns the unfolded form of the stored body.
sub unfoldedBody($;@)
{ my $self = shift;
$self->[1] = $self->fold($self->[0], @_)
if @_;
$self->unfold($self->[1]);
}
#------------------------------------------
# foldedBody([BODY])
#
# Accessor for the body in folded (stored) form.  With an argument, replaces
# the stored body.  Returns the folded body: as one string in scalar
# context, or split on line starts (m/^/) into its lines in list context.
sub foldedBody($)
{ my ($self, $body) = @_;
if(@_==2) { $self->[1] = $body }
else { $body = $self->[1] }
wantarray ? (split m/^/, $body) : $body;
}
#------------------------------------------
# For performance reasons only
# print([FILEHANDLE])
#
# Writes the raw "Name:Body" text to FILEHANDLE (default: the currently
# selected handle).  Uses the builtin print for real globs and a method
# call otherwise (IO::Handle objects; a handle *name* returned by select()
# also dispatches via the filehandle-method mechanism).  Returns $self.
sub print(;$)
{ my $self = shift;
my $fh = shift || select;
if(ref $fh eq 'GLOB') { print $fh $self->[0].':'.$self->[1] }
else { $fh->print($self->[0].':'.$self->[1]) }
$self;
}
#------------------------------------------
# Module must end with a true value for require/use.
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Mail/Message/Field/Fast.pm | Perl | mit | 2,265 |
package Prophet::CLI::ProgressBar;
{
$Prophet::CLI::ProgressBar::VERSION = '0.751';
}
use Any::Moose 'Role';
use Time::Progress;
use Params::Validate ':all';
# progress_bar(max => $total [, format => $fmt])
#
# Returns a closure that advances an internal counter by one on each call
# and prints a Time::Progress report for it.  The default format ends in
# "\r" so successive calls redraw the same terminal line.  'max' is the
# total number of steps (required); 'format' is a Time::Progress template.
sub progress_bar {
my $self = shift;
my %args = validate(
@_,
{
max => 1,
format =>
{ optional => 1, default => "%30b %p %L (%E remaining)\r" }
}
);
my $bar = Time::Progress->new();
$bar->attr( max => $args{max} );
my $bar_count = 0;
my $format = $args{format};
return sub {
# enable autoflush (locally) so the \r redraw appears immediately
local $| = 1;
print $bar->report( $format, ++$bar_count );
}
}
no Any::Moose 'Role';
1;
__END__
=pod
=head1 NAME
Prophet::CLI::ProgressBar
=head1 VERSION
version 0.751
=head1 AUTHORS
=over 4
=item *
Jesse Vincent <jesse@bestpractical.com>
=item *
Chia-Liang Kao <clkao@bestpractical.com>
=item *
Christine Spang <christine@spang.cc>
=back
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2009 by Best Practical Solutions.
This is free software, licensed under:
The MIT (X11) License
=head1 BUGS AND LIMITATIONS
You can make new bug reports, and view existing ones, through the
web interface at L<https://rt.cpan.org/Public/Dist/Display.html?Name=Prophet>.
=head1 CONTRIBUTORS
=over 4
=item *
Alex Vandiver <alexmv@bestpractical.com>
=item *
Casey West <casey@geeknest.com>
=item *
Cyril Brulebois <kibi@debian.org>
=item *
Florian Ragwitz <rafl@debian.org>
=item *
Ioan Rogers <ioanr@cpan.org>
=item *
Jonas Smedegaard <dr@jones.dk>
=item *
Kevin Falcone <falcone@bestpractical.com>
=item *
Lance Wicks <lw@judocoach.com>
=item *
Nelson Elhage <nelhage@mit.edu>
=item *
Pedro Melo <melo@simplicidade.org>
=item *
Rob Hoelz <rob@hoelz.ro>
=item *
Ruslan Zakirov <ruz@bestpractical.com>
=item *
Shawn M Moore <sartak@bestpractical.com>
=item *
Simon Wistow <simon@thegestalt.org>
=item *
Stephane Alnet <stephane@shimaore.net>
=item *
Unknown user <nobody@localhost>
=item *
Yanick Champoux <yanick@babyl.dyndns.org>
=item *
franck cuny <franck@lumberjaph.net>
=item *
robertkrimen <robertkrimen@gmail.com>
=item *
sunnavy <sunnavy@bestpractical.com>
=back
=cut
| gitpan/Prophet | lib/Prophet/CLI/ProgressBar.pm | Perl | mit | 2,251 |
#!/usr/local/ensembl/bin/perl -w
use strict;
use DBI;
use Getopt::Long;
use Bio::EnsEMBL::Compara::Production::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Compara::GenomeDB;
use Bio::EnsEMBL::Analysis;
use Bio::EnsEMBL::DBLoader;
use Bio::EnsEMBL::Hive;
use Bio::EnsEMBL::Compara::MethodLinkSpeciesSet;
my $conf_file;
my %analysis_template;
my @speciesList = ();
my @uniprotList = ();
my %hive_params ;
my %conservation_score_params;
my %engine_params;
my %compara_conf = ();
#$compara_conf{'-user'} = 'ensadmin';
$compara_conf{'-port'} = 3306;
my $import_alignment_params;
my $alignment_params;
my ($help, $host, $user, $pass, $dbname, $port, $compara_conf, $adaptor, $ensembl_genomes);
my ($subset_id, $genome_db_id, $prefix, $fastadir, $verbose);
#list of compara tables to be changed to InnoDB
my @dna_pipeline_tables = qw(genomic_align_block genomic_align genomic_align_group genomic_align_tree sequence dnafrag_region constrained_element conservation_score);
# ok this is a hack, but I'm going to pretend I've got an object here
# by creating a blessed hash ref and passing it around like an object
# this is to avoid using global variables in functions, and to consolidate
# the globals into a nice '$self' package
my $self = bless {};
GetOptions('help' => \$help,
'conf=s' => \$conf_file,
'dbhost=s' => \$host,
'dbport=i' => \$port,
'dbuser=s' => \$user,
'dbpass=s' => \$pass,
'dbname=s' => \$dbname,
'ensembl_genomes' => \$ensembl_genomes,
'v' => \$verbose,
);
if ($help) { usage(); }
Bio::EnsEMBL::Registry->no_version_check(1);
$self->parse_conf($conf_file);
if($host) { $compara_conf{'-host'} = $host; }
if($port) { $compara_conf{'-port'} = $port; }
if($dbname) { $compara_conf{'-dbname'} = $dbname; }
if($user) { $compara_conf{'-user'} = $user; }
if($pass) { $compara_conf{'-pass'} = $pass; }
unless(defined($compara_conf{'-host'}) and defined($compara_conf{'-user'}) and defined($compara_conf{'-dbname'})) {
print "\nERROR : must specify host, user, and database to connect to compara\n\n";
usage();
}
$self->{'comparaDBA'} = new Bio::EnsEMBL::Compara::Production::DBSQL::DBAdaptor(%compara_conf);
$self->{'hiveDBA'} = new Bio::EnsEMBL::Hive::DBSQL::DBAdaptor(-DBCONN => $self->{'comparaDBA'}->dbc);
$self->{'analysisStatsDBA'} = $self->{'hiveDBA'}->get_AnalysisStatsAdaptor;
if (%hive_params) {
if (defined($hive_params{'hive_output_dir'})) {
die("\nERROR!! hive_output_dir doesn't exist, can't configure\n ", $hive_params{'hive_output_dir'} , "\n")
if(($hive_params{'hive_output_dir'} ne "") and !(-d $hive_params{'hive_output_dir'}));
$self->{'comparaDBA'}->get_MetaContainer->delete_key('hive_output_dir');
$self->{'comparaDBA'}->get_MetaContainer->store_key_value('hive_output_dir', $hive_params{'hive_output_dir'});
}
if (defined($hive_params{'name'})) {
$self->{'comparaDBA'}->get_MetaContainer->delete_key('name');
$self->{'comparaDBA'}->get_MetaContainer->store_key_value('name', $hive_params{'name'});
}
}
$self->set_storage_engine();
#load analysis_data
$self->prepareLowCoverageAlignerSystem;
foreach my $speciesPtr (@speciesList) {
$self->submitGenome($speciesPtr);
}
$self->setup_pipeline();
#$self->createImportAlignmentAnalysis;
exit(0);
#######################
#
# subroutines
#
#######################
# Prints the command-line synopsis to STDOUT and terminates the script with
# exit status 1.  Called for -help and for fatal option errors.
sub usage {
print "loadLowCoverageAlignerSystem.pl [options]\n";
print " -help : print this help\n";
print " -conf <path> : config file describing compara, templates, and external genome databases\n";
print " -dbhost <machine> : compara mysql database host <machine>\n";
print " -dbport <port#> : compara mysql port number\n";
print " -dbname <name> : compara mysql database <name>\n";
print " -dbuser <name> : compara mysql connection user <name>\n";
print " -dbpass <pass> : compara mysql connection password\n";
print " -ensembl_genomes : use ensembl genomes specific code\n";
# NOTE(review): the banner still names 'comparaLoadGenomes.pl' -- presumably
# inherited from the script this one was derived from; confirm before fixing.
print "comparaLoadGenomes.pl v1.2\n";
exit(1);
}
# parse_conf($conf_file)
#
# Loads the configuration file (a Perl file evaluated with 'do' that must
# return an arrayref of hashrefs, each tagged with a TYPE key) and scatters
# each block into the corresponding file-scoped configuration variable.
# Unknown TYPEs are silently ignored.  No-op if the file does not exist.
# NOTE: 'do $conf_file' executes arbitrary Perl from the config path.
sub parse_conf {
my $self = shift;
my($conf_file) = shift;
$self->{'set_internal_ids'} = 0;
if($conf_file and (-e $conf_file)) {
#read configuration file from disk
my @conf_list = @{do $conf_file};
foreach my $confPtr (@conf_list) {
# TYPE selects the destination; it is removed so only payload keys remain.
my $type = $confPtr->{TYPE};
delete $confPtr->{TYPE};
print("HANDLE type $type\n") if($verbose);
if($type eq 'COMPARA') {
%compara_conf = %{$confPtr};
}
elsif($type eq 'SPECIES') {
# Multiple SPECIES blocks accumulate; each is submitted as a genome_db.
push @speciesList, $confPtr;
}
elsif($type eq 'HIVE') {
%hive_params = %{$confPtr};
}
elsif($type eq 'IMPORT_ALIGNMENT') {
%$import_alignment_params = %{$confPtr};
}
elsif($type eq 'LOW_COVERAGE_GENOME_ALIGNMENT') {
%$alignment_params = %{$confPtr};
}
elsif($type eq 'CONSERVATION_SCORE') {
# Only one such block is permitted per configuration file.
die "You cannot have more than one CONSERVATION_SCORE block in your configuration file"
if (%conservation_score_params);
%conservation_score_params = %{$confPtr};
}
elsif($type eq 'SET_INTERNAL_IDS') {
$self->{'set_internal_ids'} = 1;
}
elsif($type eq 'ENGINE') {
%engine_params = %{$confPtr};
}
}
}
}
# submitGenome($species)
#
# Registers one external (core) genome database with the compara database:
# connects to it via a locator string (taken from the config or assembled
# from its parts), reads taxon id / production name / assembly / genebuild
# from the core meta table, stores a GenomeDB row, and records the locator
# in the genome_db_extn side table.  On connection failure it warns and
# returns without storing anything.
# NOTE(review): throw() is used below but no Bio::EnsEMBL::Utils::Exception
# import is visible in this chunk -- presumably exported by one of the
# loaded Bio::EnsEMBL modules; confirm.
sub submitGenome
{
my $self = shift;
my $species = shift; #hash reference
print("SubmitGenome for ".$species->{abrev}."\n") if($verbose);
#
# connect to external genome database
#
my $genomeDBA = undef;
my $locator = $species->{dblocator};
unless($locator) {
print(" dblocator not specified, building one\n") if($verbose);
# Assemble "Module/host=...;port=...;user=...;..." from individual keys.
$locator = $species->{module}."/host=".$species->{host};
$species->{port} && ($locator .= ";port=".$species->{port});
$species->{user} && ($locator .= ";user=".$species->{user});
$species->{pass} && ($locator .= ";pass=".$species->{pass});
$species->{dbname} && ($locator .= ";dbname=".$species->{dbname});
$species->{species} && ($locator .= ";species=".$species->{species});
$species->{species_id} && ($locator .=";species_id=".$species->{species_id});
}
# Drop idle connections: many genome databases may be registered in one run.
$locator .= ";disconnect_when_inactive=1";
print(" locator = $locator\n") if($verbose);
eval {
$genomeDBA = Bio::EnsEMBL::DBLoader->new($locator);
};
unless($genomeDBA) {
print("ERROR: unable to connect to genome database $locator\n\n");
return;
}
my $meta = $genomeDBA->get_MetaContainer;
my $taxon_id = $meta->get_taxonomy_id;
#Use meta->get_production_name if it exists and fail if it doesn't.
#If this causes issues, we may need to revert back to using the other
#methods of setting the genome_db name.
my $genome_name;
if (defined $meta->get_production_name) {
print "Using meta production_name \n" if $verbose;
$genome_name = $meta->get_production_name;
} else {
throw("The production_name entry in the " . $species->{dbname} . " meta table has not been set. This needs to be added.");
}
# Assembly name comes from the first (highest-ranked) coordinate system.
my ($cs) = @{$genomeDBA->get_CoordSystemAdaptor->fetch_all()};
my $assembly = $cs->version;
$assembly = '-undef-' if ($ensembl_genomes && !$cs->version);
my $genebuild = ($meta->get_genebuild or "");
#EDIT because the meta container always returns a value
if ($ensembl_genomes && 1 == length($genebuild)) {
$genebuild = '' if (1 == $genebuild);
}
# Sanity check: the core DB's taxon must match the one declared in the conf.
if($species->{taxon_id} && ($taxon_id ne $species->{taxon_id})) {
throw("$genome_name taxon_id=$taxon_id not as expected ". $species->{taxon_id});
}
my $genome = Bio::EnsEMBL::Compara::GenomeDB->new();
$genome->taxon_id($taxon_id);
$genome->name($genome_name);
$genome->assembly($assembly);
$genome->genebuild($genebuild);
$genome->locator($locator);
$genome->dbID($species->{'genome_db_id'}) if(defined($species->{'genome_db_id'}));
if($verbose) {
print(" about to store genomeDB\n");
print(" taxon_id = '".$genome->taxon_id."'\n");
print(" name = '".$genome->name."'\n");
print(" assembly = '".$genome->assembly."'\n");
print(" genebuild = '".$genome->genebuild."'\n");
print(" genome_db id=".$genome->dbID."\n");
}
$self->{'comparaDBA'}->get_GenomeDBAdaptor->store($genome);
$species->{'genome_db'} = $genome;
print " ", $genome->name, " STORED as genome_db id = ", $genome->dbID, "\n";
#
# now fill table genome_db_extra
#
# Upsert into genome_db_extn by hand (no adaptor exists for this table);
# failures are swallowed by the eval, e.g. when the table is absent.
eval {
my ($sth, $sql);
$sth = $self->{'comparaDBA'}->dbc->prepare("SELECT genome_db_id FROM genome_db_extn
WHERE genome_db_id = ".$genome->dbID);
$sth->execute;
my $dbID = $sth->fetchrow_array();
$sth->finish();
if($dbID) {
$sql = "UPDATE genome_db_extn SET " .
"phylum='" . $species->{phylum}."'".
",locator='".$locator."'".
" WHERE genome_db_id=". $genome->dbID;
}
else {
$sql = "INSERT INTO genome_db_extn SET " .
" genome_db_id=". $genome->dbID.
",phylum='" . $species->{phylum}."'".
",locator='".$locator."'";
}
print("$sql\n") if($verbose);
$sth = $self->{'comparaDBA'}->dbc->prepare( $sql );
$sth->execute();
$sth->finish();
print("done SQL\n") if($verbose);
};
}
#####################################################################
##
## set_storage_engine
##
#####################################################################
# set_storage_engine()
#
# Applies the ENGINE configuration block: 'dna_pipeline' switches every
# table listed in @dna_pipeline_tables to the requested MySQL storage
# engine in one go; any other key names a single table to convert.
# Dies if an engine other than InnoDB/MyISAM is requested.  No-op when no
# ENGINE block was configured.
# FIX: removed a stray debug print ('engine2 ...') left in the error path,
# and factored the duplicated engine validation into a helper.
sub set_storage_engine {
    my ($self) = @_;
    return unless %engine_params;
    if (defined($engine_params{'dna_pipeline'}) && $engine_params{'dna_pipeline'} ne "") {
        # Change all DNA-pipeline tables to the requested ENGINE.
        my $engine = $engine_params{'dna_pipeline'};
        _validate_engine($engine);
        foreach my $table (@dna_pipeline_tables) {
            my $sql = "ALTER TABLE $table ENGINE=$engine";
            $self->{'hiveDBA'}->dbc->do($sql);
        }
    }
    # Individually-named tables (every key other than the bulk switch).
    foreach my $table (keys %engine_params) {
        next if ($table eq 'dna_pipeline' || $table eq "" || $table eq "TYPE");
        my $engine = $engine_params{$table};
        _validate_engine($engine);
        my $sql = "ALTER TABLE $table ENGINE=$engine";
        $self->{'hiveDBA'}->dbc->do($sql);
    }
}

# Dies unless $engine is one of the two supported MySQL storage engines
# (case-insensitive).
sub _validate_engine {
    my ($engine) = @_;
    if (lc($engine) ne "innodb" && lc($engine) ne "myisam") {
        die ("\nERROR!! $engine is not supported. ENGINE type must be either InnoDB or MyISAM\n");
    }
}
#
# Populate analysis_data
#
# prepareLowCoverageAlignerSystem()
#
# Loads the three inputs of the low-coverage aligner into the hive's
# analysis_data table, recording the resulting dbIDs on $self:
#   - the guide tree (tree_string or tree_file),
#   - the species taxon tree (taxon_tree_string or taxon_tree_file),
#   - the pairwise-alignment sources: either literal url/mlss hash strings
#     (pairwise_string / pairwise_file) or a single pairwise_url combined
#     with a comma-separated pairwise_mlss list.
# In each pair of alternatives, the *_string key takes precedence.
sub prepareLowCoverageAlignerSystem {
my $self = shift;
#
#tree
#
my $tree_string;
if (defined $alignment_params->{'tree_string'}) {
$tree_string = $alignment_params->{'tree_string'};
} elsif (defined $alignment_params->{'tree_file'}) {
# Slurp the whole tree file into one string.
my $tree_file = $alignment_params->{'tree_file'};
open TREE_FILE, $tree_file || throw("Can not open $tree_file");
$tree_string = join("", <TREE_FILE>);
close TREE_FILE;
}
if ($tree_string) {
$self->{'tree_analysis_data_id'} =
$self->{'hiveDBA'}->get_AnalysisDataAdaptor->store_if_needed($tree_string);
}
#
#taxon_tree
#
my $taxon_tree_string;
if (defined $alignment_params->{'taxon_tree_string'}) {
$taxon_tree_string = $alignment_params->{'taxon_tree_string'};
} elsif (defined $alignment_params->{'taxon_tree_file'}) {
my $taxon_tree_file = $alignment_params->{'taxon_tree_file'};
open TREE_FILE, $taxon_tree_file || throw("Can not open $taxon_tree_file");
$taxon_tree_string = join("", <TREE_FILE>);
close TREE_FILE;
}
if ($taxon_tree_string) {
$self->{'taxon_tree_analysis_data_id'} =
$self->{'hiveDBA'}->get_AnalysisDataAdaptor->store_if_needed($taxon_tree_string);
}
#
#pairwise data
#Either define all the url/mlss_id pairs in a string (pairwise_string)
#or define all the url/mlss_id pairs in a file (pairwise_file)
#or define a single url and a list of mlss_ids in that database
#
# NOTE(review): string, file and url/mlss sources are concatenated, not
# mutually exclusive -- confirm that combining them is intended.
my $pairwise_string;
if (defined $alignment_params->{'pairwise_string'}) {
$pairwise_string = $alignment_params->{'pairwise_string'};
}
if (defined $alignment_params->{'pairwise_file'}) {
my $pairwise_file = $alignment_params->{'pairwise_file'};
open PAIRWISE_FILE, $pairwise_file || throw("Can not open $pairwise_file");
$pairwise_string .= join("", <PAIRWISE_FILE>);
close PAIRWISE_FILE;
}
if (defined $alignment_params->{'pairwise_url'}) {
throw("Need to define list of method_link_species_set_ids")
if (!defined $alignment_params->{'pairwise_mlss'});
# One hash literal per mlss id, all pointing at the same compara db url.
foreach my $mlss (split(",", $alignment_params->{'pairwise_mlss'})) {
$pairwise_string .= " {compara_db_url=>'" .$alignment_params->{'pairwise_url'} . "',method_link_species_set_id=>$mlss} ";
}
}
if ($pairwise_string) {
$self->{'pairwise_analysis_data_id'} =
$self->{'hiveDBA'}->get_AnalysisDataAdaptor->store_if_needed($pairwise_string);
}
}
# Wire up the pipeline: create each analysis in order and add the hive
# control/dataflow rules between consecutive steps.
# Note: the empty "()" prototype was removed -- it contradicted the fact
# that this sub is called as a method with $self (prototypes are only
# ignored on method calls, so it was a trap for direct callers).
sub setup_pipeline {
    #yes this should be done with a config file and a loop, but...
    my $self = shift;
    my $dataflowRuleDBA = $self->{'hiveDBA'}->get_DataflowRuleAdaptor;
    my $ctrlRuleDBA = $self->{'hiveDBA'}->get_AnalysisCtrlRuleAdaptor;

    #ANALYSIS 1 - SetInternalIds (optional)
    my $setInternalIdsAnalysis;
    if ($self->{'set_internal_ids'}) {
        $setInternalIdsAnalysis = $self->createSetInternalIdsAnalysis;
    }

    #ANALYSIS 2 - ImportAlignment
    my $importAlignmentAnalysis = $self->createImportAlignmentAnalysis;
    if ($self->{'set_internal_ids'}) {
        $ctrlRuleDBA->create_rule($setInternalIdsAnalysis,$importAlignmentAnalysis);
    }

    #ANALYSIS 3 - CreateLowCoverageJobs
    my $lowCoverageJobsAnalysis = $self->createLowCoverageJobsAnalysis;
    $ctrlRuleDBA->create_rule($importAlignmentAnalysis,$lowCoverageJobsAnalysis);

    #ANALYSIS 4 - LowCoverageGenomeAlignment
    my $lowCoverageAnalysis = $self->createLowCoverageAnalysis;
    $ctrlRuleDBA->create_rule($lowCoverageJobsAnalysis,$lowCoverageAnalysis);

    #ANALYSIS 5 - DeleteAlignment
    my $deleteAlignmentAnalysis = $self->createDeleteAlignmentAnalysis;
    $ctrlRuleDBA->create_rule($lowCoverageAnalysis,$deleteAlignmentAnalysis);

    #ANALYSIS 6 - UpdateMaxAlignmentLength
    my $updateMaxAlignmentLengthAnalysis = $self->createUpdateMaxAlignmentLengthAnalysis;
    $ctrlRuleDBA->create_rule($deleteAlignmentAnalysis, $updateMaxAlignmentLengthAnalysis);

    #ANALYSIS 7 - Conservation scores.
    # create_conservation_score_analysis() returns undef when no
    # conservation score parameters are configured, so only create the
    # rules when the analysis actually exists (the original passed undef
    # to create_rule unconditionally in that case).
    my $conservation_score_analysis = $self->create_conservation_score_analysis();
    if (defined $conservation_score_analysis) {
        $dataflowRuleDBA->create_rule($lowCoverageAnalysis, $conservation_score_analysis);
        $ctrlRuleDBA->create_rule($updateMaxAlignmentLengthAnalysis, $conservation_score_analysis);
    }

    #add entry into meta table linking gerp to it's multiple aligner mlss_id
    if (defined($alignment_params->{gerp_mlss_id})) {
        my $key = "gerp_" . $alignment_params->{gerp_mlss_id};
        my $value = $alignment_params->{method_link_species_set_id};
        $self->{'comparaDBA'}->get_MetaContainer->store_key_value($key, $value);
    }

    #ANALYSIS 8 - CreateNeighbourNodesJobs: blocked on conservation scores
    #if they exist, otherwise on UpdateMaxAlignmentLength.
    my $createNeighbourNodesJobsAnalysis = $self->createNeighbourNodesJobsAnalysis;
    if (defined $conservation_score_analysis) {
        $ctrlRuleDBA->create_rule($conservation_score_analysis,$createNeighbourNodesJobsAnalysis);
    } else {
        $ctrlRuleDBA->create_rule($updateMaxAlignmentLengthAnalysis,$createNeighbourNodesJobsAnalysis);
    }

    #ANALYSIS 9 - SetNeighbourNodes
    my $createSetNeighbourNodesAnalysis = $self->createSetNeighbourNodesAnalysis;
    $ctrlRuleDBA->create_rule($createNeighbourNodesJobsAnalysis, $createSetNeighbourNodesAnalysis);
}
# Create and store the ImportAlignment analysis and seed its single job.
# The job's input_id carries from_db_url and method_link_species_set_id
# from the global $import_alignment_params. Returns the stored Analysis,
# which is also cached on $self->{'importAlignmentAnalysis'}.
sub createImportAlignmentAnalysis {
    my $self = shift;

    #
    # Creating ImportAlignment analysis
    #
    my $stats;
    my $importAlignmentAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'ImportAlignment',
        -module => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::ImportAlignment',
        # -parameters => ""
    );

    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($importAlignmentAnalysis);
    # batch_size/hive_capacity of 1: at most one worker, one job at a time
    $stats = $importAlignmentAnalysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(1);
    $stats->update();

    $self->{'importAlignmentAnalysis'} = $importAlignmentAnalysis;

    # input_id is a stringified Perl hash, wrapped in {} below
    my $input_id = "from_db_url=>'" . $import_alignment_params->{'from_db_url'} . "',method_link_species_set_id=>" . $import_alignment_params->{'method_link_species_set_id'};
    Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor->CreateNewJob(
        -input_id => "{$input_id}",
        -analysis => $importAlignmentAnalysis
    );
    return $importAlignmentAnalysis;
}
# Create and store the CreateLowCoverageGenomeAlignmentJobs analysis and
# seed its single job. The input_id bundles the base/new mlss ids, the
# analysis_data dbIDs prepared by prepareLowCoverageAlignerSystem() and
# the reference species name. Returns the stored Analysis object.
sub createLowCoverageJobsAnalysis {
    my $self = shift;

    #
    # Creating CreateLowCoverageGenomeAlignmentJobs
    #
    my $lc_stats;
    my $createLowCoverageJobsAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'CreateLowCoverageGenomeAlignmentJobs',
        -module => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::CreateLowCoverageJobs',
        # -parameters => ""
    );

    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($createLowCoverageJobsAnalysis);
    $lc_stats = $createLowCoverageJobsAnalysis->stats;
    $lc_stats->batch_size(1);
    $lc_stats->hive_capacity(-1); #unlimited
    $lc_stats->update();

    $self->{'createLowCoverageJobsAnalysis'} = $createLowCoverageJobsAnalysis;

    # input_id is a stringified Perl hash of the job parameters;
    # the *_analysis_data_id values must have been set beforehand by
    # prepareLowCoverageAlignerSystem()
    my $input_id = "base_method_link_species_set_id=>" . $import_alignment_params->{'method_link_species_set_id'} .
        ",new_method_link_species_set_id=>" . $alignment_params->{'method_link_species_set_id'} .
        ",tree_analysis_data_id=>" . $self->{'tree_analysis_data_id'} .
        ",taxon_tree_analysis_data_id=>" . $self->{'taxon_tree_analysis_data_id'} .
        ",pairwise_analysis_data_id=>" . $self->{'pairwise_analysis_data_id'} .
        ",reference_species=>'" . $alignment_params->{'reference_species'} . "'";
    Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor->CreateNewJob(
        -input_id => "{$input_id}",
        -analysis => $createLowCoverageJobsAnalysis
    );
    return $createLowCoverageJobsAnalysis;
}
# Create and store the LowCoverageGenomeAlignment analysis. Logic name and
# module may be overridden through $alignment_params (see
# set_logic_name_and_module); max_block_size is passed as an analysis
# parameter. Returns the stored Analysis, cached on $self.
sub createLowCoverageAnalysis {
    # Fix: the original never shifted $self off @_ even though it is
    # called as a method and dereferences $self below (every sibling
    # create*Analysis sub does this shift).
    my $self = shift;

    #
    # Creating LowCoverageGenomeAlignment analysis
    #
    my ($logic_name, $module) = set_logic_name_and_module(
        $alignment_params, "LowCoverageGenomeAlignment");
    my $parameters = "max_block_size=>" . $alignment_params->{'max_block_size'};

    my $lowCoverageAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => $logic_name,
        -module     => $module,
        -parameters => "{$parameters}"
    );
    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($lowCoverageAnalysis);

    # Up to 30 parallel workers, one job per batch
    my $stats = $lowCoverageAnalysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(30);
    $stats->update();

    $self->{'lowCoverageGenomeAlignmentAnalysis'} = $lowCoverageAnalysis;
    return $lowCoverageAnalysis;
}
# Create and store the DeleteAlignment analysis and seed its single job,
# keyed on the imported alignment's method_link_species_set_id. Returns
# the stored Analysis, cached on $self->{'deleteAlignmentAnalysis'}.
sub createDeleteAlignmentAnalysis {
    my $self = shift;

    #
    # Creating DeleteAlignment analysis
    #
    my $stats;
    my $deleteAlignmentAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'DeleteAlignment',
        -module => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::DeleteAlignment',
        # -parameters => ""
    );

    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($deleteAlignmentAnalysis);
    # single worker, one job per batch
    $stats = $deleteAlignmentAnalysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(1);
    $stats->update();

    $self->{'deleteAlignmentAnalysis'} = $deleteAlignmentAnalysis;

    my $input_id = "method_link_species_set_id=>" . $import_alignment_params->{'method_link_species_set_id'};
    Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor->CreateNewJob(
        -input_id => "{$input_id}",
        -analysis => $deleteAlignmentAnalysis
    );
    return $deleteAlignmentAnalysis;
}
# Create and store the optional SetInternalIds analysis and seed its single
# job, keyed on the new alignment's method_link_species_set_id (note: uses
# $alignment_params here, unlike the import/delete jobs which use
# $import_alignment_params). Returns the stored Analysis.
sub createSetInternalIdsAnalysis {
    my $self = shift;

    #
    # Creating SetInternalIds analysis
    #
    my $stats;
    my $setInternalIdsAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'SetInternalIds',
        -module => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::SetInternalIds',
        # -parameters => ""
    );

    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($setInternalIdsAnalysis);
    # single worker, one job per batch
    $stats = $setInternalIdsAnalysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(1);
    $stats->update();

    $self->{'setInternalIdsAnalysis'} = $setInternalIdsAnalysis;

    my $input_id = "method_link_species_set_id=>" . $alignment_params->{'method_link_species_set_id'};
    Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor->CreateNewJob(
        -input_id => "{$input_id}",
        -analysis => $setInternalIdsAnalysis
    );
    return $setInternalIdsAnalysis;
}
# Create and store the UpdateMaxAlignmentLength analysis and seed its
# single job (input_id is just "1" -- the runnable takes no parameters).
# Returns the stored Analysis, cached on $self.
sub createUpdateMaxAlignmentLengthAnalysis {
    my $self = shift;

    #
    # Creating updateMaxAlignmentLength analysis
    #
    my $stats;
    my $updateMaxAlignmentLengthAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'UpdateMaxAlignmentLength',
        -module => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::UpdateMaxAlignmentLength',
        # -parameters => ""
    );

    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($updateMaxAlignmentLengthAnalysis);
    # single worker, one job per batch
    $stats = $updateMaxAlignmentLengthAnalysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(1);
    $stats->update();

    $self->{'updateMaxAlignmentLengthAnalysis'} = $updateMaxAlignmentLengthAnalysis;

    # placeholder input_id; the runnable ignores it
    my $input_id = 1;
    Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor->CreateNewJob(
        -analysis => $updateMaxAlignmentLengthAnalysis,
        -input_id => $input_id,
    );
    return $updateMaxAlignmentLengthAnalysis;
}
#####################################################################
##
## create_conservation_score_analysis
##
#####################################################################
# Create and store the GERP conservation score analysis.
# Returns undef when no conservation score parameters were configured;
# callers (setup_pipeline) must handle that. Side effects: ensures the
# GERP method_link rows exist, stores one MethodLinkSpeciesSet per method
# link type over @speciesList, and records the conservation score mlss id
# in $alignment_params->{gerp_mlss_id} for the meta-table entry.
sub create_conservation_score_analysis {
    my ($self) = @_;

    return undef if (!%conservation_score_params);

    my ($logic_name, $module) = set_logic_name_and_module(
        \%conservation_score_params, "Gerp");

    # Default method_link ids/types; may be overridden below via the
    # optional 'method_links' config entry (a list of [id, type] pairs)
    my ($method_link_id, $method_link_type);
    my ($method_link_id_cs, $method_link_type_cs) = qw(501 GERP_CONSERVATION_SCORE);
    my ($method_link_id_ce, $method_link_type_ce) = qw(11 GERP_CONSTRAINED_ELEMENT);
    if (defined $conservation_score_params{'method_links'}) {
        foreach my $method_link (@{$conservation_score_params{'method_links'}}) {
            ($method_link_id, $method_link_type) = @$method_link;
            if ($method_link_type eq "GERP_CONSERVATION_SCORE") {
                $method_link_id_cs = $method_link_id;
                $method_link_type_cs = $method_link_type;
            }
            if ($method_link_type eq "GERP_CONSTRAINED_ELEMENT") {
                $method_link_id_ce = $method_link_id;
                $method_link_type_ce = $method_link_type;
            }
        }
    }

    # Ensure the two method_link rows exist (INSERT IGNORE keeps existing
    # rows). Values are interpolated directly; they come from config
    # defaults, not untrusted input -- would still be safer parameterized.
    my $sql = "INSERT ignore into method_link SET method_link_id=$method_link_id_cs, type='$method_link_type_cs'";
    $self->{'hiveDBA'}->dbc->do($sql);
    $sql = "INSERT ignore into method_link SET method_link_id=$method_link_id_ce, type='$method_link_type_ce'";
    $self->{'hiveDBA'}->dbc->do($sql);

    # One MethodLinkSpeciesSet per method link type, over the full species list
    foreach my $method_link_type ($method_link_type_cs, $method_link_type_ce) {
        # NOTE(review): indirect object syntax; ...::MethodLinkSpeciesSet->new is preferred
        my $mlss = new Bio::EnsEMBL::Compara::MethodLinkSpeciesSet;
        $mlss->method_link_type($method_link_type);
        my $gdbs = [];
        foreach my $species (@speciesList) {
            my $name = $species->{species};
            my $gdb = $self->{'comparaDBA'}->get_GenomeDBAdaptor->fetch_by_name_assembly($name);
            push @{$gdbs}, $gdb;
        }
        $mlss->species_set($gdbs);

        #use method_link_species_set id from config file if defined
        if ($method_link_type eq "GERP_CONSERVATION_SCORE") {
            if (defined($conservation_score_params{'method_link_species_set_id_cs'})) {
                $mlss->dbID($conservation_score_params{'method_link_species_set_id_cs'});
            }
        } else {
            if (defined($conservation_score_params{'method_link_species_set_id_ce'})) {
                $mlss->dbID($conservation_score_params{'method_link_species_set_id_ce'});
            }
        }
        $self->{'comparaDBA'}->get_MethodLinkSpeciesSetAdaptor->store($mlss);

        #add gerp conservation score mlss id for use in
        #create_multiple_aligner_analysis to create entry into meta table
        if ($method_link_type eq "GERP_CONSERVATION_SCORE") {
            $alignment_params->{gerp_mlss_id} = $mlss->dbID;
        }
    }

    # Assemble the stringified-hash parameter list from the optional config keys
    my $parameters = "";
    if (defined $conservation_score_params{'param_file'}) {
        $parameters .= "param_file=>\'" . $conservation_score_params{'param_file'} ."\',";
    }
    if (defined $conservation_score_params{'window_sizes'}) {
        $parameters .= "window_sizes=>\'" . $conservation_score_params{'window_sizes'} ."\',";
    }
    if (defined $conservation_score_params{'tree_file'}) {
        $parameters .= "tree_file=>\'" . $conservation_score_params{'tree_file'} ."\',";
    }
    $parameters .= "constrained_element_method_link_type=>\'" . $method_link_type_ce ."\',";
    $parameters = "{$parameters}";

    #default program_version
    my $program_version = 2.1;
    if (defined $conservation_score_params{'program_version'}) {
        $program_version = $conservation_score_params{'program_version'};
    }
    #location of program_file
    my $program_file = "/software/ensembl/compara/gerp/GERPv2.1";
    if (defined $conservation_score_params{'program_file'}) {
        $program_file = $conservation_score_params{'program_file'};
    }

    my $conservation_score_analysis = Bio::EnsEMBL::Analysis->new(
        -logic_name => $logic_name,
        -module => $module,
        -parameters => $parameters,
        -program_version => $program_version,
        -program_file => $program_file
    );
    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($conservation_score_analysis);

    # Starts BLOCKED; released by the ctrl rules created in setup_pipeline
    my $stats = $conservation_score_analysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(60);
    $stats->status('BLOCKED');
    $stats->update();

    return $conservation_score_analysis;
}
# Create and store the CreateNeighbourNodesJobsAlignment analysis and seed
# its single job, keyed on the new alignment's method_link_species_set_id.
# Returns the stored Analysis, cached on $self.
sub createNeighbourNodesJobsAnalysis {
    my $self = shift;

    #
    # Creating SetNeighbourNodesJobs analysis
    #
    my $stats;
    my $createNeighbourNodesJobsAnalysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'CreateNeighbourNodesJobsAlignment',
        -module => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::CreateNeighbourNodesJobs',
        # -parameters => ""
    );

    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($createNeighbourNodesJobsAnalysis);
    # single worker, one job per batch
    $stats = $createNeighbourNodesJobsAnalysis->stats;
    $stats->batch_size(1);
    $stats->hive_capacity(1);
    $stats->update();

    $self->{'createNeighbourNodesJobsAnalysis'} = $createNeighbourNodesJobsAnalysis;

    my $input_id = "method_link_species_set_id=>" . $alignment_params->{'method_link_species_set_id'};
    Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor->CreateNewJob(
        -input_id => "{$input_id}",
        -analysis => $createNeighbourNodesJobsAnalysis
    );
    return $createNeighbourNodesJobsAnalysis;
}
# Create, store and configure the SetNeighbourNodes analysis (up to 15
# parallel workers, one job per batch). No job is seeded here: jobs for
# this analysis are produced by CreateNeighbourNodesJobsAlignment, which
# runs upstream of it in setup_pipeline. Returns the stored Analysis.
sub createSetNeighbourNodesAnalysis {
    my $self = shift;

    my $set_neighbour_nodes_analysis = Bio::EnsEMBL::Analysis->new(
        -db_version => '1',
        -logic_name => 'SetNeighbourNodes',
        -module     => 'Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::SetNeighbourNodes',
    );
    $self->{'hiveDBA'}->get_AnalysisAdaptor()->store($set_neighbour_nodes_analysis);

    my $worker_stats = $set_neighbour_nodes_analysis->stats;
    $worker_stats->batch_size(1);
    $worker_stats->hive_capacity(15);
    $worker_stats->update();

    $self->{'createSetNeighbourNodesAnalysis'} = $set_neighbour_nodes_analysis;
    return $set_neighbour_nodes_analysis;
}
#####################################################################
##
## set_logic_name_and_module
##
#####################################################################
# Resolve the logic_name and runnable module for an analysis: values
# supplied in the parameter hashref win, otherwise the logic_name falls
# back to the given default and the module to the GenomicAlignBlock
# runnable named after the (possibly overridden) logic_name.
sub set_logic_name_and_module {
    my ($config, $fallback_name) = @_;

    my $logic_name = defined $config->{'logic_name'}
        ? $config->{'logic_name'}
        : $fallback_name;

    my $module = defined $config->{'module'}
        ? $config->{'module'}
        : "Bio::EnsEMBL::Compara::Production::GenomicAlignBlock::$logic_name";

    return ($logic_name, $module);
}
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-compara/scripts/pipeline/loadLowCoverageAlignerSystem.pl | Perl | apache-2.0 | 29,132 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to parse gff3 files for clones. At the moment based off parsing NCBI clone db files
# but should work with any correctly formatted file. It is assumed currently that there is
# order in the file, i.e:
# clone_insert line
# clone_insert_start line
# clone_insert_end line
# The reason is that the clone name from the clone_insert line needs to be used with the start/end lines
# Might change this in future
use warnings;
use strict;
use feature 'say';
use Getopt::Long;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::IO::Parser::GFF3;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::MiscFeature;
use Bio::EnsEMBL::SimpleFeature;
use Bio::EnsEMBL::Analysis;
# --- Command-line configurable globals (populated by GetOptions below) ---
# Core (reference) database connection parameters:
my $host;
my $port;
my $user;
my $pass;
my $dbname;
my $coord_system_name; # Coord system to get the slices from (toplevel usually)
# Production database connection parameters:
my $prod_host;
my $prod_ruser = 'ensro'; # read-only user, used for the mysqldump backup
my $prod_wuser;
my $prod_pass;
my $prod_port;
my $prod_dbname;
my $prod_backup_path; # where to backup the prod db to
my $prod_create_id = 106182; # id of genebuilder in the PRODUCTION DB (so not genebuilder id)
my $prod_name; # species.production_name, if not passed in this is gotten from the production db
my $species_name; # E.g 'mouse'
# Input files and misc_set naming:
my $file; # The path the file with the clones
my $clone_state_file; # Path to state file. Get from ftp site e.g for mouse download ftp.ncbi.nih.gov/repository/clone/reports/Mus_musculus/clone_acstate_10090.out
my $set_code = "_clones"; # Will become 'rp24_clones' as code in misc_set
my $set_name = " clones"; # Will become 'RP24 mouse clones' as name in misc_set
my $set_desc = " library of clones for "; # Will become 'RP24 library of clones for mouse' as desc in misc_set
my $library_abbrev; # Clone library abbreviation, e.g. 'RP24'
my $clone_info_hash = {}; # Will store a variety of clone related info to pass stuff neatly into subroutine calls
my $set_code_char_lim = 25; # Current table character limit for the misc_set table code columm
# Write flags: nothing is stored unless these are switched on
my $write = 0; # For testing I guess
my $prod_write = 0; # also update the production db (backup + misc_set/analysis rows)
# Command-line options.
# Fix: every value-taking option needs a "=s"/"=i" spec -- without one,
# Getopt::Long treats the option as a boolean flag and the supplied value
# is silently ignored. This previously affected coord_system_name and all
# of the prod_* options; 'prod_host' additionally had no option entry at
# all, so the production host could never be set from the command line.
GetOptions(
    'host=s'              => \$host,
    'port=s'              => \$port,
    'user=s'              => \$user,
    'pass=s'              => \$pass,
    'dbname=s'            => \$dbname,
    'coord_system_name=s' => \$coord_system_name,
    'species_name=s'      => \$species_name,
    'prod_host=s'         => \$prod_host,
    'prod_wuser=s'        => \$prod_wuser,
    'prod_pass=s'         => \$prod_pass,
    'prod_port=s'         => \$prod_port,
    'prod_dbname=s'       => \$prod_dbname,
    'prod_backup_path=s'  => \$prod_backup_path,
    'prod_create_id=i'    => \$prod_create_id,
    'species_prod_name=s' => \$prod_name,
    'clone_file=s'        => \$file,
    'clone_state_file=s'  => \$clone_state_file,
    'set_code=s'          => \$set_code,
    'set_name=s'          => \$set_name,
    'set_desc=s'          => \$set_desc,
    'library_abbrev=s'    => \$library_abbrev,
    'ref_write!'          => \$write,
    'prod_write!'         => \$prod_write,
);
# Connection to the core db the clone features will be written to
my $db = Bio::EnsEMBL::DBSQL::DBAdaptor->new(
    -host => $host,
    -user => $user,
    -port => $port,
    -pass => $pass,
    -dbname => $dbname
);
# Connection to the production db (write user).
# NOTE(review): confirm $prod_host is actually populated from the command
# line -- its GetOptions entry appears to be missing above, in which case
# this connects with an undef host.
my $prod_db = Bio::EnsEMBL::DBSQL::DBAdaptor->new(
    -host => $prod_host,
    -user => $prod_wuser,
    -port => $prod_port,
    -pass => $prod_pass,
    -dbname => $prod_dbname
);
# Adaptors used throughout the script
my $slice_adaptor = $db->get_SliceAdaptor;
my $misc_feature_adaptor = $db->get_MiscFeatureAdaptor();
my $simple_feature_adaptor = $db->get_SimpleFeatureAdaptor();
my $misc_set_adaptor_prod = $prod_db->get_MiscSetAdaptor();
my $misc_set_adaptor_core = $db->get_MiscSetAdaptor();
# The library abbreviation is usually the first set of characters before a dot
# in the file name. So if the user does not define a library abbreviation then
# the script will try and pull it from the file name. If it still can't get it
# then throw. Fix: strip any leading directory path first -- matching
# /^([^\.]+)/ against the full path (e.g. /data/bMQ.foo.gff) would have
# produced "/data/bMQ" as the abbreviation.
unless($library_abbrev) {
    say "FILE: ".$file;
    (my $file_basename = $file) =~ s{.*/}{};
    if($file_basename =~ /^([^\.]+)/) {
        $library_abbrev = $1;
    }
    unless($library_abbrev) {
        throw("No library abbreviation was provided and the script failed to parse the library abbreviation ".
              "from the file name. It should follow standard NCBI format, e.g:\n".
              "bMQ.GCF_000001635.22.103.unique_concordant.gff\n\nor\n\n".
              "bMQ.GCF_000001635.22.103.unique_discordant.gff\n\nor\n\n".
              "bMQ.GCF_000001635.22.103.multiple.gff");
    }
}

# Code for misc_set entry. Will take the form 'rp24_clones'. The char limit on
# the table's code column is enforced here, so throw if the code is too long
$set_code = $library_abbrev.$set_code;
if(length($set_code) > $set_code_char_lim) {
    throw("The code generated for the misc set was ".length($set_code)." characters long, which is greater than ".
          "the current limit for the table, which is ".$set_code_char_lim." characters. The generated misc set code was:\n".$set_code);
}

# Similar to the above, but there shouldn't be any space issues
$set_name = $library_abbrev." ".$species_name.$set_name;
$set_desc = $library_abbrev.$set_desc.$species_name;
# Build a misc set object based on the above info (reused from the
# production db when one with this code already exists)
my $clone_set = create_clone_set($set_code,$set_name,$set_desc,$misc_set_adaptor_prod);

# This hash ($clone_info_hash) is just an easy way of putting a lot of the data together
# so that the arguments list for calling the subroutines are smaller
set_clone_info_hash($clone_info_hash,$file,$species_name,$library_abbrev,$clone_set);

# If a clone state file is provided then load it and put the states into a hash so they
# can be added as attributes
my $clone_states;
if($clone_state_file) {
    $clone_states = load_clone_states($clone_state_file);
} else {
    warning("You have not provided a clone state file, so no states will be added for the clones.\n".
            "This file is usually available on the ftp site. For examples the mouse combined file is:\n".
            "ftp://ftp.ncbi.nih.gov/repository/clone/reports/Mus_musculus/clone_acstate_10090.out");
}

# This is a hash that will hold the parent inserts for the ends based on the parent id
# It is used to put the clone name of the parent into the display label of the simple feature
my $parent_clones;

# This will be the array that holds both the MiscFeature and SimpleFeature objects. The contents
# of this array will be stored at the end (only if -ref_write was given)
my $clone_array;

# Open gff3 file and loop through it
my $gff_file = Bio::EnsEMBL::IO::Parser::GFF3->open($file);
# Main parse loop: one feature object per clone_insert (MiscFeature) or
# clone_insert_start/clone_insert_end (SimpleFeature) record
while($gff_file->next) {
    my $type = $gff_file->get_type;
    unless($type) {
        next;
    }
    if($type eq 'clone_insert' || $type eq 'clone_insert_start' || $type eq 'clone_insert_end') {
        my $seq_region_name = $gff_file->get_seqname;
        # strip NCBI's ref|...| wrapper from the seq region name
        $seq_region_name =~ s/^ref\|(.+)\|$/$1/;
        my $clone_name = $gff_file->get_attribute_by_name('Name');

        # This section of code checks to see if the line is a child line, if it is it changes $clone_name
        # to the value of the parent in the parent_clones hash. Otherwise the line itself is a parent and
        # the details are added to the parent hash. Note that as is it assume file order, but there is a
        # warning to at least point out when this happens
        my $clone_parent = $gff_file->get_attribute_by_name('Parent');
        if($clone_parent) {
            my $child_clone_name = $clone_name;
            $clone_name = $parent_clones->{$clone_parent};
            unless($clone_name) {
                warning("Failed to find the name of parent for: ".$child_clone_name);
            }
        } else {
            my $clone_id = $gff_file->get_attribute_by_name('ID');
            $parent_clones->{$clone_id} = $clone_name;
        }

        # Make a slice for the seq region name. Sometimes we might not have these seq regions, as they might
        # correspond to non-reference strains. For now just skip over with a warning. Will implement a count
        # at some point so that we can check for any abnormal behaviour
        my $slice = $slice_adaptor->fetch_by_region($coord_system_name,$seq_region_name);
        unless($slice) {
            warning("No slice found in the db for the following: ".$seq_region_name);
            next;
        }

        my $start = $gff_file->get_start;
        my $end = $gff_file->get_end;
        my $strand = $gff_file->get_strand;
        unless(defined($start) && defined($end) && defined($strand)) {
            throw("Could not parse out start/end/strand for the following: ".$seq_region_name);
        }

        # Make a clone feature (MiscFeature for the insert, SimpleFeature for an end)
        my $clone_feature = create_clone_feature($clone_info_hash,
                                                 $type,
                                                 $start,
                                                 $end,
                                                 $strand,
                                                 $slice,
                                                 $clone_name);

        # If this is a misc feature then add all the associated attributes
        if(ref($clone_feature) eq "Bio::EnsEMBL::MiscFeature") {
            add_clone_attribs($clone_feature,$clone_info_hash,$clone_name,$clone_states);
        }
        push(@{$clone_array},$clone_feature);
    } else {
        throw("Unrecognised clone type: ".$type);
    }
}

# Store all the features in the db as either misc or simple features.
# The production db is backed up and updated first when -prod_write is on.
if($write) {
    if($prod_write) {
        backup_production_db($prod_backup_path);
        update_production_db($misc_set_adaptor_prod,$clone_info_hash,$prod_create_id,$prod_name);
    }
    store_clone_features($clone_array,$misc_feature_adaptor,$simple_feature_adaptor,$clone_info_hash);
}
exit;
# Dump the production db to $prod_backup_path/prod_db_backup.<epoch>.sql
# before modifying it; throws if the dump command fails or the file did
# not appear. NOTE(review): the command interpolates $prod_ruser,
# $prod_host and $prod_dbname into a shell string -- these come from
# trusted command-line options, but quoting would be safer if that ever
# changes. No password is passed; presumably the read-only 'ensro' user
# needs none -- confirm.
sub backup_production_db {
    my ($prod_backup_path) = @_;
    my $timestamp = time();
    my $return_value = system('mysqldump -u'.$prod_ruser.' -h'.$prod_host.' '.$prod_dbname.' > '.
        $prod_backup_path.'/prod_db_backup.'.$timestamp.'.sql');
    unless($return_value == 0 && -e $prod_backup_path.'/prod_db_backup.'.$timestamp.'.sql') {
        throw("Failed to backup the production db. Tried to back them up to the following dir:\n".$prod_backup_path."\n".
            "Potential name of failed backup file:\n".'prod_db_backup.'.$timestamp.'.sql');
    }
}
# Push the pipeline bookkeeping rows into the production database:
#  - stores the misc_set for this clone library
#  - inserts an analysis_description row for the clone-end logic_name
#    (skipped with a warning if one already exists)
#  - resolves the species_id via species.production_name (falling back to
#    the core db's meta table when not supplied) and inserts the matching
#    analysis_web_data row.
# Relies on the file globals $prod_db and $db for the two connections.
sub update_production_db {
    my ($misc_set_adaptor_prod,$clone_info_hash,$prod_create_id,$prod_name) = @_;

    # Store the misc_set
    $misc_set_adaptor_prod->store($clone_info_hash->{'misc_set'});

    # Insert the analysis_description row, unless the logic_name already exists
    my $logic_name = $clone_info_hash->{'analysis'}->logic_name;
    my $sth_select = $prod_db->dbc->prepare('SELECT logic_name from analysis_description where logic_name = ?');
    $sth_select->bind_param(1,$logic_name);
    $sth_select->execute();
    my ($result_row) = $sth_select->fetchrow_array();
    unless($result_row) {
        my $sth_insert = $prod_db->dbc->prepare('INSERT INTO analysis_description '.
            '(logic_name,description,display_label,db_version,is_current,created_by,created_at,default_web_data_id,default_displayable) '.
            'VALUES(?,?,?,1,1,?,NOW(),NULL,1)');
        $sth_insert->bind_param(1, $logic_name);
        $sth_insert->bind_param(2, $clone_info_hash->{'analysis'}->description);
        $sth_insert->bind_param(3, $clone_info_hash->{'analysis'}->display_label);
        $sth_insert->bind_param(4, $prod_create_id);
        $sth_insert->execute();
    } else {
        warning("The logic_name '".$logic_name."' was already present in the production databases so it was not added again");
    }

    # Fall back to the core db's species.production_name when no name was passed in
    unless($prod_name) {
        $sth_select = $db->dbc->prepare("SELECT meta_value from meta where meta_key='species.production_name'");
        $sth_select->execute();
        ($result_row) = $sth_select->fetchrow_array();
        unless($result_row) {
            throw("Failed to find the species production name from the meta table and no value was set by user. SQL used:\n".
                "SELECT meta_value from meta where meta_key='species.production_name'\nYou must either set the prod_name ".
                "variable or have the species.production_name key set in the meta table of the core db");
        }
        $prod_name = $result_row;
    }

    # Resolve species_id in the production db
    $sth_select = $prod_db->dbc->prepare('SELECT species_id from species where production_name = ?');
    $sth_select->bind_param(1,$prod_name);
    $sth_select->execute();
    ($result_row) = $sth_select->fetchrow_array();
    unless($result_row) {
        throw("Failed to find the species id from the species table in the production db. The table was queried with the following:\n".
            "SELECT species_id from species where production_name = '".$prod_name."';");
    }
    my $species_id = $result_row;

    # Resolve the analysis_description_id for the row inserted (or found) above
    $sth_select = $prod_db->dbc->prepare('SELECT analysis_description_id from analysis_description where logic_name = ?');
    $sth_select->bind_param(1,$logic_name);
    $sth_select->execute();
    ($result_row) = $sth_select->fetchrow_array();
    unless($result_row) {
        throw("Failed to retrieve an analysis_description_id for '".$logic_name."'. The table was queried with the following:\n".
            "SELECT analysis_description_id from analysis_description where logic_name = '".$logic_name."';");
    }
    my $analysis_description_id = $result_row;

    # Finally link description and species in analysis_web_data
    my $sth_insert = $prod_db->dbc->prepare('INSERT INTO analysis_web_data '.
        '(analysis_description_id,web_data_id,species_id,db_type,displayable,created_by,created_at) '.
        'VALUES(?,NULL,?,"core",1,?,NOW())');
    $sth_insert->bind_param(1, $analysis_description_id);
    $sth_insert->bind_param(2, $species_id);
    $sth_insert->bind_param(3, $prod_create_id);
    $sth_insert->execute();
}
# This subroutine sets a bunch of different general info related to the clone file
# into the clone info hash (library abbreviation, concordance flags, the
# Analysis object for the clone ends and the misc set). It just makes it
# easier to pass this hash in later subroutine calls than passing all the
# individual arguments. Returns the same hashref it was given.
sub set_clone_info_hash {
    my ($clone_info_hash,$file,$species_name,$library_abbrev,$clone_set) = @_;

    $clone_info_hash->{'library_abbrev'} = $library_abbrev;

    # Set concordance. Can be unique_concordant, unique_discordant or multiple. If this info is not
    # present then there is just a warning at the moment, the info itself is currently unused anyway,
    # ideally it will get added as a new attribute since the current ones in the webcode for MiscFeature
    # don't fit too well
    unless($file =~ /\.([^\.]+)\.gff$/) {
        warning("Error could not parse the file name for concordance info. It should follow standard NCBI format, e.g:\n".
            "bMQ.GCF_000001635.22.103.unique_concordant.gff\nor\n".
            "bMQ.GCF_000001635.22.103.unique_discordant.gff\nor\n".
            "bMQ.GCF_000001635.22.103.multiple.gff\n".
            "As this is not a requirement the script will continue without the info");
    } else {
        # "unique_concordant"/"unique_discordant" split into two parts; "multiple" into one
        my @concordance = split('_',$1);
        if(scalar(@concordance) == 2) {
            $clone_info_hash->{'unique'} = 'yes';
            $clone_info_hash->{'concordance'} = $concordance[1];
        } else {
            $clone_info_hash->{'unique'} = 'no';
        }
    }

    # Set the logic_name. Here if you start off with mouse as the logic_name and the library is bMQ, you
    # get 'mouse_bmq_clones' (lowercased). Once this is done make an analysis object and add to hash
    unless($species_name) {
        throw("You should provide a generic logic name for the BAC ends as they are simple features. ".
            "Using the species name (e.g 'mouse') is enough as the library will be appended automatically");
    }
    my $logic_name = $species_name."_".$library_abbrev."_clones";
    $logic_name = lc($logic_name);

    # NOTE(review): reusing an existing analysis row is commented out, so a
    # fresh Analysis object is always built -- confirm this is intended.
    # my $analysis = $db->get_AnalysisAdaptor->fetch_by_logic_name($logic_name);
    # if (!defined $analysis) {
    my $analysis = Bio::EnsEMBL::Analysis->new(
        -logic_name => $logic_name,
        -description => $library_abbrev." library clone ends for ".$species_name,
        -display_label => $library_abbrev." ".$species_name." clone ends",
        -displayable => 1,
        -gff_source => "NCBI",
        -gff_feature => "clone insert start/end",
        -db_file => $file,
        -db => "NCBI clone database",
    );
    # }
    $clone_info_hash->{'analysis'} = $analysis;

    # Add clone_set
    $clone_info_hash->{'misc_set'} = $clone_set;

    return($clone_info_hash);
}
# This loads the clone states file. The second column in the file is the name,
# while the sixth column is the state. Remember that opening the file in emacs
# could lead to the removal of tabs, which is what the file is split on and
# would cause lines to be skipped with a warning.
# Returns a hashref of clone_name => clone_state.
sub load_clone_states {
    my ($clone_state_file) = @_;

    my $clone_states = {};
    # Three-arg open with a lexical filehandle (the original used a
    # bareword handle and the insecure two-arg form)
    open(my $state_fh, '<', $clone_state_file) or
        throw("You provided a clone state file but it could not be opened successfully:\n".$clone_state_file);

    while(my $line = <$state_fh>) {
        # skip comment/header lines
        if($line =~ /^\#/) {
            next;
        }
        my @line_ele = split("\t",$line);
        unless(scalar(@line_ele) == 10) {
            warning("Parsed a line in the clone state file that had more/less than 10 columns (skipping):\n".$line);
            next;
        }
        my $clone_name = $line_ele[1];
        my $clone_state = $line_ele[5];
        $clone_states->{$clone_name} = $clone_state;
    }
    close($state_fh);

    return($clone_states);
}
# Return the MiscSet for the clones: reuse an existing set fetched by code
# from the production database, or build a fresh (not-yet-stored) one with
# the given code/name/description. Throws when no set code is supplied.
sub create_clone_set {
    my ($set_code,$set_name,$set_desc,$misc_set_adaptor_prod) = @_;

    throw("Failed to create a set for the clones, you must at least have a set_code defined!")
        unless $set_code;

    # Prefer a set already registered in the production db under this code
    my $existing_set = $misc_set_adaptor_prod->fetch_by_code($set_code);
    return $existing_set if $existing_set;

    # Nothing registered yet, so build a new one
    return Bio::EnsEMBL::MiscSet->new(
        -CODE        => $set_code,
        -NAME        => $set_name,
        -DESCRIPTION => $set_desc,
    );
}
# Build the feature object for one GFF record: the clone insert itself
# becomes a MiscFeature (attributes and the misc set are attached later by
# the caller), while clone_insert_start/clone_insert_end records become
# SimpleFeatures whose display label carries the clone name plus the end
# subtype ("start" or "end").
sub create_clone_feature {
    my ($clone_info_hash,$type,$start,$end,$strand,$slice,$clone_name) = @_;

    if($type ne 'clone_insert') {
        # keep the trailing word after the last '_' for the display label
        my ($subtype) = $type =~ /([^\_]+)$/;
        return Bio::EnsEMBL::SimpleFeature->new(
            -ANALYSIS      => $clone_info_hash->{'analysis'},
            -START         => $start,
            -END           => $end,
            -STRAND        => $strand,
            -SLICE         => $slice,
            -DISPLAY_LABEL => $clone_name." (".$subtype.")",
        );
    }

    # the insert record itself
    return Bio::EnsEMBL::MiscFeature->new(
        -START  => $start,
        -END    => $end,
        -STRAND => $strand,
        -SLICE  => $slice,
    );
}
# Attach misc_attrib entries to a clone MiscFeature: the clone name, the
# library abbreviation (stored under code 'clone_name' because that is the
# code the ensembl-webcode MiscFeature module expects, even though the
# naming is confusing), plus the clone state and well name when available.
# Returns the (mutated) feature.
sub add_clone_attribs {
    my ($clone_feature,$clone_info_hash,$clone_name,$clone_states) = @_;

    $clone_feature->add_Attribute(
        Bio::EnsEMBL::Attribute->new(
            -CODE  => 'name',
            -VALUE => $clone_name,
        )
    );
    $clone_feature->add_Attribute(
        Bio::EnsEMBL::Attribute->new(
            -CODE  => 'clone_name',
            -VALUE => $clone_info_hash->{'library_abbrev'},
        )
    );

    # only when a state file was loaded and lists this clone
    if($clone_states->{$clone_name}) {
        $clone_feature->add_Attribute(
            Bio::EnsEMBL::Attribute->new(
                -CODE  => 'state',
                -VALUE => $clone_states->{$clone_name},
            )
        );
    }

    # the well name is the part of the clone name after the last '-'
    if($clone_name =~ /([^\-]+)$/) {
        $clone_feature->add_Attribute(
            Bio::EnsEMBL::Attribute->new(
                -CODE  => 'well_name',
                -VALUE => $1,
            )
        );
    }

    return $clone_feature;
}
# Store sub to store either a MiscFeature or a SimpleFeature.
# First syncs the misc_set between the production and core dbs (the core
# row is forced to reuse the production dbID so ids match across dbs),
# then stores each feature via the appropriate adaptor. Relies on the file
# globals $misc_set_adaptor_prod, $misc_set_adaptor_core, $db and $prod_db.
sub store_clone_features {
    my ($clone_array_ref,$misc_feature_adaptor,$simple_feature_adaptor,$clone_info_hash) = @_;
    my $clone_feature;

    # Store the production misc_set in the core db, then rewrite the core
    # row's id to the production id so both databases agree
    my $misc_set_code = $clone_info_hash->{'misc_set'}->code;
    my $misc_set_prod = $misc_set_adaptor_prod->fetch_by_code($misc_set_code);
    my $misc_set_prod_id = $misc_set_prod->dbID;
    $misc_set_adaptor_core->store($misc_set_prod);
    my $sth_update = $db->dbc->prepare('UPDATE misc_set set misc_set_id = ? where code = ?');
    $sth_update->bind_param(1, $misc_set_prod_id);
    $sth_update->bind_param(2, $misc_set_code);
    $sth_update->execute();
    my $misc_set_core = $misc_set_adaptor_core->fetch_by_dbID($misc_set_prod_id);

    # NOTE(review): pop() empties @$clone_array_ref as a side effect
    while($clone_feature = pop(@{$clone_array_ref})) {
        if(ref($clone_feature) eq "Bio::EnsEMBL::MiscFeature") {
            $clone_feature->add_MiscSet($misc_set_core);
            $misc_feature_adaptor->store($clone_feature);
        } elsif(ref($clone_feature) eq "Bio::EnsEMBL::SimpleFeature") {
            $simple_feature_adaptor->store($clone_feature);
        } else {
            throw("Unexpected datastructure found on store. Should be either:\n".
                "\nBio::EnsEMBL::MiscFeature\n".
                "\nor\n".
                "Bio::EnsEMBL::SimpleFeature\n".
                "\nFound:\n".
                ref($clone_feature));
        }
    }

    # NOTE(review): this lookup's result is never used -- looks like dead
    # code left from debugging; candidate for removal
    my $sth_select = $prod_db->dbc->prepare('SELECT misc_set_id from misc_set where code = ?');
    $sth_select->bind_param(1, $misc_set_code);
    $sth_select->execute();
    my ($prod_misc_set_id) = $sth_select->fetchrow_array();
}
| mn1/ensembl-analysis | scripts/clones/clone_gff_parse.pl | Perl | apache-2.0 | 22,934 |
package VMOMI::VirtualDiskSeSparseBackingOption;
use parent 'VMOMI::VirtualDeviceFileBackingOption';

use strict;
use warnings;

# Ancestor chain consulted by the (de)serializer, nearest parent first.
our @class_ancestors = qw(
    VirtualDeviceFileBackingOption
    VirtualDeviceBackingOption
    DynamicData
);

# Serializable members: [ property name, VMOMI type, is-array flag ].
our @class_members = (
    [ 'diskMode',                  'ChoiceOption',                         0 ],
    [ 'writeThrough',              'BoolOption',                           0 ],
    [ 'growable',                  'boolean',                              0 ],
    [ 'hotGrowable',               'boolean',                              0 ],
    [ 'uuid',                      'boolean',                              0 ],
    [ 'deltaDiskFormatsSupported', 'VirtualDiskDeltaDiskFormatsSupported', 1 ],
);

# Class names this type inherits from.
sub get_class_ancestors { return @class_ancestors; }

# All serializable members, inherited ones first.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/VirtualDiskSeSparseBackingOption.pm | Perl | apache-2.0 | 755 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V8::Services::UserListService::GetUserListRequest;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Construct a GetUserListRequest carrying the resource name of the
# user list to fetch.
sub new {
  my ($class, $args) = @_;

  my $self = {resourceName => $args->{resourceName}};

  # Drop fields that were never assigned so the serialized JSON stays minimal.
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V8/Services/UserListService/GetUserListRequest.pm | Perl | apache-2.0 | 1,049 |
% CVS: $Id: crypt_wamcc.pl,v 1.3 1998/10/19 06:35:12 pets Exp $
% goal/0: benchmark entry point.
goal :- crypt.
% myodd/1, myeven/1: thin wrappers over the digit fact tables below.
myodd(X) :-
odd(X).
myeven(X) :-
even(X).
%%%%%%%%%%%%%%%%%%%%%%%%
% Cryptomultiplication:
% Find the unique answer to:
% OEE
% EE
% ---
% EOEE
% EOE
% ----
% OOEE
%
% where E=even, O=odd.
% This program generalizes easily to any such problem.
% Written by Peter Van Roy
/*RB
:- q.
q :- statistics(runtime,_), crypt,
statistics(runtime,[_,Y]), write('time : '),write(Y), nl,
halt_or_else(0,true).
*/
% crypt/0: generate-and-test search. Numbers are represented as digit
% lists, LEAST significant digit first (so [C,B,A] is the number ABC).
% zero/1 checks that any remaining higher-order digits are all zero,
% enforcing the exact width of each partial product and of the sum.
crypt :-
myodd(A), myeven(B), myeven(C),
myeven(E),
mult([C,B,A], E, [I,H,G,F|X]),
lefteven(F), myodd(G), myeven(H), myeven(I), zero(X),
lefteven(D),
mult([C,B,A], D, [L,K,J|Y]),
lefteven(J), myodd(K), myeven(L), zero(Y),
sum([I,H,G,F], [0,L,K,J], [P,O,N,M|Z]),
myodd(M), myodd(N), myeven(O), myeven(P), zero(Z),
write(' '), write(A), write(B), write(C), nl,
write(' '), write(D), write(E), nl,
write(F), write(G), write(H), write(I), nl,
write(J), write(K), write(L), nl,
write(M), write(N), write(O), write(P), nl.
% Addition of two numbers
% sum(+AL,+BL,-CL): digit-list addition with carry propagation;
% digits are least significant first. sum/4 threads the carry.
sum(AL, BL, CL) :- sum(AL, BL, 0, CL).
sum([A|AL], [B|BL], Carry, [C|CL]) :- !,
X is (A+B+Carry),
C is X mod 10,
NewCarry is X // 10,
sum(AL, BL, NewCarry, CL).
sum([], BL, 0, BL) :- !.
sum(AL, [], 0, AL) :- !.
% One list exhausted but a carry remains: keep propagating it.
sum([], [B|BL], Carry, [C|CL]) :- !,
X is B+Carry,
NewCarry is X // 10,
C is X mod 10,
sum([], BL, NewCarry, CL).
sum([A|AL], [], Carry, [C|CL]) :- !,
X is A+Carry,
NewCarry is X // 10,
C is X mod 10,
sum([], AL, NewCarry, CL).
sum([], [], Carry, [Carry]).
% Multiplication
% mult(+AL,+D,-BL): multiply digit list AL by single digit D;
% the final carry is emitted as two extra digits (possibly zero).
mult(AL, D, BL) :- mult(AL, D, 0, BL).
mult([A|AL], D, Carry, [B|BL] ) :-
X is A*D+Carry,
B is X mod 10,
NewCarry is X // 10,
mult(AL, D, NewCarry, BL).
mult([], _, Carry, [C,Cend]) :-
C is Carry mod 10,
Cend is Carry // 10.
% zero(+L): every remaining digit of L is zero.
zero([]).
zero([0|L]) :- zero(L).
% Digit fact tables used by the generators above.
odd(1).
odd(3).
odd(5).
odd(7).
odd(9).
even(0).
even(2).
even(4).
even(6).
even(8).
% lefteven/1: even digits valid as a leading (non-zero) digit.
lefteven(2).
lefteven(4).
lefteven(6).
lefteven(8).
| pschachte/groundness | benchmarks/crypt_wamcc.pl | Perl | apache-2.0 | 1,986 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::emc::symmetrix::dmx34::local::mode::components::config;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# ---------[ Configuration Information ]---------
#
#...
# Parse the "Configuration Information" section of the health report
# (in $self->{content_file_health}) and raise a CRITICAL when the
# expected "CODE OK!" marker is missing.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking config");
    $self->{components}->{config} = { name => 'config', total => 0, skip => 0 };
    return if ($self->check_filter(section => 'config'));

    # Extract everything between the section banner and the next banner.
    my ($section) = $self->{content_file_health} =~ /----\[ Configuration Information(.*?)----\[/msi;
    if (!defined($section)) {
        $self->{output}->output_add(long_msg => 'skipping: cannot find config');
        return;
    }

    $self->{components}->{config}->{total}++;

    if ($section =~ /CODE OK!/msi) {
        $self->{output}->output_add(long_msg => 'no configuration problem detected');
    } else {
        $self->{output}->output_add(
            severity => 'CRITICAL',
            short_msg => 'problem of configuration'
        );
    }
}
1;
| Sims24/centreon-plugins | storage/emc/symmetrix/dmx34/local/mode/components/config.pm | Perl | apache-2.0 | 1,830 |
:- module(domino_state_space,[
domino_game_state_space/1
]).
:- use_module(library(aggregate)).
:- use_module(library(clpfd)).
:- use_module('../../gameyard_config').
:- use_module(gameyard(game_state_space)).
:- use_module(gameyard(misc/list_extras)).
% domino_game_state_space(-GS)
% Build the dominoes game-state-space descriptor: start from the default
% descriptor, override each hook with the module-qualified predicates
% defined in this file, then validate it via game_state_space/1.
% NOTE(review): every hook value below must name a predicate actually
% defined in this module — in particular verify that the *_transitions
% heads later in the file match these registered names exactly.
domino_game_state_space(GS) :-
default_game_state_space(GS1),
context_module(M),
GS = GS1.put(
_{
state_space_param_check: M:domino_state_space_param_check,
state_space_param_create: M:domino_state_space_param_create,
state_space_initial_state: M:domino_state_space_initial_state,
state_space_players: M:domino_state_space_players,
state_space_current_player: M:domino_state_space_current_player,
state_space_player_view: M:domino_state_space_player_view,
state_space_view_transitions: M:domino_state_view_transitions,
state_space_transitions: M:domino_state_space_transitions
}),
game_state_space(GS).
% domino_state_space_param_check(+SS,+Param)
% Succeeds iff Param is a well-formed domino_state_space_param dict.
% BUG FIX: random_seed is a seed(_) compound term (see
% domino_state_space_param_create/2, and set_random/1 which consumes it),
% so the previous atom(Param.random_seed) test rejected every parameter
% set this module itself produces; accept seed(_) terms instead.
domino_state_space_param_check(_SS,Param) :-
is_dict(Param,domino_state_space_param),
number(Param.max_tile_num),
Param.max_tile_num >= 1,
number(Param.num_players),
Param.num_players >= 2,
number(Param.initial_draw),
Param.initial_draw >= 0,
Param.random_seed = seed(_).
% domino_state_space_param_create(+SS,-Param)
% Default parameters: double-six tile set, 2 players, 7-tile initial
% draw, and a nondeterministic random seed (seed(random) option term
% understood by set_random/1).
domino_state_space_param_create(_SS,
domino_state_space_param{
max_tile_num:6,
num_players: 2,
initial_draw: 7,
random_seed: seed(random)}).
% all_tiles(+MaxTileNum,-AllTiles)
% AllTiles is the sorted set of tiles [Lo,Hi] with 0 =< Lo =< Hi =< MaxTileNum
% (each unordered pair appears once, in canonical orientation).
all_tiles(MaxTileNum,AllTiles) :-
Num1 #=< Num2,
[Num1, Num2] ins 0..MaxTileNum,
aggregate(set([Num1,Num2]),label([Num1,Num2]),AllTiles).
% Membership helpers over tile collections, via clp(fd) tuples_in/2.
% NOTE(review): tuples_in/2 is not a reifiable constraint, so applying
% #\ to it in the *_not_in helpers may not behave as intended — confirm
% (these helpers appear unused in the rest of this file).
tiles_in(Tiles,Collection) :- tuples_in(Tiles,Collection).
tiles_not_in(Tiles,Collection) :- #\ tuples_in(Tiles,Collection).
tile_in(Tile,Collection) :- tuples_in([Tile],Collection).
tile_not_in(Tile,Collection) :- #\ tuples_in([Tile],Collection).
% player_draws(+DrawSize,+NumPlayers,+Tiles,-Hands,-RestTiles)
% Deal DrawSize tiles to each of NumPlayers players off the front of the
% (shuffled) tile list; Hands is the list of sorted hands and RestTiles
% becomes the draw pile.
% FIX: removed leftover debug output in the base case, which printed the
% remaining tile list to stdout on every initial deal.
player_draws(_DrawSize,0,TL,[],TL) :- !.
player_draws(DrawSize,N,TL,[Hand|PL],RestTL) :-
N1 is N - 1,
split(DrawSize,TL,P,TL1),   % split/4 from gameyard misc/list_extras
sort(P,Hand),
player_draws(DrawSize,N1,TL1,PL,RestTL).
% domino_tile_list(+Params,-AllTiles,-PermutedTiles)
% Enumerate the full tile set and shuffle it into a deal order.
domino_tile_list(Params,AllTiles,PermutedTiles) :-
MaxTileNum = Params.max_tile_num,
all_tiles(MaxTileNum,AllTiles),
random_permutation(AllTiles,PermutedTiles).
% domino_state_space_initial_state(+SS,+Params,-InitialState)
% Seed the RNG, shuffle and deal the tiles, and pick a random first
% player. The resulting domino_state dict holds the complete (omniscient)
% game state; board starts empty and exposed tracks publicly seen tiles.
domino_state_space_initial_state(_SS, Params,InitialState) :-
set_random(Params.random_seed),
domino_tile_list(Params,AllTiles, PermTileList),
player_draws(Params.initial_draw,
Params.num_players,
PermTileList,
PlayersHands,
DrawPile),
random(0,Params.num_players,FirstPlayer),
%format("[domino_state_space_initial_state] InitialState=~k\n",[InitialState])
InitialState = domino_state{
params: Params,
all_tiles: AllTiles,
current_player: FirstPlayer,
hands: PlayersHands,
draw_pile: DrawPile,
pass_count: 0,
exposed : [],
board: []
}.
% domino_state_space_players(+SS,+State,-Players)
% Players is the ordered list 0 .. num_players-1.
% BUG FIX: the goal and result arguments of aggregate/3 were fused into
% between(0,Num,X,Players); aggregate/3 takes (Template, Goal, Result),
% so between/3 is the goal and Players the separate result argument.
domino_state_space_players(_SS,State,Players) :-
Num is State.params.num_players - 1,
aggregate(set(X),between(0,Num,X),Players).
% domino_state_space_current_player(+SS,+State,-Player)
domino_state_space_current_player(_SS, State,Player) :-
State.current_player = Player.
% domino_state_space_player_view(+SS,+State,+PlayerNum,-PlayerView)
domino_state_space_player_view(_SS, State,PlayerNum,PlayerView) :-
domino_player_view(State,PlayerNum,PlayerView).
% domino_player_view(+State,+PlayerNum,-PlayerView)
% Project the omniscient game state onto what PlayerNum may know:
% their own hand, the board, the tiles exposed to them (public exposed
% tiles plus their own hand), the draw-pile size, and only the SIZE of
% every player's hand.
domino_player_view(State,PlayerNum,PlayerView) :-
nth0(PlayerNum,State.hands,PlayerHand),
append(State.exposed,PlayerHand,ExposedForPlayer1),
sort(ExposedForPlayer1,ExposedForPlayer),
length(State.draw_pile,DrawPileSize),
list_map(length,State.hands,HandSizes),
PlayerView = domino_player_view{
params : State.params,
view_player: PlayerNum,
current_player: State.current_player,
hand: PlayerHand,
board: State.board,
exposed: ExposedForPlayer,
pass_count: State.pass_count,
draw_pile_size: DrawPileSize,
hand_sizes: HandSizes,
all_tiles: State.all_tiles
}.
% View Transition
% domino_state_view_transitions(+SS,+StartView,-MoveEndViewPairs)
% Hook registered under state_space_view_transitions in
% domino_game_state_space/1.
% BUG FIX: the head was mistyped "domain_state_space_view_transitions",
% leaving the registered hook predicate undefined.
domino_state_view_transitions(_SS,StartView,MoveEndViewPairs) :-
view_simulate_moves(StartView,MoveEndViewPairs).
% view_simulate_moves(+StartView,-MoveEndViewPairs)
% All [Move,EndView] pairs consistent with a player's partial view.
% Terminal (no moves) after four consecutive passes or once any hand
% is empty; otherwise play moves are gathered first and pass/draw
% alternatives are merged in by view_simulate_moves_1/3.
view_simulate_moves(StartView,[]) :-
StartView.pass_count >=4,!.
view_simulate_moves(StartView,[]) :-
member(0,StartView.hand_sizes),!.
view_simulate_moves(StartView,MoveEndViewPairs) :-
aggregate(set([Move1,EndView1]),view_simulate_play_move(StartView,Move1,EndView1),PreMoves) , !,
view_simulate_moves_1(StartView,PreMoves,MoveEndViewPairs).
% aggregate/3 fails when there are no play moves at all.
view_simulate_moves(StartView,MoveEndViewPairs) :-
view_simulate_moves_1(StartView,[],MoveEndViewPairs).
% view_simulate_draw(+StartView,+Unexposed,-EndView)
% One possible outcome of the current player drawing a tile. Only when
% the viewing player is the one drawing does the view learn the tile
% (hand and exposed grow); for opponents only the counters change.
view_simulate_draw(StartView,Unexposed,EndView) :-
member(DrawnTile,Unexposed),
(StartView.current_player = StartView.view_player ->
sort([DrawnTile|StartView.hand],NewHand),
sort([DrawnTile|StartView.exposed],NewExposed)
; NewHand = StartView.hand, NewExposed = StartView.exposed ),
NewDrawPileSize is StartView.draw_pile_size - 1,
nth0(StartView.current_player,StartView.hand_sizes,HandSize),
NewHandSize is HandSize + 1,
set_nth(StartView.current_player,StartView.hand_sizes,NewHandSize,NewHandSizes),
EndView = StartView.put(
_{ pass_count: 0,
hand : NewHand,
hand_sizes: NewHandSizes,
draw_pile_size : NewDrawPileSize,
exposed: NewExposed }).
% view_simulate_draws(+StartView,-MoveEndViewPairs)
% All draw moves from the view, or [] when the pile is empty.
view_simulate_draws(StartView,[]) :-
StartView.draw_pile_size = 0,!.
view_simulate_draws(StartView,MoveEndViewPairs) :-
subtract(StartView.all_tiles,StartView.exposed,Unexposed),
aggregate(set([draw,EndView]), view_simulate_draw(StartView,Unexposed,EndView), MoveEndViewPairs).
% view_simulate_moves_1(+StartView,+PlayMoves,-MoveEndViewPairs)
% Merge the play moves with the pass/draw alternatives.
% No play moves and pass is legal -> pass is the only move.
view_simulate_moves_1(StartView,[],[[pass,EndView]]) :-
view_simulate_pass(StartView,EndView),!.
% Some play moves: when simulating an opponent a pass is also possible
% (we cannot rule it out from this view); then add all draw moves.
view_simulate_moves_1(StartView,PreMoves,MoveEndViewPairs) :-
PreMoves = [_|_],!,
((StartView.current_player \= StartView.view_player,
view_simulate_pass(StartView,EndView),
sort([[pass,EndView]|PreMoves],MoveEndViewPairs1)), ! ;
MoveEndViewPairs1 = PreMoves),
view_simulate_draws(StartView,MoveEndViewPairs2),
append(MoveEndViewPairs1,MoveEndViewPairs2,MoveEndViewPairs3),
sort(MoveEndViewPairs3,MoveEndViewPairs).
% No play moves and pass illegal -> only draw moves remain.
view_simulate_moves_1(StartView,[],MoveEndViewPairs) :-
view_simulate_draws(StartView,MoveEndViewPairs).
% view_simulate_play_move(+StartView,-Move,-EndView)
% A legal tile placement from the view. When the current player IS the
% viewing player the tile must come from the visible hand (and is removed
% from it); for an opponent any unexposed tile could be played.
view_simulate_play_move(StartView,play(Pos,Tile_),EndView) :-
StartView.current_player = StartView.view_player,
view_simulate_play_move_1(StartView.hand,StartView,Pos,Tile_,EndView1),
delete(StartView.hand,Tile_,NewHand),
nth0(StartView.current_player,StartView.hand_sizes,HandSize),
HandSize1 is HandSize - 1,
set_nth(StartView.current_player,StartView.hand_sizes,HandSize1,NewHandSizes),
EndView = EndView1.put( _{ hand : NewHand, hand_sizes: NewHandSizes }).
view_simulate_play_move(StartView,play(Pos,Tile_),EndView) :-
StartView.current_player \= StartView.view_player,
subtract(StartView.all_tiles,StartView.exposed,Unexposed),
view_simulate_play_move_1(Unexposed,StartView,Pos,Tile_,EndView1),
nth0(StartView.current_player,StartView.hand_sizes,HandSize),
HandSize1 is HandSize - 1,
set_nth(StartView.current_player,StartView.hand_sizes,HandSize1,NewHandSizes),
EndView = EndView1.put(_{ hand_sizes: NewHandSizes }).
% view_simulate_play_move_1(+PossibleTiles,+StartView,-Pos,-Tile_,-EndView)
% Try a candidate tile in both orientations against the board traces;
% the played tile (canonical form Tile_) becomes exposed and the turn
% advances.
view_simulate_play_move_1(PossibleTiles,StartView,Pos,Tile_,EndView) :-
member(Tile_,PossibleTiles),
Tile_ = [TNum1,TNum2],
sort([[TNum1,TNum2],[TNum2,TNum1]],Tiles),
member(Tile,Tiles),
Tile = [Num1,Num2],
view_simulate_match_tile(StartView.board,Num1,Num2,Pos,NewBoard),
sort([Tile_ | StartView.exposed],NewExposed),
next_player(StartView,NextPlayer),
EndView = StartView.put(
_{ pass_count: 0,
current_player: NextPlayer,
board: NewBoard,
exposed: NewExposed }).
% view_simulate_match_tile(+Board,+Num1,+Num2,-Pos,-NewBoard)
% The first tile opens two traces (canonical orientation only — the
% first clause prunes the flipped duplicate); later tiles extend the
% trace at index Pos whose open end equals Num1, pushing Num2.
view_simulate_match_tile([],Num1,Num2,center,[]) :- Num1 > Num2, !, fail.
view_simulate_match_tile([],Num1,Num2,center,[[Num1],[Num2]]).
view_simulate_match_tile(Board,Num1,Num2,Pos,NewBoard) :-
Trace = [Num1 | _],
nth0(Pos,Board,Trace),
NewTrace = [Num2 | Trace],
set_nth(Pos,Board,NewTrace,NewBoard).
% view_simulate_pass(+StartView,-EndView)
% Passing is only legal when the draw pile is empty; the pass counter
% increments and the turn advances (four consecutive passes end the
% game, see view_simulate_moves/2).
view_simulate_pass(StartView,EndView) :-
StartView.draw_pile_size = 0,
StartView.pass_count < 4,
next_player(StartView,NextPlayer),
PassCount1 is StartView.pass_count + 1,
EndView = StartView.put(
_{ pass_count: PassCount1,
current_player: NextPlayer }).
% End View Transition
% State transitions
% domino_state_space_transitions(+SS,+CurrentState,-MoveEndStatePairs)
% Hook registered under state_space_transitions in
% domino_game_state_space/1.
% BUG FIX: the head was mistyped "domain_state_space_transitions",
% leaving the registered hook predicate undefined.
domino_state_space_transitions(_SS,CurrentState,MoveEndStatePairs) :-
state_transitions(CurrentState,MoveEndStatePairs).
% state_transitions(+CurrentState,-MoveStatePairs)
% All [Move,NextState] pairs from the omniscient state. Terminal (no
% moves) after four consecutive passes or once any hand is empty.
state_transitions(CurrentState,[]) :-
CurrentState.pass_count >= 4,!.
state_transitions(CurrentState,[]) :-
member([],CurrentState.hands),!.
state_transitions(CurrentState,MoveStatePairs) :-
(aggregate(set([play(Pos,Tile),NextState]),state_transition_play(CurrentState,Pos,Tile,NextState),PlayTransitions),! ; PlayTransitions=[]),
state_transitions_1(CurrentState,PlayTransitions,MoveStatePairs).
% state_transitions_1(+CurrentState,+PlayMoves,-MoveStatePairs)
% No play moves: the current player must draw (pile non-empty) ...
state_transitions_1(CurrentState,[],[[draw,NextState]]) :-
CurrentState.draw_pile = [DrawnTile|NewDrawPile],!,
nth0(CurrentState.current_player,CurrentState.hands,Hand),
sort([DrawnTile|Hand],NewHand),
set_nth(CurrentState.current_player,CurrentState.hands,NewHand,NewHands),
NextState = CurrentState.put(_{
pass_count: 0,
draw_pile: NewDrawPile,
hands: NewHands
}).
% ... or pass when the pile is empty.
state_transitions_1(CurrentState,[],[[pass,NextState]]) :-
CurrentState.draw_pile = [],!,
next_player(CurrentState,NextPlayer),
PassCount1 is CurrentState.pass_count + 1,
NextState = CurrentState.put(_{
pass_count: PassCount1,
current_player: NextPlayer
}).
% Play moves available: they are the only legal moves in the true state.
state_transitions_1(_CurrentState,PlayMoveStatePairs,PlayMoveStatePairs).
% state_transition_play(+CurrentState,-Pos,-Tile_,-EndState)
% One legal placement of a tile from the current player's hand; reuses
% view_simulate_match_tile/5 for board matching. The played tile leaves
% the hand, becomes exposed, and the turn advances.
state_transition_play(CurrentState,Pos,Tile_,EndState) :-
nth0(CurrentState.current_player,CurrentState.hands,Hand),
member(Tile_,Hand),
delete(Hand,Tile_,NewHand),
set_nth(CurrentState.current_player,CurrentState.hands,NewHand,NewHands),
Tile_ = [TNum1,TNum2],
sort([[TNum1,TNum2],[TNum2,TNum1]],Tiles),
member(Tile,Tiles),
Tile = [Num1,Num2],
view_simulate_match_tile(CurrentState.board,Num1,Num2,Pos,NewBoard),
next_player(CurrentState,NextPlayer),
sort([Tile_|CurrentState.exposed],NewExposed),
EndState = CurrentState.put(
_{ pass_count: 0,
current_player: NextPlayer,
board: NewBoard,
hands: NewHands,
exposed: NewExposed }).
% End state transition
% Misc routines
% next_player(+ViewOrState,-NextPlayer): turn order is cyclic.
next_player(ViewOrState,NextPlayer) :-
NumPlayers is ViewOrState.params.num_players,
NextPlayer is (ViewOrState.current_player + 1) mod NumPlayers.
% test_play(+MaxRounds,+Seed)
% Run a random self-play game for up to MaxRounds moves, logging the
% whole trace to /tmp/domino-test.log. At every state the view/state
% transition consistency checks below are exercised.
test_play(MaxRounds,Seed) :-
!,
open("/tmp/domino-test.log",write,LogStream), with_output_to(LogStream,test_play_1(MaxRounds,Seed)),
%test_play_1(MaxRounds,Seed),
format("Test ended\n").
% test_play_1/2: build params/initial state and start the game loop.
test_play_1(MaxRounds,Seed) :-
format("Test start: MaxRounds: ~k, Seed: ~k.\n",[MaxRounds,Seed]),
SS=[],%domino_game_state_space(SS),
domino_state_space_param_create(SS,Param0),
Param = Param0.put(_{random_seed: seed(Seed)}),
domino_state_space_initial_state(SS,Param,State0),
(test_play(State0,0,MaxRounds),! ; format("Test failed!\n")).
% test_play/3: game loop (note: overloads the test_play name at a
% different arity). Prints the state, runs the consistency check, then
% picks a random legal move.
test_play(_State,Round,Round) :-
format("maximum number of rounds reached!\n"),!.
test_play(State,Round,MaxRounds) :-
format("Round: ~a\n",[Round]),
format(" Board:\n"),
foreach(member(B,State.board),format(" ~k\n",[B])),
format(" Hands:\n"),
foreach(member(B,State.hands),format(" ~k\n",[B])),
format(" Draw pile:\n"),
format(" ~k\n",[State.draw_pile]),
format(" Current player: ~a.\n",[State.current_player]),
format(" Pass count: ~a.\n",[State.pass_count]),
format("\n"),
format("---test start---\n"),
test_view_consistency_1(State),
format("---test end---\n\n"),
(state_transitions(State,MoveStatePairs),! ;
(format("Cannot compute transitions from: ~k.\n",[State]),fail)),
test_play_1(Round,MoveStatePairs,MaxRounds).
% test_play_1/3: choose a random move and recurse, or stop when the
% game has no transitions left.
test_play_1(_Round,[],_MaxRound) :-
format("Game ended!\n").
test_play_1(Round,MoveStatePairs,MaxRounds) :-
list_map(head,MoveStatePairs,Moves),
format(" Available moves:\n"),
format(" ~k\n",[Moves]),
length(Moves,LM),
M is random(LM),
nth0(M,Moves,Move),
format(" Selected move: ~k\n",[Move]),
nth0(M,MoveStatePairs,[_,NextState]),
format("----------------------------------------\n\n"),
Round1 is Round + 1,
test_play(NextState,Round1,MaxRounds).
% Consistency check: for every player, every true state transition must
% be explainable by some view transition from that player's view.
test_view_consistency_1(State0) :-
format("Testing consistency.\n"),
print_state("State0"," ",State0),
NumPlayers1 is State0.params.num_players - 1,
foreach(between(0,NumPlayers1,Player),
test_view_consistency_2(State0,Player)).
test_view_consistency_2(State0,Player) :-
format(" Checking player ~k.\n",[Player]),
(test_view_consistency_3(State0,Player),! ;
format(" Inconsistency found for player: ~k.\n",[Player]),fail).
% The view must admit at least as many transitions as the true state.
test_view_consistency_3(State0,Player) :-
domino_player_view(State0,Player,View0),
print_view("View0"," ",View0),
state_transitions(State0,StateTrans),
print_moves_1(" State0 moves:",StateTrans),
(view_simulate_moves(View0,ViewTrans),! ; format("OPS! view_simulate_moves failed!"),fail),
print_moves_1(" View0 moves:",ViewTrans),
length(StateTrans,NumStateTrans),
length(ViewTrans,NumViewTrans),
format(" State transitions length: ~k.\n",[NumStateTrans]),
format(" View transitions length: ~k.\n",[NumViewTrans]),
(NumStateTrans =< NumViewTrans, ! ; format(" ~k > ~k !\n",[NumStateTrans,NumViewTrans]),fail),
foreach(member([Move,State1],StateTrans),test_view_consistency_4(View0.view_player,ViewTrans,Move,State1)).
test_view_consistency_4(ViewPlayer,ViewTrans,Move,State1) :-
format(" Checking move: ~k,\n",[Move]),
format(" to state:\n"),
print_state("State1"," ",State1),
(test_view_consistency_5(ViewPlayer,ViewTrans,Move,State1),! ; format(" failed!\n"),fail).
% check_eq(+Indent,+Name,+Value1,+Value2)
% Print whether two field values agree (always succeeds; diagnostic only).
check_eq(Indent,Name,Value1,Value2) :-
Value1 = Value2,!,
format("~s~s: ~k = ~k.\n",[Indent,Name,Value1,Value2]).
check_eq(Indent,Name,Value1,Value2) :-
format("~s~s: ~k <> ~k!\n",[Indent,Name,Value1,Value2]).
% test_same_move(+Move,+[Move,View]): filter helper for include/3.
test_same_move(Move,[Move,_]).
% test_view_consistency_5(+ViewPlayer,+ViewTrans,+Move,+State1)
% The view reached by applying Move to the true state must appear among
% the view transitions carrying the same move label.
test_view_consistency_5(ViewPlayer,ViewTrans,Move,State1) :-
domino_player_view(State1,ViewPlayer,View1_),
print_view("View1_"," ",View1_),
include(test_same_move(Move),ViewTrans,ViewTrans1),
(ViewTrans1 = [] -> format(" move not available for view!.\n"),
print_view_trans_set(" ",ViewTrans), fail ; true),
list_map(snd,ViewTrans1,Views1),
(member(View1_,Views1),! ;
format(" no corresponding view found!.\n"),
foreach(member(V,Views1),(format(" candidate:\n"), check_view_eq(" ",View1_,V))), fail).
% check_view_eq/3: field-by-field diagnostic comparison of two views.
check_view_eq(Indent,View1,View2) :-
check_eq(Indent,"view_player",View1.view_player,View2.view_player),
check_eq(Indent,"current_player",View1.current_player,View2.current_player),
check_eq(Indent,"hand",View1.hand,View2.hand),
check_eq(Indent,"board",View1.board,View2.board),
check_eq(Indent,"exposed",View1.exposed,View2.exposed),
check_eq(Indent,"pass_count",View1.pass_count,View2.pass_count),
check_eq(Indent,"draw_pile_size",View1.draw_pile_size,View2.draw_pile_size),
check_eq(Indent,"hand_sizes",View1.hand_sizes,View2.hand_sizes).
% print_view_trans_set/2: dump each candidate [Move,View] (backtracks
% over members; diagnostic only).
print_view_trans_set(Indent,ViewTrans) :-
member([Move,View],ViewTrans),
format("~s candidate[~k]:\n",[Indent,Move]),
format(string(Indent2),"~s ",[Indent]),
print_view("V",Indent2,View).
% print_view/3, print_state/3, print_moves_1/2: pretty-printers.
print_view(Name,Indent,View) :-
format("~s~s:\n",[Indent,Name]),
format("~s view_player: ~k,\n",[Indent,View.view_player]),
format("~s current_player: ~k,\n",[Indent,View.current_player]),
format("~s hand: ~k,\n",[Indent,View.hand]),
format("~s board: ~k,\n",[Indent,View.board]),
format("~s exposed: ~k,\n",[Indent,View.exposed]),
format("~s pass_count: ~k,\n",[Indent,View.pass_count]),
format("~s draw_pile_size: ~k,\n",[Indent,View.draw_pile_size]),
format("~s hand_sizes: ~k,\n",[Indent,View.hand_sizes]).
print_state(Name,Indent,State) :-
format("~s~s:\n",[Indent,Name]),
format("~s current_player: ~k,\n",[Indent,State.current_player]),
format("~s hands: ~k,\n",[Indent,State.hands]),
format("~s board: ~k,\n",[Indent,State.board]),
format("~s pass_count: ~k,\n",[Indent,State.pass_count]),
format("~s draw_pile: ~k,\n",[Indent,State.draw_pile]).
print_moves_1(Prefix,MoveStatePairs) :-
list_map(head,MoveStatePairs,Moves),
format("~s ~k.\n",[Prefix,Moves]).
| joaoraf/gameyard | src/main/prolog/game_types/domino/domino_state_space.pl | Perl | apache-2.0 | 16,386 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::adva::fsp3000::snmp::mode::listinterfaces;
use base qw(snmp_standard::mode::listinterfaces);
use strict;
use warnings;
# OIDs of the ADVA inventory table columns used to identify pluggable
# optics: the AID string (slot address) and the unit name (SFP/XFP model).
my $mapping = {
advaInventoryAidString => { oid => '.1.3.6.1.4.1.2544.1.11.7.10.1.1.6' },
advaInventoryUnitName => { oid => '.1.3.6.1.4.1.2544.1.11.7.10.1.1.7' },
};
# Restrict the label OIDs usable for interface filtering/display on this
# platform to ifDesc and ifAlias.
sub set_oids_label {
    my ($self, %options) = @_;

    $self->{oids_label} = {
        'ifdesc'  => '.1.3.6.1.2.1.2.2.1.2',
        'ifalias' => '.1.3.6.1.2.1.31.1.1.1.18'
    };
}
# Default OID label used to filter interfaces.
sub default_oid_filter_name {
    return 'ifdesc';
}

# Default OID label used to display interfaces.
sub default_oid_display_name {
    return 'ifdesc';
}
# Constructor: delegate to the standard SNMP listinterfaces mode.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    return $self;
}
# List interfaces via the parent SNMP mode, then decorate each CH-x-y-z
# channel interface with the plugged SFP/XFP unit name taken from the
# ADVA inventory table (PL-x-y-z entries), exposed as the 'type' extra OID.
sub manage_selection {
    my ($self, %options) = @_;

    $self->SUPER::manage_selection(%options);

    my $oid_advaInventoryEntry = '.1.3.6.1.4.1.2544.1.11.7.10.1.1';
    my $snmp_result = $self->{snmp}->get_table(
        oid => $oid_advaInventoryEntry,
        begin => $mapping->{advaInventoryAidString}->{oid},
        end => $mapping->{advaInventoryUnitName}->{oid}
    );

    $self->{extra_oids}->{type} = { oid => $mapping->{advaInventoryUnitName}->{oid}, matching => '%{instance}$' };
    # BUG FIX: initialize the results bucket under the OID string; the
    # previous code keyed it by the stringified hashref (missing ->{oid},
    # compare the assignment inside the loop below).
    $self->{results}->{ $self->{extra_oids}->{type}->{oid} } = {};

    foreach my $oid (keys %{$snmp_result}) {
        next if ($oid !~ /^$mapping->{advaInventoryUnitName}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);

        # Only pluggable optics are of interest.
        next if ($result->{advaInventoryUnitName} !~ /^(SFP|XFP)/i);
        # interface name example: CH-1-3-N1
        # inventory name example: PL-1-3-N1
        next if ($result->{advaInventoryAidString} !~ /(\d+-\d+-[^\-]+)$/);
        my $lookup = $1;

        # Attach the unit name to the interface whose name ends in CH-<slot>.
        foreach (sort @{$self->{interface_id_selected}}) {
            my $display_value = $self->get_display_value(id => $_);
            if ($display_value =~ /CH-$lookup$/) {
                $self->{results}->{ $self->{extra_oids}->{type}->{oid} }->{ $self->{extra_oids}->{type}->{oid} . '.' . $_ } = $result->{advaInventoryUnitName};
            }
        }
    }
}
# Declare the 'type' extra OID (SFP/XFP unit name) before delegating
# discovery formatting to the parent class.
sub disco_format {
my ($self, %options) = @_;
$self->{extra_oids}->{type} = { oid => $mapping->{advaInventoryUnitName}->{oid}, matching => '%{instance}$' };
$self->SUPER::disco_format(%options);
}
1;
__END__
=head1 MODE
=over 8
=item B<--interface>
Set the interface (number expected), e.g.: 1,2,... (empty means 'check all interfaces').
=item B<--name>
Allows the use of the interface name with the --interface option instead of the interface OID index (can be a regexp).
=item B<--speed>
Set interface speed (in Mb).
=item B<--skip-speed0>
Don't display interface with speed 0.
=item B<--filter-status>
Display interfaces matching the filter (example: 'up').
=item B<--use-adminstatus>
Display interfaces with AdminStatus 'up'.
=item B<--oid-filter>
Choose OID used to filter interface (default: ifDesc) (values: ifDesc, ifAlias).
=item B<--oid-display>
Choose OID used to display interface (default: ifDesc) (values: ifDesc, ifAlias).
=item B<--display-transform-src>
Regexp src to transform display value. (security risk!!!)
=item B<--display-transform-dst>
Regexp dst to transform display value. (security risk!!!)
=item B<--add-extra-oid>
Display an OID.
Example: --add-extra-oid='alias,.1.3.6.1.2.1.31.1.1.1.18'
or --add-extra-oid='vlan,.1.3.6.1.2.1.31.19,%{instance}\..*'
=back
=cut
| Tpo76/centreon-plugins | network/adva/fsp3000/snmp/mode/listinterfaces.pm | Perl | apache-2.0 | 4,427 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
# Program Name : MaskingJob_obj.pm
# Description : Delphix Engine MaskingJob object
# It's include the following classes:
# - MaskingJob_obj - class which map a Delphix Engine action API object
# Author : Marcin Przepiorowski
# Created : 18 Nov 2016 (v2.X.X)
#
#
package MaskingJob_obj;
use warnings;
use strict;
use Data::Dumper;
use JSON;
use Toolkit_helpers qw (logger);
use Date::Manip;
use lib '../lib';
# constructor
# parameters
# - dlpxObject - connection to the Delphix Engine (DE)
# - startTime / endTime - accepted but currently unused
#   NOTE(review): confirm whether time-range filtering was intended here
# - debug - debug flag (debug on if defined)
# Builds the object and eagerly loads the engine's masking job list.
sub new {
my $classname = shift;
my $dlpxObject = shift;
my $startTime = shift;
my $endTime = shift;
my $debug = shift;
logger($debug, "Entering MaskingJob_obj::constructor",1);
# _maskingjob maps job reference -> raw job hash from the API
my %maskingjob;
my $self = {
_maskingjob => \%maskingjob,
_dlpxObject => $dlpxObject,
_debug => $debug
};
bless($self,$classname);
$self->loadMaskingJobList();
return $self;
}
# Procedure verifyMaskingJobForContainer
# parameters:
# - container - container (source database) reference
# - name - masking job name (matched case-insensitively)
# Return the job reference if exactly one job with that name is assigned
# to the container; undef (after printing a message) otherwise
sub verifyMaskingJobForContainer {
    my $self = shift;
    my $container = shift;
    my $name = shift;
    logger($self->{_debug}, "Entering MaskingJob_obj::verifyMaskingJobForContainer",1);

    my $contjobs = $self->getMaskingJobForContainer($container);
    my @refarray = grep { lc $self->getName($_) eq lc $name } @{$contjobs};

    # FIX: use numeric comparisons; the original 'gt'/'lt' compared the
    # counts as strings.
    if (scalar(@refarray) > 1) {
        print "Too many jobs with same name defined in source\n";
        return undef;
    }

    if (scalar(@refarray) < 1) {
        print "Job with name $name not defined in source database\n";
        return undef;
    }

    return $refarray[-1];
}
# Procedure getMaskingJobForContainer
# parameters:
# - container - container reference
# Return a reference to the list of masking job references currently
# assigned to the given container
sub getMaskingJobForContainer {
    my ($self, $container) = @_;
    logger($self->{_debug}, "Entering MaskingJob_obj::getMaskingJobForContainer",1);

    my $jobs = $self->{_maskingjob};
    my @matching = grep {
        defined($jobs->{$_}->{associatedContainer})
          && ($jobs->{$_}->{associatedContainer} eq $container)
    } keys %{$jobs};

    return \@matching;
}
# Procedure getAssociatedContainer
# parameters:
# - reference - masking job reference
# Return the container reference the job is assigned to, or 'N/A' when
# the reference is undefined, unknown, or has no associated container
sub getAssociatedContainer {
    my ($self, $reference) = @_;
    logger($self->{_debug}, "Entering MaskingJob_obj::getAssociatedContainer",1);

    return 'N/A' if (!defined($reference));
    my $job = $self->{_maskingjob}->{$reference};
    return (defined($job) && defined($job->{associatedContainer}))
         ? $job->{associatedContainer}
         : 'N/A';
}
# Procedure setAssociatedContainer
# parameters:
# - jobref - masking job reference
# - contref - container (database) reference
# Assign the masking job to the container via the engine REST API.
# Return 0 on success, 1 on failure (an error message is printed).
sub setAssociatedContainer {
my $self = shift;
my $jobref = shift;
my $contref = shift;
logger($self->{_debug}, "Entering MaskingJob_obj::setAssociatedContainer",1);
# NOTE(review): $maskingjob is unused in this sub.
my $maskingjob = $self->{_maskingjob};
my $ret;
my $operation = "resources/json/delphix/maskingjob/" . $jobref;
my %masking_hash = (
"type" => "MaskingJob",
"associatedContainer" => $contref
);
my $json_data = to_json(\%masking_hash);
logger($self->{_debug}, $json_data,2);
my ($result, $result_fmt) = $self->{_dlpxObject}->postJSONData($operation, $json_data);
if ( defined($result->{status}) && ($result->{status} eq 'OK' )) {
$ret = 0;
} else {
$ret = 1;
if (defined($result->{error})) {
print "Problem with assigning job " . $result->{error}->{details} . "\n";
logger($self->{_debug}, $result->{error}->{action} ,1);
} else {
print "Unknown error. Try with debug flag\n";
}
}
return $ret;
}
# Procedure getName
# parameters:
# - reference - masking job reference
# Return the masking job name, or 'N/A' when the reference is undefined
# or unknown
sub getName {
    my ($self, $reference) = @_;
    logger($self->{_debug}, "Entering MaskingJob_obj::getName",1);

    return 'N/A' if (!defined($reference));
    my $job = $self->{_maskingjob}->{$reference};
    return defined($job) ? $job->{name} : 'N/A';
}
# Procedure getMaskingJobByName
# parameters:
# - name - masking job name (matched case-insensitively)
# Return the masking job reference for a name, or undef (after printing
# a message) when the name is unknown or ambiguous
sub getMaskingJobByName {
    my ($self, $name) = @_;
    logger($self->{_debug}, "Entering MaskingJob_obj::getMaskingJobByName",1);

    my $maskingjob = $self->{_maskingjob};
    my @refarray = grep { lc $self->getName($_) eq lc $name } keys %{$maskingjob};

    # FIX: use numeric comparisons; the original 'gt'/'lt' compared the
    # counts as strings.
    if (scalar(@refarray) > 1) {
        print "Too many jobs with same name\n";
        return undef;
    }

    if (scalar(@refarray) < 1) {
        print "Job with name $name not defined\n";
        return undef;
    }

    return $refarray[-1];
}
# Procedure getMaskingJob
# parameters:
# - reference - masking job reference
# Return the raw masking job hash for the reference, or undef when unknown
sub getMaskingJob {
    my ($self, $reference) = @_;
    logger($self->{_debug}, "Entering MaskingJob_obj::getMaskingJob",1);

    # Hash lookup yields undef for unknown references.
    return $self->{_maskingjob}->{$reference};
}
# Procedure getMaskingJobs
# parameters: none
# Return a reference to the sorted list of all masking job references
sub getMaskingJobs {
    my ($self) = @_;
    logger($self->{_debug}, "Entering MaskingJob_obj::getMaskingJobs",1);

    my @references = sort keys %{ $self->{_maskingjob} };
    return \@references;
}
# Procedure loadMaskingJobList
# parameters: none
# Load the list of masking job objects from the Delphix Engine into
# $self->{_maskingjob}, keyed by job reference.
# FIX: removed the unused $pageSize/$offset locals and stale
# commented-out code.
sub loadMaskingJobList
{
    my $self = shift;
    logger($self->{_debug}, "Entering MaskingJob_obj::loadMaskingJobList",1);

    my $operation = "resources/json/delphix/maskingjob";
    my $maskingjob = $self->{_maskingjob};

    my ($result, $result_fmt) = $self->{_dlpxObject}->getJSONResult($operation);

    if (defined($result->{status}) && ($result->{status} eq 'OK')) {
        for my $maskjobitem (@{$result->{result}}) {
            $maskingjob->{$maskjobitem->{reference}} = $maskjobitem;
        }
    } else {
        print "No data returned for $operation. Try to increase timeout \n";
    }
}
1; | delphix/dxtoolkit | lib/MaskingJob_obj.pm | Perl | apache-2.0 | 7,458 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::aws::elb::network::plugin;

use strict;
use warnings;
use base qw(centreon::plugins::script_custom);

# Constructor: build the plugin object, register the command-line modes
# and the custom (API access) backends for the AWS Network ELB plugin.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '0.1';

    # Mode names exposed on the command line => implementing packages.
    my %available_modes = (
        'discovery'      => 'cloud::aws::elb::network::mode::discovery',
        'targets-health' => 'cloud::aws::elb::network::mode::targetshealth',
    );
    %{ $self->{modes} } = %available_modes;

    # Supported API access layers.
    $self->{custom_modes}->{paws}   = 'cloud::aws::custom::paws';
    $self->{custom_modes}->{awscli} = 'cloud::aws::custom::awscli';

    return $self;
}

1;
__END__
=head1 PLUGIN DESCRIPTION
Check Amazon Network Elastic Load Balancing (Amazon Network ELB).
=cut
| centreon/centreon-plugins | cloud/aws/elb/network/plugin.pm | Perl | apache-2.0 | 1,509 |
package VMOMI::DVPortgroupEvent;

use strict;
use warnings;

use parent 'VMOMI::Event';

# Ancestor class names, closest first.
our @class_ancestors = qw(
    Event
    DynamicData
);

# This event type defines no members of its own.
our @class_members = ();

# Return the list of ancestor class names.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return the inherited members with this class' (empty) member list
# appended.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/DVPortgroupEvent.pm | Perl | apache-2.0 | 393 |
#!/usr/bin/env perl
use v5.14;
use strict;
use warnings;
use CoGe::Accessory::Web qw(get_defaults);
use Data::Dumper;
use File::Basename;
use File::Path;
use File::Spec::Functions;
# ---- argument handling -----------------------------------------------------
# Arguments are passed as key/value pairs, e.g.: gid 123 chr 5
my %opts = @ARGV;
my $gid = $opts{'gid'};
my $chr = $opts{'chr'};

# Both identifiers are required, and both end up in a filesystem path
# below, so refuse anything containing a path separator or NUL.
die "usage: $0 gid <genome_id> chr <chromosome>\n"
    unless defined $gid && defined $chr;
die "ERROR: invalid gid '$gid'\n" if $gid =~ m{[/\\\0]};
die "ERROR: invalid chr '$chr'\n" if $chr =~ m{[/\\\0]};

my $config = get_defaults(catfile(File::Basename::dirname(File::Basename::dirname($0)), 'coge.conf'));

# Target file: <SECTEMPDIR>/downloads/genome/<gid>/<gid>_<chr>.faa
my $path = catfile($config->{SECTEMPDIR}, "downloads/genome", $gid);
mkpath( $path, 0, 0777 ) unless -d $path;
my $file = catfile($path, $gid . "_" . $chr . ".faa");

# The generated file acts as a cache: only (re)build it if missing.
unless (-e $file) {
    # CoGeX was referenced but never loaded by this script -- load it
    # on demand so the class-method call below cannot fail to resolve.
    require CoGeX;
    my $db = CoGeX->dbconnect($config);
    die "ERROR: couldn't connect to the database" unless $db;

    my $genome = $db->resultset('Genome')->find($gid);
    die "ERROR: unable to create genome object using id $gid" unless ($genome);

    # Fetch the full chromosome sequence from genome storage.
    my $seq = $genome->get_genomic_sequence(
        chr   => $chr,
        start => 1,
        stop  => $genome->get_chromosome_length($chr)
    );

    open(my $fh, '>', $file) or die "Could not open file '$file' $!";
    print $fh '>chromosome ' . $chr . "\n";
    # Wrap the sequence at 70 characters per line (FASTA convention).
    for (my $i = 0; $i < length($seq); $i += 70) {
        print $fh substr($seq, $i, 70);
        print $fh "\n";
    }
    close $fh;
}
| LyonsLab/coge | scripts/generate_chr_fasta.pl | Perl | bsd-2-clause | 1,199 |
# SOAP::WSDL-generated element class for the <getScientificNameFromTSN>
# request wrapper of the ITIS web service. Generated code: regenerate
# from the WSDL rather than editing by hand.
package MyElements::getScientificNameFromTSN;
use strict;
use warnings;
{ # BLOCK to scope variables
# XML namespace this element belongs to.
sub get_xmlns { 'http://itis_service.itis.usgs.org' }
__PACKAGE__->__set_name('getScientificNameFromTSN');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
SOAP::WSDL::XSD::Typelib::Element
SOAP::WSDL::XSD::Typelib::ComplexType
);
# No XML attributes are defined for this element.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(SOAP::WSDL::XSD::Typelib::ComplexType);
Class::Std::initialize();
{ # BLOCK to scope variables
# Inside-out attribute storage for the single 'tsn' property
# (exposes get_tsn via the :get<tsn> attribute).
my %tsn_of :ATTR(:get<tsn>);
# _factory takes positional argument lists: property order, storage
# hashes, property types, and XML names. Order must stay in sync.
__PACKAGE__->_factory(
[ qw( tsn
) ],
{
'tsn' => \%tsn_of,
},
{
'tsn' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
},
{
'tsn' => 'tsn',
}
);
} # end BLOCK
} # end of BLOCK
1;
=pod
=head1 NAME
MyElements::getScientificNameFromTSN
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
getScientificNameFromTSN from the namespace http://itis_service.itis.usgs.org.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * tsn
$element->set_tsn($data);
$element->get_tsn();
=back
=head1 METHODS
=head2 new
my $element = MyElements::getScientificNameFromTSN->new($data);
Constructor. The following data structure may be passed to new():
{
tsn => $some_value, # string
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| phylotastic-legacy/TNRastic | tnrs_handler/bin/tnrs_adapter/itis/SOAPInterface/MyElements/getScientificNameFromTSN.pm | Perl | bsd-3-clause | 1,647 |
# SOAP::WSDL-generated element class for the <getRankNamesResponse>
# reply of the ITIS web service. Generated code: regenerate from the
# WSDL rather than editing by hand.
package MyElements::getRankNamesResponse;
use strict;
use warnings;
{ # BLOCK to scope variables
# XML namespace this element belongs to.
sub get_xmlns { 'http://itis_service.itis.usgs.org' }
__PACKAGE__->__set_name('getRankNamesResponse');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
SOAP::WSDL::XSD::Typelib::Element
SOAP::WSDL::XSD::Typelib::ComplexType
);
# No XML attributes are defined for this element.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(SOAP::WSDL::XSD::Typelib::ComplexType);
Class::Std::initialize();
{ # BLOCK to scope variables
# Inside-out attribute storage for the single 'return' property
# (exposes get_return via the :get<return> attribute).
my %return_of :ATTR(:get<return>);
# _factory takes positional argument lists: property order, storage
# hashes, property types, and XML names. Order must stay in sync.
__PACKAGE__->_factory(
[ qw( return
) ],
{
'return' => \%return_of,
},
{
'return' => 'MyTypes::SvcRankNameList',
},
{
'return' => 'return',
}
);
} # end BLOCK
} # end of BLOCK
1;
=pod
=head1 NAME
MyElements::getRankNamesResponse
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
getRankNamesResponse from the namespace http://itis_service.itis.usgs.org.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * return
$element->set_return($data);
$element->get_return();
=back
=head1 METHODS
=head2 new
my $element = MyElements::getRankNamesResponse->new($data);
Constructor. The following data structure may be passed to new():
{
return => { # MyTypes::SvcRankNameList
rankNames => { # MyTypes::SvcRankName
kingdomId => $some_value, # string
kingdomName => $some_value, # string
rankId => $some_value, # string
rankName => $some_value, # string
},
},
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| phylotastic-legacy/TNRastic | tnrs_handler/bin/tnrs_adapter/itis/SOAPInterface/MyElements/getRankNamesResponse.pm | Perl | bsd-3-clause | 1,881 |
/* assert on complexType */
'assert on empty complexType (none, invalid)'(fail):
{|xml||
<assert_complexType_empty_none>
String
</assert_complexType_empty_none>
|}.
'assert on empty complexType (none, valid)':
{|xml||
<assert_complexType_empty_none></assert_complexType_empty_none>
|}.
'assert on empty complexType (single, invalid)'(fail):
{|xml||
<assert_complexType_empty_single_invalid></assert_complexType_empty_single_invalid>
|}.
'assert on empty complexType (single, valid)':
{|xml||
<assert_complexType_empty_single_valid></assert_complexType_empty_single_valid>
|}.
'assert on empty complexType with attribute (single, invalid)'(fail):
{|xml||
<assert_complexType_empty_attribute_single_invalid attr="2"></assert_complexType_empty_attribute_single_invalid>
|}.
'assert on empty complexType with attribute (single, valid)':
{|xml||
<assert_complexType_empty_attribute_single_valid attr="2"></assert_complexType_empty_attribute_single_valid>
|}.
'assert on empty complexType (multiple, none valid)'(fail):
{|xml||
<assert_complexType_empty_multiple_none_valid></assert_complexType_empty_multiple_none_valid>
|}.
'assert on empty complexType (multiple, partially valid)'(fail):
{|xml||
<assert_complexType_empty_multiple_partially_valid></assert_complexType_empty_multiple_partially_valid>
|}.
'assert on empty complexType (multiple, all valid)':
{|xml||
<assert_complexType_empty_multiple_all_valid></assert_complexType_empty_multiple_all_valid>
|}.
'assert on non-empty complexType (none, invalid)'(fail):
{|xml||
<assert_complexType_none>
<item>$asijc</item>
</assert_complexType_none>
|}.
'assert on non-empty complexType (none, valid)':
{|xml||
<assert_complexType_none>
<item>2</item>
</assert_complexType_none>
|}.
'assert on non-empty complexType (single, invalid)'(fail):
{|xml||
<assert_complexType_single_invalid>
<item>2</item>
</assert_complexType_single_invalid>
|}.
'assert on non-empty complexType (single, valid)':
{|xml||
<assert_complexType_single_valid>
<item>2</item>
</assert_complexType_single_valid>
|}.
'assert on non-empty complexType with attribute (single, invalid)'(fail):
{|xml||
<assert_complexType_single_attribute_invalid attr="2">
<item>2</item>
</assert_complexType_single_attribute_invalid>
|}.
'assert on non-empty complexType with attribute (single, valid)':
{|xml||
<assert_complexType_single_attribute_valid attr="2">
<item>2</item>
</assert_complexType_single_attribute_valid>
|}.
'assert on non-empty complexType (multiple, none valid)'(fail):
{|xml||
<assert_complexType_multiple_non_valid>
<item>2</item>
</assert_complexType_multiple_non_valid>
|}.
'assert on non-empty complexType (multiple, partially valid)'(fail):
{|xml||
<assert_complexType_multiple_partially_valid>
<item>2</item>
</assert_complexType_multiple_partially_valid>
|}.
'assert on non-empty complexType (multiple, all valid)':
{|xml||
<assert_complexType_multiple_all_valid>
<item>2</item>
</assert_complexType_multiple_all_valid>
|}.
/* assertion on simpleType */
'assertion on simpleType restriction (none, invalid)'(fail):
{|xml||
<assertion_restriction_none>String</assertion_restriction_none>
|}.
'assertion on simpleType restriction (none, valid)':
{|xml||
<assertion_restriction_none>2</assertion_restriction_none>
|}.
'assertion on simpleType restriction (single, invalid)'(fail):
{|xml||
<assertion_restriction_single>1</assertion_restriction_single>
|}.
'assertion on simpleType restriction (single, valid)':
{|xml||
<assertion_restriction_single>2</assertion_restriction_single>
|}.
'assertion on simpleType restriction (multiple, none valid)'(fail):
{|xml||
<assertion_restriction_multiple>1</assertion_restriction_multiple>
|}.
'assertion on simpleType restriction (multiple, partially valid)'(fail):
{|xml||
<assertion_restriction_multiple>2</assertion_restriction_multiple>
|}.
'assertion on simpleType restriction (multiple, all valid)':
{|xml||
<assertion_restriction_multiple>6</assertion_restriction_multiple>
|}. | jonakalkus/xsd | test/validation/xpath_calls.pl | Perl | mit | 4,034 |
# SOAP::WSDL-generated complexType class representing a simple
# height/width dimension (AdWords API v201406). Generated code:
# regenerate from the WSDL rather than editing by hand.
package Google::Ads::AdWords::v201406::Dimensions;
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace this type belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201406' };
# No XML attributes are defined for this type.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out attribute storage for the width/height properties.
my %width_of :ATTR(:get<width>);
my %height_of :ATTR(:get<height>);
# _factory takes positional argument lists: property order, storage
# hashes, property types, and XML names. Order must stay in sync.
__PACKAGE__->_factory(
[ qw( width
height
) ],
{
'width' => \%width_of,
'height' => \%height_of,
},
{
'width' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
'height' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
},
{
'width' => 'width',
'height' => 'height',
}
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201406::Dimensions
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
Dimensions from the namespace https://adwords.google.com/api/adwords/cm/v201406.
Represents a simple height-width dimension.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * width
=item * height
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201406/Dimensions.pm | Perl | apache-2.0 | 1,477 |
## OpenXPKI::Server::Authentication::Connector
##
## Written 2013 by Oliver Welter, based on
## OpenXPKI::Server::Authentication::Password
## (C) Copyright 2013 by The OpenXPKI Project
package OpenXPKI::Server::Authentication::Connector;
use strict;
use warnings;
use OpenXPKI::Debug;
use OpenXPKI::Exception;
use OpenXPKI::Server::Context qw( CTX );
use Moose;
# Connector path, stored as a list of config path components; built in
# BUILDARGS from '<handler path>.source'. login_step appends the account
# name and queries it with the password as bind parameter.
has source => (
is => 'ro',
isa => 'ArrayRef',
);
# Static role assigned to every successfully authenticated user.
has role => (
is => 'ro',
isa => 'Str|Undef',
);
# Human-readable handler name, sent to the client in the login prompt.
has name => (
is => 'ro',
isa => 'Str',
default => 'Connector',
);
# Human-readable handler description, sent to the client in the login prompt.
has description => (
is => 'ro',
isa => 'Str',
default => '',
);
# Translate the single config-path constructor argument into the
# attribute hash Moose expects: label/description/role come from the
# handler's config node, and the connector source path is that node's
# 'source' child (kept as a list of path components).
around BUILDARGS => sub {
    my ($orig, $class, $path) = @_;

    my $config = CTX('config');

    my @source_path = ( split(/\./, $path), 'source' );

    ##! 2: "load name and description for handler"
    return $class->$orig({
        description => $config->get("$path.description"),
        name        => $config->get("$path.label"),
        role        => $config->get("$path.role"),
        source      => \@source_path,
    });
};
# Perform one step of the login handshake. Returns (user, role,
# service_message): user/role are undef while credentials are still
# being collected; on success the service-ready message is returned;
# on failure an exception is thrown.
sub login_step {
    ##! 1: 'start'
    my ($self, $arg_ref) = @_;

    my $name = $arg_ref->{HANDLER};    # (currently unused)
    my $msg  = $arg_ref->{MESSAGE};

    # First round trip: ask the client for credentials unless both a
    # login and a password have been supplied.
    unless (exists $msg->{PARAMS}->{LOGIN} && exists $msg->{PARAMS}->{PASSWD}) {
        ##! 4: 'no login data received (yet)'
        return (undef, undef,
            {
                SERVICE_MSG => "GET_PASSWD_LOGIN",
                PARAMS => {
                    NAME        => $self->name(),
                    DESCRIPTION => $self->description(),
                },
            },
        );
    }

    ##! 2: 'login data received'
    my $account = $msg->{PARAMS}->{LOGIN};
    my $passwd  = $msg->{PARAMS}->{PASSWD};
    ##! 2: "account ... $account"

    # The password check is delegated to a connector with password
    # binding: the password travels as bind parameter of the lookup.
    my $result = CTX('config')->get( [ @{$self->source()}, $account ], { password => $passwd } );

    if (defined $result && ref $result ne '') {
        # A non-scalar result usually means a wrong connector definition.
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_PASSWORD_CONNECTOR_RETURN_NOT_SCALAR",
            params  => {
                SOURCE  => $self->source(),
                ACCOUNT => $account,
            },
        );
    }

    if ($result) {
        # Success: hand back user, static role, and service-ready message.
        CTX('log')->auth()->info("Login successful for user $account with role " . $self->role());
        return ($account, $self->role(), { SERVICE_MSG => 'SERVICE_READY', });
    }

    ##! 4: "Login failed for $account with result $result"
    CTX('log')->auth()->error("Login FAILED for user $account with role " . $self->role());
    OpenXPKI::Exception->throw (
        message => "I18N_OPENXPKI_SERVER_AUTHENTICATION_PASSWORD_LOGIN_FAILED",
        params  => {
            USER => $account,
        },
    );
}
1;
__END__
=head1 Name
OpenXPKI::Server::Authentication::Connector - passphrase based authentication
using connector backend.
=head1 Description
Replacement for OpenXPKI::Server::Authentication::Password which checks the
password aganist a connector backend using the password as bind parameter.
=head1 Configuration
Requires name, description as all other connectors. The connector just checks
the password, the role is static for all users and given as key I<role>.
The key I<source> must be a connector reference that supports the password
bind query syntax.
Suited connectors are e.g. Connector::Builtin::Authentication::*
User Password:
type: Connector
label: User Password
description: I18N_OPENXPKI_CONFIG_AUTH_HANDLER_DESCRIPTION_PASSWORD
role: User
source@: connector:auth.connector.localuser
returns a pair of (user, role, response_message) for a given login
step. If user and role are undefined, the login is not yet finished.
| stefanomarty/openxpki | core/server/OpenXPKI/Server/Authentication/Connector.pm | Perl | apache-2.0 | 4,054 |
# SOAP::WSDL-generated element class for the AdGroupAdService query
# response (AdWords API v201406). Generated code: regenerate from the
# WSDL rather than editing by hand.
package Google::Ads::AdWords::v201406::AdGroupAdService::queryResponse;
use strict;
use warnings;
{ # BLOCK to scope variables
# XML namespace this element belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201406' }
__PACKAGE__->__set_name('queryResponse');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
SOAP::WSDL::XSD::Typelib::Element
Google::Ads::SOAP::Typelib::ComplexType
);
# No XML attributes are defined for this element.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out attribute storage for the single 'rval' property
# (exposes get_rval via the :get<rval> attribute).
my %rval_of :ATTR(:get<rval>);
# _factory takes positional argument lists: property order, storage
# hashes, property types, and XML names. Order must stay in sync.
__PACKAGE__->_factory(
[ qw( rval
) ],
{
'rval' => \%rval_of,
},
{
'rval' => 'Google::Ads::AdWords::v201406::AdGroupAdPage',
},
{
'rval' => 'rval',
}
);
} # end BLOCK
} # end of BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201406::AdGroupAdService::queryResponse
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
queryResponse from the namespace https://adwords.google.com/api/adwords/cm/v201406.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * rval
$element->set_rval($data);
$element->get_rval();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201406::AdGroupAdService::queryResponse->new($data);
Constructor. The following data structure may be passed to new():
{
rval => $a_reference_to, # see Google::Ads::AdWords::v201406::AdGroupAdPage
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201406/AdGroupAdService/queryResponse.pm | Perl | apache-2.0 | 1,773 |
#------------------------------------------------------------------------------
# File: Ricoh.pm
#
# Description: Ricoh EXIF maker notes tags
#
# Revisions: 03/28/2005 - P. Harvey Created
#
# References: 1) http://www.ozhiker.com/electronics/pjmt/jpeg_info/ricoh_mn.html
# 2) http://homepage3.nifty.com/kamisaka/makernote/makernote_ricoh.htm
# 3) Tim Gray private communication (GR)
# 4) https://github.com/atotto/ricoh-theta-tools/
# IB) Iliah Borg private communication (LibRaw)
#------------------------------------------------------------------------------
package Image::ExifTool::Ricoh;
use strict;
use vars qw($VERSION);
use Image::ExifTool qw(:DataAccess :Utils);
use Image::ExifTool::Exif;
$VERSION = '1.33';
sub ProcessRicohText($$$);
sub ProcessRicohRMETA($$$);
# lens types for Ricoh GXR
# Maps lens unit model codes (e.g. "RL1") to the full lens unit name.
my %ricohLensIDs = (
Notes => q{
Lens units available for the GXR, used by the Ricoh Composite LensID tag. Note
that unlike lenses for all other makes of cameras, the focal lengths in these
model names have already been scaled to include the 35mm crop factor.
},
# (the exact lens model names used by Ricoh, except for a change in case)
'RL1' => 'GR Lens A12 50mm F2.5 Macro',
'RL2' => 'Ricoh Lens S10 24-70mm F2.5-4.4 VC',
'RL3' => 'Ricoh Lens P10 28-300mm F3.5-5.6 VC',
'RL5' => 'GR Lens A12 28mm F2.5',
'RL8' => 'Mount A12',
'RL6' => 'Ricoh Lens A16 24-85mm F3.5-5.5',
);
%Image::ExifTool::Ricoh::Main = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
WRITE_PROC => \&Image::ExifTool::Exif::WriteExif,
CHECK_PROC => \&Image::ExifTool::Exif::CheckExif,
WRITABLE => 1,
0x0001 => { Name => 'MakerNoteType', Writable => 'string' },
0x0002 => { #PH
Name => 'FirmwareVersion',
Writable => 'string',
# eg. "Rev0113" is firmware version 1.13
PrintConv => '$val=~/^Rev(\d+)$/ ? sprintf("%.2f",$1/100) : $val',
PrintConvInv => '$val=~/^(\d+)\.(\d+)$/ ? sprintf("Rev%.2d%.2d",$1,$2) : $val',
},
0x0005 => [ #PH
{
Condition => '$$valPt =~ /^[-\w ]+$/',
Name => 'SerialNumber', # (verified for GXR)
Writable => 'undef',
Count => 16,
Notes => q{
the serial number stamped on the camera begins with 2 model-specific letters
followed by the last 8 digits of this value. For the GXR, this is the
serial number of the lens unit
},
PrintConv => '$val=~s/^(.*)(.{8})$/($1)$2/; $val',
PrintConvInv => '$val=~tr/()//d; $val',
},{
Name => 'InternalSerialNumber',
Writable => 'undef',
Count => 16,
ValueConv => 'unpack("H*", $val)',
ValueConvInv => 'pack("H*", $val)',
},
],
0x0e00 => {
Name => 'PrintIM',
Writable => 0,
Description => 'Print Image Matching',
SubDirectory => { TagTable => 'Image::ExifTool::PrintIM::Main' },
},
0x1000 => { #3
Name => 'RecordingFormat',
Writable => 'int16u',
PrintConv => {
2 => 'JPEG',
3 => 'DNG',
},
},
0x1001 => [{
Name => 'ImageInfo',
Condition => '$format ne "int16u"',
SubDirectory => { TagTable => 'Image::ExifTool::Ricoh::ImageInfo' },
},{ #3
Name => 'ExposureProgram',
Writable => 'int16u',
Notes => 'GR',
PrintConv => {
1 => 'Auto',
2 => 'Program AE',
3 => 'Aperture-priority AE',
4 => 'Shutter speed priority AE',
5 => 'Shutter/aperture priority AE', # TAv
6 => 'Manual',
7 => 'Movie', #PH
},
}],
0x1002 => { #3
Name => 'DriveMode',
Condition => '$format eq "int16u"',
Notes => 'valid only for some models',
Writable => 'int16u',
PrintConv => {
0 => 'Single-frame',
1 => 'Continuous',
8 => 'AF-priority Continuous',
},
},
0x1003 => [{
Name => 'Sharpness',
Condition => '$format ne "int16u"',
Writable => 'int32u',
PrintConv => {
0 => 'Sharp',
1 => 'Normal',
2 => 'Soft',
},
},{ #3
Name => 'WhiteBalance',
Writable => 'int16u',
Notes => 'GR',
PrintConv => {
0 => 'Auto',
1 => 'Multi-P Auto',
2 => 'Daylight',
3 => 'Cloudy',
4 => 'Incandescent 1',
5 => 'Incandescent 2',
6 => 'Daylight Fluorescent',
7 => 'Neutral White Fluorescent',
8 => 'Cool White Fluorescent',
9 => 'Warm White Fluorescent',
10 => 'Manual',
11 => 'Kelvin',
12 => 'Shade', #IB
},
}],
0x1004 => { #3
Name => 'WhiteBalanceFineTune',
Condition => '$format eq "int16u"',
Format => 'int16s',
Writable => 'int16u',
Notes => q{
2 numbers: amount of adjustment towards Amber and Green. Not valid for all
models
},
},
# 0x1005 int16u - 5
0x1006 => { #3
Name => 'FocusMode',
Writable => 'int16u',
PrintConv => {
1 => 'Manual',
2 => 'Multi AF',
3 => 'Spot AF',
4 => 'Snap',
5 => 'Infinity',
7 => 'Face Detect', #PH
8 => 'Subject Tracking',
9 => 'Pinpoint AF',
10 => 'Movie', #PH
},
},
0x1007 => { #3
Name => 'AutoBracketing',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
9 => 'AE',
11 => 'WB',
16 => 'DR', # (dynamic range)
17 => 'Contrast',
18 => 'WB2', # (selects two different WB presets besides normal)
19 => 'Effect',
},
},
0x1009 => { #3
Name => 'MacroMode',
Writable => 'int16u',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x100a => { #3
Name => 'FlashMode',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'Auto, Fired',
2 => 'On',
3 => 'Auto, Fired, Red-eye reduction',
4 => 'Slow Sync',
5 => 'Manual',
6 => 'On, Red-eye reduction',
7 => 'Synchro, Red-eye reduction',
8 => 'Auto, Did not fire',
},
},
0x100b => { #3
Name => 'FlashExposureComp',
Writable => 'rational64s',
PrintConv => '$val ? sprintf("%+.1f",$val) : 0',
PrintConvInv => '$val',
},
0x100c => { #3
Name => 'ManualFlashOutput',
Writable => 'rational64s',
PrintConv => {
0 => 'Full',
-24 => '1/1.4',
-48 => '1/2',
-72 => '1/2.8',
-96 => '1/4',
-120 => '1/5.6',
-144 => '1/8',
-168 => '1/11',
-192 => '1/16',
-216 => '1/22',
-240 => '1/32',
-288 => '1/64',
},
},
0x100d => { #3
Name => 'FullPressSnap',
Writable => 'int16u',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x100e => { #3
Name => 'DynamicRangeExpansion',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
3 => 'Weak',
4 => 'Medium',
5 => 'Strong',
},
},
0x100f => { #3
Name => 'NoiseReduction',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'Weak',
2 => 'Medium',
3 => 'Strong',
},
},
0x1010 => { #3
Name => 'ImageEffects',
Writable => 'int16u',
PrintConv => {
0 => 'Standard',
1 => 'Vivid',
3 => 'Black & White',
5 => 'B&W Toning Effect',
6 => 'Setting 1',
7 => 'Setting 2',
9 => 'High-contrast B&W',
10 => 'Cross Process',
11 => 'Positive Film',
12 => 'Bleach Bypass',
13 => 'Retro',
15 => 'Miniature',
17 => 'High Key',
},
},
0x1011 => { #3
Name => 'Vignetting',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'Low',
2 => 'Medium',
3 => 'High',
},
},
0x1012 => { #PH
Name => 'Contrast',
Writable => 'int32u',
Format => 'int32s', #3 (high-contrast B&W also has -1 and -2 settings)
PrintConv => {
OTHER => sub { shift },
2147483647 => 'MAX', #3 (high-contrast B&W effect MAX setting)
},
},
0x1013 => { Name => 'Saturation', Writable => 'int32u' }, #PH
0x1014 => { Name => 'Sharpness', Writable => 'int32u' }, #3
0x1015 => { #3
Name => 'ToningEffect',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'Sepia',
2 => 'Red',
3 => 'Green',
4 => 'Blue',
5 => 'Purple',
6 => 'B&W',
7 => 'Color',
},
},
0x1016 => { #3
Name => 'HueAdjust',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'Basic',
2 => 'Magenta',
3 => 'Yellow',
4 => 'Normal',
5 => 'Warm',
6 => 'Cool',
},
},
0x1017 => { #3
Name => 'WideAdapter',
Writable => 'int16u',
PrintConv => {
0 => 'Not Attached',
2 => 'Attached', # (21mm)
},
},
0x1018 => { #3
Name => 'CropMode35mm',
Writable => 'int16u',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x1019 => { #3
Name => 'NDFilter',
Writable => 'int16u',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x101a => { Name => 'WBBracketShotNumber', Writable => 'int16u' }, #3
# 0x1100 - related to DR correction (ref 3)
0x1307 => { Name => 'ColorTempKelvin', Writable => 'int32u' }, #3
0x1308 => { Name => 'ColorTemperature', Writable => 'int32u' }, #3
0x1500 => { #3
Name => 'FocalLength',
Writable => 'rational64u',
PrintConv => 'sprintf("%.1f mm",$val)',
PrintConvInv => '$val=~s/\s*mm$//;$val',
},
0x1200 => { #3
Name => 'AFStatus',
Writable => 'int16u',
PrintConv => {
0 => 'Out of Focus',
1 => 'In Focus',
},
},
# 0x1201-0x1204 - related to focus points (ref 3)
0x1201 => { #PH (NC)
Name => 'AFAreaXPosition1',
Writable => 'int32u',
Notes => 'manual AF area position in a 1280x864 image',
},
0x1202 => { Name => 'AFAreaYPosition1', Writable => 'int32u' }, #PH (NC)
0x1203 => { #PH (NC)
Name => 'AFAreaXPosition',
Writable => 'int32u',
Notes => 'manual AF area position in the full image',
# (coordinates change to correspond with smaller image
# when recording reduced-size JPEG)
},
0x1204 => { Name => 'AFAreaYPosition', Writable => 'int32u' }, #PH (NC)
0x1205 => { #3
Name => 'AFAreaMode',
Writable => 'int16u',
PrintConv => {
0 => 'Auto',
2 => 'Manual',
},
},
0x1601 => { Name => 'SensorWidth', Writable => 'int32u' }, #3
0x1602 => { Name => 'SensorHeight', Writable => 'int32u' }, #3
0x1603 => { Name => 'CroppedImageWidth', Writable => 'int32u' }, #3
0x1604 => { Name => 'CroppedImageHeight', Writable => 'int32u' }, #3
# 0x1700 - Composite? (0=normal image, 1=interval composite, 2=multi-exposure composite) (ref 3)
# 0x1703 - 0=normal, 1=final composite (ref 3)
# 0x1704 - 0=normal, 2=final composite (ref 3)
0x2001 => [
{
Name => 'RicohSubdir',
Condition => q{
$self->{Model} !~ /^Caplio RR1\b/ and
($format ne 'int32u' or $count != 1)
},
SubDirectory => {
Validate => '$val =~ /^\[Ricoh Camera Info\]/',
TagTable => 'Image::ExifTool::Ricoh::Subdir',
Start => '$valuePtr + 20',
ByteOrder => 'BigEndian',
},
},
{
Name => 'RicohSubdirIFD',
# the CX6 and GR Digital 4 write an int32u pointer in AVI videos -- doh!
Condition => '$self->{Model} !~ /^Caplio RR1\b/',
Flags => 'SubIFD',
SubDirectory => {
TagTable => 'Image::ExifTool::Ricoh::Subdir',
Start => '$val + 20', # (skip over "[Ricoh Camera Info]\0" header)
ByteOrder => 'BigEndian',
},
},
{
Name => 'RicohRR1Subdir',
SubDirectory => {
Validate => '$val =~ /^\[Ricoh Camera Info\]/',
TagTable => 'Image::ExifTool::Ricoh::Subdir',
Start => '$valuePtr + 20',
ByteOrder => 'BigEndian',
# the Caplio RR1 uses a different base address -- doh!
Base => '$start-20',
},
},
],
0x4001 => {
Name => 'ThetaSubdir',
Groups => { 1 => 'MakerNotes' }, # SubIFD needs group 1 set
Flags => 'SubIFD',
SubDirectory => {
TagTable => 'Image::ExifTool::Ricoh::ThetaSubdir',
Start => '$val',
},
},
);
# Ricoh type 2 maker notes (ref PH)
# (similar to Kodak::Type11 and GE::Main)
%Image::ExifTool::Ricoh::Type2 = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
NOTES => q{
Tags written by models such as the Ricoh HZ15 and the Pentax XG-1. These
are not writable due to numerous formatting errors as written by these
cameras.
},
# 0x104 - int32u: 1
# 0x200 - int32u[3]: 0 0 0
# 0x202 - int16u: 0 (GE Macro?)
# 0x203 - int16u: 0,3 (Kodak PictureEffect?)
# 0x204 - rational64u: 0/10
# 0x205 - rational64u: 150/1
# 0x206 - float[6]: (not really float because size should be 2 bytes)
0x207 => {
Name => 'RicohModel',
Writable => 'string',
},
0x300 => {
# brutal. There are lots of errors in the XG-1 maker notes. For the XG-1,
# 0x300 has a value of "XG-1Pentax". The "XG-1" part is likely an improperly
# stored 0x207 RicohModel, resulting in an erroneous 4-byte offset for this tag
Name => 'RicohMake',
Writable => 'undef',
# strip trailing spaces from the stored make string
ValueConv => '$val =~ s/ *$//; $val',
},
# 0x306 - int16u: 1
# 0x500 - int16u: 0,1
# 0x501 - int16u: 0
# 0x502 - int16u: 0
# 0x9c9c - int8u[6]: ?
# 0xadad - int8u[20480]: ?
);
# Ricoh image info (ref 2)
%Image::ExifTool::Ricoh::ImageInfo = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Image' },
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
WRITABLE => 1,
PRIORITY => 0,
FORMAT => 'int8u',
FIRST_ENTRY => 0,
IS_OFFSET => [ 28 ], # tag 28 is 'IsOffset'
0 => {
Name => 'RicohImageWidth',
Format => 'int16u',
},
2 => {
Name => 'RicohImageHeight',
Format => 'int16u',
},
6 => {
Name => 'RicohDate',
Groups => { 2 => 'Time' },
Format => 'int8u[7]',
# (what an insane way to encode the date)
ValueConv => q{
sprintf("%.2x%.2x:%.2x:%.2x %.2x:%.2x:%.2x",
split(' ', $val));
},
ValueConvInv => q{
my @vals = ($val =~ /(\d{1,2})/g);
push @vals, 0 if @vals < 7;
join(' ', map(hex, @vals));
},
},
28 => {
Name => 'PreviewImageStart',
Format => 'int16u', # ha! (only the lower 16 bits, even if > 0xffff)
Flags => 'IsOffset',
OffsetPair => 30, # associated byte count tagID
DataTag => 'PreviewImage',
Protected => 2,
WriteGroup => 'MakerNotes',
# prevent preview from being written to MakerNotes of DNG images
RawConvInv => q{
return $val if $$self{FILE_TYPE} eq "JPEG";
warn "\n"; # suppress warning
return undef;
},
},
30 => {
Name => 'PreviewImageLength',
Format => 'int16u',
OffsetPair => 28, # point to associated offset
DataTag => 'PreviewImage',
Protected => 2,
WriteGroup => 'MakerNotes',
RawConvInv => q{
return $val if $$self{FILE_TYPE} eq "JPEG";
warn "\n"; # suppress warning
return undef;
},
},
32 => {
Name => 'FlashMode',
PrintConv => {
0 => 'Off',
1 => 'Auto', #PH
2 => 'On',
},
},
33 => {
Name => 'Macro',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
34 => {
Name => 'Sharpness',
PrintConv => {
0 => 'Sharp',
1 => 'Normal',
2 => 'Soft',
},
},
38 => {
Name => 'WhiteBalance',
PrintConv => {
0 => 'Auto',
1 => 'Daylight',
2 => 'Cloudy',
3 => 'Tungsten',
4 => 'Fluorescent',
5 => 'Manual', #PH (GXR)
7 => 'Detail',
9 => 'Multi-pattern Auto', #PH (GXR)
},
},
39 => {
Name => 'ISOSetting',
PrintConv => {
0 => 'Auto',
1 => 64,
2 => 100,
4 => 200,
6 => 400,
7 => 800,
8 => 1600,
9 => 'Auto', #PH (? CX3)
10 => 3200, #PH (A16)
11 => '100 (Low)', #PH (A16)
},
},
40 => {
Name => 'Saturation',
PrintConv => {
0 => 'High',
1 => 'Normal',
2 => 'Low',
3 => 'B&W',
6 => 'Toning Effect', #PH (GXR Sepia,Red,Green,Blue,Purple)
9 => 'Vivid', #PH (GXR)
10 => 'Natural', #PH (GXR)
},
},
);
# Ricoh subdirectory tags (ref PH)
# NOTE: this subdir is currently not writable because the offsets would require
# special code to handle the funny start location and base offset
%Image::ExifTool::Ricoh::Subdir = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
WRITE_PROC => \&Image::ExifTool::Exif::WriteExif,
CHECK_PROC => \&Image::ExifTool::Exif::CheckExif,
# the significance of the following 2 dates is not known. They are usually
# within a month of each other, but I have seen differences of nearly a year.
# Sometimes the first is more recent, and sometimes the second.
# 0x0003 - int32u[1]
0x0004 => { # (NC)
Name => 'ManufactureDate1',
Groups => { 2 => 'Time' },
Writable => 'string',
Count => 20,
},
0x0005 => { # (NC)
Name => 'ManufactureDate2',
Groups => { 2 => 'Time' },
Writable => 'string',
Count => 20,
},
# 0x0006 - undef[16] ?
# 0x0007 - int32u[1] ?
# 0x000c - int32u[2] 1st number is a counter (file number? shutter count?) - PH
# 0x0014 - int8u[338] could contain some data related to face detection? - PH
# 0x0015 - int8u[2]: related to noise reduction?
0x001a => { #PH
# face detection data, decoded by the FaceInfo table below
Name => 'FaceInfo',
SubDirectory => { TagTable => 'Image::ExifTool::Ricoh::FaceInfo' },
},
0x0029 => {
Name => 'FirmwareInfo',
SubDirectory => { TagTable => 'Image::ExifTool::Ricoh::FirmwareInfo' },
},
0x002a => {
Name => 'NoiseReduction',
# this is the applied value if NR is set to "Auto"
Writable => 'int32u',
PrintConv => {
0 => 'Off',
1 => 'Weak',
2 => 'Strong',
3 => 'Max',
},
},
0x002c => { # (GXR)
Name => 'SerialInfo',
SubDirectory => { TagTable => 'Image::ExifTool::Ricoh::SerialInfo' },
}
# 0x000E ProductionNumber? (ref 2) [no. zero for most models - PH]
);
# Ricoh Theta subdirectory tags - Contains orientation information (ref 4)
# The Accelerometer tag here is also consumed by the RicohPitch/RicohRoll
# composite tags defined later in this file.
%Image::ExifTool::Ricoh::ThetaSubdir = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
WRITE_PROC => \&Image::ExifTool::Exif::WriteExif,
CHECK_PROC => \&Image::ExifTool::Exif::CheckExif,
# 0x0001 - int16u[1] ?
# 0x0002 - int16u[1] ?
0x0003 => {
Name => 'Accelerometer',
Writable => 'rational64s',
Count => 2,
},
0x0004 => {
Name => 'Compass',
Writable => 'rational64u',
},
# 0x0005 - int16u[1] ?
# 0x0006 - int16u[1] ?
# 0x0007 - int16u[1] ?
# 0x0008 - int16u[1] ?
# 0x0009 - int16u[1] ?
0x000a => {
Name => 'TimeZone',
Writable => 'string',
},
# 0x0101 - int16u[4] ISO (why 4 values?)
# 0x0102 - rational64s[2] FNumber (why 2 values?)
# 0x0103 - rational64u[2] ExposureTime (why 2 values?)
# 0x0104 - string[9] SerialNumber?
# 0x0105 - string[9] SerialNumber?
);
# face detection information (ref PH, CX4)
# Binary-data table.  FacesDetected (offset 181 = 0xb5, declared as a
# DATAMEMBER) is read first and gates which FaceNPosition entries are
# extracted via their Condition expressions.
%Image::ExifTool::Ricoh::FaceInfo = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
WRITABLE => 1,
FIRST_ENTRY => 0,
DATAMEMBER => [ 181 ],
0xb5 => { # (should be int16u at 0xb4?)
Name => 'FacesDetected',
DataMember => 'FacesDetected',
RawConv => '$$self{FacesDetected} = $val',
},
0xb6 => {
Name => 'FaceDetectFrameSize',
Format => 'int16u[2]',
},
0xbc => {
Name => 'Face1Position',
Condition => '$$self{FacesDetected} >= 1',
Format => 'int16u[4]',
Notes => q{
left, top, width and height of detected face in coordinates of
FaceDetectFrameSize with increasing Y downwards
},
},
# each subsequent face record is 12 bytes (0xc8 - 0xbc) after the previous one
0xc8 => {
Name => 'Face2Position',
Condition => '$$self{FacesDetected} >= 2',
Format => 'int16u[4]',
},
0xd4 => {
Name => 'Face3Position',
Condition => '$$self{FacesDetected} >= 3',
Format => 'int16u[4]',
},
0xe0 => {
Name => 'Face4Position',
Condition => '$$self{FacesDetected} >= 4',
Format => 'int16u[4]',
},
0xec => {
Name => 'Face5Position',
Condition => '$$self{FacesDetected} >= 5',
Format => 'int16u[4]',
},
0xf8 => {
Name => 'Face6Position',
Condition => '$$self{FacesDetected} >= 6',
Format => 'int16u[4]',
},
0x104 => {
Name => 'Face7Position',
Condition => '$$self{FacesDetected} >= 7',
Format => 'int16u[4]',
},
0x110 => {
Name => 'Face8Position',
Condition => '$$self{FacesDetected} >= 8',
Format => 'int16u[4]',
},
);
# firmware version information (ref PH)
# Binary-data table holding two fixed-width firmware revision strings.
%Image::ExifTool::Ricoh::FirmwareInfo = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
WRITABLE => 1,
0x00 => {
Name => 'FirmwareRevision',
Format => 'string[12]',
},
0x0c => {
Name => 'FirmwareRevision2',
Format => 'string[12]',
},
);
# serial/version number information written by GXR (ref PH)
# Four consecutive 16-byte fixed-width strings; LensFirmware is also used
# by the LensID composite tag later in this file to determine LensType.
%Image::ExifTool::Ricoh::SerialInfo = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
WRITABLE => 1,
NOTES => 'This information is found in images from the GXR.',
0 => {
Name => 'BodyFirmware', #(NC)
Format => 'string[16]',
# observed: "RS1 :V00560000" --> FirmwareVersion "Rev0056"
# "RS1 :V01020200" --> FirmwareVersion "Rev0102"
},
16 => {
Name => 'BodySerialNumber',
Format => 'string[16]',
# observed: "SID:00100056" --> "WD00100056" on plate
},
32 => {
Name => 'LensFirmware', #(NC)
Format => 'string[16]',
# observed: "RL1 :V00560000", "RL1 :V01020200" - A12 50mm F2.5 Macro
# "RL2 :V00560000", "RL2 :V01020300" - S10 24-70mm F2.5-4.4 VC
# --> used in a Composite tag to determine LensType
},
48 => {
Name => 'LensSerialNumber',
Format => 'string[16]',
# observed: (S10) "LID:00010024" --> "WF00010024" on plate
# (A12) "LID:00010054" --> "WE00010029" on plate??
},
);
# Ricoh text-type maker notes (PH)
# Parsed by ProcessRicohText below.  Note that both "Rev" and "Rv" map to
# FirmwareVersion; the PrintConv turns e.g. "0100" into "1.00" and the
# PrintConvInv reverses that conversion for writing.
%Image::ExifTool::Ricoh::Text = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Camera' },
PROCESS_PROC => \&ProcessRicohText,
NOTES => q{
Some Ricoh DC and RDC models use a text-based format for their maker notes
instead of the IFD format used by the Caplio models. Below is a list of known
tags in this information.
},
Rev => {
Name => 'FirmwareVersion',
PrintConv => '$val=~/^\d+$/ ? sprintf("%.2f",$val/100) : $val',
PrintConvInv => '$val=~/^(\d+)\.(\d+)$/ ? sprintf("%.2d%.2d",$1,$2) : $val',
},
Rv => {
Name => 'FirmwareVersion',
PrintConv => '$val=~/^\d+$/ ? sprintf("%.2f",$val/100) : $val',
PrintConvInv => '$val=~/^(\d+)\.(\d+)$/ ? sprintf("%.2d%.2d",$1,$2) : $val',
},
Rg => 'RedGain',
Gg => 'GreenGain',
Bg => 'BlueGain',
);
# Ricoh APP5 "RMETA" segment tags, parsed by ProcessRicohRMETA below.
# Tags not listed here are added to this table dynamically as they are found.
%Image::ExifTool::Ricoh::RMETA = (
GROUPS => { 0 => 'APP5', 1 => 'RMETA', 2 => 'Image' },
PROCESS_PROC => \&Image::ExifTool::Ricoh::ProcessRicohRMETA,
NOTES => q{
The Ricoh Caplio Pro G3 has the ability to add custom fields to the APP5
"RMETA" segment of JPEG images. While only a few observed tags have been
defined below, ExifTool will extract any information found here.
},
'Sign type' => { Name => 'SignType', PrintConv => {
1 => 'Directional',
2 => 'Warning',
3 => 'Information',
} },
Location => { PrintConv => {
1 => 'Verge',
2 => 'Gantry',
3 => 'Central reservation',
4 => 'Roundabout',
} },
Lit => { PrintConv => {
1 => 'Yes',
2 => 'No',
} },
Condition => { PrintConv => {
1 => 'Good',
2 => 'Fair',
3 => 'Poor',
4 => 'Damaged',
} },
Azimuth => { PrintConv => {
1 => 'N',
2 => 'NNE',
3 => 'NE',
4 => 'ENE',
5 => 'E',
6 => 'ESE',
7 => 'SE',
8 => 'SSE',
9 => 'S',
10 => 'SSW',
11 => 'SW',
12 => 'WSW',
13 => 'W',
14 => 'WNW',
15 => 'NW',
16 => 'NNW',
} },
# pseudo-tag filled in by ProcessRicohRMETA when a RIFF header is found
# in a non-zero-numbered RMETA segment
_audio => {
Name => 'SoundFile',
Notes => 'audio data recorded in JPEG images by the G700SE',
},
);
# information stored in Ricoh AVI images (ref PH)
%Image::ExifTool::Ricoh::AVI = (
GROUPS => { 0 => 'MakerNotes', 2 => 'Video' },
ucmt => {
Name => 'Comment',
# Ricoh writes a "Unicode" header even when text is ASCII (spaces anyway)
# strip the encoding header, drop null bytes and trailing whitespace
ValueConv => '$_=$val; s/^(Unicode\0|ASCII\0\0\0)//; tr/\0//d; s/\s+$//; $_',
},
mnrt => {
Name => 'MakerNoteRicoh',
# embedded maker-note IFD, processed with the main Ricoh tag table
SubDirectory => {
TagTable => 'Image::ExifTool::Ricoh::Main',
Start => '$valuePtr + 8',
ByteOrder => 'BigEndian',
Base => '8',
},
},
rdc2 => {
Name => 'RicohRDC2',
Unknown => 1,
ValueConv => 'unpack("H*",$val)',
# have seen values like 0a000444 and 00000000 - PH
},
thum => {
Name => 'ThumbnailImage',
Groups => { 2 => 'Preview' },
Binary => 1,
},
);
# Ricoh composite tags
# LensID is derived from SerialInfo's LensFirmware (e.g. "RL1 :V0056..."
# becomes "RL1"); RicohPitch/RicohRoll come from the two ThetaSubdir
# Accelerometer values.
%Image::ExifTool::Ricoh::Composite = (
GROUPS => { 2 => 'Camera' },
LensID => {
SeparateTable => 'Ricoh LensID',
Require => 'Ricoh:LensFirmware',
# give up unless LensFirmware is non-empty
RawConv => '$val[0] ? $val[0] : undef',
# strip the ":Vnnnn..." version suffix, keeping the lens model prefix
ValueConv => '$val=~s/\s*:.*//; $val',
PrintConv => \%ricohLensIDs,
},
RicohPitch => {
Require => 'Ricoh:Accelerometer',
ValueConv => 'my @v = split(" ",$val); $v[1]',
},
RicohRoll => {
Require => 'Ricoh:Accelerometer',
# normalize to the range (-180,180]
ValueConv => 'my @v = split(" ",$val); $v[0] <= 180 ? $v[0] : $v[0] - 360',
},
);
# add our composite tags
Image::ExifTool::AddCompositeTags('Image::ExifTool::Ricoh');
#------------------------------------------------------------------------------
# Process Ricoh text-based maker notes
# Inputs: 0) ExifTool object reference
#         1) Reference to directory information hash
#         2) Pointer to tag table for this directory
# Returns: 1 on success, otherwise returns 0 and sets a Warning
sub ProcessRicohText($$$)
{
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt  = $$dirInfo{DataPt};
    my $start   = $$dirInfo{DirStart} || 0;
    my $size    = $$dirInfo{DirLen} || $$dirInfo{DataLen} - $start;
    my $verbose = $et->Options('Verbose');
    my $buff    = substr($$dataPt, $start, $size);
    # a leading null byte indicates blank Ricoh maker notes
    return 1 if $buff =~ /^\0/;
    # valid text-format maker notes always begin with a firmware revision entry
    unless ($buff =~ /^(Rev|Rv)/) {
        $et->Warn('Bad Ricoh maker notes');
        return 0;
    }
    # entries have the form "Rev0100;": a short capitalized key followed by
    # hex digits and a terminating semicolon
    while ($buff =~ m/([A-Z][a-z]{1,2})([0-9A-F]+);/sg) {
        my ($tag, $val) = ($1, $2);
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
        if ($verbose) {
            $et->VerboseInfo($tag, $tagInfo,
                Table => $tagTablePtr,
                Value => $val,
            );
        }
        unless ($tagInfo) {
            # unrecognized tag: extract it only if the Unknown option is set
            next unless $$et{OPTIONS}{Unknown};
            $tagInfo = {
                Name => "Ricoh_Text_$tag",
                Unknown => 1,
                PrintConv => 'length($val) > 60 ? substr($val,0,55) . "[...]" : $val',
            };
            # add tag information to table
            AddTagToTable($tagTablePtr, $tag, $tagInfo);
        }
        $et->FoundTag($tagInfo, $val);
    }
    return 1;
}
#------------------------------------------------------------------------------
# Process Ricoh APP5 RMETA information
# Inputs: 0) ExifTool ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Segment 0 contains the tag directory (names, string values, numerical
# values in separate typed sections); segments with a non-zero number carry
# continued audio data (see the _audio pseudo-tag in the RMETA table).
sub ProcessRicohRMETA($$$)
{
my ($et, $dirInfo, $tagTablePtr) = @_;
my $dataPt = $$dirInfo{DataPt};
my $dirStart = $$dirInfo{DirStart};
my $dataLen = length($$dataPt);
my $dirLen = $dataLen - $dirStart;
my $verbose = $et->Options('Verbose');
$et->VerboseDir('Ricoh RMETA') if $verbose;
# need at least the fixed header before we can read anything
$dirLen < 20 and $et->Warn('Truncated Ricoh RMETA data', 1), return 0;
# first 2 bytes give the byte order ("\0\0" means: inherit from container)
my $byteOrder = substr($$dataPt, $dirStart, 2);
$byteOrder = GetByteOrder() if $byteOrder eq "\0\0"; # (same order as container)
SetByteOrder($byteOrder) or $et->Warn('Bad Ricoh RMETA data', 1), return 0;
# get the RMETA segment number
my $rmetaNum = Get16u($dataPt, $dirStart+4);
if ($rmetaNum != 0) {
# not sure how to recognize audio, so do it by checking for "RIFF" header
# and assume all subsequent RMETA segments are part of the audio data
# (but it looks like the int16u at $dirStart+6 is the next block number
# if the data is continued, or 0 for the last block)
$dirLen < 14 and $et->Warn('Short Ricoh RMETA block', 1), return 0;
my $audioLen = Get16u($dataPt, $dirStart+12);
$audioLen + 14 > $dirLen and $et->Warn('Truncated Ricoh RMETA audio data', 1), return 0;
my $buff = substr($$dataPt, $dirStart + 14, $audioLen);
if ($audioLen >= 4 and substr($buff, 0, 4) eq 'RIFF') {
# first audio block: start a new SoundFile value
$et->HandleTag($tagTablePtr, '_audio', \$buff);
} elsif ($$et{VALUE}{SoundFile}) {
# continuation block: append to the existing SoundFile data
${$$et{VALUE}{SoundFile}} .= $buff;
} else {
$et->Warn('Unknown Ricoh RMETA type', 1);
return 0;
}
return 1;
}
# decode standard RMETA tag directory
my (@tags, @vals, @nums, $valPos, $numPos);
# int16u at offset 8 is the offset of the directory from the segment start
my $pos = $dirStart + Get16u($dataPt, $dirStart+8);
my $numEntries = Get16u($dataPt, $pos);
$numEntries > 100 and $et->Warn('Bad RMETA entry count'), return 0;
$pos += 10; # start of first RMETA section
# loop through RMETA sections
# each section is: int16u type, int16u size, then (size-2) bytes of payload
while ($pos <= $dataLen - 4) {
my $type = Get16u($dataPt, $pos);
my $size = Get16u($dataPt, $pos + 2);
last unless $size;
$pos += 4;
$size -= 2;
if ($size < 0 or $pos + $size > $dataLen) {
$et->Warn('Corrupted Ricoh RMETA data', 1);
last;
}
my $dat = substr($$dataPt, $pos, $size);
if ($verbose) {
$et->VPrint(2, "$$et{INDENT}RMETA section type=$type size=$size\n");
$et->VerboseDump(\$dat, Addr => $$dirInfo{DataPos} + $pos);
}
if ($type == 1) { # section 1: tag names
# save the tag names
@tags = split /\0/, $dat, $numEntries+1;
} elsif ($type == 2 || $type == 18) { # section 2/18: string values (G800 uses type 18)
# save the tag values (assume "ASCII\0" encoding since others never seen)
@vals = split /\0/, $dat, $numEntries+1;
$valPos = $pos; # save position of first string value
} elsif ($type == 3) { # section 3: numerical values
if ($size < $numEntries * 2) {
$et->Warn('Truncated RMETA section 3');
} else {
# save the numerical tag values
# (0=empty, 0xffff=text input, otherwise menu item number)
@nums = unpack(($byteOrder eq 'MM' ? 'n' : 'v').$numEntries, $dat);
$numPos = $pos; # save position of numerical values
}
} elsif ($type != 16) {
$et->Warn("Unrecognized RMETA section (type $type, len $size)");
}
$pos += $size;
}
return 1 unless @tags or @vals;
$valPos or $valPos = 0; # (just in case there was no value section)
# find next tag in null-delimited list
# unpack numerical values from block of int16u values
my ($i, $name);
for ($i=0; $i<$numEntries; ++$i) {
my $tag = $tags[$i];
my $val = $vals[$i];
$val = '' unless defined $val;
unless (defined $tag and length $tag) {
# no tag name: skip the entry, advancing past the empty string value
length $val or ++$valPos, next; # (skip empty entries)
$tag = '';
}
# derive a legal tag name: capitalize each word, then remove the spaces
($name = $tag) =~ s/\b([a-z])/\U$1/gs; # capitalize all words
$name =~ s/ (\w)/\U$1/g; # remove spaces before word characters
$name = 'RMETA_Unknown' unless length($name);
my $num = $nums[$i];
my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
if ($tagInfo) {
# make sure print conversion is defined
$$tagInfo{PrintConv} = { } unless ref $$tagInfo{PrintConv} eq 'HASH';
} else {
# create tagInfo hash
$tagInfo = { Name => $name, PrintConv => { } };
AddTagToTable($tagTablePtr, $tag, $tagInfo);
}
# use string value directly if no numerical value
$num = $val unless defined $num;
# add conversion for this value (replacing any existing entry)
$tagInfo->{PrintConv}->{$num} = length $val ? $val : $num;
if ($verbose) {
my %datParms;
if (length $val) {
%datParms = ( Start => $valPos, Size => length($val), Format => 'string' );
} elsif ($numPos) {
%datParms = ( Start => $numPos + $i * 2, Size => 2, Format => 'int16u' );
}
%datParms and $datParms{DataPt} = $dataPt, $datParms{DataPos} = $$dirInfo{DataPos};
$et->VerboseInfo($tag, $tagInfo, Table=>$tagTablePtr, Value=>$num, %datParms);
}
$et->FoundTag($tagInfo, $num);
# advance past this entry's string value and its null terminator
$valPos += length($val) + 1;
}
return 1;
}
1; # end
__END__
=head1 NAME
Image::ExifTool::Ricoh - Ricoh EXIF maker notes tags
=head1 SYNOPSIS
This module is loaded automatically by Image::ExifTool when required.
=head1 DESCRIPTION
This module contains definitions required by Image::ExifTool to
interpret Ricoh maker notes EXIF meta information.
=head1 AUTHOR
Copyright 2003-2019, Phil Harvey (phil at owl.phy.queensu.ca)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 REFERENCES
=over 4
=item L<http://www.ozhiker.com/electronics/pjmt/jpeg_info/ricoh_mn.html>
=back
=head1 ACKNOWLEDGEMENTS
Thanks to Tim Gray for his help decoding a number of tags for the Ricoh GR.
=head1 SEE ALSO
L<Image::ExifTool::TagNames/Ricoh Tags>,
L<Image::ExifTool(3pm)|Image::ExifTool>
=cut
| philmoz/Focus-Points | focuspoints.lrdevplugin/bin/exiftool/lib/Image/ExifTool/Ricoh.pm | Perl | apache-2.0 | 37,305 |
#!/usr/bin/perl -w
#
# Copyright (C) 2006 Apple Computer, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; see the file COPYING.LIB. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
#
# Usage: make-css-file-arrays.pl <header> <output> <input> ...
use strict;

use Getopt::Long;

# Options:
#   --defines=...       macro definitions; when given, each input CSS file is
#                       run through an external preprocessor instead of being
#                       read directly (see http://webkit.org/b/37296)
#   --preprocessor=...  the preprocessor command to use
my $defines;
my $preprocessor;
GetOptions('defines=s' => \$defines,
           'preprocessor=s' => \$preprocessor)
    or die "usage: make-css-file-arrays.pl <header> <output> <input> ...\n";

# First two positional arguments are the output header and source files;
# the remaining arguments are the input CSS files.
my $header = shift @ARGV;
my $out = shift @ARGV;
defined $header and defined $out
    or die "usage: make-css-file-arrays.pl <header> <output> <input> ...\n";

# Use lexical filehandles and report the failing filename on error.
open my $headerFH, ">", $header or die "can't open $header for writing: $!\n";
open my $outFH, ">", $out or die "can't open $out for writing: $!\n";
print $headerFH "namespace WebCore {\n";
print $outFH "namespace WebCore {\n";

for my $in (@ARGV) {
    # The array name is derived from the CSS file's basename.
    $in =~ /(\w+)\.css$/ or die "input file name '$in' does not end in .css\n";
    my $name = $1;

    # Slurp in the CSS file.
    my $text;
    # We should not set --defines option and run "moc" preprocessor on Qt.
    # See http://webkit.org/b/37296.
    if (!$defines) {
        open my $inFH, "<", $in or die "can't open $in: $!\n";
        { local $/; $text = <$inFH>; }
        close $inFH;
        # Remove preprocessor directives.
        $text =~ s|^#.*?$||mg;
    } else {
        require preprocessor;
        $text = join('', applyPreprocessor($in, $defines, $preprocessor));
    }

    # Remove comments in a simple-minded way that will work fine for our files.
    # Could do this a fancier way if we were worried about arbitrary CSS source.
    $text =~ s|/\*.*?\*/||gs;

    # Crunch whitespace just to make it a little smaller.
    # Could do work to avoid doing this inside quote marks but our files don't have runs of spaces in quotes.
    # Could crunch further based on places where whitespace is optional.
    $text =~ s|\s+| |gs;
    $text =~ s|^ ||;
    $text =~ s| $||;

    # Write out a C array of the characters, 16 byte values per line.
    my $length = length $text;
    print $headerFH "extern const char ${name}UserAgentStyleSheet[${length}];\n";
    print $outFH "extern const char ${name}UserAgentStyleSheet[${length}] = {\n";
    my $i = 0;
    while ($i < $length) {
        print $outFH " ";
        my $j = 0;
        while ($j < 16 && $i < $length) {
            print $outFH ", " unless $j == 0;
            print $outFH ord substr $text, $i, 1;
            ++$i;
            ++$j;
        }
        # trailing comma after every line except the last
        print $outFH "," unless $i == $length;
        print $outFH "\n";
    }
    print $outFH "};\n";
}

print $headerFH "}\n";
print $outFH "}\n";

# Buffered write errors only surface at close time, so check both handles.
close $headerFH or die "error closing $header: $!\n";
close $outFH or die "error closing $out: $!\n";
| Treeeater/WebPermission | css/make-css-file-arrays.pl | Perl | bsd-2-clause | 2,920 |
#!/usr/bin/perl -w
# Perl utility to run or verify FIPS 140-2 CAVP algorithm tests based on the
# pathnames of input algorithm test files actually present (the unqualified
# file names are consistent but the pathnames are not).
#
# FIPS test definitions
# List of all the unqualified file names we expect and command lines to run
# Each test list starts with a human-readable group name string, followed by
# one array ref per test: [ <unqualified request file name>, <command>,
# (optional) <disambiguation filter> ].  A filter of the form "path:RE"
# matches against the request file's pathname, and "file:RE" matches against
# the file's contents (used where the file name alone is ambiguous).
my @fips_dsa_pqgver_test_list = (
"DSA",
[ "PQGVer", "fips_dssvs pqgver"]
);
# DSA2 tests
my @fips_dsa2_test_list = (
"DSA2",
[ "PQGGen", "fips_dssvs pqg", "path:[^C]DSA2/.*PQGGen" ],
[ "KeyPair", "fips_dssvs keypair", "path:[^C]DSA2/.*KeyPair" ],
[ "SigGen", "fips_dssvs siggen", "path:[^C]DSA2/.*SigGen" ],
[ "SigVer", "fips_dssvs sigver", "path:[^C]DSA2/.*SigVer" ],
[ "PQGVer", "fips_dssvs pqgver", "file:L\\s*=.*N\\s*=" ]
);
# ECDSA2 tests
my @fips_ecdsa2_test_list = (
"ECDSA2",
[ "KeyPair", "fips_ecdsavs KeyPair", "path:/ECDSA2/.*KeyPair" ],
[ "PKV", "fips_ecdsavs PKV", "path:/ECDSA2/.*PKV" ],
[ "SigGen", "fips_ecdsavs SigGen", "path:/ECDSA2/.*SigGen" ],
[ "SigVer", "fips_ecdsavs SigVer", "path:/ECDSA2/.*SigVer" ],
);
# RSA tests
my @fips_rsa_test_list = (
"RSA",
[ "SigGen15", "fips_rsastest" ],
[ "SigVer15", "fips_rsavtest" ],
[ "SigVer(X9.31)", "fips_rsavtest -x931", 'file:9\.31' ],
[ "KeyGenRSA", "fips_rsagtest" ],
[ "SigGen(X9.31)", "fips_rsastest -x931" , 'file:9\.31']
);
# Special cases for PSS. The filename itself is
# not sufficient to determine the test. Addditionally we
# need to examine the file contents to determine the salt length
# In these cases the test filename has (saltlen) appended.
# RSA PSS salt length 0 tests
my @fips_rsa_pss0_test_list = (
"RSA",
[ "SigGenPSS(0)", "fips_rsastest -saltlen 0",
'file:salt\s+len:\s+0' ],
[ "SigVerPSS(0)", "fips_rsavtest -saltlen 0",
'file:salt\s+len:\s+0' ],
);
# RSA PSS salt length 62 tests
my @fips_rsa_pss62_test_list = (
"RSA",
[ "SigGenPSS(62)", "fips_rsastest -saltlen 62",
'file:salt\s+len:\s+62' ],
[ "SigVerPSS(62)", "fips_rsavtest -saltlen 62",
'file:salt\s+len:\s+62' ],
);
# SHA tests
my @fips_sha_test_list = (
"SHA",
[ "SHA1LongMsg", "fips_shatest" ],
[ "SHA1Monte", "fips_shatest" ],
[ "SHA1ShortMsg", "fips_shatest" ],
[ "SHA224LongMsg", "fips_shatest" ],
[ "SHA224Monte", "fips_shatest" ],
[ "SHA224ShortMsg", "fips_shatest" ],
[ "SHA256LongMsg", "fips_shatest" ],
[ "SHA256Monte", "fips_shatest" ],
[ "SHA256ShortMsg", "fips_shatest" ],
[ "SHA384LongMsg", "fips_shatest" ],
[ "SHA384Monte", "fips_shatest" ],
[ "SHA384ShortMsg", "fips_shatest" ],
[ "SHA512LongMsg", "fips_shatest" ],
[ "SHA512Monte", "fips_shatest" ],
[ "SHA512ShortMsg", "fips_shatest" ]
);
# HMAC
my @fips_hmac_test_list = (
"HMAC",
[ "HMAC", "fips_hmactest" ]
);
# CMAC
my @fips_cmac_test_list = (
"CMAC",
[ "CMACGenAES128", "fips_cmactest -a aes128 -g" ],
[ "CMACVerAES128", "fips_cmactest -a aes128 -v" ],
[ "CMACGenAES192", "fips_cmactest -a aes192 -g" ],
[ "CMACVerAES192", "fips_cmactest -a aes192 -v" ],
[ "CMACGenAES256", "fips_cmactest -a aes256 -g" ],
[ "CMACVerAES256", "fips_cmactest -a aes256 -v" ],
[ "CMACGenTDES3", "fips_cmactest -a tdes3 -g" ],
[ "CMACVerTDES3", "fips_cmactest -a tdes3 -v" ],
);
# RAND tests, AES version
my @fips_rand_aes_test_list = (
"RAND (AES)",
[ "ANSI931_AES128MCT", "fips_rngvs mct" ],
[ "ANSI931_AES192MCT", "fips_rngvs mct" ],
[ "ANSI931_AES256MCT", "fips_rngvs mct" ],
[ "ANSI931_AES128VST", "fips_rngvs vst" ],
[ "ANSI931_AES192VST", "fips_rngvs vst" ],
[ "ANSI931_AES256VST", "fips_rngvs vst" ]
);
# RAND tests, DES2 version
my @fips_rand_des2_test_list = (
"RAND (DES2)",
[ "ANSI931_TDES2MCT", "fips_rngvs mct" ],
[ "ANSI931_TDES2VST", "fips_rngvs vst" ]
);
# AES tests
# One entry per CAVP request file: every combination of mode (CBC, CFB128,
# CFB8, ECB, OFB), test type (GFSbox, KeySbox, MCT, MMT, VarKey, VarTxt)
# and key size (128/192/256); all are driven by the same fips_aesavs command.
my @fips_aes_test_list = (
"AES",
[ "CBCGFSbox128", "fips_aesavs -f" ],
[ "CBCGFSbox192", "fips_aesavs -f" ],
[ "CBCGFSbox256", "fips_aesavs -f" ],
[ "CBCKeySbox128", "fips_aesavs -f" ],
[ "CBCKeySbox192", "fips_aesavs -f" ],
[ "CBCKeySbox256", "fips_aesavs -f" ],
[ "CBCMCT128", "fips_aesavs -f" ],
[ "CBCMCT192", "fips_aesavs -f" ],
[ "CBCMCT256", "fips_aesavs -f" ],
[ "CBCMMT128", "fips_aesavs -f" ],
[ "CBCMMT192", "fips_aesavs -f" ],
[ "CBCMMT256", "fips_aesavs -f" ],
[ "CBCVarKey128", "fips_aesavs -f" ],
[ "CBCVarKey192", "fips_aesavs -f" ],
[ "CBCVarKey256", "fips_aesavs -f" ],
[ "CBCVarTxt128", "fips_aesavs -f" ],
[ "CBCVarTxt192", "fips_aesavs -f" ],
[ "CBCVarTxt256", "fips_aesavs -f" ],
[ "CFB128GFSbox128", "fips_aesavs -f" ],
[ "CFB128GFSbox192", "fips_aesavs -f" ],
[ "CFB128GFSbox256", "fips_aesavs -f" ],
[ "CFB128KeySbox128", "fips_aesavs -f" ],
[ "CFB128KeySbox192", "fips_aesavs -f" ],
[ "CFB128KeySbox256", "fips_aesavs -f" ],
[ "CFB128MCT128", "fips_aesavs -f" ],
[ "CFB128MCT192", "fips_aesavs -f" ],
[ "CFB128MCT256", "fips_aesavs -f" ],
[ "CFB128MMT128", "fips_aesavs -f" ],
[ "CFB128MMT192", "fips_aesavs -f" ],
[ "CFB128MMT256", "fips_aesavs -f" ],
[ "CFB128VarKey128", "fips_aesavs -f" ],
[ "CFB128VarKey192", "fips_aesavs -f" ],
[ "CFB128VarKey256", "fips_aesavs -f" ],
[ "CFB128VarTxt128", "fips_aesavs -f" ],
[ "CFB128VarTxt192", "fips_aesavs -f" ],
[ "CFB128VarTxt256", "fips_aesavs -f" ],
[ "CFB8GFSbox128", "fips_aesavs -f" ],
[ "CFB8GFSbox192", "fips_aesavs -f" ],
[ "CFB8GFSbox256", "fips_aesavs -f" ],
[ "CFB8KeySbox128", "fips_aesavs -f" ],
[ "CFB8KeySbox192", "fips_aesavs -f" ],
[ "CFB8KeySbox256", "fips_aesavs -f" ],
[ "CFB8MCT128", "fips_aesavs -f" ],
[ "CFB8MCT192", "fips_aesavs -f" ],
[ "CFB8MCT256", "fips_aesavs -f" ],
[ "CFB8MMT128", "fips_aesavs -f" ],
[ "CFB8MMT192", "fips_aesavs -f" ],
[ "CFB8MMT256", "fips_aesavs -f" ],
[ "CFB8VarKey128", "fips_aesavs -f" ],
[ "CFB8VarKey192", "fips_aesavs -f" ],
[ "CFB8VarKey256", "fips_aesavs -f" ],
[ "CFB8VarTxt128", "fips_aesavs -f" ],
[ "CFB8VarTxt192", "fips_aesavs -f" ],
[ "CFB8VarTxt256", "fips_aesavs -f" ],
[ "ECBGFSbox128", "fips_aesavs -f" ],
[ "ECBGFSbox192", "fips_aesavs -f" ],
[ "ECBGFSbox256", "fips_aesavs -f" ],
[ "ECBKeySbox128", "fips_aesavs -f" ],
[ "ECBKeySbox192", "fips_aesavs -f" ],
[ "ECBKeySbox256", "fips_aesavs -f" ],
[ "ECBMCT128", "fips_aesavs -f" ],
[ "ECBMCT192", "fips_aesavs -f" ],
[ "ECBMCT256", "fips_aesavs -f" ],
[ "ECBMMT128", "fips_aesavs -f" ],
[ "ECBMMT192", "fips_aesavs -f" ],
[ "ECBMMT256", "fips_aesavs -f" ],
[ "ECBVarKey128", "fips_aesavs -f" ],
[ "ECBVarKey192", "fips_aesavs -f" ],
[ "ECBVarKey256", "fips_aesavs -f" ],
[ "ECBVarTxt128", "fips_aesavs -f" ],
[ "ECBVarTxt192", "fips_aesavs -f" ],
[ "ECBVarTxt256", "fips_aesavs -f" ],
[ "OFBGFSbox128", "fips_aesavs -f" ],
[ "OFBGFSbox192", "fips_aesavs -f" ],
[ "OFBGFSbox256", "fips_aesavs -f" ],
[ "OFBKeySbox128", "fips_aesavs -f" ],
[ "OFBKeySbox192", "fips_aesavs -f" ],
[ "OFBKeySbox256", "fips_aesavs -f" ],
[ "OFBMCT128", "fips_aesavs -f" ],
[ "OFBMCT192", "fips_aesavs -f" ],
[ "OFBMCT256", "fips_aesavs -f" ],
[ "OFBMMT128", "fips_aesavs -f" ],
[ "OFBMMT192", "fips_aesavs -f" ],
[ "OFBMMT256", "fips_aesavs -f" ],
[ "OFBVarKey128", "fips_aesavs -f" ],
[ "OFBVarKey192", "fips_aesavs -f" ],
[ "OFBVarKey256", "fips_aesavs -f" ],
[ "OFBVarTxt128", "fips_aesavs -f" ],
[ "OFBVarTxt192", "fips_aesavs -f" ],
[ "OFBVarTxt256", "fips_aesavs -f" ]
);
my @fips_aes_cfb1_test_list = (
# AES CFB1 tests
# (no leading group-name string: these are appended to the AES group)
[ "CFB1GFSbox128", "fips_aesavs -f" ],
[ "CFB1GFSbox192", "fips_aesavs -f" ],
[ "CFB1GFSbox256", "fips_aesavs -f" ],
[ "CFB1KeySbox128", "fips_aesavs -f" ],
[ "CFB1KeySbox192", "fips_aesavs -f" ],
[ "CFB1KeySbox256", "fips_aesavs -f" ],
[ "CFB1MCT128", "fips_aesavs -f" ],
[ "CFB1MCT192", "fips_aesavs -f" ],
[ "CFB1MCT256", "fips_aesavs -f" ],
[ "CFB1MMT128", "fips_aesavs -f" ],
[ "CFB1MMT192", "fips_aesavs -f" ],
[ "CFB1MMT256", "fips_aesavs -f" ],
[ "CFB1VarKey128", "fips_aesavs -f" ],
[ "CFB1VarKey192", "fips_aesavs -f" ],
[ "CFB1VarKey256", "fips_aesavs -f" ],
[ "CFB1VarTxt128", "fips_aesavs -f" ],
[ "CFB1VarTxt192", "fips_aesavs -f" ],
[ "CFB1VarTxt256", "fips_aesavs -f" ]
);
my @fips_aes_ccm_test_list = (
# AES CCM tests
"AES CCM",
[ "DVPT128", "fips_gcmtest -ccm" ],
[ "DVPT192", "fips_gcmtest -ccm" ],
[ "DVPT256", "fips_gcmtest -ccm" ],
[ "VADT128", "fips_gcmtest -ccm" ],
[ "VADT192", "fips_gcmtest -ccm" ],
[ "VADT256", "fips_gcmtest -ccm" ],
[ "VNT128", "fips_gcmtest -ccm" ],
[ "VNT192", "fips_gcmtest -ccm" ],
[ "VNT256", "fips_gcmtest -ccm" ],
[ "VPT128", "fips_gcmtest -ccm" ],
[ "VPT192", "fips_gcmtest -ccm" ],
[ "VPT256", "fips_gcmtest -ccm" ],
[ "VTT128", "fips_gcmtest -ccm" ],
[ "VTT192", "fips_gcmtest -ccm" ],
[ "VTT256", "fips_gcmtest -ccm" ]
);
my @fips_aes_gcm_test_list = (
# AES GCM tests
"AES GCM",
[ "gcmDecrypt128", "fips_gcmtest -decrypt" ],
[ "gcmDecrypt192", "fips_gcmtest -decrypt" ],
[ "gcmDecrypt256", "fips_gcmtest -decrypt" ],
[ "gcmEncryptIntIV128", "fips_gcmtest -encrypt" ],
[ "gcmEncryptIntIV192", "fips_gcmtest -encrypt" ],
[ "gcmEncryptIntIV256", "fips_gcmtest -encrypt" ],
);
my @fips_aes_xts_test_list = (
# AES XTS tests
"AES XTS",
[ "XTSGenAES128", "fips_gcmtest -xts" ],
[ "XTSGenAES256", "fips_gcmtest -xts" ],
);
# Triple DES tests
# All modes (TCBC, TCFB64, TCFB8, TECB, TOFB) x test types, driven by
# fips_desmovs.  The "2"/"3" suffixes refer to 2-key and 3-key triple DES.
my @fips_des3_test_list = (
"Triple DES",
[ "TCBCinvperm", "fips_desmovs -f" ],
[ "TCBCMMT2", "fips_desmovs -f" ],
[ "TCBCMMT3", "fips_desmovs -f" ],
[ "TCBCMonte2", "fips_desmovs -f" ],
[ "TCBCMonte3", "fips_desmovs -f" ],
[ "TCBCpermop", "fips_desmovs -f" ],
[ "TCBCsubtab", "fips_desmovs -f" ],
[ "TCBCvarkey", "fips_desmovs -f" ],
[ "TCBCvartext", "fips_desmovs -f" ],
[ "TCFB64invperm", "fips_desmovs -f" ],
[ "TCFB64MMT2", "fips_desmovs -f" ],
[ "TCFB64MMT3", "fips_desmovs -f" ],
[ "TCFB64Monte2", "fips_desmovs -f" ],
[ "TCFB64Monte3", "fips_desmovs -f" ],
[ "TCFB64permop", "fips_desmovs -f" ],
[ "TCFB64subtab", "fips_desmovs -f" ],
[ "TCFB64varkey", "fips_desmovs -f" ],
[ "TCFB64vartext", "fips_desmovs -f" ],
[ "TCFB8invperm", "fips_desmovs -f" ],
[ "TCFB8MMT2", "fips_desmovs -f" ],
[ "TCFB8MMT3", "fips_desmovs -f" ],
[ "TCFB8Monte2", "fips_desmovs -f" ],
[ "TCFB8Monte3", "fips_desmovs -f" ],
[ "TCFB8permop", "fips_desmovs -f" ],
[ "TCFB8subtab", "fips_desmovs -f" ],
[ "TCFB8varkey", "fips_desmovs -f" ],
[ "TCFB8vartext", "fips_desmovs -f" ],
[ "TECBinvperm", "fips_desmovs -f" ],
[ "TECBMMT2", "fips_desmovs -f" ],
[ "TECBMMT3", "fips_desmovs -f" ],
[ "TECBMonte2", "fips_desmovs -f" ],
[ "TECBMonte3", "fips_desmovs -f" ],
[ "TECBpermop", "fips_desmovs -f" ],
[ "TECBsubtab", "fips_desmovs -f" ],
[ "TECBvarkey", "fips_desmovs -f" ],
[ "TECBvartext", "fips_desmovs -f" ],
[ "TOFBinvperm", "fips_desmovs -f" ],
[ "TOFBMMT2", "fips_desmovs -f" ],
[ "TOFBMMT3", "fips_desmovs -f" ],
[ "TOFBMonte2", "fips_desmovs -f" ],
[ "TOFBMonte3", "fips_desmovs -f" ],
[ "TOFBpermop", "fips_desmovs -f" ],
[ "TOFBsubtab", "fips_desmovs -f" ],
[ "TOFBvarkey", "fips_desmovs -f" ],
[ "TOFBvartext", "fips_desmovs -f" ]
);
my @fips_des3_cfb1_test_list = (
# DES3 CFB1 tests
# (no leading group-name string: these are appended to the Triple DES group)
[ "TCFB1invperm", "fips_desmovs -f" ],
[ "TCFB1MMT2", "fips_desmovs -f" ],
[ "TCFB1MMT3", "fips_desmovs -f" ],
[ "TCFB1Monte2", "fips_desmovs -f" ],
[ "TCFB1Monte3", "fips_desmovs -f" ],
[ "TCFB1permop", "fips_desmovs -f" ],
[ "TCFB1subtab", "fips_desmovs -f" ],
[ "TCFB1varkey", "fips_desmovs -f" ],
[ "TCFB1vartext", "fips_desmovs -f" ],
);
my @fips_drbg_test_list = (
# SP800-90 DRBG tests
"SP800-90 DRBG",
[ "CTR_DRBG", "fips_drbgvs" ],
[ "Hash_DRBG", "fips_drbgvs" ],
[ "HMAC_DRBG", "fips_drbgvs" ]
);
my @fips_dh_test_list = (
# DH
"DH Ephemeral Primitives Only",
[ "KASValidityTest_FFCEphem_NOKC_ZZOnly_init", "fips_dhvs dhver" ],
[ "KASValidityTest_FFCEphem_NOKC_ZZOnly_resp", "fips_dhvs dhver" ],
);
my @fips_ecdh_test_list = (
# ECDH
"ECDH Ephemeral Primitives Only",
[ "KAS_ECC_CDH_PrimitiveTest", "fips_ecdhvs ecdhgen" ],
# [ "KASValidityTest_ECCEphemeralUnified_NOKC_ZZOnly_init",
# "fips_ecdhvs ecdhver" ],
# [ "KASValidityTest_ECCEphemeralUnified_NOKC_ZZOnly_resp",
# "fips_ecdhvs ecdhver" ],
);
# Verification special cases.
# In most cases the output of a test is deterministic and it can be compared
# to a known good result.  A few tests involve the generation and use of
# random keys, so their output differs on each run.  In those cases we
# instead check consistency -- for example, signature generation output is
# run back through signature verification to see if all outputs show as
# valid.  Keys are "<group name>:<test file name>" and values are the
# command used to verify that test's response file ("skip" means no
# automatic verification is possible).
my %verify_special = (
    "DSA:SigGen"     => "fips_dssvs sigver",
    "DSA2:PQGGen"    => "fips_dssvs pqgver",
    "DSA2:KeyPair"   => "fips_dssvs keyver",
    "DSA2:SigGen"    => "fips_dssvs sigver",
    "ECDSA:KeyPair"  => "fips_ecdsavs PKV",
    "ECDSA:SigGen"   => "fips_ecdsavs SigVer",
    "ECDSA2:KeyPair" => "fips_ecdsavs PKV",
    "ECDSA2:SigGen"  => "fips_ecdsavs SigVer",
    "RSA:SigGen15"   => "fips_rsavtest",
    # BUG FIX: this entry previously used the key "RSA:SigGenRSA", which
    # matches no test name -- the X9.31 signature generation test is
    # registered as "SigGen(X9.31)" in @fips_rsa_test_list, so its special
    # verification command could never be looked up.
    "RSA:SigGen(X9.31)" => "fips_rsavtest -x931",
    "RSA:SigGenPSS(0)"  => "fips_rsavtest -saltlen 0",
    "RSA:SigGenPSS(62)" => "fips_rsavtest -saltlen 62",
    "ECDH Ephemeral Primitives Only:KAS_ECC_CDH_PrimitiveTest" => "skip"
);
# Option defaults, overridden by the command-line processing loop below.
my $win32 = $^O =~ m/mswin/i;
my $onedir = 0;
my $filter = "";
my $tvdir;
my $tprefix;
# NOTE(review): $sfprefix is declared here (set by --script-fprefix), but
# $stprefix (assigned by --script-tprefix further down) is never declared
# with "my" anywhere in view -- that only works because this script does not
# "use strict".  TODO: confirm a declaration exists elsewhere or add one.
my $sfprefix = "";
my $debug = 0;
my $quiet = 0;
my $notest = 0;
my $verify = 1;
my $rspdir = "resp";
my $ignore_missing = 0;
my $ignore_bogus = 0;
my $bufout = '';
my $list_tests = 0;
my $minimal_script = 0;
my $outfile = '';
my $no_warn_missing = 0;
my $no_warn_bogus = 0;
my $rmcmd = "rm -rf";
my $mkcmd = "mkdir";
my $cmpall = 0;
my $info = 0;
# Per-algorithm enable flags: 0 = disabled, 1 = enabled, 2 = enabled only
# when the "v2" pseudo-test is on (2s are demoted to 0 later unless "v2"
# is enabled -- see the test-list assembly code below).
my %fips_enabled = (
"dsa" => 1,
"dsa2" => 2,
"dsa-pqgver" => 2,
"ecdsa" => 2,
"ecdsa2" => 2,
"rsa" => 1,
"rsa-pss0" => 2,
"rsa-pss62" => 1,
"sha" => 1,
"hmac" => 1,
"cmac" => 2,
"rand-aes" => 1,
"rand-des2" => 0,
"aes" => 1,
"aes-cfb1" => 2,
"des3" => 1,
"des3-cfb1" => 2,
"drbg" => 2,
"aes-ccm" => 2,
"aes-xts" => 2,
"aes-gcm" => 2,
"dh" => 0,
"ecdh" => 2,
"v2" => 1,
);
# Process command-line arguments.  Boolean flags and "--opt=value" options
# are matched in order; anything unrecognized prints usage and exits.
foreach (@ARGV) {
if ( $_ eq "--win32" ) {
$win32 = 1;
}
elsif ( $_ eq "--onedir" ) {
$onedir = 1;
}
elsif ( $_ eq "--debug" ) {
$debug = 1;
}
elsif ( $_ eq "--quiet-missing" ) {
$ignore_missing = 1;
$no_warn_missing = 1;
}
elsif ( $_ eq "--ignore-missing" ) {
$ignore_missing = 1;
}
elsif ( $_ eq "--quiet-bogus" ) {
$ignore_bogus = 1;
$no_warn_bogus = 1;
}
elsif ( $_ eq "--ignore-bogus" ) {
$ignore_bogus = 1;
}
elsif ( $_ eq "--minimal-script" ) {
$minimal_script = 1;
}
elsif (/--generate-script=(.*)$/) {
# write a shell script instead of running the tests directly
$outfile = $1;
$verify = 0;
} elsif ( $_ eq "--generate" ) {
$verify = 0;
}
elsif ( $_ eq "--compare-all" ) {
$cmpall = 1;
}
elsif ( $_ eq "--notest" ) {
$notest = 1;
}
elsif ( $_ eq "--debug-detect" ) {
# detection dry-run: report what would be tested, tolerate problems
$notest = 1;
$ignore_missing = 1;
$ignore_bogus = 1;
$info = 1;
}
elsif ( $_ eq "--quiet" ) {
$quiet = 1;
}
elsif (/--dir=(.*)$/) {
$tvdir = $1;
}
elsif (/--rspdir=(.*)$/) {
$rspdir = $1;
}
elsif (/--tprefix=(.*)$/) {
$tprefix = $1;
}
elsif (/^--disable-all$/) {
foreach (keys %fips_enabled) {
$fips_enabled{$_} = 0;
}
}
elsif (/^--(enable|disable)-(.*)$/) {
if ( !exists $fips_enabled{$2} ) {
print STDERR "Unknown test $2\n";
exit(1);
}
if ( $1 eq "enable" ) {
$fips_enabled{$2} = 1;
}
else {
$fips_enabled{$2} = 0;
}
}
elsif (/--filter=(.*)$/) {
$filter = $1;
}
elsif (/--rm=(.*)$/) {
$rmcmd = $1;
}
elsif (/--script-tprefix=(.*)$/) {
# NOTE(review): $stprefix is not declared with "my" anywhere in view;
# this works only because the script does not "use strict".  Confirm a
# declaration exists elsewhere in the file, or add one.
$stprefix = $1;
}
elsif (/--script-fprefix=(.*)$/) {
$sfprefix = $1;
}
elsif (/--mkdir=(.*)$/) {
$mkcmd = $1;
}
elsif (/^--list-tests$/) {
$list_tests = 1;
}
else {
Help();
exit(1);
}
}
my @fips_test_list;

# Tests flagged with the value 2 apply only to the FIPS 2.0 module: unless
# the "v2" pseudo-test is enabled, demote all of them to disabled (0).
if ( !$fips_enabled{"v2"} ) {
    foreach my $alg ( keys %fips_enabled ) {
        $fips_enabled{$alg} = 0 if $fips_enabled{$alg} == 2;
    }
}

# Assemble the master test list from each enabled algorithm's sub-list,
# preserving the original group ordering.
foreach my $group (
    [ "dsa2",       \@fips_dsa2_test_list ],
    [ "dsa-pqgver", \@fips_dsa_pqgver_test_list ],
    [ "ecdsa2",     \@fips_ecdsa2_test_list ],
    [ "rsa",        \@fips_rsa_test_list ],
    [ "rsa-pss0",   \@fips_rsa_pss0_test_list ],
    [ "rsa-pss62",  \@fips_rsa_pss62_test_list ],
    [ "sha",        \@fips_sha_test_list ],
    [ "drbg",       \@fips_drbg_test_list ],
    [ "hmac",       \@fips_hmac_test_list ],
    [ "cmac",       \@fips_cmac_test_list ],
    [ "rand-aes",   \@fips_rand_aes_test_list ],
    [ "rand-des2",  \@fips_rand_des2_test_list ],
    [ "aes",        \@fips_aes_test_list ],
    [ "aes-cfb1",   \@fips_aes_cfb1_test_list ],
    [ "des3",       \@fips_des3_test_list ],
    [ "des3-cfb1",  \@fips_des3_cfb1_test_list ],
    [ "aes-ccm",    \@fips_aes_ccm_test_list ],
    [ "aes-gcm",    \@fips_aes_gcm_test_list ],
    [ "aes-xts",    \@fips_aes_xts_test_list ],
    [ "dh",         \@fips_dh_test_list ],
    [ "ecdh",       \@fips_ecdh_test_list ],
  )
{
    push @fips_test_list, @{ $group->[1] } if $fips_enabled{ $group->[0] };
}
# --list-tests: print each algorithm set and whether it is enabled, then
# exit without running anything.
if ($list_tests) {
	my ( $test, $en );
	print "=====TEST LIST=====\n";
	foreach $test ( sort keys %fips_enabled ) {
		$en = $fips_enabled{$test};
		# tr maps '[' and ']' to themselves here, so this just upper-cases.
		$test =~ tr/[a-z]/[A-Z]/;
		printf "%-10s %s\n", $test, $en ? "enabled" : "disabled";
	}
	exit(0);
}
# Reset the request (index 3) and response (index 4) file slots of every
# test entry before scanning the filesystem; find_files() fills them in.
# (An unused local copy of the test name was removed here.)
foreach (@fips_test_list) {
	next unless ref($_);
	$$_[3] = "";
	$$_[4] = "";
}
# Defaults: test vectors under the current directory, and the test
# executables either alongside (--onedir) or in the build tree, with
# platform-appropriate path separators.
$tvdir = "." unless defined $tvdir;
if ($win32) {
	if ( !defined $tprefix ) {
		if ($onedir) {
			$tprefix = ".\\";
		}
		else {
			$tprefix = "..\\out32dll\\";
		}
	}
}
else {
	if ($onedir) {
		$tprefix = "./" unless defined $tprefix;
	}
	else {
		$tprefix = "../test/" unless defined $tprefix;
	}
}
# Main driver: check the executables (only when running tests directly,
# not when generating a script), locate the test vector files, run or
# script the tests, then print a summary report.
sanity_check_exe( $win32, $tprefix) if $outfile eq "";
find_files( $filter, $tvdir );
sanity_check_files();
# Result counters; run_tests() (defined below) closes over these
# lexicals and updates them.
my ( $runerr, $cmperr, $cmpok, $scheckrunerr, $scheckerr, $scheckok, $skipcnt )
  = ( 0, 0, 0, 0, 0, 0, 0 );
print "Outputting commands to $outfile\n" if $outfile ne "";
run_tests( $verify, $win32, $tprefix, $filter, $tvdir, $outfile );
exit(0) if $notest;
if ($verify) {
	print "ALGORITHM TEST VERIFY SUMMARY REPORT:\n";
	print "Tests skipped due to missing files: $skipcnt\n";
	print "Algorithm test program execution failures: $runerr\n";
	print "Test comparisons successful: $cmpok\n";
	print "Test comparisons failed: $cmperr\n";
	print "Test sanity checks successful: $scheckok\n";
	print "Test sanity checks failed: $scheckerr\n";
	print "Sanity check program execution failures: $scheckrunerr\n";
	if ( $runerr || $cmperr || $scheckrunerr || $scheckerr ) {
		print "***TEST FAILURE***\n";
	}
	else {
		print "***ALL TESTS SUCCESSFUL***\n";
	}
}
elsif ($outfile eq "") {
	print "ALGORITHM TEST SUMMARY REPORT:\n";
	print "Tests skipped due to missing files: $skipcnt\n";
	print "Algorithm test program execution failures: $runerr\n";
	if ($runerr) {
		print "***TEST FAILURE***\n";
	}
	else {
		print "***ALL TESTS SUCCESSFUL***\n";
	}
}
#--------------------------------
# Print usage information.  Improvement: the trailing <alg> list is now
# printed in sorted key order (the original iterated with each(), giving
# nondeterministic hash order on every run).
sub Help {
	( my $cmd ) = ( $0 =~ m#([^/]+)$# );
	print <<EOF;
$cmd: generate run CAVP algorithm tests
--debug Enable debug output
--dir=<dirname> Optional root for *.req file search
--filter=<regexp> Regex for input files of interest
--onedir <dirname> Assume all components in current directory
--rspdir=<dirname> Name of subdirectories containing *.rsp files, default "resp"
--tprefix=<prefix> Pathname prefix for directory containing test programs
--ignore-bogus Ignore duplicate or bogus files
--ignore-missing Ignore missing test files
--quiet Shhh....
--quiet-bogus Skip unrecognized file warnings
--quiet-missing Skip missing request file warnings
--generate Generate algorithm test output
--generate-script=<filename> Generate script to call algorithm programs
--minimal-script Simplest possible output for --generate-script
--win32 Win32 environment
--compare-all Verify unconditionally for all tests
--list-tests Show individual tests
--mkdir=<cmd> Specify "mkdir" command
--notest Exit before running tests
--rm=<cmd> Specify "rm" command
--script-tprefix Pathname prefix for --generate-script output
--enable-<alg> Enable algorithm set <alg>.
--disable-<alg> Disable algorithm set <alg>.
Where <alg> can be one of:
EOF
	foreach my $key ( sort keys %fips_enabled ) {
		printf "\t\t%-20s(%s by default)\n", $key,
		    $fips_enabled{$key} != 0 ? "enabled" : "disabled";
	}
}
# Sanity check to see if all necessary executables exist
# Verify that every test program referenced by an enabled test exists.
# Collects the unique program paths (first word of each entry's command,
# prefixed with $tprefix and suffixed with ".exe" on Win32) and exits
# with an error if any is missing.
sub sanity_check_exe {
	my ( $win32, $tprefix ) = @_;
	my %want;
	my $missing = 0;
	foreach my $entry (@fips_test_list) {
		next unless ref($entry);
		# Keep only the command word (text before the first space).
		my ($prog) = split( / /, $entry->[1], 2 );
		$prog = $tprefix . $prog;
		$prog .= ".exe" if $win32;
		$want{$prog} = 1;
	}
	foreach my $prog ( sort keys %want ) {
		next if -f $prog;
		print STDERR "ERROR: can't find executable $prog\n";
		$missing = 1;
	}
	if ($missing) {
		print STDERR "FATAL ERROR: executables missing\n";
		exit(1);
	}
	elsif ($debug) {
		print STDERR "Executable sanity check passed OK\n";
	}
}
# Search for all request and response files
# Recursively scan $dir for test vector files and record them in
# @fips_test_list: *.rsp response files go in slot 4 of the matching
# entry, *.req request files (subject to --filter) in slot 3.
# Unrecognised or duplicate files are reported and counted in $nbogus.
sub find_files {
	my ( $filter, $dir ) = @_;
	my ( $dirh, $testname, $tref );
	my $ttype;
	# Robustness fix: the original opendir was unchecked, so an
	# unreadable directory led to readdir warnings on a bad handle.
	if ( !opendir( $dirh, $dir ) ) {
		print STDERR "WARNING: can't open directory $dir: $!\n";
		return;
	}
	# Bug fix: explicit defined() so an entry literally named "0" cannot
	# terminate the scan early.
	while ( defined( $_ = readdir($dirh) ) ) {
		next if ( $_ eq "." || $_ eq ".." );
		$_ = "$dir/$_";
		if ( -f "$_" ) {
			# Response files are recorded regardless of --filter.
			if (/\/([^\/]*)\.rsp$/) {
				$tref = find_test($1, $_, \$ttype);
				if ( defined $tref ) {
					$testname = $$tref[0];
					if ( $$tref[4] eq "" ) {
						$$tref[4] = $_;
					}
					else {
						print STDERR
						  "WARNING: duplicate response file $_ for $ttype test $testname\n";
						$nbogus++;
					}
				}
				else {
					print STDERR "WARNING: bogus file $_\n" unless $no_warn_bogus;
					$nbogus++;
				}
			}
			# Request files must additionally match the --filter regexp.
			next unless /$filter.*\.req$/i;
			if (/\/([^\/]*)\.req$/) {
				$tref = find_test($1, $_, \$ttype);
				if ( defined $tref ) {
					$testname = $$tref[0];
					my $tfname = $$tref[3];
					if ( $tfname eq "" ) {
						$$tref[3] = $_;
					}
					else {
						print STDERR
						  "WARNING: duplicate request file $_ for $ttype test $testname\n";
						if ($info) {
							print_file_start($_, \*STDERR);
							print STDERR "Original filename $tfname\n";
							print_file_start($tfname, \*STDERR);
						}
						$nbogus++;
					}
				}
				# SHAmix.req is deliberately not reported as unrecognized.
				elsif ( !/SHAmix\.req$/ ) {
					print STDERR "WARNING: unrecognized filename $_\n" unless $no_warn_bogus;
					print_file_start($_, \*STDERR) if $info;
					$nbogus++;
				}
			}
		}
		elsif ( -d "$_" ) {
			# Recurse into subdirectories.
			find_files( $filter, $_ );
		}
	}
	closedir($dirh);
}
#
# Find test based on filename.
# In ambiguous cases search file contents for a match
#
# Locate the @fips_test_list entry matching a request/response file.
# $test is the file's basename (without extension), $path its full
# pathname and $type a scalar ref updated with the current group name.
# An entry's third field may carry an extra match rule: "path:REGEX"
# matches against the pathname, "file:REGEX" requires a matching line
# inside the file itself.  Returns the entry (array ref) or undef.
sub find_test {
	my ( $test, $path, $type ) = @_;
	# Bug fix: lexical loop variable (the original iterated over an
	# undeclared package global $tref).
	foreach my $tref (@fips_test_list) {
		# Plain strings are group headers: remember the group name.
		if (!ref($tref)) {
			$$type = $tref;
			next;
		}
		my ( $tst, $cmd, $excmd, $req, $resp ) = @$tref;
		my $regexp;
		# Strip any parenthesised suffix from the test name.
		$tst =~ s/\(.*$//;
		if (defined $excmd) {
			if ($excmd =~ /^path:(.*)$/) {
				my $fmatch = $1;
				return $tref if ($path =~ /$fmatch/);
				next;
			}
			elsif ($excmd =~ /^file:(.*)$/) {
				$regexp = $1;
			}
		}
		if ($test =~ /^$tst/) {
			return $tref if (!defined $regexp);
			# Ambiguous name: confirm by searching the file contents.
			# Bug fix: three-argument open on a lexical handle (the
			# original used the global bareword handle IN).
			my $found = 0;
			open( my $in, '<', $path ) || die "Can't Open File $path";
			while (my $line = <$in>) {
				if ($line =~ /$regexp/i) {
					$found = 1;
					last;
				}
			}
			close $in;
			return $tref if $found == 1;
		}
	}
	return undef;
}
# Check that every enabled test has the files it needs: a request file
# always, and (when verifying) a response file too.  Exits on problems
# unless the corresponding --ignore-* option is set.
sub sanity_check_files {
	my $bad = 0;
	my $ttype;
	foreach (@fips_test_list) {
		# Plain strings are group headers.
		if (!ref($_)) {
			$ttype = $_;
			next;
		}
		my ( $tst, $cmd, $regexp, $req, $resp ) = @$_;
		#print STDERR "FILES $tst, $cmd, $req, $resp\n";
		if ( $req eq "" ) {
			print STDERR "WARNING: missing request file for $ttype test $tst\n" unless $no_warn_missing;
			$bad = 1;
			next;
		}
		if ( $verify && $resp eq "" ) {
			# Bug fix: message previously read "test test $tst".
			print STDERR "WARNING: no response file for $ttype test $tst\n";
			$bad = 1;
		}
		elsif ( !$verify && $resp ne "" ) {
			print STDERR "WARNING: response file $resp for $ttype test $tst will be overwritten\n";
		}
	}
	if ($bad) {
		print STDERR "ERROR: test vector file set not complete\n";
		exit(1) unless $ignore_missing;
	}
	if ($nbogus) {
		print STDERR
		  "ERROR: $nbogus bogus or duplicate request and response files\n";
		exit(1) unless $ignore_bogus;
	}
	if ( $debug && !$nbogus && !$bad ) {
		print STDERR "test vector file set complete\n";
	}
}
# Run (or script) every enabled algorithm test.  Behaviour depends on
# the global options:
#   $outfile ne ""  - emit a shell/batch script to $outfile instead of
#                     executing the test programs directly;
#   $notest         - only print information about each test;
#   $verify         - generate output to a .tst file and compare it with
#                     the stored .rsp file (updating the counters);
#   otherwise       - generate fresh .rsp files from the .req files.
sub run_tests {
	my ( $verify, $win32, $tprefix, $filter, $tvdir, $outfile ) = @_;
	my ( $tname, $tref );
	my $bad = 0;
	my $lastdir = "";
	$stprefix = $tprefix unless defined $stprefix;
	if ($outfile ne "") {
		# Bug fix: the original 'open OUT, ">$outfile" || die ...' never
		# died because || bound to the (always true) filename string,
		# leaving the open completely unchecked.
		open( OUT, '>', $outfile ) or die "Can't open $outfile";
	}
	if ($outfile ne "" && !$minimal_script) {
		# Script header: batch file on Win32, Bourne shell otherwise.
		if ($win32) {
			print OUT <<\END;
@echo off
rem Test vector run script
rem Auto generated by fipsalgtest.pl script
rem Do not edit
echo Running Algorithm Tests
END
		} else {
			print OUT <<END;
#!/bin/sh
# Test vector run script
# Auto generated by fipsalgtest.pl script
# Do not edit
echo Running Algorithm Tests
RM="$rmcmd";
MKDIR="$mkcmd";
TPREFIX=$stprefix
END
		}
	}
	my $ttype = "";
	foreach (@fips_test_list) {
		# Plain strings in the list are group headers, not tests.
		if ( !ref($_) ) {
			if ($outfile ne "") {
				print "Generating script for $_ tests\n";
				print OUT "\n\n\necho \"Running $_ tests\"\n" unless $minimal_script;
			} elsif ($notest) {
				print "Info for $_ tests:\n";
			} else {
				print "Running $_ tests\n" unless $quiet;
			}
			$ttype = $_;
			next;
		}
		my ( $tname, $tcmd, $regexp, $req, $rsp ) = @$_;
		if ($notest) {
			# --notest/--debug-detect: report what was found, run nothing.
			if ($req ne "") {
				print "Test $ttype, $tname: $req\n";
				print_file_start($req, \*STDOUT) if ($info);
			} else {
				print "$tname: not found\n";
			}
			next;
		}
		my $out = $rsp;
		# When verifying, write fresh output next to the stored .rsp.
		if ($verify) {
			$out =~ s/\.rsp$/.tst/;
		}
		if ( $req eq "" ) {
			print STDERR
			  "WARNING: Request file for $ttype test $tname missing: test skipped\n" unless $no_warn_missing;
			$skipcnt++;
			next;
		}
		if ( $verify && $rsp eq "" ) {
			print STDERR
			  "WARNING: Response file for $ttype test $tname missing: test skipped\n";
			$skipcnt++;
			next;
		}
		elsif ( !$verify ) {
			# Generation mode: derive the .rsp path from the .req path
			# and make sure the output directory exists.
			if ( $rsp ne "" ) {
				print STDERR "WARNING: Response file for $tname deleted\n";
				unlink $rsp;
			}
			$out = $req;
			$out =~ s|/req/(\S+)\.req|/$rspdir/$1.rsp|;
			my $outdir = $out;
			$outdir =~ s|/[^/]*$||;
			if ( !-d $outdir && ($outfile eq "" || $minimal_script)) {
				print STDERR "DEBUG: Creating directory $outdir\n" if $debug;
				mkdir($outdir) || die "Can't create directory $outdir";
			}
			if ($outfile ne "") {
				if ($win32) {
					$outdir =~ tr|/|\\|;
					$req =~ tr|/|\\|;
					$out =~ tr|/|\\|;
				}
				# Emit directory setup commands once per directory.
				if ($outdir ne $lastdir && !$minimal_script) {
					if ($win32) {
						print OUT <<END
if exist \"$outdir\" rd /s /q "$outdir"
md \"$outdir\"
END
					} else {
						print OUT <<END
\$RM \"$outdir\"
\$MKDIR \"$outdir\"
END
					}
					$lastdir = $outdir;
				}
			}
		}
		my $cmd = "$tcmd \"$sfprefix$req\" \"$sfprefix$out\"";
		print STDERR "DEBUG: running test $tname\n" if ( $debug && !$verify );
		if ($outfile ne "") {
			if ($minimal_script) {
				print OUT "$stprefix$cmd\n";
			} else {
				print OUT "echo \" running $tname test\"\n" unless $minimal_script;
				print OUT "\${TPREFIX}$cmd\n";
			}
		} else {
			$cmd = "$tprefix$cmd";
			system($cmd);
			if ( $? != 0 ) {
				print STDERR
				  "WARNING: error executing test $tname for command: $cmd\n";
				$runerr++;
				next;
			}
		}
		if ($verify) {
			# Some tests cannot be compared textually; they have a
			# special verify program (or "skip") in %verify_special.
			if ( exists $verify_special{"$ttype:$tname"} && !$cmpall) {
				my $vout = $rsp;
				$vout =~ s/\.rsp$/.ver/;
				$tcmd = $verify_special{"$ttype:$tname"};
				if ($tcmd eq "skip") {
					print STDERR "DEBUG: No verify possible: skipped.\n" if $debug;
					$scheckok++;
					next;
				}
				$cmd = "$tprefix$tcmd ";
				$cmd .= "\"$out\" \"$vout\"";
				system($cmd);
				if ( $? != 0 ) {
					print STDERR
					  "WARNING: error executing verify test $tname $cmd\n";
					$scheckrunerr++;
					next;
				}
				# Count pass/fail "Result =" lines in the verify output.
				# Open deliberately unchecked: a missing file yields zero
				# pass lines and is counted as a failure below.
				my ( $fcount, $pcount ) = ( 0, 0 );
				open VER, '<', "$vout";
				while (<VER>) {
					if (/^Result\s*=\s*(\S*)\s*$/i)
					{
						if ( $1 eq "F" ) {
							$fcount++;
						}
						else {
							$pcount++;
						}
					}
				}
				close VER;
				unlink $vout;
				if ( $fcount || $debug ) {
					print STDERR "DEBUG: $tname, Pass=$pcount, Fail=$fcount\n";
				}
				if ( $fcount || !$pcount ) {
					$scheckerr++;
				}
				else {
					$scheckok++;
				}
			}
			elsif ( !cmp_file( $tname, $rsp, $out ) ) {
				$cmperr++;
			}
			else {
				$cmpok++;
			}
			unlink $out;
		}
	}
	if ($outfile ne "") {
		print OUT "\n\necho All Tests Completed\n" unless $minimal_script;
		close OUT;
	}
}
# Compare a stored response file ($rsp) against freshly generated output
# ($tst) line by line, using next_line() normalisation.  Returns 1 when
# the files match, 0 on any open error, premature EOF or mismatch.
sub cmp_file {
	my ( $tname, $rsp, $tst ) = @_;
	my ( $rspf, $tstf );
	my ( $rspline, $tstline );
	my $monte = 0;
	# Bug fixes: three-argument opens, and the first message mislabelled
	# the response file as a "request" file.
	if ( !open( $rspf, '<', $rsp ) ) {
		print STDERR "ERROR: can't open response file $rsp\n";
		return 0;
	}
	if ( !open( $tstf, '<', $tst ) ) {
		print STDERR "ERROR: can't open output file $tst\n";
		return 0;
	}
	# Monte Carlo result files need the workaround below.
	$monte = 1 if ($rsp =~ /Monte[123]/);
	for ( ; ; ) {
		$rspline = next_line($rspf);
		$tstline = next_line($tstf);
		# Both files exhausted together: everything matched.
		if ( !defined($rspline) && !defined($tstline) ) {
			print STDERR "DEBUG: $tname file comparison OK\n" if $debug;
			return 1;
		}
		# Workaround for old broken DES3 MCT format which added bogus
		# extra lines: after [ENCRYPT] or [DECRYPT] skip until first
		# COUNT line.
		if ($monte) {
			if ($rspline =~ /CRYPT/) {
				do {
					$rspline = next_line($rspf);
				} while (defined($rspline) && $rspline !~ /COUNT/);
			}
			if ($tstline =~ /CRYPT/) {
				do {
					$tstline = next_line($tstf);
				} while (defined($tstline) && $tstline !~ /COUNT/);
			}
		}
		if ( !defined($rspline) ) {
			print STDERR "ERROR: $tname EOF on $rsp\n";
			return 0;
		}
		if ( !defined($tstline) ) {
			print STDERR "ERROR: $tname EOF on $tst\n";
			return 0;
		}
		# Workaround for bug in RAND des2 test output */
		if ( $tstline =~ /^Key2 =/ && $rspline =~ /^Key1 =/ ) {
			$rspline =~ s/^Key1/Key2/;
		}
		if ( $tstline ne $rspline ) {
			print STDERR "ERROR: $tname mismatch:\n";
			print STDERR "\t \"$tstline\" != \"$rspline\"\n";
			return 0;
		}
	}
	# (An unreachable "return 1;" after the infinite loop was removed:
	# the loop only exits via the return statements above.)
}
# Return the next significant line from filehandle $fh, normalised for
# comparison: comments and blank lines are skipped, whitespace runs are
# collapsed to single spaces, trailing whitespace is dropped, one "00"
# following "= " is removed, and the result is upper-cased.
# Returns undef at end of file.
sub next_line {
	my ($fh) = @_;
	while ( my $line = <$fh> ) {
		chomp $line;
		# Delete comments.
		$line =~ s/#.*$//;
		# Ignore lines that are blank once comments are gone.
		next if $line =~ /^\s*$/;
		# Translate multiple space into one.
		$line =~ s/\s+/ /g;
		# Delete trailing whitespace.
		$line =~ s/\s+$//;
		# Remove leading zeroes.
		$line =~ s/= 00/= /;
		# Translate to upper case.
		return uc $line;
	}
	return undef;
}
# Debug helper: print the start of file $fname to handle $fh, bracketed
# by "======" marker lines.  The leading comment/blank header is printed
# first, followed by roughly the first dozen content lines.
sub print_file_start {
	my ($fname, $fh) = @_;
	print $fh "======\n";
	# Robustness fix: three-argument open on a lexical handle with an
	# explicit failure report (the original used an unchecked two-arg
	# open on the global bareword handle IN).
	my $in;
	if ( !open( $in, '<', $fname ) ) {
		print $fh "ERROR: can't open $fname: $!\n";
		print $fh "======\n";
		return;
	}
	while (<$in>) {
		my $line = $_;
		s/#.*$//;
		unless (/^\s*$/) {
			# Bug fix: the first non-comment line used to be consumed
			# here without being printed; emit it before falling
			# through to the counted loop below.
			print $fh $line;
			last;
		}
		print $fh $line;
	}
	my $lines = 0;
	while (<$in>) {
		print $fh $_;
		last if $lines++ > 10;
	}
	close $in;
	print $fh "======\n";
}
| GaloisInc/hacrypto | src/C/openssl/openssl-fips-ecp-2.0.11/fips/fipsalgtest.pl | Perl | bsd-3-clause | 35,776 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 8.0.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly. Use Unicode::UCD to access the Unicode character data
# base.
# The heredoc below is mktables' internal inversion-list "V" format:
# "V76" announces 76 code point boundaries, and alternating spans between
# consecutive boundaries are in/out of the property (here: presumably
# Script_Extensions=Greek, per the file's repository path lib/Scx/Grek.pl;
# internal format, subject to change -- see the header above).
return <<'END';
V76
834
835
837
838
880
884
885
888
890
894
895
896
900
901
902
903
904
907
908
909
910
930
931
994
1008
1024
7462
7467
7517
7522
7526
7531
7615
7618
7936
7958
7960
7966
7968
8006
8008
8014
8016
8024
8025
8026
8027
8028
8029
8030
8031
8062
8064
8117
8118
8133
8134
8148
8150
8156
8157
8176
8178
8181
8182
8191
8486
8487
43877
43878
65856
65933
65952
65953
119296
119366
END
| operepo/ope | bin/usr/share/perl5/core_perl/unicore/lib/Scx/Grek.pl | Perl | mit | 856 |
=pod
=head1 NAME
X509_VERIFY_PARAM_set_flags, X509_VERIFY_PARAM_clear_flags, X509_VERIFY_PARAM_get_flags, X509_VERIFY_PARAM_set_purpose, X509_VERIFY_PARAM_set_trust, X509_VERIFY_PARAM_set_depth, X509_VERIFY_PARAM_get_depth, X509_VERIFY_PARAM_set_auth_level, X509_VERIFY_PARAM_get_auth_level, X509_VERIFY_PARAM_set_time, X509_VERIFY_PARAM_add0_policy, X509_VERIFY_PARAM_set1_policies, X509_VERIFY_PARAM_set1_host, X509_VERIFY_PARAM_add1_host, X509_VERIFY_PARAM_set_hostflags, X509_VERIFY_PARAM_get0_peername, X509_VERIFY_PARAM_set1_email, X509_VERIFY_PARAM_set1_ip, X509_VERIFY_PARAM_set1_ip_asc - X509 verification parameters
=head1 SYNOPSIS
#include <openssl/x509_vfy.h>
int X509_VERIFY_PARAM_set_flags(X509_VERIFY_PARAM *param,
unsigned long flags);
int X509_VERIFY_PARAM_clear_flags(X509_VERIFY_PARAM *param,
unsigned long flags);
unsigned long X509_VERIFY_PARAM_get_flags(X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_set_purpose(X509_VERIFY_PARAM *param, int purpose);
int X509_VERIFY_PARAM_set_trust(X509_VERIFY_PARAM *param, int trust);
void X509_VERIFY_PARAM_set_time(X509_VERIFY_PARAM *param, time_t t);
int X509_VERIFY_PARAM_add0_policy(X509_VERIFY_PARAM *param,
ASN1_OBJECT *policy);
int X509_VERIFY_PARAM_set1_policies(X509_VERIFY_PARAM *param,
STACK_OF(ASN1_OBJECT) *policies);
void X509_VERIFY_PARAM_set_depth(X509_VERIFY_PARAM *param, int depth);
int X509_VERIFY_PARAM_get_depth(const X509_VERIFY_PARAM *param);
void X509_VERIFY_PARAM_set_auth_level(X509_VERIFY_PARAM *param,
int auth_level);
int X509_VERIFY_PARAM_get_auth_level(const X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_set1_host(X509_VERIFY_PARAM *param,
const char *name, size_t namelen);
int X509_VERIFY_PARAM_add1_host(X509_VERIFY_PARAM *param,
const char *name, size_t namelen);
void X509_VERIFY_PARAM_set_hostflags(X509_VERIFY_PARAM *param,
unsigned int flags);
char *X509_VERIFY_PARAM_get0_peername(X509_VERIFY_PARAM *param);
int X509_VERIFY_PARAM_set1_email(X509_VERIFY_PARAM *param,
const char *email, size_t emaillen);
int X509_VERIFY_PARAM_set1_ip(X509_VERIFY_PARAM *param,
const unsigned char *ip, size_t iplen);
int X509_VERIFY_PARAM_set1_ip_asc(X509_VERIFY_PARAM *param, const char *ipasc);
=head1 DESCRIPTION
These functions manipulate the B<X509_VERIFY_PARAM> structure associated with
a certificate verification operation.
The X509_VERIFY_PARAM_set_flags() function sets the flags in B<param> by
ORing them with B<flags>. See the B<VERIFICATION FLAGS> section for a complete
description of values the B<flags> parameter can take.
X509_VERIFY_PARAM_get_flags() returns the flags in B<param>.
X509_VERIFY_PARAM_clear_flags() clears the flags B<flags> in B<param>.
X509_VERIFY_PARAM_set_purpose() sets the verification purpose in B<param>
to B<purpose>. This determines the acceptable purpose of the certificate
chain, for example SSL client or SSL server.
X509_VERIFY_PARAM_set_trust() sets the trust setting in B<param> to
B<trust>.
X509_VERIFY_PARAM_set_time() sets the verification time in B<param> to
B<t>. Normally the current time is used.
X509_VERIFY_PARAM_add0_policy() enables policy checking (it is disabled
by default) and adds B<policy> to the acceptable policy set.
X509_VERIFY_PARAM_set1_policies() enables policy checking (it is disabled
by default) and sets the acceptable policy set to B<policies>. Any existing
policy set is cleared. The B<policies> parameter can be B<NULL> to clear
an existing policy set.
X509_VERIFY_PARAM_set_depth() sets the maximum verification depth to B<depth>.
That is the maximum number of intermediate CA certificates that can appear in a
chain.
A maximal depth chain contains 2 more certificates than the limit, since
neither the end-entity certificate nor the trust-anchor count against this
limit.
Thus a B<depth> limit of 0 only allows the end-entity certificate to be signed
directly by the trust-anchor, while with a B<depth> limit of 1 there can be one
intermediate CA certificate between the trust-anchor and the end-entity
certificate.
X509_VERIFY_PARAM_set_auth_level() sets the authentication security level to
B<auth_level>.
The authentication security level determines the acceptable signature and public
key strength when verifying certificate chains.
For a certificate chain to validate, the public keys of all the certificates
must meet the specified security level.
The signature algorithm security level is not enforced for the chain's I<trust
anchor> certificate, which is either directly trusted or validated by means other
than its signature.
See L<SSL_CTX_set_security_level(3)> for the definitions of the available
levels.
The default security level is -1, or "not set".
At security level 0 or lower all algorithms are acceptable.
Security level 1 requires at least 80-bit-equivalent security and is broadly
interoperable, though it will, for example, reject MD5 signatures or RSA keys
shorter than 1024 bits.
X509_VERIFY_PARAM_set1_host() sets the expected DNS hostname to
B<name> clearing any previously specified host name or names. If
B<name> is NULL, or empty the list of hostnames is cleared, and
name checks are not performed on the peer certificate. If B<name>
is NUL-terminated, B<namelen> may be zero, otherwise B<namelen>
must be set to the length of B<name>. When a hostname is specified,
certificate verification automatically invokes L<X509_check_host(3)>
with flags equal to the B<flags> argument given to
X509_VERIFY_PARAM_set_hostflags() (default zero). Applications
are strongly advised to use this interface in preference to explicitly
calling L<X509_check_host(3)>. Hostname checks are out of scope
with the DANE-EE(3) certificate usage, and the internal check will
be suppressed as appropriate when DANE support is added to OpenSSL.
X509_VERIFY_PARAM_add1_host() adds B<name> as an additional reference
identifier that can match the peer's certificate. Any previous names
set via X509_VERIFY_PARAM_set1_host() or X509_VERIFY_PARAM_add1_host()
are retained, no change is made if B<name> is NULL or empty. When
multiple names are configured, the peer is considered verified when
any name matches.
X509_VERIFY_PARAM_get0_peername() returns the DNS hostname or subject
CommonName from the peer certificate that matched one of the reference
identifiers. When wildcard matching is not disabled, or when a
reference identifier specifies a parent domain (starts with ".")
rather than a hostname, the peer name may be a wildcard name or a
sub-domain of the reference identifier respectively. The return
string is allocated by the library and is no longer valid once the
associated B<param> argument is freed. Applications must not free
the return value.
X509_VERIFY_PARAM_set1_email() sets the expected RFC822 email address to
B<email>. If B<email> is NUL-terminated, B<emaillen> may be zero, otherwise
B<emaillen> must be set to the length of B<email>. When an email address
is specified, certificate verification automatically invokes
L<X509_check_email(3)>.
X509_VERIFY_PARAM_set1_ip() sets the expected IP address to B<ip>.
The B<ip> argument is in binary format, in network byte-order and
B<iplen> must be set to 4 for IPv4 and 16 for IPv6. When an IP
address is specified, certificate verification automatically invokes
L<X509_check_ip(3)>.
X509_VERIFY_PARAM_set1_ip_asc() sets the expected IP address to
B<ipasc>. The B<ipasc> argument is a NUL-terminated ASCII string:
dotted decimal quad for IPv4 and colon-separated hexadecimal for
IPv6. The condensed "::" notation is supported for IPv6 addresses.
=head1 RETURN VALUES
X509_VERIFY_PARAM_set_flags(), X509_VERIFY_PARAM_clear_flags(),
X509_VERIFY_PARAM_set_purpose(), X509_VERIFY_PARAM_set_trust(),
X509_VERIFY_PARAM_add0_policy() X509_VERIFY_PARAM_set1_policies(),
X509_VERIFY_PARAM_set1_host(), X509_VERIFY_PARAM_add1_host(),
X509_VERIFY_PARAM_set1_email(), X509_VERIFY_PARAM_set1_ip() and
X509_VERIFY_PARAM_set1_ip_asc() return 1 for success and 0 for
failure.
X509_VERIFY_PARAM_get_flags() returns the current verification flags.
X509_VERIFY_PARAM_set_time() and X509_VERIFY_PARAM_set_depth() do not return
values.
X509_VERIFY_PARAM_get_depth() returns the current verification depth.
X509_VERIFY_PARAM_get_auth_level() returns the current authentication security
level.
=head1 VERIFICATION FLAGS
The verification flags consists of zero or more of the following flags
ored together.
B<X509_V_FLAG_CRL_CHECK> enables CRL checking for the certificate chain leaf
certificate. An error occurs if a suitable CRL cannot be found.
B<X509_V_FLAG_CRL_CHECK_ALL> enables CRL checking for the entire certificate
chain.
B<X509_V_FLAG_IGNORE_CRITICAL> disables critical extension checking. By default
any unhandled critical extensions in certificates or (if checked) CRLs results
in a fatal error. If this flag is set unhandled critical extensions are
ignored. B<WARNING> setting this option for anything other than debugging
purposes can be a security risk. Finer control over which extensions are
supported can be performed in the verification callback.
The B<X509_V_FLAG_X509_STRICT> flag disables workarounds for some broken
certificates and makes the verification strictly apply B<X509> rules.
B<X509_V_FLAG_ALLOW_PROXY_CERTS> enables proxy certificate verification.
B<X509_V_FLAG_POLICY_CHECK> enables certificate policy checking, by default
no policy checking is performed. Additional information is sent to the
verification callback relating to policy checking.
B<X509_V_FLAG_EXPLICIT_POLICY>, B<X509_V_FLAG_INHIBIT_ANY> and
B<X509_V_FLAG_INHIBIT_MAP> set the B<require explicit policy>, B<inhibit any
policy> and B<inhibit policy mapping> flags respectively as defined in
B<RFC3280>. Policy checking is automatically enabled if any of these flags
are set.
If B<X509_V_FLAG_NOTIFY_POLICY> is set and the policy checking is successful
a special status code is set to the verification callback. This permits it
to examine the valid policy tree and perform additional checks or simply
log it for debugging purposes.
By default some additional features such as indirect CRLs and CRLs signed by
different keys are disabled. If B<X509_V_FLAG_EXTENDED_CRL_SUPPORT> is set
they are enabled.
If B<X509_V_FLAG_USE_DELTAS> is set delta CRLs (if present) are used to
determine certificate status. If not set deltas are ignored.
B<X509_V_FLAG_CHECK_SS_SIGNATURE> enables checking of the root CA self signed
certificate signature. By default this check is disabled because it doesn't
add any additional security but in some cases applications might want to
check the signature anyway. A side effect of not checking the root CA
signature is that disabled or unsupported message digests on the root CA
are not treated as fatal errors.
If B<X509_V_FLAG_TRUSTED_FIRST> is set, when constructing the certificate chain,
L<X509_verify_cert(3)> will search the trust store for issuer certificates before
searching the provided untrusted certificates.
As of OpenSSL 1.1.0 this option is on by default and cannot be disabled.
The B<X509_V_FLAG_NO_ALT_CHAINS> flag suppresses checking for alternative
chains.
By default, unless B<X509_V_FLAG_TRUSTED_FIRST> is set, when building a
certificate chain, if the first certificate chain found is not trusted, then
OpenSSL will attempt to replace untrusted certificates supplied by the peer
with certificates from the trust store to see if an alternative chain can be
found that is trusted.
As of OpenSSL 1.1.0, with B<X509_V_FLAG_TRUSTED_FIRST> always set, this option
has no effect.
The B<X509_V_FLAG_NO_CHECK_TIME> flag suppresses checking the validity period
of certificates and CRLs against the current time. If X509_VERIFY_PARAM_set_time()
is used to specify a verification time, the check is not suppressed.
=head1 NOTES
The above functions should be used to manipulate verification parameters
instead of legacy functions which work in specific structures such as
X509_STORE_CTX_set_flags().
=head1 BUGS
Delta CRL checking is currently primitive. Only a single delta can be used and
(partly due to limitations of B<X509_STORE>) constructed CRLs are not
maintained.
If CRL checking is enabled, CRLs are expected to be available in the
corresponding B<X509_STORE> structure. No attempt is made to download
CRLs from the CRL distribution points extension.
=head1 EXAMPLE
Enable CRL checking when performing certificate verification during SSL
connections associated with an B<SSL_CTX> structure B<ctx>:
X509_VERIFY_PARAM *param;
param = X509_VERIFY_PARAM_new();
X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_CRL_CHECK);
SSL_CTX_set1_param(ctx, param);
X509_VERIFY_PARAM_free(param);
=head1 SEE ALSO
L<X509_verify_cert(3)>,
L<X509_check_host(3)>,
L<X509_check_email(3)>,
L<X509_check_ip(3)>
=head1 HISTORY
The B<X509_V_FLAG_NO_ALT_CHAINS> flag was added in OpenSSL 1.1.0
The legacy B<X509_V_FLAG_CB_ISSUER_CHECK> flag is deprecated as of
OpenSSL 1.1.0, and has no effect.
=head1 COPYRIGHT
Copyright 2009-2016 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| GaloisInc/hacrypto | src/C/openssl/openssl-1.1.0b/doc/crypto/X509_VERIFY_PARAM_set_flags.pod | Perl | bsd-3-clause | 13,639 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
# The line below is a hexadecimal code point range (start, end):
# U+10800..U+1083F -- presumably the Cypriot Syllabary block, per the
# file's repository path lib/Blk/CypriotS.pl (internal format).
return <<'END';
10800	1083F
END
| Dokaponteam/ITF_Project | xampp/perl/lib/unicore/lib/Blk/CypriotS.pl | Perl | mit | 423 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
# The line below is a hexadecimal code point range (start, end):
# U+3200..U+32FF -- presumably the Enclosed CJK Letters and Months
# block, per the file's repository path lib/Blk/Enclosed.pl.
return <<'END';
3200	32FF
END
| Dokaponteam/ITF_Project | xampp/perl/lib/unicore/lib/Blk/Enclosed.pl | Perl | mit | 421 |
package PPI::Token::DashedWord;
=pod
=head1 NAME
PPI::Token::DashedWord - A dashed bareword token
=head1 INHERITANCE
PPI::Token::DashedWord
isa PPI::Token
isa PPI::Element
=head1 DESCRIPTION
The "dashed bareword" token represents literal values like C<-foo>.
NOTE: this class is currently unused. All tokens that should be
PPI::Token::DashedWords are just normal PPI::Token::Word instead.
That actually makes sense, since there really is nothing special about
this class except that dashed words cannot be subroutine names or
keywords. As such, this class may be removed from PPI in the future.
=head1 METHODS
=cut
use strict;
use PPI::Token ();
use vars qw{$VERSION @ISA};
BEGIN {
	# PPI release version this class ships with, and standard token
	# inheritance from PPI::Token.
	$VERSION = '1.215';
	@ISA = 'PPI::Token';
}
=pod
=head2 literal
Returns the value of the dashed word as a string. This differs from
C<content> because C<-Foo'Bar> expands to C<-Foo::Bar>.
=begin testing literal 9
my @pairs = (
"-foo", '-foo',
"-Foo::Bar", '-Foo::Bar',
"-Foo'Bar", '-Foo::Bar',
);
while ( @pairs ) {
my $from = shift @pairs;
my $to = shift @pairs;
my $doc = PPI::Document->new( \"( $from => 1 );" );
isa_ok( $doc, 'PPI::Document' );
my $word = $doc->find_first('Token::DashedWord');
SKIP: {
skip( "PPI::Token::DashedWord is deactivated", 2 );
isa_ok( $word, 'PPI::Token::DashedWord' );
is( $word && $word->literal, $to, "The source $from becomes $to ok" );
}
}
=end testing
=cut
*literal = *PPI::Token::Word::literal;
#####################################################################
# Tokenizer Methods
# Tokenizer handler: consume the rest of the dashed bareword, then decide
# whether the completed token is really a file test operator (-e, -f, ...)
# or an ordinary word, reclassify it accordingly, finalize it, and hand
# the current character back to the tokenizer.
sub __TOKENIZER__on_char {
	my ( undef, $t ) = @_;

	# Consume the remainder of the bareword from the current line.
	my $rest = substr( $t->{line}, $t->{line_cursor} );
	if ( $rest =~ /^(\w+)/ ) {
		my $word = $1;
		$t->{token}->{content} .= $word;
		$t->{line_cursor} += length $word;
	}

	# A lone dash followed by one of these letters is a file test
	# operator rather than a normal dashed bareword.
	my $is_filetest =
		$t->{token}->{content} =~ /^\-[rwxoRWXOezsfdlpSbctugkTBMAC]$/;
	$t->{class} = $t->{token}->set_class(
		$is_filetest ? 'Operator' : 'Word'
	);

	$t->_finalize_token->__TOKENIZER__on_char( $t );
}
1;
=pod
=head1 SUPPORT
See the L<support section|PPI/SUPPORT> in the main module.
=head1 AUTHOR
Adam Kennedy E<lt>adamk@cpan.orgE<gt>
=head1 COPYRIGHT
Copyright 2001 - 2011 Adam Kennedy.
This program is free software; you can redistribute
it and/or modify it under the same terms as Perl itself.
The full text of the license can be found in the
LICENSE file included with this module.
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/PPI/Token/DashedWord.pm | Perl | mit | 2,548 |
#line 1
package Module::Install::Metadata;
use strict 'vars';
use Module::Install::Base ();
use vars qw{$VERSION @ISA $ISCORE};
BEGIN {
	# Bundled Module::Install version, base class, and a flag marking
	# this as a core Module::Install extension.
	$VERSION = '1.12';
	@ISA = 'Module::Install::Base';
	$ISCORE = 1;
}
# Metadata keys, grouped by the shape of accessor generated for them by
# the foreach loops further down in this file.
my @boolean_keys = qw{
    sign
};
my @scalar_keys = qw{
    name
    module_name
    abstract
    version
    distribution_type
    tests
    installdirs
};
my @tuple_keys = qw{
    configure_requires
    build_requires
    requires
    recommends
    bundles
    resources
};
my @resource_keys = qw{
    homepage
    bugtracker
    repository
};
my @array_keys = qw{
    keywords
    author
};

# Historical alias: authors() behaves exactly like author().
# (Forward reference is fine; author() is generated at runtime below.)
*authors = \&author;

# Introspection helpers used by the META writers to enumerate the keys.
sub Meta              { shift          }
sub Meta_BooleanKeys  { @boolean_keys  }
sub Meta_ScalarKeys   { @scalar_keys   }
sub Meta_TupleKeys    { @tuple_keys    }
sub Meta_ResourceKeys { @resource_keys }
sub Meta_ArrayKeys    { @array_keys    }
# Generate one accessor per boolean key.  With no argument (and a caller
# that wants a value) it reads; otherwise it writes, defaulting to 1,
# and returns $self for chaining.
foreach my $key ( @boolean_keys ) {
    *$key = sub {
        my $self = shift;
        if ( defined wantarray and not @_ ) {
            return $self->{values}->{$key};
        }
        $self->{values}->{$key} = ( @_ ? $_[0] : 1 );
        return $self;
    };
}

# Generate one accessor per scalar key: single stored value, read/write.
foreach my $key ( @scalar_keys ) {
    *$key = sub {
        my $self = shift;
        return $self->{values}->{$key} if defined wantarray and !@_;
        $self->{values}->{$key} = shift;
        return $self;
    };
}

# Generate one accessor per array key: arguments are appended to the
# stored list; a bare read returns the array reference itself.
foreach my $key ( @array_keys ) {
    *$key = sub {
        my $self = shift;
        return $self->{values}->{$key} if defined wantarray and !@_;
        $self->{values}->{$key} ||= [];
        push @{$self->{values}->{$key}}, @_;
        return $self;
    };
}
# Generate one accessor per resource key (homepage/bugtracker/repository).
# Reader form (no args) returns every value recorded under that key from
# the resources pair list; writer form appends via resources().
#
# Fix: the old line "return $self->{values}->{resources}->{$key} unless @_;"
# was unreachable (both paths of the unless-block above already return) and
# wrong besides — resources is stored as an arrayref of pairs, not a hashref.
foreach my $key ( @resource_keys ) {
    *$key = sub {
        my $self = shift;
        unless ( @_ ) {
            return () unless $self->{values}->{resources};
            return map  { $_->[1] }
                   grep { $_->[0] eq $key }
                   @{ $self->{values}->{resources} };
        }
        my $uri = shift or die(
            "Did not provide a value to $key()"
        );
        $self->resources( $key => $uri );
        return 1;
    };
}
# Generate accessors for the tuple keys (module => version pair lists).
# resources is excluded — it gets the dedicated handler below.  Each call
# appends pairs; a missing/false version defaults to 0.  Returns the
# flattened list of pairs that were added.
foreach my $key ( grep { $_ ne "resources" } @tuple_keys) {
    *$key = sub {
        my $self = shift;
        return $self->{values}->{$key} unless @_;
        my @added;
        while ( @_ ) {
            my $module = shift or last;
            my $version = shift || 0;
            push @added, [ $module, $version ];
        }
        push @{ $self->{values}->{$key} }, @added;
        return map {@$_} @added;
    };
}

# Resource handling
# Only these lowercase resource names are permitted; all other lowercase
# names are reserved by the META spec for future use.
my %lc_resource = map { $_ => 1 } qw{
    homepage
    license
    bugtracker
    repository
};

# resources( name => value, ... ): append resource pairs to the ordered
# pair list, dying on unsupported reserved lowercase names.  Returns the
# accumulated arrayref (possibly undef if nothing was ever added).
sub resources {
    my $self = shift;
    while ( @_ ) {
        my $name  = shift or last;
        my $value = shift or next;
        if ( $name eq lc $name and ! $lc_resource{$name} ) {
            die("Unsupported reserved lowercase resource '$name'");
        }
        $self->{values}->{resources} ||= [];
        push @{ $self->{values}->{resources} }, [ $name, $value ];
    }
    $self->{values}->{resources};
}
# Aliases for build_requires that will have alternative
# meanings in some future version of META.yml.
sub test_requires { shift->build_requires(@_) }
sub install_requires { shift->build_requires(@_) }

# Aliases for installdirs options
sub install_as_core { $_[0]->installdirs('perl') }
sub install_as_cpan { $_[0]->installdirs('site') }
sub install_as_site { $_[0]->installdirs('site') }
sub install_as_vendor { $_[0]->installdirs('vendor') }

# dynamic_config($bool): record whether running Makefile.PL is required to
# compute the prereqs.  Returns 0 without changing anything if the dist is
# already marked dynamic.
sub dynamic_config {
    my $self  = shift;
    my $value = @_ ? shift : 1;
    if ( $self->{values}->{dynamic_config} ) {
        # Once dynamic we never change to static, for safety
        return 0;
    }
    $self->{values}->{dynamic_config} = $value ? 1 : 0;
    return 1;
}

# Convenience command
sub static_config {
    shift->dynamic_config(0);
}

# perl_version([$version]): get, or normalize-and-store, the minimum perl.
sub perl_version {
    my $self = shift;
    return $self->{values}->{perl_version} unless @_;
    my $version = shift or die(
        "Did not provide a value to perl_version()"
    );

    # Normalize the version
    $version = $self->_perl_version($version);

    # We don't support the really old versions
    unless ( $version >= 5.005 ) {
        die "Module::Install only supports 5.005 or newer (use ExtUtils::MakeMaker)\n";
    }

    $self->{values}->{perl_version} = $version;
}
# all_from($file): pull name, version, perl_version, author, license and
# abstract from a single module file (preferring a sibling .pod for the
# POD-derived values).  With no argument, the path is derived from name().
sub all_from {
    my ( $self, $file ) = @_;

    unless ( defined($file) ) {
        my $name = $self->name or die(
            "all_from called with no args without setting name() first"
        );
        $file = join('/', 'lib', split(/-/, $name)) . '.pm';
        # Fall back to a top-level .pm when the lib/ layout doesn't exist.
        $file =~ s{.*/}{} unless -e $file;
        unless ( -e $file ) {
            die("all_from cannot find $file from $name");
        }
    }
    unless ( -f $file ) {
        die("The path '$file' does not exist, or is not a file");
    }

    $self->{values}{all_from} = $file;

    # Some methods pull from POD instead of code.
    # If there is a matching .pod, use that instead
    my $pod = $file;
    $pod =~ s/\.pm$/.pod/i;
    $pod = $file unless -e $pod;

    # Pull the different values
    $self->name_from($file)         unless $self->name;
    $self->version_from($file)      unless $self->version;
    $self->perl_version_from($file) unless $self->perl_version;
    $self->author_from($pod)        unless @{$self->author || []};
    $self->license_from($pod)       unless $self->license;
    $self->abstract_from($pod)      unless $self->abstract;

    return 1;
}

# provides( package => { file => ..., version => ... }, ... ): merge into
# and return the provides hash.
sub provides {
    my $self     = shift;
    my $provides = ( $self->{values}->{provides} ||= {} );
    %$provides = (%$provides, @_) if @_;
    return $provides;
}
# auto_provides(): author-time only — let Module::Build scan the MANIFEST'd
# dist and fill in the provides map automatically.
sub auto_provides {
    my $self = shift;
    return $self unless $self->is_admin;
    unless (-e 'MANIFEST') {
        warn "Cannot deduce auto_provides without a MANIFEST, skipping\n";
        return $self;
    }

    # Avoid spurious warnings as we are not checking manifest here.
    local $SIG{__WARN__} = sub {1};
    require ExtUtils::Manifest;
    local *ExtUtils::Manifest::manicheck = sub { return };

    require Module::Build;
    my $build = Module::Build->new(
        dist_name    => $self->name,
        dist_version => $self->version,
        license      => $self->license,
    );
    $self->provides( %{ $build->find_dist_packages || {} } );
}

# feature( $name, @modules ): record one optional feature and the modules
# it needs.  Returns the full feature list.
sub feature {
    my $self     = shift;
    my $name     = shift;
    my $features = ( $self->{values}->{features} ||= [] );
    my $mods;

    if ( @_ == 1 and ref( $_[0] ) ) {
        # The user used ->feature like ->features by passing in the second
        # argument as a reference. Accommodate for that.
        $mods = $_[0];
    } else {
        $mods = \@_;
    }

    my $count = 0;
    push @$features, (
        $name => [
            # Flatten hashrefs/arrayrefs into the plain module list.
            map {
                ref($_) ? ( ref($_) eq 'HASH' ) ? %$_ : @$_ : $_
            } @$mods
        ]
    );

    return @$features;
}

# features( name => \@mods, ... ): bulk wrapper around feature().
sub features {
    my $self = shift;
    while ( my ( $name, $mods ) = splice( @_, 0, 2 ) ) {
        $self->feature( $name, @$mods );
    }
    return $self->{values}->{features}
        ? @{ $self->{values}->{features} }
        : ();
}

# no_index( $type => @values ): accumulate PAUSE no_index entries
# (e.g. directory => 'inc'); returns the whole no_index hash.
sub no_index {
    my $self = shift;
    my $type = shift;
    push @{ $self->{values}->{no_index}->{$type} }, @_ if $type;
    return $self->{values}->{no_index};
}
# read(): load META.yml and replay each known key through its accessor so
# user-set values keep precedence.  NOTE: deliberately shadows the core
# read() builtin within this package.
sub read {
    my $self = shift;
    $self->include_deps( 'YAML::Tiny', 0 );

    require YAML::Tiny;
    my $data = YAML::Tiny::LoadFile('META.yml');

    # Call methods explicitly in case user has already set some values.
    while ( my ( $key, $value ) = each %$data ) {
        next unless $self->can($key);
        if ( ref $value eq 'HASH' ) {
            # Tuple-style keys: replay as module => version pairs.
            while ( my ( $module, $version ) = each %$value ) {
                $self->can($key)->($self, $module => $version );
            }
        } else {
            $self->can($key)->($self, $value);
        }
    }
    return $self;
}

# write(): author-time only — delegate META.yml generation to the admin side.
sub write {
    my $self = shift;
    return $self unless $self->is_admin;
    $self->admin->write_meta;
    return $self;
}

# version_from($file): extract $VERSION via ExtUtils::MM and remember the
# file for MakeMaker's own VERSION_FROM integrity check.
sub version_from {
    require ExtUtils::MM_Unix;
    my ( $self, $file ) = @_;
    $self->version( ExtUtils::MM_Unix->parse_version($file) );

    # for version integrity check
    $self->makemaker_args( VERSION_FROM => $file );
}

# abstract_from($file): extract the NAME-section abstract via ExtUtils::MM.
sub abstract_from {
    require ExtUtils::MM_Unix;
    my ( $self, $file ) = @_;
    $self->abstract(
        bless(
            { DISTNAME => $self->name },
            'ExtUtils::MM_Unix'
        )->parse_abstract($file)
    );
}
# Add both distribution and module name
# name_from($file): take the first package statement, use it verbatim as
# module_name and with :: mapped to - as the distribution name.
sub name_from {
    my ($self, $file) = @_;
    if (
        Module::Install::_read($file) =~ m/
        ^ \s*
        package \s*
        ([\w:]+)
        [\s|;]*
        /ixms
        # NOTE(review): the class [\s|;]* also matches a literal '|';
        # presumably only whitespace or ';' was intended — confirm upstream.
    ) {
        my ($name, $module_name) = ($1, $1);
        $name =~ s{::}{-}g;
        $self->name($name);
        unless ( $self->module_name ) {
            $self->module_name($module_name);
        }
    } else {
        die("Cannot determine name from $file\n");
    }
}
# _extract_perl_version($source): find the first "use/require <version>;"
# statement in Perl source and return the version string with underscore
# separators removed (e.g. 5.006_001 -> 5.006001), or nothing when absent.
sub _extract_perl_version {
    my ($source) = @_;

    return unless $source =~ m/
        ^\s*
        (?:use|require) \s*
        v?
        ([\d_\.]+)
        \s* ;
    /ixms;

    ( my $version = $1 ) =~ s{_}{}g;   # strip underscore separators
    return $version;
}
# perl_version_from($file): scrape the minimum perl from a source file and
# store it via perl_version(); warns and returns nothing if none is found.
sub perl_version_from {
    my $self = shift;
    my $perl_version=_extract_perl_version(Module::Install::_read($_[0]));
    if ($perl_version) {
        $self->perl_version($perl_version);
    } else {
        warn "Cannot determine perl version info from $_[0]\n";
        return;
    }
}
# author_from($file): scrape the author from an AUTHOR(S) POD section, or
# failing that from the copyright line of a LICENSE/COPYRIGHT section, then
# decode POD E<...> escapes using whichever escape table is available.
sub author_from {
    my $self    = shift;
    my $content = Module::Install::_read($_[0]);
    if ($content =~ m/
        =head \d \s+ (?:authors?)\b \s*
        ([^\n]*)
        |
        =head \d \s+ (?:licen[cs]e|licensing|copyright|legal)\b \s*
        .*? copyright .*? \d\d\d[\d.]+ \s* (?:\bby\b)? \s*
        ([^\n]*)
    /ixms) {
        my $author = $1 || $2;

        # XXX: ugly but should work anyway...
        if (eval "require Pod::Escapes; 1") {
            # Pod::Escapes has a mapping table.
            # It's in core of perl >= 5.9.3, and should be installed
            # as one of the Pod::Simple's prereqs, which is a prereq
            # of Pod::Text 3.x (see also below).
            $author =~ s{ E<( (\d+) | ([A-Za-z]+) )> }
            {
                defined $2
                ? chr($2)
                : defined $Pod::Escapes::Name2character_number{$1}
                ? chr($Pod::Escapes::Name2character_number{$1})
                : do {
                    warn "Unknown escape: E<$1>";
                    "E<$1>";
                };
            }gex;
        }
        elsif (eval "require Pod::Text; 1" && $Pod::Text::VERSION < 3) {
            # Pod::Text < 3.0 has yet another mapping table,
            # though the table name of 2.x and 1.x are different.
            # (1.x is in core of Perl < 5.6, 2.x is in core of
            # Perl < 5.9.3)
            my $mapping = ($Pod::Text::VERSION < 2)
                ? \%Pod::Text::HTML_Escapes
                : \%Pod::Text::ESCAPES;
            $author =~ s{ E<( (\d+) | ([A-Za-z]+) )> }
            {
                defined $2
                ? chr($2)
                : defined $mapping->{$1}
                ? $mapping->{$1}
                : do {
                    warn "Unknown escape: E<$1>";
                    "E<$1>";
                };
            }gex;
        }
        else {
            # Last resort: decode only the two most common escapes.
            $author =~ s{E<lt>}{<}g;
            $author =~ s{E<gt>}{>}g;
        }
        $self->author($author);
    } else {
        warn "Cannot determine author info from $_[0]\n";
    }
}
#Stolen from M::B
# Map of recognized license identifiers to their canonical URLs (undef
# where the META spec defines the identifier but no single URL applies).
my %license_urls = (
    perl         => 'http://dev.perl.org/licenses/',
    apache       => 'http://apache.org/licenses/LICENSE-2.0',
    apache_1_1   => 'http://apache.org/licenses/LICENSE-1.1',
    artistic     => 'http://opensource.org/licenses/artistic-license.php',
    artistic_2   => 'http://opensource.org/licenses/artistic-license-2.0.php',
    lgpl         => 'http://opensource.org/licenses/lgpl-license.php',
    lgpl2        => 'http://opensource.org/licenses/lgpl-2.1.php',
    lgpl3        => 'http://opensource.org/licenses/lgpl-3.0.html',
    bsd          => 'http://opensource.org/licenses/bsd-license.php',
    gpl          => 'http://opensource.org/licenses/gpl-license.php',
    gpl2         => 'http://opensource.org/licenses/gpl-2.0.php',
    gpl3         => 'http://opensource.org/licenses/gpl-3.0.html',
    mit          => 'http://opensource.org/licenses/mit-license.php',
    mozilla      => 'http://opensource.org/licenses/mozilla1.1.php',
    open_source  => undef,
    unrestricted => undef,
    restrictive  => undef,
    unknown      => undef,
);

# license([$license]): get, or normalize-and-store, the license identifier,
# auto-registering the matching license resource URL when one is known.
sub license {
    my $self = shift;
    return $self->{values}->{license} unless @_;
    my $license = shift or die(
        'Did not provide a value to license()'
    );
    $license = __extract_license($license) || lc $license;
    $self->{values}->{license} = $license;

    # Automatically fill in license URLs
    if ( $license_urls{$license} ) {
        $self->resources( license => $license_urls{$license} );
    }

    return 1;
}
# _extract_license($pod): isolate the LICENSE/LICENSING (or COPYRIGHT/LEGAL)
# POD section and hand its text to the phrase matcher below.
sub _extract_license {
    my $pod = shift;
    my $matched;
    return __extract_license(
        ($matched) = $pod =~ m/
            (=head \d \s+ L(?i:ICEN[CS]E|ICENSING)\b.*?)
            (=head \d.*|=cut.*|)\z
        /xms
    ) || __extract_license(
        ($matched) = $pod =~ m/
            (=head \d \s+ (?:C(?i:OPYRIGHTS?)|L(?i:EGAL))\b.*?)
            (=head \d.*|=cut.*|)\z
        /xms
    );
}

# __extract_license($text): match the text against known license phrases
# (most specific first) and return the canonical identifier, '' if nothing
# matches.  The third value of each triple is an unused OSI-approval flag.
sub __extract_license {
    my $license_text = shift or return;
    my @phrases      = (
        '(?:under )?the same (?:terms|license) as (?:perl|the perl (?:\d )?programming language)' => 'perl', 1,
        '(?:under )?the terms of (?:perl|the perl programming language) itself' => 'perl', 1,
        'Artistic and GPL'                   => 'perl',         1,
        'GNU general public license'         => 'gpl',          1,
        'GNU public license'                 => 'gpl',          1,
        'GNU lesser general public license'  => 'lgpl',         1,
        'GNU lesser public license'          => 'lgpl',         1,
        'GNU library general public license' => 'lgpl',         1,
        'GNU library public license'         => 'lgpl',         1,
        'GNU Free Documentation license'     => 'unrestricted', 1,
        'GNU Affero General Public License'  => 'open_source',  1,
        '(?:Free)?BSD license'               => 'bsd',          1,
        'Artistic license 2\.0'              => 'artistic_2',   1,
        'Artistic license'                   => 'artistic',     1,
        'Apache (?:Software )?license'       => 'apache',       1,
        'GPL'                                => 'gpl',          1,
        'LGPL'                               => 'lgpl',         1,
        'BSD'                                => 'bsd',          1,
        'Artistic'                           => 'artistic',     1,
        'MIT'                                => 'mit',          1,
        'Mozilla Public License'             => 'mozilla',      1,
        'Q Public License'                   => 'open_source',  1,
        'OpenSSL License'                    => 'unrestricted', 1,
        'SSLeay License'                     => 'unrestricted', 1,
        'zlib License'                       => 'open_source',  1,
        'proprietary'                        => 'proprietary',  0,
    );
    while ( my ($pattern, $license, $osi) = splice(@phrases, 0, 3) ) {
        # Allow any amount of whitespace between the phrase's words.
        $pattern =~ s#\s+#\\s+#gs;
        if ( $license_text =~ /\b$pattern\b/i ) {
            return $license;
        }
    }
    return '';
}
# license_from($file): scrape the license from a file's POD and store it.
# NOTE(review): on failure it returns the string 'unknown' but does NOT
# store it — callers checking license() still see undef; confirm intent.
sub license_from {
    my $self = shift;
    if (my $license=_extract_license(Module::Install::_read($_[0]))) {
        $self->license($license);
    } else {
        warn "Cannot determine license info from $_[0]\n";
        return 'unknown';
    }
}
# _extract_bugtracker($pod): collect every POD L<...> link that points at a
# recognized bug tracker (rt.cpan.org, GitHub issues, Google Code issues)
# and return the de-duplicated list.
sub _extract_bugtracker {
    my ($pod) = @_;

    my @found = $pod =~ m#L<(
        https?\Q://rt.cpan.org/\E[^>]+|
        https?\Q://github.com/\E[\w_]+/[\w_]+/issues|
        https?\Q://code.google.com/p/\E[\w_\-]+/issues/list
    )>#gx;

    # De-duplicate via a hash; result order is unspecified, like the original.
    my %unique;
    @unique{@found} = ();
    return keys %unique;
}
# bugtracker_from($file): find exactly one bug-tracker link in the file's
# POD and register it as the bugtracker resource.  Returns 1 on success,
# 0 when zero or multiple links are found (with a warning).
sub bugtracker_from {
    my $self    = shift;
    my $content = Module::Install::_read($_[0]);
    my @links   = _extract_bugtracker($content);
    unless ( @links ) {
        warn "Cannot determine bugtracker info from $_[0]\n";
        return 0;
    }
    if ( @links > 1 ) {
        warn "Found more than one bugtracker link in $_[0]\n";
        return 0;
    }

    # Set the bugtracker.  This must be a method call: the previous bare
    # function call bugtracker($links[0]) passed the URL as $self, so the
    # resource was silently never recorded.
    $self->bugtracker( $links[0] );
    return 1;
}
# requires_from($file): scrape "use Module VERSION;" style lines from Perl
# code and add each as a runtime prerequisite.  Only versioned use lines
# match — plain "use Module;" statements are deliberately ignored.
sub requires_from {
    my $self     = shift;
    my $content  = Module::Install::_readperl($_[0]);
    my @requires = $content =~ m/^use\s+([^\W\d]\w*(?:::\w+)*)\s+(v?[\d\.]+)/mg;
    while ( @requires ) {
        my $module  = shift @requires;
        my $version = shift @requires;
        $self->requires( $module => $version );
    }
}

# test_requires_from($file): same scrape, but records test prerequisites.
# (Note: unlike requires_from, the version pattern here lacks the v? prefix.)
sub test_requires_from {
    my $self     = shift;
    my $content  = Module::Install::_readperl($_[0]);
    my @requires = $content =~ m/^use\s+([^\W\d]\w*(?:::\w+)*)\s+([\d\.]+)/mg;
    while ( @requires ) {
        my $module  = shift @requires;
        my $version = shift @requires;
        $self->test_requires( $module => $version );
    }
}
# Convert triple-part versions (eg, 5.6.1 or 5.8.9) to numbers
# (eg, 5.006001 or 5.008009), and double-part versions (eg, 5.8) likewise.
# Trailing "_NN" dev markers are stripped; version objects are numified.
sub _perl_version {
    my $version = $_[-1];

    $version =~ s/^([1-9])\.([1-9]\d?\d?)$/sprintf('%d.%03d', $1, $2)/e;
    $version =~ s/^([1-9])\.([1-9]\d?\d?)\.(0|[1-9]\d?\d?)$/sprintf('%d.%03d%03d', $1, $2, $3 || 0)/e;
    $version =~ s/(\.\d\d\d)000$/$1/;
    $version =~ s/_.+$//;

    # Numify when we were handed a version object / reference.
    $version = $version + 0 if ref $version;

    return $version;
}
# add_metadata( key => value, ... ): store arbitrary metadata values.
# Per the CPAN::Meta spec, custom (non-standard) keys must be prefixed
# with "x_"; anything else gets a warning but is stored anyway.
# Fix: corrected the typo "appopriate" in the user-facing warning.
sub add_metadata {
    my $self = shift;
    my %hash = @_;
    for my $key (keys %hash) {
        warn "add_metadata: $key is not prefixed with 'x_'.\n" .
             "Use appropriate function to add non-private metadata.\n" unless $key =~ /^x_/;
        $self->{values}->{$key} = $hash{$key};
    }
}
######################################################################
# MYMETA Support

# Removed API; kept only so old callers fail with a clear message.
sub WriteMyMeta {
    die "WriteMyMeta has been deprecated";
}

# write_mymeta_yaml(): emit MYMETA.yml.  Quietly a no-op when YAML::Tiny
# is unavailable or no META.yml exists to base the data on.
sub write_mymeta_yaml {
    my $self = shift;

    # We need YAML::Tiny to write the MYMETA.yml file
    unless ( eval { require YAML::Tiny; 1; } ) {
        return 1;
    }

    # Generate the data
    my $meta = $self->_write_mymeta_data or return 1;

    # Save as the MYMETA.yml file
    print "Writing MYMETA.yml\n";
    YAML::Tiny::DumpFile('MYMETA.yml', $meta);
}

# write_mymeta_json(): emit MYMETA.json.  Same no-op behavior when JSON
# is unavailable or there is no META.yml.
sub write_mymeta_json {
    my $self = shift;

    # We need JSON to write the MYMETA.json file
    unless ( eval { require JSON; 1; } ) {
        return 1;
    }

    # Generate the data
    my $meta = $self->_write_mymeta_data or return 1;

    # Save as the MYMETA.json file
    print "Writing MYMETA.json\n";
    Module::Install::_write(
        'MYMETA.json',
        JSON->new->pretty(1)->canonical->encode($meta),
    );
}
# _write_mymeta_data(): build the MYMETA data structure by merging the
# accumulated values (converted to hashes) into the advisory META.yml.
# Returns undef when META.yml or Parse::CPAN::Meta is unavailable.
sub _write_mymeta_data {
    my $self = shift;

    # If there's no existing META.yml there is nothing we can do
    return undef unless -f 'META.yml';

    # We need Parse::CPAN::Meta to load the file
    unless ( eval { require Parse::CPAN::Meta; 1; } ) {
        return undef;
    }

    # Merge the perl version into the dependencies
    my $val  = $self->Meta->{values};
    my $perl = delete $val->{perl_version};
    if ( $perl ) {
        $val->{requires} ||= [];
        my $requires = $val->{requires};

        # Canonize to three-dot version after Perl 5.6
        if ( $perl >= 5.006 ) {
            $perl =~ s{^(\d+)\.(\d\d\d)(\d*)}{join('.', $1, int($2||0), int($3||0))}e
        }
        unshift @$requires, [ perl => $perl ];
    }

    # Load the advisory META.yml file
    my @yaml = Parse::CPAN::Meta::LoadFile('META.yml');
    my $meta = $yaml[0];

    # Overwrite the non-configure dependency hashes
    delete $meta->{requires};
    delete $meta->{build_requires};
    delete $meta->{recommends};
    if ( exists $val->{requires} ) {
        $meta->{requires} = { map { @$_ } @{ $val->{requires} } };
    }
    if ( exists $val->{build_requires} ) {
        $meta->{build_requires} = { map { @$_ } @{ $val->{build_requires} } };
    }

    return $meta;
}
1;
| gitpan/WWW-Search-KacurCZ | inc/Module/Install/Metadata.pm | Perl | bsd-2-clause | 18,114 |
package LogReporter::ConfigLoader;
use strict;
use warnings;
use Exporter 'import';
our @EXPORT = qw/LoadConfig/;
# LoadConfig($filename): evaluate a Perl-syntax config file with `do` and
# return the resulting data structure.
#
# `do FILE` returns undef both when the file cannot be read and when it
# merely evaluates to undef, so the read failure is reported explicitly —
# previously a missing/unreadable config file was silently returned as undef.
sub LoadConfig {
    my ($filename) = @_;

    my ($config, $EX);
    {
        local $@;                      # don't clobber the caller's $@
        $config = do $filename;
        $EX = $@;
    }
    die $EX if $EX;                    # compile/runtime error inside the config

    if ( !defined $config && $! ) {
        die "Cannot read config file '$filename': $!\n";
    }

    return $config;
}
1;
| imMute/LogReporter | lib/LogReporter/ConfigLoader.pm | Perl | mit | 308 |
% Selection sort over a list of numbers.
% Fix: replaced singleton variables (which trigger compiler warnings) with
% anonymous variables, and dropped the pointless cut in a final clause.

% minN(+X, +Y, -Min): Min is the smaller of X and Y.
minN(X, Y, X) :- X =< Y, !.
minN(_, Y, Y).

% minL(+List, -Min): Min is the smallest element of a non-empty List.
minL([X], X) :- !.
minL([X|Xs], M) :- minL(Xs, Z), minN(X, Z, M).

% removeE(+Elem, +List, -Rest): Rest is List without the first occurrence of Elem.
removeE(_, [], []) :- !.
removeE(X, [X|Xs], Xs) :- !.
removeE(X, [Y|Xs], [Y|Rest]) :- removeE(X, Xs, Rest).

% ordena(+List, -Sorted): Sorted is List in ascending order (selection sort).
ordena([], []) :- !.
ordena(L, [M|Rest]) :- minL(L, M), removeE(M, L, S), ordena(S, Rest).
| LorhanSohaky/UFSCar | 2019/PLP/L1/20.pl | Perl | mit | 250 |
# File: Registerable.pm
#
# Purpose: Provides functionality to register and execute one or more subroutines.
# SPDX-FileCopyrightText: 2021 Pragmatic Software <pragma78@gmail.com>
# SPDX-License-Identifier: MIT
package PBot::Core::Registerable;
use PBot::Imports;
# Constructor.  A pbot reference is mandatory; remaining arguments are
# forwarded to initialize().
sub new {
    my ($class, %args) = @_;

    # Fail loudly (from the caller's perspective) when pbot is missing.
    Carp::croak("Missing pbot reference to " . __FILE__) unless exists $args{pbot};

    my $self = bless {}, $class;
    $self->{pbot} = delete $args{pbot};
    $self->initialize(%args);
    return $self;
}
# Reset this object's handler list to empty.
sub initialize {
    my ($self) = @_;
    $self->{handlers} = [];    # ordered list of { subref => CODE } records
}
# Invoke every registered handler, in registration order, passing along
# whatever extra arguments the caller supplied.
sub execute_all {
    my ($self, @args) = @_;
    $_->{subref}->(@args) for @{$self->{handlers}};
}
# Invoke a single handler, identified either by the registration record
# returned from register() or by the code reference itself.  Returns the
# handler's result, or undef when no matching handler exists.
sub execute {
    my ($self, $ref, @args) = @_;
    Carp::croak("Missing reference parameter to Registerable::execute") if not defined $ref;

    for my $handler (@{$self->{handlers}}) {
        next unless $ref == $handler or $ref == $handler->{subref};
        return $handler->{subref}->(@args);
    }
    return undef;
}
# Append a handler to the list; returns the registration record, which can
# later be passed to execute() or unregister().
sub register {
    my ($self, $subref) = @_;
    Carp::croak("Must pass subroutine reference to register()") if not defined $subref;

    my $handler = { subref => $subref };
    push @{$self->{handlers}}, $handler;
    return $handler;
}
# Like register(), but the handler is placed at the FRONT of the list so
# it runs before all previously registered handlers.
sub register_front {
    my ($self, $subref) = @_;
    Carp::croak("Must pass subroutine reference to register_front()") if not defined $subref;

    my $handler = { subref => $subref };
    unshift @{$self->{handlers}}, $handler;
    return $handler;
}
# Remove one handler, identified by its registration record.  The handler
# array is rewritten in place so outside holders of the arrayref stay valid.
sub unregister {
    my ($self, $handler) = @_;
    Carp::croak("Must pass reference to unregister()") if not defined $handler;

    my @kept = grep { $_ != $handler } @{$self->{handlers}};
    @{$self->{handlers}} = @kept;
}
# Drop every registered handler at once.
sub unregister_all {
    my ($self) = @_;
    $self->{handlers} = [];
}
1;
| pragma-/pbot | lib/PBot/Core/Registerable.pm | Perl | mit | 1,757 |
#!/usr/bin/perl
# Lab exercise: generate a complete HTML page (headings, lists, a table and
# linked images) using CGI.pm's functional HTML-shortcut interface.
#
# Fixes: end_html was previously a standalone statement, so its return
# string was discarded and the page was emitted WITHOUT </body></html>;
# it now belongs to the print list.  strict/warnings added (which also
# required quoting the CENTER/TOP barewords — same string values as before).
use strict;
use warnings;
use CGI qw(:standard);

print header,
    start_html({
        -title   => "my title",
        -alink   => "red",
        -vlink   => "black",
        -bgcolor => "yellow" }),
    h1('Hello World!'),
    hr({ -size => "5", -width => "70%", -color => "red" }),
    h2('Fall semester'),
    ol(
        li('Perl'),
        li('PHP'),
        li('Python'),
    ),
    hr,
    h1('Course Roadmap'),
    # 4x4 schedule table: header row plus one row per year.
    table({ -border => '1', -width => "100%" },
        caption(strong('Use this table to plan your schedule')),
        Tr({ -align => 'CENTER', -valign => 'TOP' },
            [
                th(['\\', 'Fall', 'Spring', 'Summer']),
                th('2010') . td(['no', 'yes', 'yes']),
                th('2011') . td(['no', 'no', 'yes']),
                th('2012') . td(['yes', 'yes', 'yes'])
            ] )
    ), hr,
    # Clickable image linking to perl.org.
    a({ -href => "http://www.perl.org" },
        img({
            -src    => "../images/olympics.jpg",
            -width  => "300",
            -border => "0",
            -alt    => "Olympics" })
    ), p,
    a({ -href => "http://www.perl.org" }, "Perl.org main web site"), p,
    end_html;
| nertwork/cypresspl | labs/lab4_1.pl | Perl | mit | 829 |
# Time-stamp: "Sat Jul 14 00:27:32 2001 by Automatic Bizooty (__blocks2pm.plx)"
# Transliteration table for code points U+7100..U+71FF (256 entries,
# indexed by the low byte).  Fix: the assignment target had been corrupted
# to "$Text::\SEPA\Unicode\Unidecode::Char[0x71]" — a Perl syntax error;
# Text::Unidecode expects these data files to populate @Text::Unidecode::Char.
$Text::Unidecode::Char[0x71] = [
'Hu ', 'Xi ', 'Shu ', 'He ', 'Xun ', 'Ku ', 'Jue ', 'Xiao ', 'Xi ', 'Yan ', 'Han ', 'Zhuang ', 'Jun ', 'Di ', 'Xie ', 'Ji ',
'Wu ', qq{[?] }, qq{[?] }, 'Han ', 'Yan ', 'Huan ', 'Men ', 'Ju ', 'Chou ', 'Bei ', 'Fen ', 'Lin ', 'Kun ', 'Hun ', 'Tun ', 'Xi ',
'Cui ', 'Wu ', 'Hong ', 'Ju ', 'Fu ', 'Wo ', 'Jiao ', 'Cong ', 'Feng ', 'Ping ', 'Qiong ', 'Ruo ', 'Xi ', 'Qiong ', 'Xin ', 'Zhuo ',
'Yan ', 'Yan ', 'Yi ', 'Jue ', 'Yu ', 'Gang ', 'Ran ', 'Pi ', 'Gu ', qq{[?] }, 'Sheng ', 'Chang ', 'Shao ', qq{[?] }, qq{[?] }, qq{[?] },
qq{[?] }, 'Chen ', 'He ', 'Kui ', 'Zhong ', 'Duan ', 'Xia ', 'Hui ', 'Feng ', 'Lian ', 'Xuan ', 'Xing ', 'Huang ', 'Jiao ', 'Jian ', 'Bi ',
'Ying ', 'Zhu ', 'Wei ', 'Tuan ', 'Tian ', 'Xi ', 'Nuan ', 'Nuan ', 'Chan ', 'Yan ', 'Jiong ', 'Jiong ', 'Yu ', 'Mei ', 'Sha ', 'Wei ',
'Ye ', 'Xin ', 'Qiong ', 'Rou ', 'Mei ', 'Huan ', 'Xu ', 'Zhao ', 'Wei ', 'Fan ', 'Qiu ', 'Sui ', 'Yang ', 'Lie ', 'Zhu ', 'Jie ',
'Gao ', 'Gua ', 'Bao ', 'Hu ', 'Yun ', 'Xia ', qq{[?] }, qq{[?] }, 'Bian ', 'Gou ', 'Tui ', 'Tang ', 'Chao ', 'Shan ', 'N ', 'Bo ',
'Huang ', 'Xie ', 'Xi ', 'Wu ', 'Xi ', 'Yun ', 'He ', 'He ', 'Xi ', 'Yun ', 'Xiong ', 'Nai ', 'Shan ', 'Qiong ', 'Yao ', 'Xun ',
'Mi ', 'Lian ', 'Ying ', 'Wen ', 'Rong ', 'Oozutsu ', qq{[?] }, 'Qiang ', 'Liu ', 'Xi ', 'Bi ', 'Biao ', 'Zong ', 'Lu ', 'Jian ', 'Shou ',
'Yi ', 'Lou ', 'Feng ', 'Sui ', 'Yi ', 'Tong ', 'Jue ', 'Zong ', 'Yun ', 'Hu ', 'Yi ', 'Zhi ', 'Ao ', 'Wei ', 'Liao ', 'Han ',
'Ou ', 'Re ', 'Jiong ', 'Man ', qq{[?] }, 'Shang ', 'Cuan ', 'Zeng ', 'Jian ', 'Xi ', 'Xi ', 'Xi ', 'Yi ', 'Xiao ', 'Chi ', 'Huang ',
'Chan ', 'Ye ', 'Qian ', 'Ran ', 'Yan ', 'Xian ', 'Qiao ', 'Zun ', 'Deng ', 'Dun ', 'Shen ', 'Jiao ', 'Fen ', 'Si ', 'Liao ', 'Yu ',
'Lin ', 'Tong ', 'Shao ', 'Fen ', 'Fan ', 'Yan ', 'Xun ', 'Lan ', 'Mei ', 'Tang ', 'Yi ', 'Jing ', 'Men ', qq{[?] }, qq{[?] }, 'Ying ',
'Yu ', 'Yi ', 'Xue ', 'Lan ', 'Tai ', 'Zao ', 'Can ', 'Sui ', 'Xi ', 'Que ', 'Cong ', 'Lian ', 'Hui ', 'Zhu ', 'Xie ', 'Ling ',
'Wei ', 'Yi ', 'Xie ', 'Zhao ', 'Hui ', 'Tatsu ', 'Nung ', 'Lan ', 'Ru ', 'Xian ', 'Kao ', 'Xun ', 'Jin ', 'Chou ', 'Chou ', 'Yao ',
];
1;
| dmitrirussu/php-sepa-xml-generator | src/Unicode/data/perl_source/x71.pm | Perl | mit | 2,258 |
%% cascade "Minimum/script/server.pl"
%% # Text::Xslate (Kolon) flavor template for the generated admin-side PSGI
%% # launcher: cascades the minimal server script, overriding its module
%% # list and app builder.  Non-%% lines below are literal generated output.
%% override load_modules -> {
use <% $module %>::Admin;
use Plack::App::File;
use Plack::Session::Store::DBI;
use DBI;
%% }
%% # The admin app wraps <% ... %>::Admin with basic auth, static-file
%% # serving, reverse-proxy fixups and DBI-backed sessions.
%% override app -> {
use 5.010_001;
my $basedir = File::Spec->rel2abs(dirname(__FILE__));
my $app = builder {
    enable 'Plack::Middleware::Auth::Basic',
        authenticator => sub { $_[0] eq 'admin' && $_[1] eq 'admin' };
    enable 'Plack::Middleware::Static',
        path => qr{^(?:/robots\.txt|/favicon\.ico)$},
        root => File::Spec->catdir(dirname(__FILE__), '..', 'static', 'admin');
    enable 'Plack::Middleware::ReverseProxy';
    enable 'Plack::Middleware::Session',
        store => Plack::Session::Store::DBI->new(
            get_dbh => sub {
                state $db_config = <% $module %>->config->{DBI} || die "Missing configuration for DBI";
                DBI->connect( @$db_config )
                    or die $DBI::errstr;
            }
        );
    mount '/static/' => Plack::App::File->new(root => File::Spec->catdir($basedir, '..', 'static', 'admin'))->to_app();
    mount '/' => <% $module %>::Admin->to_app();
};
%% }
| rosiro/wasarabi | local/lib/perl5/auto/share/dist/Amon2/flavor/Large/script/admin.pl | Perl | mit | 1,123 |
#!/usr/bin/perl
# Smoke test for env.pm: print every configuration variable it exposes.
use warnings;
use strict;

use File::Basename;
use Cwd qw(abs_path);
use lib dirname(abs_path(__FILE__));
use env;

# One line per variable; hash iteration order is unspecified, as before.
printf "found %s: %s\n", $_, $gidgetConfigVars{$_} for keys %gidgetConfigVars;
| cancerregulome/gidget | gidget/util/test_env.pl | Perl | mit | 229 |
use SyTest::ApplicationService;

push our @EXPORT, qw( AS_USER APPSERV );

# One User fixture per configured application service: a client logged in
# over the first API client using the AS-to-homeserver token from AS_INFO.
our @AS_USER = map {
   my $AS_INFO = $_;

   fixture(
      requires => [ $main::API_CLIENTS[0], $AS_INFO ],

      setup => sub {
         my ( $http, $as_user_info ) = @_;

         Future->done( new_User(
            http => $http,
            user_id => $as_user_info->user_id,
            access_token => $as_user_info->as2hs_token,
         ));
      },
   );
} @main::AS_INFO;

# One ApplicationService helper fixture per configured AS, wired to the
# shared await_http_request helper so tests can intercept AS traffic.
our @APPSERV = map {
   my $AS_INFO = $_;

   fixture(
      requires => [ $AS_INFO ],

      setup => sub {
         my ( $info ) = @_;

         Future->done( SyTest::ApplicationService->new(
            $info, \&main::await_http_request
         ) );
      }
   );
} @main::AS_INFO;
| matrix-org/sytest | tests/60app-services/00prepare.pl | Perl | apache-2.0 | 760 |
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::riverbed::steelhead::snmp::mode::connections;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Mode version reported by the plugin framework.
    $self->{version} = '0.1';
    # Command-line options: warning/critical thresholds on total connections.
    $options{options}->add_options(arguments =>
                                {
                                  "warning:s"               => { name => 'warning' },
                                  "critical:s"              => { name => 'critical' },
                                });

    return $self;
}
# Validate the warning/critical threshold strings after base-class init.
# The two previously duplicated validation stanzas are folded into one
# loop; error messages are byte-identical to before.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    foreach my $label ('warning', 'critical') {
        if (($self->{perfdata}->threshold_validate(label => $label, value => $self->{option_results}->{$label})) == 0) {
            $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $self->{option_results}->{$label} . "'.");
            $self->{output}->option_exit();
        }
    }
}
# Fetch the seven STEELHEAD-MIB connection counters over SNMP, apply the
# warning/critical thresholds to the total, and emit output + perfdata.
sub run {
    my ($self, %options) = @_;
    # $options{snmp} = snmp object
    $self->{snmp} = $options{snmp};

    # Scalar instances (.0) of the connection gauges in STEELHEAD-MIB.
    my $oid_optimizedConnections = '.1.3.6.1.4.1.17163.1.1.5.2.1.0';
    my $oid_passthroughConnections = '.1.3.6.1.4.1.17163.1.1.5.2.2.0';
    my $oid_halfOpenedConnections = '.1.3.6.1.4.1.17163.1.1.5.2.3.0';
    my $oid_halfClosedConnections = '.1.3.6.1.4.1.17163.1.1.5.2.4.0';
    my $oid_establishedConnections = '.1.3.6.1.4.1.17163.1.1.5.2.5.0';
    my $oid_activeConnections = '.1.3.6.1.4.1.17163.1.1.5.2.6.0';
    my $oid_totalConnections = '.1.3.6.1.4.1.17163.1.1.5.2.7.0';

    # Single get for all counters; nothing_quit aborts if no value comes back.
    my $result = $self->{snmp}->get_leef(oids => [$oid_optimizedConnections, $oid_passthroughConnections, $oid_halfOpenedConnections, $oid_halfClosedConnections,
                                                  $oid_establishedConnections, $oid_activeConnections, $oid_totalConnections, ], nothing_quit => 1);

    my $optimized = $result->{$oid_optimizedConnections};
    my $passthrough = $result->{$oid_passthroughConnections};
    my $halfOpened = $result->{$oid_halfOpenedConnections};
    my $halfClosed = $result->{$oid_halfClosedConnections};
    my $established = $result->{$oid_establishedConnections};
    my $active = $result->{$oid_activeConnections};
    my $total = $result->{$oid_totalConnections};

    # Only the total is thresholded; the rest are informational.
    my $exit = $self->{perfdata}->threshold_check(value => $total, threshold => [ { label => 'critical', exit_litteral => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);

    $self->{output}->output_add(severity => $exit,
                                short_msg => sprintf("Connections: total %d, established %d, active %d, optimized %d, passthrough %d, half opened %d, half closed %d ",
                                                     $total, $established, $active, $optimized, $passthrough, $halfOpened, $halfClosed));

    # Perfdata: thresholds attached to "total" only.
    $self->{output}->perfdata_add(label => "total",
                                  value => $total,
                                  warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
                                  critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
                                  min => 0
                                  );
    $self->{output}->perfdata_add(label => "established",
                                  value => $established,
                                  min => 0
                                  );
    $self->{output}->perfdata_add(label => "active",
                                  value => $active,
                                  min => 0
                                  );
    $self->{output}->perfdata_add(label => "optimized",
                                  value => $optimized,
                                  min => 0
                                  );
    $self->{output}->perfdata_add(label => "passthrough",
                                  value => $passthrough,
                                  min => 0
                                  );
    $self->{output}->perfdata_add(label => "half opened",
                                  value => $halfOpened,
                                  min => 0
                                  );
    $self->{output}->perfdata_add(label => "half closed",
                                  value => $halfClosed,
                                  min => 0
                                  );

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Current connections: total, established, active, optimized, passthrough, half opened and half closed ones (STEELHEAD-MIB).
=over 8
=item B<--warning>
Threshold warning for total connections.
=item B<--critical>
Threshold critical for total connections.
=back
=cut
| golgoth31/centreon-plugins | network/riverbed/steelhead/snmp/mode/connections.pm | Perl | apache-2.0 | 5,831 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::proxmox::ve::restapi::mode::liststorages;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # This listing mode takes no extra command-line options.
    $options{options}->add_options(arguments =>
                                {
                                });

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    # Nothing mode-specific to validate; defer to the base class.
    $self->SUPER::init(%options);
}
sub manage_selection {
    my ($self, %options) = @_;

    # $options{custom} is the Proxmox REST API client; result is keyed by
    # storage id with Name/Node/State fields per entry.
    $self->{storages} = $options{custom}->api_list_storages();
}
# Human-readable listing mode: one long_msg line per storage (ordered by
# id for stable output), then a single OK summary line.
sub run {
    my ($self, %options) = @_;

    $self->manage_selection(%options);

    for my $id (sort keys %{$self->{storages}}) {
        my $entry = $self->{storages}->{$id};
        $self->{output}->output_add(
            long_msg => sprintf(
                "[id = %s][name = '%s'][node = '%s'][state = '%s']",
                $id, $entry->{Name}, $entry->{Node}, $entry->{State}
            )
        );
    }

    $self->{output}->output_add(severity => 'OK', short_msg => 'List Storages:');
    $self->{output}->display(nolabel => 1, force_ignore_perfdata => 1, force_long_output => 1);
    $self->{output}->exit();
}
sub disco_format {
    my ($self, %options) = @_;

    # Columns declared for Centreon host-discovery output.
    # NOTE(review): disco_show below emits the key 'state' while 'status'
    # is declared here — one of the two looks wrong; confirm which the
    # discovery consumer expects.
    $self->{output}->add_disco_format(elements => ['id', 'name', 'node', 'status']);
}
# Discovery mode: one disco entry per storage, in id order.
sub disco_show {
    my ($self, %options) = @_;

    $self->manage_selection(%options);

    for my $id (sort keys %{$self->{storages}}) {
        my $entry = $self->{storages}->{$id};
        $self->{output}->add_disco_entry(
            name  => $entry->{Name},
            node  => $entry->{Node},
            state => $entry->{State},
            id    => $id,
        );
    }
}
1;
__END__
=head1 MODE
List storages
=over 8
=back
=cut
| Tpo76/centreon-plugins | apps/proxmox/ve/restapi/mode/liststorages.pm | Perl | apache-2.0 | 2,710 |
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <dev@ensembl.org>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
=head1 NAME
Bio::EnsEMBL::BaseAlignFeature - Baseclass providing a common abstract
implementation for alignment features
=head1 SYNOPSIS
my $feat = new Bio::EnsEMBL::DnaPepAlignFeature(
-slice => $slice,
-start => 100,
-end => 120,
-strand => 1,
-hseqname => 'SP:RF1231',
-hstart => 200,
-hend => 220,
-analysis => $analysis,
-cigar_string => '10M3D5M2I'
);
Alternatively if you have an array of ungapped features
my $feat =
new Bio::EnsEMBL::DnaPepAlignFeature( -features => \@features );
Where @features is an array of Bio::EnsEMBL::FeaturePair
There is a method to manipulate the cigar_string into ungapped features
my @ungapped_features = $feat->ungapped_features();
This converts the cigar string into an array of Bio::EnsEMBL::FeaturePair
$analysis is a Bio::EnsEMBL::Analysis object
Bio::EnsEMBL::Feature methods can be used
Bio::EnsEMBL::FeaturePair methods can be used
The cigar_string contains the ungapped pieces that make up the gapped
alignment
It looks like: n Matches [ x Deletes or Inserts m Matches ]*
but a bit more condensed like "23M4I12M2D1M"
and even more condensed, as you can omit 1s: "23M4I12M2DM"
To make things clearer this is how a blast HSP would be parsed
>AK014066
Length = 146
Minus Strand HSPs:
Score = 76 (26.8 bits), Expect = 1.4, P = 0.74
Identities = 20/71 (28%), Positives = 29/71 (40%), Frame = -1
Query: 479 GLQAPPPTPQGCRLIPPPPLGLQAPLPTLRAVGSSHHHP*GRQGSSLSSFRSSLASKASA 300
G APPP PQG R P P G + P L + + ++ R +A +
Sbjct: 7 GALAPPPAPQG-RWAFPRPTG-KRPATPLHGTARQDRQVRRSEAAKVTGCRGRVAPHVAP 64
Query: 299 SSPHNPSPLPS 267
H P+P P+
Sbjct: 65 PLTHTPTPTPT 75
The alignment goes from 267 to 479 in sequence 1 and 7 to 75 in sequence 2
and the strand is -1.
The alignment is made up of the following ungapped pieces :
sequence 1 start 447 , sequence 2 start 7 , match length 33 , strand -1
sequence 1 start 417 , sequence 2 start 18 , match length 27 , strand -1
sequence 1 start 267 , sequence 2 start 27 , match length 137 , strand -1
These ungapped pieces are made up into the following string (called a cigar
string) "33M3I27M3I137M" with start 267 end 479 strand -1 hstart 7 hend 75
hstrand 1 and feature type would be DnaPepAlignFeature
=cut
package Bio::EnsEMBL::BaseAlignFeature;
use strict;

use Bio::EnsEMBL::FeaturePair;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
# deprecate() is required by the legacy-call branch of transform().
use Bio::EnsEMBL::Utils::Exception qw(throw warning deprecate);

use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::FeaturePair);
=head2 new
Arg [..] : List of named arguments. (-cigar_string , -features) defined
in this constructor, others defined in FeaturePair and
SeqFeature superclasses. Either cigar_string or a list
of ungapped features should be provided - not both.
Example : $baf = new BaseAlignFeatureSubclass(-cigar_string => '3M3I12M');
Description: Creates a new BaseAlignFeature using either a cigarstring or
a list of ungapped features. BaseAlignFeature is an abstract
baseclass and should not actually be instantiated - rather its
subclasses should be.
Returntype : Bio::EnsEMBL::BaseAlignFeature
Exceptions : thrown if both feature and cigar string args are provided
thrown if neither feature nor cigar string args are provided
Caller : general
Status : Stable
=cut
sub new {
    my $caller = shift;
    my $class  = ref($caller) || $caller;

    # Let the FeaturePair/Feature superclasses consume their own arguments.
    my $self = $class->SUPER::new(@_);

    my ( $cigar, $feats ) = rearrange( [qw(CIGAR_STRING FEATURES)], @_ );

    # Exactly one of -cigar_string / -features must be supplied.
    if ( defined $cigar && defined $feats ) {
        throw("CIGAR_STRING or FEATURES argument is required - not both.");
    }
    elsif ( defined $feats ) {
        $self->_parse_features($feats);
    }
    elsif ( defined $cigar ) {
        $self->{'cigar_string'} = $cigar;
    }
    else {
        throw("CIGAR_STRING or FEATURES argument is required");
    }

    return $self;
}
=head2 new_fast
Arg [1] : hashref $hashref
A hashref which will be blessed into a PepDnaAlignFeature.
Example : none
Description: This allows for very fast object creation when a large number
of PepDnaAlignFeatures needs to be created. This is a bit of
a hack but necessary when thousands of features need to be
generated within a couple of seconds for web display. It is
not recommended that this method be called unless you know what
you are doing. It requires knowledge of the internals of this
class and its superclasses.
Returntype : Bio::EnsEMBL::BaseAlignFeature
Exceptions : none
Caller : none currently
Status : Stable
=cut
sub new_fast {
    my ( $class, $hashref ) = @_;

    # No validation and no copying: the caller guarantees the hash layout.
    return bless( $hashref, $class );
}
=head2 cigar_string
Arg [1] : string $cigar_string
Example : ( "12MI3M" )
Description: get/set for attribute cigar_string
cigar_string describes the alignment. "xM" stands for
x matches (mismatches), "xI" for inserts into query sequence
(that's the Ensembl sequence), "xD" for deletions
(inserts in the subject). An "x" that is 1 can be omitted.
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub cigar_string {
    my ( $self, @args ) = @_;

    # Setter behaviour whenever a value (even undef) is supplied.
    if (@args) {
        $self->{'cigar_string'} = $args[0];
    }

    return $self->{'cigar_string'};
}
=head2 alignment_length
Arg [1] : None
Example :
Description: return the alignment length (including indels) based on the cigar_string
Returntype : int
Exceptions :
Caller :
Status : Stable
=cut
sub alignment_length {
    my $self = shift;

    # Lazily computed and cached.
    # NOTE(review): the cache is not invalidated if cigar_string() is
    # changed afterwards - do not rely on this method after mutating the
    # cigar line.
    if ( !defined $self->{'_alignment_length'} && defined $self->cigar_string ) {

        my @pieces = ( $self->cigar_string =~ /(\d*[MDI])/g );
        if ( !@pieces ) {
            # Report parse problems through the Ensembl warning mechanism
            # (imported at the top of this module) instead of a bare
            # print to STDERR, consistent with the rest of the module.
            warning("Error parsing cigar_string\n");
        }

        my $alignment_length = 0;
        foreach my $piece (@pieces) {
            # An omitted count means a run length of 1 (e.g. "M" == "1M").
            # /^(\d*)/ always matches, so $length is always defined.
            my ($length) = ( $piece =~ /^(\d*)/ );
            $length = 1 if $length eq '';
            $alignment_length += $length;
        }
        $self->{'_alignment_length'} = $alignment_length;
    }

    return $self->{'_alignment_length'};
}
=head2 ungapped_features
Args : none
Example : @ungapped_features = $align_feature->get_feature
Description: converts the internal cigar_string into an array of
ungapped feature pairs
Returntype : list of Bio::EnsEMBL::FeaturePair
Exceptions : cigar_string not set internally
Caller : general
Status : Stable
=cut
sub ungapped_features {
    my ($self) = @_;

    # A cigar line is mandatory; without it there is nothing to expand.
    unless ( defined $self->{'cigar_string'} ) {
        throw("No cigar_string defined. Can't return ungapped features");
    }

    return @{ $self->_parse_cigar() };
}
=head2 strands_reversed
Arg [1] : int $strands_reversed
Example : none
Description: get/set for attribute strands_reversed
0 means that strand and hstrand are the original strands obtained
from the alignment program used
1 means that strand and hstrand have been flipped as compared to
the original result provided by the alignment program used.
You may want to use the reverse_complement method to restore the
original strandness.
Returntype : int
Exceptions : none
Caller : general
Status : Stable
=cut
sub strands_reversed {
    my ( $self, $flag ) = @_;

    $self->{'strands_reversed'} = $flag if defined $flag;

    # Default to "not reversed" the first time we are asked.
    $self->{'strands_reversed'} = 0
      unless defined $self->{'strands_reversed'};

    return $self->{'strands_reversed'};
}
=head2 reverse_complement
Args : none
Example : none
Description: reverse complement the FeaturePair,
modifing strand, hstrand and cigar_string in consequence
Returntype : none
Exceptions : none
Caller : general
Status : Stable
=cut
sub reverse_complement {
    my ($self) = @_;

    # Flip the orientation of both the query and the hit.
    $self->strand( $self->strand * -1 );
    $self->hstrand( $self->hstrand * -1 );

    # The cigar line must be reversed piece-wise to stay in sync:
    # split after every operation letter, then glue back in reverse order.
    my @pieces   = split /(?<=[DIM])/, $self->cigar_string;
    my $reversed = join '', reverse @pieces;

    # Toggle the bookkeeping flag recording that strands were flipped.
    $self->{'strands_reversed'} = 0
      unless defined $self->{'strands_reversed'};
    if ( $self->strands_reversed ) {
        $self->strands_reversed(0);
    }
    else {
        $self->strands_reversed(1);
    }

    $self->cigar_string($reversed);
}
=head2 transform
Arg 1 : String $coordinate_system_name
Arg [2] : String $coordinate_system_version
Example : $feature = $feature->transform('contig');
$feature = $feature->transform('chromosome', 'NCBI33');
Description: Moves this AlignFeature to the given coordinate system.
If the feature cannot be transformed to the destination
coordinate system undef is returned instead.
Returntype : Bio::EnsEMBL::BaseAlignFeature;
Exceptions : wrong parameters
Caller : general
Status : Medium Risk
: deprecation needs to be removed at some time
=cut
sub transform {
    my $self = shift;

    # Backwards compatibility: very old code called transform() with a
    # hashref mapping; redirect those callers to transfer()/transform().
    # NOTE(review): deprecate() is not in this module's import list from
    # Bio::EnsEMBL::Utils::Exception - confirm it is imported, otherwise
    # this legacy branch dies with "Undefined subroutine".
    if( ref $_[0] eq 'HASH') {
        deprecate("Calling transform with a hashref is deprecate.\n" .
                  'Use $feat->transfer($slice) or ' .
                  '$feat->transform("coordsysname") instead.');
        my (undef, $new_feat) = each(%{$_[0]});
        return $self->transfer($new_feat->slice);
    }

    # First try a plain (gap-unaware) transform via the superclass.
    my $new_feature = $self->SUPER::transform(@_);

    # If that failed, or the projected length changed (the feature spans
    # a boundary/gap in the target coordinate system), fall back to
    # transforming each ungapped block individually and reassembling.
    if ( !defined($new_feature)
        || $new_feature->length() != $self->length() )
    {
        # NOTE(review): @segments is only used as an emptiness check; the
        # projection result itself is discarded.
        my @segments = @{ $self->project(@_) };

        if ( !@segments ) {
            return undef;
        }

        my @ungapped;
        foreach my $f ( $self->ungapped_features() ) {
            $f = $f->transform(@_);
            if ( defined($f) ) {
                push( @ungapped, $f );
            } else {
                # One block could not be mapped: the whole alignment fails.
                warning( "Failed to transform alignment feature; "
                    . "ungapped component could not be transformed" );
                return undef;
            }
        }

        # Rebuild a gapped feature of the same subclass from the pieces;
        # _parse_features() inside new() may throw on inconsistent input.
        eval { $new_feature = $self->new( -features => \@ungapped ); };

        if ($@) {
            warning($@);
            return undef;
        }
    } ## end if ( !defined($new_feature...))

    return $new_feature;
}
=head2 _parse_cigar
Args : none
Example : none
Description: PRIVATE (internal) method - creates ungapped features from
internally stored cigar line
Returntype : list of Bio::EnsEMBL::FeaturePair
Exceptions : none
Caller : ungapped_features
Status : Stable
=cut
sub _parse_cigar {
    my ( $self ) = @_;

    # Units per aligned position for query and hit (1 = nucleotide,
    # 3 = peptide); supplied by the concrete subclass.
    my $query_unit = $self->_query_unit();
    my $hit_unit   = $self->_hit_unit();

    my $string = $self->{'cigar_string'};

    throw("No cigar string defined in object") if(!defined($string));

    # Split the cigar line into its "<count><op>" pieces, e.g. "33M", "3I".
    my @pieces = ( $string =~ /(\d*[MDI])/g );
    #print "cigar: ",join ( ",", @pieces ),"\n";

    my @features;
    my $strand1 = $self->{'strand'} || 1;
    my $strand2 = $self->{'hstrand'}|| 1;

    # Walk the alignment from its leading edge: start at 'start' on the
    # forward strand, at 'end' on the reverse strand (same for the hit).
    my ( $start1, $start2 );
    if( $strand1 == 1 ) {
        $start1 = $self->{'start'};
    } else {
        $start1 = $self->{'end'};
    }
    if( $strand2 == 1 ) {
        $start2 = $self->{'hstart'};
    } else {
        $start2 = $self->{'hend'};
    }

    #
    # Construct ungapped blocks as FeaturePairs objects for each MATCH
    #
    foreach my $piece (@pieces) {

        # An omitted count means 1 (e.g. "M" == "1M").
        my ($length) = ( $piece =~ /^(\d*)/ );
        if( $length eq "" ) { $length = 1 }

        # Length of this piece expressed in hit coordinate units.
        my $mapped_length;

        # explicit if statements to avoid rounding problems
        # and make sure we have sane coordinate systems
        if( $query_unit == 1 && $hit_unit == 3 ) {
            $mapped_length = $length*3;
        } elsif( $query_unit == 3 && $hit_unit == 1 ) {
            $mapped_length = $length / 3;
        } elsif ( $query_unit == 1 && $hit_unit == 1 ) {
            $mapped_length = $length;
        } else {
            throw("Internal error $query_unit $hit_unit, currently only " .
                  "allowing 1 or 3 ");
        }

        # Match and deletion lengths must map to whole hit units.
        if( int($mapped_length) != $mapped_length and
            ($piece =~ /M$/ or $piece =~ /D$/)) {
            throw("Internal error with mismapped length of hit, query " .
                  "$query_unit, hit $hit_unit, length $length");
        }

        if( $piece =~ /M$/ ) {
            #
            # MATCH
            #
            my ( $qstart, $qend);
            if( $strand1 == 1 ) {
                $qstart = $start1;
                $qend = $start1 + $length - 1;
                $start1 = $qend + 1;
            } else {
                $qend = $start1;
                $qstart = $start1 - $length + 1;
                $start1 = $qstart - 1;
            }

            my ($hstart, $hend);
            if( $strand2 == 1 ) {
                $hstart = $start2;
                $hend = $start2 + $mapped_length - 1;
                $start2 = $hend + 1;
            } else {
                $hend = $start2;
                $hstart = $start2 - $mapped_length + 1;
                $start2 = $hstart - 1;
            }

            # Each match block becomes one ungapped FeaturePair carrying a
            # copy of this feature's per-alignment attributes.
            push @features, Bio::EnsEMBL::FeaturePair->new
                (-SLICE          => $self->{'slice'},
                 -SEQNAME        => $self->{'seqname'},
                 -START          => $qstart,
                 -END            => $qend,
                 -STRAND         => $strand1,
                 -HSLICE         => $self->{'hslice'},
                 -HSEQNAME       => $self->{'hseqname'},
                 -HSTART         => $hstart,
                 -HEND           => $hend,
                 -HSTRAND        => $strand2,
                 -SCORE          => $self->{'score'},
                 -PERCENT_ID     => $self->{'percent_id'},
                 -ANALYSIS       => $self->{'analysis'},
                 -P_VALUE        => $self->{'p_value'},
                 -EXTERNAL_DB_ID => $self->{'external_db_id'},
                 -HCOVERAGE      => $self->{'hcoverage'},
                 -GROUP_ID       => $self->{'group_id'},
                 -LEVEL_ID       => $self->{'level_id'});
            # end M cigar bits
        } elsif( $piece =~ /I$/ ) {
            #
            # INSERT
            #
            # Gap in the hit: advance only the query cursor.
            if( $strand1 == 1 ) {
                $start1 += $length;
            } else {
                $start1 -= $length;
            }
        } elsif( $piece =~ /D$/ ) {
            #
            # DELETION
            #
            # Gap in the query: advance only the hit cursor.
            if( $strand2 == 1 ) {
                $start2 += $mapped_length;
            } else {
                $start2 -= $mapped_length;
            }
        } else {
            throw( "Illegal cigar line $string!" );
        }
    }

    return \@features;
}
=head2 _parse_features
Arg 1 : listref Bio::EnsEMBL::FeaturePair $ungapped_features
Example : none
Description: creates internal cigarstring and start,end hstart,hend
entries.
Returntype : none, fills in values of self
Exceptions : argument list undergoes many sanity checks - throws under many
invalid conditions
Caller : new
Status : Stable
=cut
# File-scoped flag so the insertion+deletion sanity warning below is only
# emitted once per process rather than once per offending feature.
my $message_only_once = 1;

sub _parse_features {
    my ($self,$features ) = @_;

    # Units per aligned position (1 = nucleotide, 3 = peptide), supplied
    # by the concrete subclass.
    my $query_unit = $self->_query_unit();
    my $hit_unit   = $self->_hit_unit();

    if (ref($features) ne "ARRAY") {
        throw("features must be an array reference not a [".ref($features)."]");
    }

    my $strand = $features->[0]->strand;
    throw ('FeaturePair needs to have strand == 1 or strand == -1') if(!$strand);

    my @f;

    #
    # Sort the features on their start position
    # Ascending order on positive strand, descending on negative strand
    #
    if( $strand == 1 ) {
        @f = sort {$a->start <=> $b->start} @$features;
    } else {
        @f = sort { $b->start <=> $a->start} @$features;
    }

    # All per-alignment attributes are taken from the first feature; the
    # loop below verifies the rest of the array is consistent with them.
    my $hstrand     = $f[0]->hstrand;
    my $slice       = $f[0]->slice();
    my $hslice      = $f[0]->hslice();
    # NOTE(review): "my ... if $cond" has undefined behaviour in Perl
    # (the lexical may retain a stale value); should be an unconditional
    # declaration followed by a conditional assignment.
    my $name        = $slice->name() if($slice);
    my $hname       = $f[0]->hseqname;
    my $score       = $f[0]->score;
    my $percent     = $f[0]->percent_id;
    my $analysis    = $f[0]->analysis;
    my $pvalue      = $f[0]->p_value();
    my $external_db_id = $f[0]->external_db_id;
    my $hcoverage   = $f[0]->hcoverage;
    my $group_id    = $f[0]->group_id;
    my $level_id    = $f[0]->level_id;
    my $seqname     = $f[0]->seqname;

    # implicit strand 1 for peptide sequences
    $strand  ||= 1;
    $hstrand ||= 1;
    # NOTE(review): $ori appears to be unused below.
    my $ori = $strand * $hstrand;

    throw("No features in the array to parse") if(scalar(@f) == 0);

    my $prev1; # where last feature q part ended
    my $prev2; # where last feature s part ended

    my $string;

    # Use strandedness info of query and hit to make sure both sets of
    # start and end coordinates are oriented the right way around.
    my $f1start;
    my $f1end;
    my $f2end;
    my $f2start;
    if ($strand == 1) {
        $f1start = $f[0]->start;
        $f1end = $f[-1]->end;
    } else {
        $f1start = $f[-1]->start;
        $f1end = $f[0]->end;
    }
    if ($hstrand == 1) {
        $f2start = $f[0]->hstart;
        $f2end = $f[-1]->hend;
    } else {
        $f2start = $f[-1]->hstart;
        $f2end = $f[0]->hend;
    }

    #
    # Loop through each portion of alignment and construct cigar string
    #
    foreach my $f (@f) {
        #
        # Sanity checks
        #
        if (!$f->isa("Bio::EnsEMBL::FeaturePair")) {
            throw("Array element [$f] is not a Bio::EnsEMBL::FeaturePair");
        }
        if( defined($f->hstrand()) && $f->hstrand() != $hstrand ) {
            throw("Inconsistent hstrands in feature array");
        }
        if( defined($f->strand()) && ($f->strand != $strand)) {
            throw("Inconsistent strands in feature array");
        }
        if ( defined($name) && $name ne $f->slice->name()) {
            throw("Inconsistent names in feature array [$name - ".
                  $f->slice->name()."]");
        }
        if ( defined($hname) && $hname ne $f->hseqname) {
            throw("Inconsistent hit names in feature array [$hname - ".
                  $f->hseqname . "]");
        }
        # NOTE(review): the "Inconsisent"/"Inconsistant"/"arraw" typos in
        # the thrown messages below are kept unchanged, since downstream
        # code may pattern-match on them.
        if ( defined($score) && $score ne $f->score) {
            throw("Inconsisent scores in feature array [$score - " .
                  $f->score . "]");
        }
        if (defined($f->percent_id) && $percent ne $f->percent_id) {
            throw("Inconsistent pids in feature array [$percent - " .
                  $f->percent_id . "]");
        }
        if(defined($pvalue) && $pvalue != $f->p_value()) {
            throw("Inconsistant p_values in feature arraw [$pvalue " .
                  $f->p_value() . "]");
        }
        if($seqname && $seqname ne $f->seqname){
            throw("Inconsistent seqname in feature array [$seqname - ".
                  $f->seqname . "]");
        }

        my $start1 = $f->start;    #source sequence alignment start
        my $start2 = $f->hstart(); #hit sequence alignment start

        #
        # More sanity checking
        #
        if (defined($prev1)) {
            if ( $strand == 1 ) {
                if ($f->start < $prev1) {
                    throw("Inconsistent coords in feature array (forward strand).\n" .
                          "Start [".$f->start()."] in current feature should be greater " .
                          "than previous feature end [$prev1].");
                }
            } else {
                if ($f->end > $prev1) {
                    throw("Inconsistent coords in feature array (reverse strand).\n" .
                          "End [".$f->end() ."] should be less than previous feature " .
                          "start [$prev1].");
                }
            }
        }

        my $length = ($f->end - $f->start + 1);    #length of source seq alignment
        my $hlength = ($f->hend - $f->hstart + 1); #length of hit seq alignment

        # using multiplication to avoid rounding errors, hence the
        # switch from query to hit for the ratios

        #
        # Yet more sanity checking
        #
        if($query_unit > $hit_unit){
            # I am going to make the assumption here that this situation will
            # only occur with DnaPepAlignFeatures, this may not be true
            my $query_p_length = sprintf "%.0f", ($length/$query_unit);
            my $hit_p_length = sprintf "%.0f", ($hlength * $hit_unit);
            if( $query_p_length != $hit_p_length) {
                throw( "Feature lengths not comparable Lengths:" .$length .
                       " " . $hlength . " Ratios:" . $query_unit . " " .
                       $hit_unit );
            }
        } else{
            # NOTE(review): $query_d_length/$hit_d_length are computed but
            # not used by the comparison below.
            my $query_d_length = sprintf "%.0f", ($length*$hit_unit);
            my $hit_d_length = sprintf "%.0f", ($hlength * $query_unit);
            if( $length * $hit_unit != $hlength * $query_unit ) {
                throw( "Feature lengths not comparable Lengths:" . $length .
                       " " . $hlength . " Ratios:" . $query_unit . " " .
                       $hit_unit );
            }
        }

        # Scale factor used to express query-strand gap sizes in hit units.
        my $hlengthfactor = ($query_unit/$hit_unit);

        #
        # Check to see if there is an I type (insertion) gap:
        # If there is a space between the end of the last source sequence
        # alignment and the start of this one, then this is an insertion
        #
        my $insertion_flag = 0;
        if( $strand == 1 ) {
            if( ( defined $prev1 ) && ( $f->start > $prev1 + 1 )) {
                #there is an insertion
                $insertion_flag = 1;
                my $gap = $f->start - $prev1 - 1;
                if( $gap == 1 ) {
                    $gap = ""; # no need for a number if gap length is 1
                }
                $string .= "$gap"."I";
            }
            #shift our position in the source seq alignment
            $prev1 = $f->end();
        } else {
            if(( defined $prev1 ) && ($f->end + 1 < $prev1 )) {
                #there is an insertion
                $insertion_flag = 1;
                my $gap = $prev1 - $f->end() - 1;
                if( $gap == 1 ) {
                    $gap = ""; # no need for a number if gap length is 1
                }
                $string .= "$gap"."I";
            }
            #shift our position in the source seq alignment
            $prev1 = $f->start();
        }

        #
        # Check to see if there is a D type (deletion) gap
        # There is a deletion gap if there is a space between the end of the
        # last portion of the hit sequence alignment and this one
        #
        if( $hstrand == 1 ) {
            if(( defined $prev2 ) && ( $f->hstart() > $prev2 + 1 )) {
                #there is a deletion
                my $gap = $f->hstart - $prev2 - 1;
                my $gap2 = int( $gap * $hlengthfactor + 0.5 );
                if( $gap2 == 1 ) {
                    $gap2 = ""; # no need for a number if gap length is 1
                }
                $string .= "$gap2"."D";
                #sanity check, Should not be an insertion and deletion
                if($insertion_flag) {
                    if ($message_only_once) {
                        warning("Should not be an deletion and insertion on the " .
                                "same alignment region. cigar_line=$string\n");
                        $message_only_once = 0;
                    }
                }
            }
            #shift our position in the hit seq alignment
            $prev2 = $f->hend();
        } else {
            if( ( defined $prev2 ) && ( $f->hend() + 1 < $prev2 )) {
                #there is a deletion
                my $gap = $prev2 - $f->hend - 1;
                my $gap2 = int( $gap * $hlengthfactor + 0.5 );
                if( $gap2 == 1 ) {
                    $gap2 = ""; # no need for a number if gap length is 1
                }
                $string .= "$gap2"."D";
                #sanity check, Should not be an insertion and deletion
                if($insertion_flag) {
                    if ($message_only_once) {
                        warning("Should not be an deletion and insertion on the " .
                                "same alignment region. prev2 = $prev2; f->hend() = " .
                                $f->hend() . "; cigar_line = $string;\n");
                        $message_only_once = 0;
                    }
                }
            }
            #shift our position in the hit seq alignment
            $prev2 = $f->hstart();
        }

        # Finally append the match itself, measured in query units.
        my $matchlength = $f->end() - $f->start() + 1;
        if( $matchlength == 1 ) {
            $matchlength = "";
        }
        $string .= $matchlength."M";
    }

    # Populate this feature's own fields from the assembled alignment.
    $self->{'start'}          = $f1start;
    $self->{'end'}            = $f1end;
    $self->{'seqname'}        = $seqname;
    $self->{'strand'}         = $strand;
    $self->{'score'}          = $score;
    $self->{'percent_id'}     = $percent;
    $self->{'analysis'}       = $analysis;
    $self->{'slice'}          = $slice;
    $self->{'hslice'}         = $hslice;
    $self->{'hstart'}         = $f2start;
    $self->{'hend'}           = $f2end;
    $self->{'hstrand'}        = $hstrand;
    $self->{'hseqname'}       = $hname;
    $self->{'cigar_string'}   = $string;
    $self->{'p_value'}        = $pvalue;
    $self->{'external_db_id'} = $external_db_id;
    $self->{'hcoverage'}      = $hcoverage;
    $self->{'group_id'}       = $group_id;
    $self->{'level_id'}       = $level_id;
}
=head2 _hit_unit
Args : none
Example : none
Description: abstract method, overwrite with something that returns
one or three
Returntype : int 1,3
Exceptions : none
Caller : internal
Status : Stable
=cut
sub _hit_unit {
    my ($self) = @_;
    # Subclasses must override this to return 1 (nucleotide) or 3 (peptide).
    throw( "Abstract method call!" );
}
=head2 _query_unit
Args : none
Example : none
Description: abstract method, overwrite with something that returns
one or three
Returntype : int 1,3
Exceptions : none
Caller : internal
Status : Stable
=cut
sub _query_unit {
    my ($self) = @_;
    # Subclasses must override this to return 1 (nucleotide) or 3 (peptide).
    throw( "Abstract method call!" );
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl/modules/Bio/EnsEMBL/BaseAlignFeature.pm | Perl | apache-2.0 | 25,168 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::ConversionUploadService::UploadCallConversionsResponse;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
sub new {
    my ( $class, $args ) = @_;

    # Copy only the fields this response type knows about.
    my $self = {
        partialFailureError => $args->{partialFailureError},
        results             => $args->{results}
    };

    # Drop fields the caller did not set, keeping the JSON payload concise.
    remove_unassigned_fields( $self, $args );

    return bless $self, $class;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/ConversionUploadService/UploadCallConversionsResponse.pm | Perl | apache-2.0 | 1,132 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=pod
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Production::Pipeline::EBeye::ChecksumGenerator
=head1 DESCRIPTION
Creates a CHECKSUMS file in the given directory which is produced from running
the sum command over every file in the directory. This excludes the CHECKSUMS
file, parent directory or any hidden files.
=cut
package Bio::EnsEMBL::Production::Pipeline::EBeye::ChecksumGenerator;
use strict;
use warnings;
use base qw/Bio::EnsEMBL::Production::Pipeline::ChecksumGenerator Bio::EnsEMBL::Production::Pipeline::EBeye::Base/;
sub fetch_input {
    my ($self) = @_;

    # Record the EBeye dump directory as the 'dir' pipeline parameter
    # before delegating the generic checksum setup to the parent class.
    my $data_dir = $self->data_path();
    $self->param( 'dir', $data_dir );

    $self->SUPER::fetch_input();

    return;
}
1;
| markmcdowall/ensembl-production | modules/Bio/EnsEMBL/Production/Pipeline/EBeye/ChecksumGenerator.pm | Perl | apache-2.0 | 1,568 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::barracuda::cloudgen::snmp::mode::vpnstatus;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold);
sub custom_status_output {
    my ( $self, %options ) = @_;

    # e.g. "status is 'active'"
    return "status is '" . $self->{result_values}->{status} . "'";
}
sub custom_status_calc {
    my ( $self, %options ) = @_;

    # Pull the freshly collected values for this VPN instance into
    # result_values for the output/threshold callbacks.
    my $instance = $self->{instance};
    $self->{result_values}->{status}  = $options{new_datas}->{ $instance . '_vpnState' };
    $self->{result_values}->{display} = $options{new_datas}->{ $instance . '_display' };

    return 0;
}
sub set_counters {
    my ($self, %options) = @_;

    # One top-level counter group with one entry per discovered VPN
    # (type => 1 iterates instances individually).
    $self->{maps_counters_type} = [
        { name => 'vpns', type => 1, cb_prefix_output => 'prefix_vpns_output', message_multiple => 'All VPNs are ok' }
    ];

    # Status counter: perfdata is suppressed (closure returns 0) and
    # thresholds are evaluated against the %{status}/%{display} macros
    # by catalog_status_threshold.
    $self->{maps_counters}->{vpns} = [
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'vpnState' }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
    ];
}
sub prefix_vpns_output {
    my ( $self, %options ) = @_;

    # Prefix prepended to every per-VPN status line.
    my $vpn_name = $options{instance_value}->{display};
    return "VPN '" . $vpn_name . "' ";
}
sub new {
    my ( $class, %options ) = @_;

    my $self = $class->SUPER::new( package => __PACKAGE__, %options );
    bless $self, $class;

    # Plugin-specific command-line options.
    $options{options}->add_options( arguments => {
        'filter-name:s'     => { name => 'filter_name' },
        'warning-status:s'  => { name => 'warning_status', default => '' },
        'critical-status:s' => { name => 'critical_status', default => '%{status} =~ /^down$/i' },
    } );

    return $self;
}
sub check_options {
    my ( $self, %options ) = @_;

    $self->SUPER::check_options(%options);

    # Expand the %{...} macros used inside the status thresholds.
    $self->change_macros( macros => [ 'warning_status', 'critical_status' ] );
}
# Textual states for the vpnState SNMP values.
my %map_status = (
    -1 => 'down',
    0 => 'down-disabled',
    1 => 'active',
);

# vpnName column of the Barracuda VPN table.
my $oid_vpnName = '.1.3.6.1.4.1.10704.1.6.1.1';
# vpnState column, decoded through %map_status.
my $mapping = {
    vpnState => { oid => '.1.3.6.1.4.1.10704.1.6.1.2', map => \%map_status },
};

sub manage_selection {
    my ($self, %options) = @_;

    # The bulk-load pattern below is not supported over SNMPv1.
    if ($options{snmp}->is_snmpv1()) {
        $self->{output}->add_option_msg(short_msg => "Need to use SNMP v2c or v3.");
        $self->{output}->option_exit();
    }

    # First pass: walk the name column to discover VPN instances.
    my $snmp_result = $options{snmp}->get_table(oid => $oid_vpnName, nothing_quit => 1);
    $self->{vpns} = {};
    foreach my $oid (keys %{$snmp_result}) {
        # NOTE(review): match success is not checked before using $1; a
        # non-matching OID would reuse a stale capture - confirm the walk
        # can only return OIDs under $oid_vpnName.
        $oid =~ /^$oid_vpnName\.(.*)$/;
        my $instance = $1;
        # Strip backslash escaping from the VPN name.
        $snmp_result->{$oid} =~ s/\\//g;
        # Honour --filter-name (regexp on the VPN name).
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $snmp_result->{$oid} !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping VPN '" . $snmp_result->{$oid} . "'.", debug => 1);
            next;
        }
        $self->{vpns}->{$instance} = { display => $snmp_result->{$oid} };
    }

    # Second pass: fetch the state of each retained instance in one go.
    $options{snmp}->load(oids => [$mapping->{vpnState}->{oid}], instances => [keys %{$self->{vpns}}], instance_regexp => '^(.*)$');
    $snmp_result = $options{snmp}->get_leef(nothing_quit => 1);
    foreach (keys %{$self->{vpns}}) {
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $_);
        foreach my $name (keys %$mapping) {
            $self->{vpns}->{$_}->{$name} = $result->{$name};
        }
    }

    if (scalar(keys %{$self->{vpns}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No VPNs found.");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check VPNs status.
=over 8
=item B<--warning-status>
Set warning threshold for status.
Can use special variables like: %{status}, %{display}
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} =~ /^down$/i').
Can use special variables like: %{status}, %{display}
=item B<--filter-name>
Filter by VPN name (Can be a regexp).
=back
=cut
| centreon/centreon-plugins | network/barracuda/cloudgen/snmp/mode/vpnstatus.pm | Perl | apache-2.0 | 5,110 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::azure::database::elasticpool::mode::dtu;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
# Static description of the Azure Monitor metrics handled by this mode:
# plugin threshold label, normalized perfdata label (nlabel), human
# readable output text, and unit for each metric name.
# NOTE(review): the "consumpution" typo in the nlabel is kept unchanged -
# fixing it would break continuity of existing perfdata history.
my %metrics_mapping = (
    'dtu_consumption_percent' => {
        'output' => 'DTU consumption percentage',
        'label' => 'dtu-consumption-percent',
        'nlabel' => 'elasticpool.dtu.consumpution.percentage',
        'unit' => '%'
    },
    'eDTU_limit' => {
        'output' => 'eDTU Limit',
        'label' => 'edtu-limit',
        'nlabel' => 'elasticpool.edtu.limit.count',
        'unit' => ''
    },
    'eDTU_used' => {
        'output' => 'eDTU Used',
        'label' => 'edtu-used',
        'nlabel' => 'elasticpool.edtu.used.count',
        'unit' => ''
    }
);
sub custom_metric_calc {
    my ( $self, %options ) = @_;

    my $metric = $options{extra_options}->{metric};

    # Stash the datapoint, its metric name and the sampling window for
    # the output/perfdata/threshold callbacks.
    $self->{result_values}->{timeframe} = $options{new_datas}->{ $self->{instance} . '_timeframe' };
    $self->{result_values}->{value}     = $options{new_datas}->{ $self->{instance} . '_' . $metric };
    $self->{result_values}->{metric}    = $metric;

    return 0;
}
sub custom_metric_threshold {
    my ( $self, %options ) = @_;

    my $label = $metrics_mapping{ $self->{result_values}->{metric} }->{label};

    # Critical is listed first so it wins when both thresholds are crossed.
    return $self->{perfdata}->threshold_check(
        value     => $self->{result_values}->{value},
        threshold => [
            { label => 'critical-' . $label, exit_litteral => 'critical' },
            { label => 'warning-' . $label, exit_litteral => 'warning' }
        ]
    );
}
sub custom_metric_perfdata {
    my ($self, %options) = @_;

    # Emit one perfdata entry per (instance, metric), using the normalized
    # label/unit from %metrics_mapping and whatever thresholds were
    # registered under warning-/critical-<label>.
    $self->{output}->perfdata_add(
        instances => $self->{instance},
        nlabel => $metrics_mapping{$self->{result_values}->{metric}}->{nlabel},
        unit => $metrics_mapping{$self->{result_values}->{metric}}->{unit},
        value => sprintf('%.2f', $self->{result_values}->{value}),
        warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . $metrics_mapping{$self->{result_values}->{metric}}->{label}),
        critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . $metrics_mapping{$self->{result_values}->{metric}}->{label})
    );
}
sub custom_metric_output {
    my ( $self, %options ) = @_;

    my $spec = $metrics_mapping{ $self->{result_values}->{metric} };

    # e.g. "DTU consumption percentage: 42.00 %"
    return sprintf( '%s: %.2f %s',
        $spec->{output}, $self->{result_values}->{value}, $spec->{unit} );
}
sub prefix_metric_output {
    my ( $self, %options ) = @_;

    # Prefix for the per-pool metric lines.
    my $pool = $options{instance_value}->{display};
    return "Elastic Pool '" . $pool . "' ";
}
sub prefix_statistics_output {
    my ( $self, %options ) = @_;

    # Prefix for the per-aggregation (Average, Maximum, ...) lines.
    my $stat = $options{instance_value}->{display};
    return "Statistic '" . $stat . "' Metrics ";
}
sub long_output {
    my ( $self, %options ) = @_;

    # Header of the detailed (long) output for one pool.
    # Fix: there was a missing space between "Pool" and the quoted pool
    # name ("Checking Pool'name'"), inconsistent with the other
    # prefix callbacks in this mode.
    return "Checking Pool '" . $options{instance_value}->{display} . "' ";
}
sub set_counters {
    my ($self, %options) = @_;

    # Three-level counter layout: pool (metrics) -> statistic
    # (aggregation) -> individual metric values.
    $self->{maps_counters_type} = [
        { name => 'metrics', type => 3, cb_prefix_output => 'prefix_metric_output', cb_long_output => 'long_output',
          # NOTE(review): "All CPU metrics are ok" looks like a copy-paste
          # from another mode - this mode reports DTU metrics.
          message_multiple => 'All CPU metrics are ok', indent_long_output => '    ',
          group => [
              { name => 'statistics', display_long => 1, cb_prefix_output => 'prefix_statistics_output',
                message_multiple => 'All metrics are ok', type => 1, skipped_code => { -10 => 1 } }
          ]
        }
    ];

    # Build one counter definition per known metric, all sharing the
    # generic custom_metric_* callbacks defined above.
    foreach my $metric (keys %metrics_mapping) {
        my $entry = {
            label => $metrics_mapping{$metric}->{label},
            nlabel => $metrics_mapping{$metric}->{nlabel},
            set => {
                key_values => [ { name => $metric }, { name => 'timeframe' }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_metric_calc'),
                closure_custom_calc_extra_options => { metric => $metric },
                closure_custom_output => $self->can('custom_metric_output'),
                closure_custom_perfdata => $self->can('custom_metric_perfdata'),
                closure_custom_threshold_check => $self->can('custom_metric_threshold')
            }
        };
        push @{$self->{maps_counters}->{statistics}}, $entry;
    }
}
sub new {
    my ( $class, %options ) = @_;

    my $self = $class->SUPER::new( package => __PACKAGE__, %options, force_new_perfdata => 1 );
    bless $self, $class;

    # Mode-specific command-line options.
    $options{options}->add_options( arguments => {
        'resource:s@'      => { name => 'resource' },
        'resource-group:s' => { name => 'resource_group' },
        'filter-metric:s'  => { name => 'filter_metric' }
    } );

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    # --resource is mandatory: either a bare name combined with
    # --resource-group, or a full Azure resource id.
    # NOTE(review): 'resource' is declared as 's@' (array ref), so the
    # eq '' comparison can only match an unusual value - confirm the
    # intended emptiness check.
    if (!defined($self->{option_results}->{resource}) || $self->{option_results}->{resource} eq '') {
        $self->{output}->add_option_msg(short_msg => 'Need to specify either --resource <name> with --resource-group option or --resource <id>.');
        $self->{output}->option_exit();
    }

    $self->{az_resource} = $self->{option_results}->{resource};
    $self->{az_resource_group} = $self->{option_results}->{resource_group} if (defined($self->{option_results}->{resource_group}));
    $self->{az_resource_type} = 'servers';
    $self->{az_resource_namespace} = 'Microsoft.Sql';
    # Defaults: 15 minutes of data at 5-minute granularity, Average only.
    $self->{az_timeframe} = defined($self->{option_results}->{timeframe}) ? $self->{option_results}->{timeframe} : 900;
    $self->{az_interval} = defined($self->{option_results}->{interval}) ? $self->{option_results}->{interval} : 'PT5M';
    $self->{az_aggregations} = ['Average'];
    if (defined($self->{option_results}->{aggregation})) {
        $self->{az_aggregations} = [];
        foreach my $stat (@{$self->{option_results}->{aggregation}}) {
            # Normalize user input to Azure's capitalization (e.g. 'Average').
            if ($stat ne '') {
                push @{$self->{az_aggregations}}, ucfirst(lc($stat));
            }
        }
    }

    # Honour --filter-metric by whitelisting matching metric names.
    foreach my $metric (keys %metrics_mapping) {
        next if (defined($self->{option_results}->{filter_metric}) && $self->{option_results}->{filter_metric} ne ''
            && $metric !~ /$self->{option_results}->{filter_metric}/);
        push @{$self->{az_metrics}}, $metric;
    }
}
# Query Azure Monitor for every requested elastic pool and populate
# $self->{metrics}, keyed by the "<server>/<pool>" display name, with one
# entry per (aggregation, metric) pair.
sub manage_selection {
    my ($self, %options) = @_;

    my %metric_results;
    my $raw_results;
    foreach my $resource (@{$self->{az_resource}}) {
        my $resource_group = $self->{az_resource_group};
        my ($resource_display, $resource_name);
        # Name form: '<server>/elasticpools/<pool>'. Anything without the
        # '/elasticpools/' segment is rejected outright.
        if ($resource =~ /^(.*)\/elasticpools\/(.*)/) {
            ($resource_display, $resource_name) = ($1 . '/' . $2, $resource);
        } else {
            $self->{output}->add_option_msg(short_msg => 'Incorrect resource format');
            $self->{output}->option_exit();
        };
        # Full resource-id form: re-derive the group and the display/query names
        # from the id, overriding --resource-group if one was given.
        if ($resource =~ /^\/subscriptions\/.*\/resourceGroups\/(.*)\/providers\/Microsoft\.Sql\/servers\/(.*)\/elasticpools\/(.*)$/) {
            $resource_group = $1;
            $resource_display = $2 . '/' . $3;
            $resource_name = $2 . '/elasticpools/' . $3;
        }
        ($metric_results{$resource_display}, $raw_results) = $options{custom}->azure_get_metrics(
            resource => $resource_name,
            resource_group => $resource_group,
            resource_type => $self->{az_resource_type},
            resource_namespace => $self->{az_resource_namespace},
            metrics => $self->{az_metrics},
            aggregations => $self->{az_aggregations},
            timeframe => $self->{az_timeframe},
            interval => $self->{az_interval}
        );
        foreach my $metric (@{$self->{az_metrics}}) {
            foreach my $aggregation (@{$self->{az_aggregations}}) {
                # Missing datapoints are skipped unless --zeroed asks for a 0 default.
                next if (!defined($metric_results{$resource_display}->{$metric}->{lc($aggregation)}) && !defined($self->{option_results}->{zeroed}));
                $self->{metrics}->{$resource_display}->{display} = $resource_display;
                $self->{metrics}->{$resource_display}->{statistics}->{lc($aggregation)}->{display} = lc($aggregation);
                $self->{metrics}->{$resource_display}->{statistics}->{lc($aggregation)}->{timeframe} = $self->{az_timeframe};
                $self->{metrics}->{$resource_display}->{statistics}->{lc($aggregation)}->{$metric} =
                    defined($metric_results{$resource_display}->{$metric}->{lc($aggregation)}) ?
                    $metric_results{$resource_display}->{$metric}->{lc($aggregation)} : 0;
            }
        }
    }

    if (scalar(keys %{$self->{metrics}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => 'No metrics. Check your options or use --zeroed option to set 0 on undefined values');
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check Azure SQL Elastic Pool DTU metrics.
(Only applies to DTU-based elastic pools)
Example:
Using resource name :
perl centreon_plugins.pl --plugin=cloud::azure::database::elasticpool::plugin --custommode=azcli --mode=dtu
--resource=<sqlserver>/elasticpools/<elasticpool> --resource-group=<resourcegroup> --aggregation='average'
--critical-dtu-consumption-percent='90' --verbose
Using resource id :
perl centreon_plugins.pl --plugin=cloud::azure::database::elasticpool::plugin --custommode=azcli --mode=dtu
--resource='/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Sql/servers/xxx/elasticpools/xxx'
--aggregation='average' --critical-dtu-consumption-percent='90' --verbose
Default aggregation: 'average' / 'minimum' and 'maximum' are valid.
=over 8
=item B<--resource>
Set resource name or id (Required).
=item B<--resource-group>
Set resource group (Required if resource's name is used).
=item B<--filter-metric>
Filter on specific metrics. The Azure format must be used, for example: 'dtu_consumption_percent'
(Can be a regexp).
=item B<--warning-*>
Warning threshold where * can be: 'dtu-consumption-percent', 'edtu-limit', 'edtu-used'.
=item B<--critical-*>
Critical threshold where * can be: 'dtu-consumption-percent', 'edtu-limit', 'edtu-used'.
=back
=cut
| centreon/centreon-plugins | cloud/azure/database/elasticpool/mode/dtu.pm | Perl | apache-2.0 | 10,952 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::mq::vernemq::restapi::mode::listeners;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold catalog_status_calc);
# Render the listener state for the long/short output, e.g. "status: running".
sub custom_status_output {
    my ($self, %options) = @_;

    return 'status: ' . $self->{result_values}->{status};
}
# Prefix every per-listener line with the listener identifier.
sub prefix_listener_output {
    my ($self, %options) = @_;

    return sprintf("Listener '%s' ", $options{instance_value}->{display});
}
# Declare the counter tree: global running/notrunning totals plus a
# per-listener status check driven by the --*-status threshold expressions.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'global', type => 0 },
        { name => 'listeners', type => 1, cb_prefix_output => 'prefix_listener_output', message_multiple => 'All listeners are ok', skipped_code => { -10 => 1 } },
    ];

    $self->{maps_counters}->{global} = [
        # display_ok => 0: shown only when a threshold is set, but always
        # exported as perfdata.
        { label => 'running', nlabel => 'listeners.running.count', display_ok => 0, set => {
                key_values => [ { name => 'running' } ],
                output_template => 'current listeners running: %s',
                perfdatas => [
                    { value => 'running', template => '%s', min => 0 }
                ]
            }
        },
        { label => 'notrunning', nlabel => 'listeners.notrunning.count', display_ok => 0, set => {
                key_values => [ { name => 'notrunning' } ],
                output_template => 'current listeners not running: %s',
                perfdatas => [
                    { value => 'notrunning', template => '%s', min => 0 }
                ]
            }
        }
    ];

    $self->{maps_counters}->{listeners} = [
        # threshold => 0: severity comes from catalog_status_threshold (the
        # %{status} expressions), not from numeric warning/critical values.
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'status' }, { name => 'display' } ],
                closure_custom_calc => \&catalog_status_calc,
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold
            }
        }
    ];
}
# Constructor: chain to the counter template and declare the mode options
# (listener type filter and the three status threshold expressions).
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
    bless $self, $class;

    my %arguments = (
        'filter-type:s'     => { name => 'filter_type' },
        'unknown-status:s'  => { name => 'unknown_status', default => '' },
        'warning-status:s'  => { name => 'warning_status', default => '' },
        'critical-status:s' => { name => 'critical_status', default => '%{status} ne "running"' },
    );
    $options{options}->add_options(arguments => \%arguments);

    return $self;
}
# Post-process options: expand the %{...} macros inside the status
# threshold expressions so they can be evaluated per listener.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    $self->change_macros(macros => ['warning_status', 'critical_status', 'unknown_status']);
}
# Fetch the listener table from the VerneMQ HTTP API, apply --filter-type,
# and populate the global counters plus one entry per listener type.
sub manage_selection {
    my ($self, %options) = @_;

    my $api_result = $options{custom}->request_api(endpoint => '/listener/show');

    $self->{global} = { running => 0, notrunning => 0 };
    $self->{listeners} = {};
    foreach my $entry (@{$api_result->{table}}) {
        if (defined($self->{option_results}->{filter_type}) && $self->{option_results}->{filter_type} ne '' &&
            $entry->{type} !~ /$self->{option_results}->{filter_type}/) {
            $self->{output}->output_add(long_msg => "skipping listeners '" . $entry->{type} . "': no matching filter.", debug => 1);
            next;
        }

        if ($entry->{status} eq 'running') {
            $self->{global}->{running}++;
        } else {
            $self->{global}->{notrunning}++;
        }
        $self->{listeners}->{ $entry->{type} } = {
            display => $entry->{type},
            status  => $entry->{status}
        };
    }

    if (scalar(keys %{$self->{listeners}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No listener found");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check listeners.
=over 8
=item B<--filter-type>
Filter listener type (can be a regexp).
=item B<--unknown-status>
Set unknown threshold for status.
Can use special variables like: %{status}, %{display}
=item B<--warning-status>
Set warning threshold for status.
Can use special variables like: %{status}, %{display}
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} ne "running"').
Can use special variables like: %{status}, %{display}
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'running', 'notrunning'.
=back
=cut
| Tpo76/centreon-plugins | apps/mq/vernemq/restapi/mode/listeners.pm | Perl | apache-2.0 | 5,202 |
package Socket;
use vars qw($VERSION @ISA @EXPORT @EXPORT_OK %EXPORT_TAGS);
$VERSION = "1.7";
=head1 NAME
Socket, sockaddr_in, sockaddr_un, inet_aton, inet_ntoa - load the C socket.h defines and structure manipulators
=head1 SYNOPSIS
use Socket;
$proto = getprotobyname('udp');
socket(Socket_Handle, PF_INET, SOCK_DGRAM, $proto);
$iaddr = gethostbyname('hishost.com');
$port = getservbyname('time', 'udp');
$sin = sockaddr_in($port, $iaddr);
send(Socket_Handle, 0, 0, $sin);
$proto = getprotobyname('tcp');
socket(Socket_Handle, PF_INET, SOCK_STREAM, $proto);
$port = getservbyname('smtp', 'tcp');
$sin = sockaddr_in($port,inet_aton("127.1"));
$sin = sockaddr_in(7,inet_aton("localhost"));
$sin = sockaddr_in(7,INADDR_LOOPBACK);
connect(Socket_Handle,$sin);
($port, $iaddr) = sockaddr_in(getpeername(Socket_Handle));
$peer_host = gethostbyaddr($iaddr, AF_INET);
$peer_addr = inet_ntoa($iaddr);
$proto = getprotobyname('tcp');
socket(Socket_Handle, PF_UNIX, SOCK_STREAM, $proto);
unlink('/tmp/usock');
$sun = sockaddr_un('/tmp/usock');
connect(Socket_Handle,$sun);
=head1 DESCRIPTION
This module is just a translation of the C F<socket.h> file.
Unlike the old mechanism of requiring a translated F<socket.ph>
file, this uses the B<h2xs> program (see the Perl source distribution)
and your native C compiler. This means that it has a
far more likely chance of getting the numbers right. This includes
all of the commonly used pound-defines like AF_INET, SOCK_STREAM, etc.
Also, some common socket "newline" constants are provided: the
constants C<CR>, C<LF>, and C<CRLF>, as well as C<$CR>, C<$LF>, and
C<$CRLF>, which map to C<\015>, C<\012>, and C<\015\012>. If you do
not want to use the literal characters in your programs, then use
the constants provided here. They are not exported by default, but can
be imported individually, and with the C<:crlf> export tag:
use Socket qw(:DEFAULT :crlf);
In addition, some structure manipulation functions are available:
=over
=item inet_aton HOSTNAME
Takes a string giving the name of a host, and translates that
to the 4-byte string (structure). Takes arguments of both
the 'rtfm.mit.edu' type and '18.181.0.24'. If the host name
cannot be resolved, returns undef. For multi-homed hosts (hosts
with more than one address), the first address found is returned.
=item inet_ntoa IP_ADDRESS
Takes a four byte ip address (as returned by inet_aton())
and translates it into a string of the form 'd.d.d.d'
where the 'd's are numbers less than 256 (the normal
readable four dotted number notation for internet addresses).
=item INADDR_ANY
Note: does not return a number, but a packed string.
Returns the 4-byte wildcard ip address which specifies any
of the hosts ip addresses. (A particular machine can have
more than one ip address, each address corresponding to
a particular network interface. This wildcard address
allows you to bind to all of them simultaneously.)
Normally equivalent to inet_aton('0.0.0.0').
=item INADDR_BROADCAST
Note: does not return a number, but a packed string.
Returns the 4-byte 'this-lan' ip broadcast address.
This can be useful for some protocols to solicit information
from all servers on the same LAN cable.
Normally equivalent to inet_aton('255.255.255.255').
=item INADDR_LOOPBACK
Note - does not return a number.
Returns the 4-byte loopback address. Normally equivalent
to inet_aton('localhost').
=item INADDR_NONE
Note - does not return a number.
Returns the 4-byte 'invalid' ip address. Normally equivalent
to inet_aton('255.255.255.255').
=item sockaddr_in PORT, ADDRESS
=item sockaddr_in SOCKADDR_IN
In an array context, unpacks its SOCKADDR_IN argument and returns an array
consisting of (PORT, ADDRESS). In a scalar context, packs its (PORT,
ADDRESS) arguments as a SOCKADDR_IN and returns it. If this is confusing,
use pack_sockaddr_in() and unpack_sockaddr_in() explicitly.
=item pack_sockaddr_in PORT, IP_ADDRESS
Takes two arguments, a port number and a 4 byte IP_ADDRESS (as returned by
inet_aton()). Returns the sockaddr_in structure with those arguments
packed in with AF_INET filled in. For internet domain sockets, this
structure is normally what you need for the arguments in bind(),
connect(), and send(), and is also returned by getpeername(),
getsockname() and recv().
=item unpack_sockaddr_in SOCKADDR_IN
Takes a sockaddr_in structure (as returned by pack_sockaddr_in()) and
returns an array of two elements: the port and the 4-byte ip-address.
Will croak if the structure does not have AF_INET in the right place.
=item sockaddr_un PATHNAME
=item sockaddr_un SOCKADDR_UN
In an array context, unpacks its SOCKADDR_UN argument and returns an array
consisting of (PATHNAME). In a scalar context, packs its PATHNAME
arguments as a SOCKADDR_UN and returns it. If this is confusing, use
pack_sockaddr_un() and unpack_sockaddr_un() explicitly.
These are only supported if your system has E<lt>F<sys/un.h>E<gt>.
=item pack_sockaddr_un PATH
Takes one argument, a pathname. Returns the sockaddr_un structure with
that path packed in with AF_UNIX filled in. For unix domain sockets, this
structure is normally what you need for the arguments in bind(),
connect(), and send(), and is also returned by getpeername(),
getsockname() and recv().
=item unpack_sockaddr_un SOCKADDR_UN
Takes a sockaddr_un structure (as returned by pack_sockaddr_un())
and returns the pathname. Will croak if the structure does not
have AF_UNIX in the right place.
=back
=cut
use Carp;
require Exporter;
require DynaLoader;
@ISA = qw(Exporter DynaLoader);
@EXPORT = qw(
inet_aton inet_ntoa pack_sockaddr_in unpack_sockaddr_in
pack_sockaddr_un unpack_sockaddr_un
sockaddr_in sockaddr_un
INADDR_ANY INADDR_BROADCAST INADDR_LOOPBACK INADDR_NONE
AF_802
AF_APPLETALK
AF_CCITT
AF_CHAOS
AF_DATAKIT
AF_DECnet
AF_DLI
AF_ECMA
AF_GOSIP
AF_HYLINK
AF_IMPLINK
AF_INET
AF_LAT
AF_MAX
AF_NBS
AF_NIT
AF_NS
AF_OSI
AF_OSINET
AF_PUP
AF_SNA
AF_UNIX
AF_UNSPEC
AF_X25
MSG_CTLFLAGS
MSG_CTLIGNORE
MSG_CTRUNC
MSG_DONTROUTE
MSG_DONTWAIT
MSG_EOF
MSG_EOR
MSG_ERRQUEUE
MSG_FIN
MSG_MAXIOVLEN
MSG_NOSIGNAL
MSG_OOB
MSG_PEEK
MSG_PROXY
MSG_RST
MSG_SYN
MSG_TRUNC
MSG_URG
MSG_WAITALL
PF_802
PF_APPLETALK
PF_CCITT
PF_CHAOS
PF_DATAKIT
PF_DECnet
PF_DLI
PF_ECMA
PF_GOSIP
PF_HYLINK
PF_IMPLINK
PF_INET
PF_LAT
PF_MAX
PF_NBS
PF_NIT
PF_NS
PF_OSI
PF_OSINET
PF_PUP
PF_SNA
PF_UNIX
PF_UNSPEC
PF_X25
SCM_CONNECT
SCM_CREDENTIALS
SCM_CREDS
SCM_RIGHTS
SCM_TIMESTAMP
SOCK_DGRAM
SOCK_RAW
SOCK_RDM
SOCK_SEQPACKET
SOCK_STREAM
SOL_SOCKET
SOMAXCONN
SO_ACCEPTCONN
SO_BROADCAST
SO_DEBUG
SO_DONTLINGER
SO_DONTROUTE
SO_ERROR
SO_KEEPALIVE
SO_LINGER
SO_OOBINLINE
SO_RCVBUF
SO_RCVLOWAT
SO_RCVTIMEO
SO_REUSEADDR
SO_SNDBUF
SO_SNDLOWAT
SO_SNDTIMEO
SO_TYPE
SO_USELOOPBACK
);
# The newline constants are opt-in: import them individually or via the
# :crlf tag; :all pulls in both the default and optional symbols.
@EXPORT_OK = qw(CR LF CRLF $CR $LF $CRLF);
%EXPORT_TAGS = (
    crlf => [qw(CR LF CRLF $CR $LF $CRLF)],
    all  => [@EXPORT, @EXPORT_OK],
);
# Socket "newline" constants as inlinable constant subs. Defined in a BEGIN
# block so they exist before the glob aliasing below runs at compile time.
BEGIN {
    sub CR   () {"\015"}
    sub LF   () {"\012"}
    sub CRLF () {"\015\012"}
}
# Alias the constant values to package scalars so callers can interpolate
# $CR, $LF and $CRLF directly into strings.
*CR   = \CR();
*LF   = \LF();
*CRLF = \CRLF();
# Context-sensitive pack/unpack for struct sockaddr_in:
#   list context:   sockaddr_in($sin)           -> (port, 4-byte ip address)
#   scalar context: sockaddr_in($port, $iaddr)  -> packed sockaddr_in
# A deprecated 6-argument form (af, port, four address octets) is kept for
# perl5.001m compatibility and warns under -w.
sub sockaddr_in {
    if (@_ == 6 && !wantarray) { # perl5.001m compat; use this && die
        my($af, $port, @quad) = @_;
        carp "6-ARG sockaddr_in call is deprecated" if $^W;
        pack_sockaddr_in($port, inet_aton(join('.', @quad)));
    } elsif (wantarray) {
        croak "usage: (port,iaddr) = sockaddr_in(sin_sv)" unless @_ == 1;
        unpack_sockaddr_in(@_);
    } else {
        croak "usage: sin_sv = sockaddr_in(port,iaddr))" unless @_ == 2;
        pack_sockaddr_in(@_);
    }
}
# Context-sensitive pack/unpack for struct sockaddr_un:
#   list context:   sockaddr_un($sun)  -> (pathname)
#   scalar context: sockaddr_un($path) -> packed sockaddr_un
sub sockaddr_un {
    if (wantarray) {
        croak "usage: (filename) = sockaddr_un(sun_sv)" unless @_ == 1;
        return unpack_sockaddr_un(@_);
    }
    croak "usage: sun_sv = sockaddr_un(filename)" unless @_ == 1;
    return pack_sockaddr_un(@_);
}
# Lazily resolve constant subs (AF_*, PF_*, SOCK_*, SO_*, ...) via the XS
# constant() lookup on first use, then install a real sub with the value so
# subsequent calls bypass AUTOLOAD entirely.
sub AUTOLOAD {
    my($constname);
    ($constname = $AUTOLOAD) =~ s/.*:://;
    # constant() sets $! non-zero when the macro is unknown on this platform.
    my $val = constant($constname, @_ ? $_[0] : 0);
    if ($! != 0) {
        # NOTE(review): $pack/$file/$line are fetched but never used; croak
        # already reports the caller's location on its own.
        my ($pack,$file,$line) = caller;
        croak "Your vendor has not defined Socket macro $constname, used";
    }
    # Memoize: define the constant sub, then jump to it with the original @_.
    eval "sub $AUTOLOAD { $val }";
    goto &$AUTOLOAD;
}
bootstrap Socket $VERSION;
1;
| wolispace/cow_windows_server | oldcow_on_apache/perl/lib/Socket.pm | Perl | apache-2.0 | 8,279 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::netapp::ontap::snmp::mode::clusternodes;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold_ng);
use Digest::MD5 qw(md5_hex);
# Render the node's cluster-communication state, e.g. "node status: clusterComOk".
sub custom_node_status_output {
    my ($self, %options) = @_;

    return 'node status: ' . $self->{result_values}->{node_status};
}
# Render the NVRAM battery state, e.g. "nvram battery status: fullyCharged".
sub custom_bbu_status_output {
    my ($self, %options) = @_;

    return 'nvram battery status: ' . $self->{result_values}->{bbu_status};
}
# Compute CPU usage (%) from the busy-time and uptime deltas between two
# collections; guards against a zero uptime delta (no elapsed time).
sub custom_cpu_calc {
    my ($self, %options) = @_;

    my $id = $self->{instance};
    my $uptime_delta = $options{new_datas}->{$id . '_cpuUptime'} - $options{old_datas}->{$id . '_cpuUptime'};
    my $busy_delta   = $options{new_datas}->{$id . '_cpuBusyTime'} - $options{old_datas}->{$id . '_cpuBusyTime'};

    $self->{result_values}->{cpu_used} =
        $uptime_delta != 0 ? $busy_delta * 100 / $uptime_delta : 0;
    return 0;
}
# Render the port link state with its admin state,
# e.g. "operational status: up [admin: up]".
sub custom_port_link_status_output {
    my ($self, %options) = @_;

    return 'operational status: ' . $self->{result_values}->{opstatus}
        . ' [admin: ' . $self->{result_values}->{admstatus} . ']';
}
# Render the port health state, e.g. "health: healthy".
sub custom_port_health_output {
    my ($self, %options) = @_;

    return 'health: ' . $self->{result_values}->{health};
}
# Header line introducing a node's detailed (long) output section.
sub node_long_output {
    my ($self, %options) = @_;

    return "checking node '$options{instance_value}->{name}'";
}
# Prefix every node-level line with the node name.
sub prefix_node_output {
    my ($self, %options) = @_;

    return "node '$options{instance_value}->{name}' ";
}
# Prefix every port-level line with the port id and its role.
sub prefix_port_output {
    my ($self, %options) = @_;

    my $port = $options{instance_value};
    return "port '$port->{port_id}' [role: $port->{role}] ";
}
# Declare the counter tree: a 'nodes' group (type 3) holding, per node, a
# status sub-group (node + battery state), a cpu sub-group (rate-computed
# utilization), an environment sub-group, and a per-port sub-table.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'nodes', type => 3, cb_prefix_output => 'prefix_node_output', cb_long_output => 'node_long_output',
          indent_long_output => ' ', message_multiple => 'All nodes are ok',
            group => [
                { name => 'status', type => 0, skipped_code => { -10 => 1 } },
                { name => 'cpu', type => 0, skipped_code => { -10 => 1 } },
                { name => 'env', type => 0, skipped_code => { -10 => 1 } },
                { name => 'ports', type => 1, cb_prefix_output => 'prefix_port_output', message_multiple => 'ports are ok', display_long => 1, skipped_code => { -10 => 1 } }
            ]
        }
    ];

    $self->{maps_counters}->{status} = [
        # type => 2: severity driven by the %{...} threshold expressions.
        { label => 'node-status', type => 2, critical_default => '%{node_status} eq "clusterComLost"', set => {
                key_values => [
                    { name => 'node_status' }, { name => 'node_name' }
                ],
                closure_custom_output => $self->can('custom_node_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold_ng
            }
        },
        { label => 'bbu-status', type => 2, critical_default => '%{bbu_status} !~ /fullyCharged|ok/i', set => {
                key_values => [
                    { name => 'bbu_status' }, { name => 'node_name' }
                ],
                closure_custom_output => $self->can('custom_bbu_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold_ng
            }
        }
    ];

    $self->{maps_counters}->{cpu} = [
        # diff => 1: deltas against the statefile; custom_cpu_calc turns the
        # busy-time/uptime deltas into a percentage.
        { label => 'cpu-utilization', nlabel => 'node.cpu.utilization.percentage', set => {
                key_values => [ { name => 'cpuUptime', diff => 1 }, { name => 'cpuBusyTime', diff => 1 } ],
                closure_custom_calc => $self->can('custom_cpu_calc'),
                output_template => 'cpu utilization: %.2f%%',
                output_use => 'cpu_used', threshold_use => 'cpu_used',
                perfdatas => [
                    { value => 'cpu_used', template => '%s', min => 0, max => 100, unit => '%', label_extra_instance => 1 }
                ]
            }
        }
    ];

    $self->{maps_counters}->{env} = [
        { label => 'fan-failed', nlabel => 'node.hardware.fans.failed.count', set => {
                key_values => [ { name => 'envFailedFanCount' } ],
                output_template => 'number of fans failed: %s',
                perfdatas => [
                    { template => '%s', min => 0, label_extra_instance => 1 }
                ]
            }
        },
        { label => 'psu-failed', nlabel => 'node.hardware.power_supplies.failed.count', set => {
                key_values => [ { name => 'envFailedPSUCount' } ],
                output_template => 'number of power supplies failed: %s',
                perfdatas => [
                    { template => '%s', min => 0, label_extra_instance => 1 }
                ]
            }
        },
        { label => 'temperature-overrange', nlabel => 'node.hardware.temperatures.over_range.count', set => {
                key_values => [ { name => 'envOverTemp' } ],
                output_template => 'number of temperatures over range: %s',
                perfdatas => [
                    { template => '%s', min => 0, label_extra_instance => 1 }
                ]
            }
        }
    ];

    $self->{maps_counters}->{ports} = [
        { label => 'port-link-status', type => 2, critical_default => '%{admstatus} eq "up" and %{opstatus} ne "up"', set => {
                key_values => [
                    { name => 'admstatus' }, { name => 'opstatus' }, { name => 'port_id' }, { name => 'node_name' }
                ],
                closure_custom_output => $self->can('custom_port_link_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold_ng
            }
        },
        { label => 'port-health', type => 2, warning_default => '%{health} eq "degraded"', set => {
                key_values => [
                    { name => 'health' }, { name => 'port_id' }, { name => 'node_name' }
                ],
                closure_custom_output => $self->can('custom_port_health_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold_ng
            }
        },
    ];
}
# Constructor: chain to the counter template with a statefile (needed for
# the diff-based cpu counter) and declare the node/port filter options.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1, statefile => 1);
    bless $self, $class;

    my %arguments = (
        'filter-node-name:s' => { name => 'filter_node_name' },
        'filter-port-id:s'   => { name => 'filter_port_id' },
        'filter-port-role:s' => { name => 'filter_port_role' }
    );
    $options{options}->add_options(arguments => \%arguments);

    return $self;
}
# SNMP integer -> label maps for the NETAPP-MIB node and netport tables.
my $map_node_health = {
    0 => 'clusterComLost', 1 => 'clusterComOk'
};
my $map_nvram_status = {
    1 => 'ok', 2 => 'partiallyDischarged',
    3 => 'fullyDischarged', 4 => 'notPresent',
    5 => 'nearEndOfLife', 6 => 'atEndOfLife',
    7 => 'unknown', 8 => 'overCharged', 9 => 'fullyCharged'
};
my $map_port_admin = {
    0 => 'down', 1 => 'up'
};
my $map_port_state = {
    0 => 'undef', 1 => 'off', 2 => 'up', 3 => 'down'
};
my $map_port_health = {
    -1 => 'unknown', 0 => 'healthy', 1 => 'degraded'
};
my $map_port_role = {
    0 => 'undef', 1 => 'cluster', 2 => 'data',
    3 => 'node-mgmt', 4 => 'intercluster', 5 => 'cluster-mgmt'
};
# Per-node attributes (nodeTable). NOTE(review): the hash keys here
# ('health', 'bbuStatus', ...) are what map_instance() returns — any code
# reading the decoded results must use these exact keys.
my $mapping = {
    health => { oid => '.1.3.6.1.4.1.789.1.25.2.1.11', map => $map_node_health }, # nodeHealth
    cpuUptime => { oid => '.1.3.6.1.4.1.789.1.25.2.1.15' }, # nodeCpuUptime
    cpuBusyTime => { oid => '.1.3.6.1.4.1.789.1.25.2.1.16' }, # nodeCpuBusyTime
    bbuStatus => { oid => '.1.3.6.1.4.1.789.1.25.2.1.17', map => $map_nvram_status }, # nodeNvramBatteryStatus
    envOverTemp => { oid => '.1.3.6.1.4.1.789.1.25.2.1.18' }, # nodeEnvOverTemperature
    envFailedFanCount => { oid => '.1.3.6.1.4.1.789.1.25.2.1.19' }, # nodeEnvFailedFanCount
    envFailedPSUCount => { oid => '.1.3.6.1.4.1.789.1.25.2.1.21' } # nodeEnvFailedPowerSupplyCount
};
# Per-port attributes (netportTable).
my $mapping_port = {
    port_id => { oid => '.1.3.6.1.4.1.789.1.22.2.1.2' }, # netportPort
    role => { oid => '.1.3.6.1.4.1.789.1.22.2.1.3', map => $map_port_role }, # netportRole
    opstatus => { oid => '.1.3.6.1.4.1.789.1.22.2.1.4', map => $map_port_state }, # netportLinkState
    admstatus => { oid => '.1.3.6.1.4.1.789.1.22.2.1.14', map => $map_port_admin }, # netportUpAdmin
    health => { oid => '.1.3.6.1.4.1.789.1.22.2.1.30', map => $map_port_health } # netportHealthStatus
};
# Walk the netport table, keep only ports belonging to already-selected
# nodes, apply the port id/role filters, and attach each surviving port to
# its node's 'ports' sub-table.
sub add_ports {
    my ($self, %options) = @_;

    # netportNode: maps each port row instance to its owning node name.
    my $oid_netportNode = '.1.3.6.1.4.1.789.1.22.2.1.1';
    my $snmp_result = $options{snmp}->get_table(
        oid => $oid_netportNode
    );

    # instance id -> node name, restricted to nodes kept by manage_selection().
    my $instances = {};
    foreach (keys %$snmp_result) {
        next if (!defined($self->{nodes}->{ $snmp_result->{$_} }));
        /^$oid_netportNode\.(.*)$/;
        $instances->{$1} = $snmp_result->{$_};
    }

    return if (scalar(keys %$instances) <= 0);

    # Bulk-fetch the port attributes for the retained instances only.
    $options{snmp}->load(
        oids => [ map($_->{oid}, values(%$mapping_port)) ],
        instances => [ keys %$instances ],
        instance_regexp => '^(.*)$'
    );
    $snmp_result = $options{snmp}->get_leef();
    foreach (keys %$instances) {
        my $result = $options{snmp}->map_instance(mapping => $mapping_port, results => $snmp_result, instance => $_);

        if (defined($self->{option_results}->{filter_port_id}) && $self->{option_results}->{filter_port_id} ne '' &&
            $result->{port_id} !~ /$self->{option_results}->{filter_port_id}/) {
            $self->{output}->output_add(long_msg => "skipping port '" . $result->{port_id} . "'.", debug => 1);
            next;
        }
        if (defined($self->{option_results}->{filter_port_role}) && $self->{option_results}->{filter_port_role} ne '' &&
            $result->{role} !~ /$self->{option_results}->{filter_port_role}/) {
            $self->{output}->output_add(long_msg => "skipping port '" . $result->{port_id} . "'.", debug => 1);
            next;
        }

        # node_name is duplicated into each port entry for the threshold macros.
        $self->{nodes}->{ $instances->{$_} }->{ports}->{ $result->{port_id} } = $result;
        $self->{nodes}->{ $instances->{$_} }->{ports}->{ $result->{port_id} }->{node_name} = $instances->{$_};
    }
}
# Discover cluster nodes over SNMP, collect their health/CPU/environment
# attributes, attach their network ports, and set the statefile cache key.
sub manage_selection {
    my ($self, %options) = @_;

    # Walk node names first (nodeName); abort the plugin if the table is empty.
    my $oid_nodeName = '.1.3.6.1.4.1.789.1.25.2.1.1';
    my $snmp_result = $options{snmp}->get_table(
        oid => $oid_nodeName,
        nothing_quit => 1
    );

    $self->{nodes} = {};
    foreach (keys %$snmp_result) {
        /$oid_nodeName\.(.*)$/;
        my $instance = $1;
        my $name = $snmp_result->{$_};
        if (defined($self->{option_results}->{filter_node_name}) && $self->{option_results}->{filter_node_name} ne '' &&
            $name !~ /$self->{option_results}->{filter_node_name}/) {
            $self->{output}->output_add(long_msg => "skipping node '" . $name . "'.", debug => 1);
            next;
        }

        $self->{nodes}->{$name} = {
            instance => $instance,
            name => $name,
            # fix: the status counters declare a 'node_name' key (see
            # set_counters); the previous 'name' key left %{node_name}
            # undefined in threshold expressions.
            status => { node_name => $name },
            ports => {}
        };
    }

    return if (scalar(keys %{$self->{nodes}}) <= 0);

    # Bulk-fetch the node attributes for the retained instances.
    $options{snmp}->load(
        oids => [ map($_->{oid}, values(%$mapping)) ],
        instances => [ map($_->{instance}, values(%{$self->{nodes}})) ],
        instance_regexp => '^(.*)$'
    );
    $snmp_result = $options{snmp}->get_leef();
    foreach (keys %{$self->{nodes}}) {
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $self->{nodes}->{$_}->{instance});

        # fix: $mapping decodes these values under the keys 'health' and
        # 'bbuStatus' — reading $result->{node_status}/$result->{bbu_status}
        # always yielded undef, so node/bbu status thresholds never triggered.
        $self->{nodes}->{$_}->{status}->{node_status} = $result->{health};
        $self->{nodes}->{$_}->{status}->{bbu_status} = $result->{bbuStatus};
        $self->{nodes}->{$_}->{cpu} = {
            cpuUptime => $result->{cpuUptime},
            cpuBusyTime => $result->{cpuBusyTime}
        };
        $self->{nodes}->{$_}->{env} = {
            envOverTemp => $result->{envOverTemp},
            envFailedFanCount => $result->{envFailedFanCount},
            envFailedPSUCount => $result->{envFailedPSUCount}
        };
    }

    $self->add_ports(snmp => $options{snmp});

    # Statefile key (used by the diff-based cpu counter) must be unique per
    # host/port and per filter combination.
    $self->{cache_name} = 'netapp_ontap_' . $self->{mode} . '_' . $options{snmp}->get_hostname() . '_' . $options{snmp}->get_port() . '_' .
        md5_hex(
            (defined($self->{option_results}->{filter_counters}) ? $self->{option_results}->{filter_counters} : 'all') . '_' .
            (defined($self->{option_results}->{filter_node_name}) ? $self->{option_results}->{filter_node_name} : 'all') . '_' .
            (defined($self->{option_results}->{filter_port_id}) ? $self->{option_results}->{filter_port_id} : 'all') . '_' .
            (defined($self->{option_results}->{filter_port_role}) ? $self->{option_results}->{filter_port_role} : 'all')
        );
}
1;
__END__
=head1 MODE
Check cluster nodes.
=over 8
=item B<--filter-node-name>
Filter nodes by name (can be a regexp).
=item B<--filter-port-id>
Filter ports by id (can be a regexp).
=item B<--filter-port-role>
Filter ports by role (can be a regexp).
=item B<--unknown-node-status>
Set unknown threshold for status.
Can use special variables like: %{node_status}, %{node_name}
=item B<--warning-node-status>
Set warning threshold for status.
Can use special variables like: %{node_status}, %{node_name}
=item B<--critical-node-status>
Set critical threshold for status (Default: '%{node_status} eq "clusterComLost"').
Can use special variables like: %{node_status}, %{node_name}
=item B<--unknown-bbu-status>
Set unknown threshold for status.
Can use special variables like: %{bbu_status}, %{node_name}
=item B<--warning-bbu-status>
Set warning threshold for status.
Can use special variables like: %{bbu_status}, %{node_name}
=item B<--critical-bbu-status>
Set critical threshold for status (Default: '%{bbu_status} !~ /fullyCharged|ok/i').
Can use special variables like: %{bbu_status}, %{node_name}
=item B<--unknown-port-link-status>
Set unknown threshold for status.
Can use special variables like: %{admstatus}, %{opstatus}, %{port_id}, %{node_name}
=item B<--warning-port-link-status>
Set warning threshold for status.
Can use special variables like: %{admstatus}, %{opstatus}, %{port_id}, %{node_name}
=item B<--critical-port-link-status>
Set critical threshold for status (Default: '%{admstatus} eq "up" and %{opstatus} ne "up"').
Can use special variables like: %{admstatus}, %{opstatus}, %{port_id}, %{node_name}
=item B<--unknown-port-health>
Set unknown threshold for status.
Can use special variables like: %{health}, %{port_id}, %{node_name}
=item B<--warning-port-health>
Set warning threshold for status (Default: '%{health} eq "degraded"').
Can use special variables like: %{health}, %{port_id}, %{node_name}
=item B<--critical-port-health>
Set critical threshold for status.
Can use special variables like: %{health}, %{port_id}, %{node_name}
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'cpu-utilization', 'temperature-overrange', 'fan-failed', 'psu-failed'.
=back
=cut
| centreon/centreon-plugins | storage/netapp/ontap/snmp/mode/clusternodes.pm | Perl | apache-2.0 | 16,369 |
#
# Copyright 2015 Electric Cloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
# Includes
# -------------------------------------------------------------------------
use Cwd;
use Carp;
use strict;
use Data::Dumper;
use utf8;
use Encode;
use warnings;
use diagnostics;
use open IO => ':encoding(utf8)';
use File::Basename;
use ElectricCommander;
use ElectricCommander::PropDB;
use ElectricCommander::PropMod qw(/myProject/libs);
use ChefHelper;
$| = 1;
# -------------------------------------------------------------------------
# Main functions
# -------------------------------------------------------------------------
###########################################################################
=head2 main
Title : main
Usage : main();
Function : Performs a Chef run
Returns : none
Args : named arguments: none
=cut
###########################################################################
sub main {
    # Entry point for the CreateCookbook procedure: reads the procedure
    # parameters from ElectricCommander, assembles a 'knife cookbook create'
    # command line and runs it, propagating the exit code to the step outcome.
    my $ec = ElectricCommander->new();
    $ec->abortOnError(0);

    # ---------------------------------------------------------------------
    # Procedure parameters (job properties; empty string when unset)
    # ---------------------------------------------------------------------
    my $knife_path =
      ( $ec->getProperty("knife_path") )->findvalue('//value')->string_value;
    my $cookbook_name =
      ( $ec->getProperty("cookbook_name") )->findvalue('//value')->string_value;
    my $copyright =
      ( $ec->getProperty("copyright") )->findvalue('//value')->string_value;
    my $license =
      ( $ec->getProperty("license") )->findvalue('//value')->string_value;
    my $email =
      ( $ec->getProperty("email") )->findvalue('//value')->string_value;
    my $cookbook_path =
      ( $ec->getProperty("cookbook_path") )->findvalue('//value')->string_value;
    my $readme_format =
      ( $ec->getProperty("readme_format") )->findvalue('//value')->string_value;
    my $additional_options =
      ( $ec->getProperty("additional_options") )->findvalue('//value')
      ->string_value;
    $ec->abortOnError(1);

    # Report plugin and procedure context in the step log.
    my $pluginKey = 'EC-Chef';
    my $xpath = $ec->getPlugin($pluginKey);
    my $pluginVersion = $xpath->findvalue('//pluginVersion')->value;
    print "Using plugin $pluginKey version $pluginVersion\n";
    print "Running procedure CreateCookbook\n";

    # Assemble the command line, appending only the options the user supplied.
    # SECURITY NOTE(review): parameter values are interpolated into a shell
    # command unquoted, so shell metacharacters in them are interpreted by the
    # shell. Left as-is for backward compatibility (additional_options relies
    # on shell word-splitting) — confirm whether quoting is acceptable.
    my @parts = ( $knife_path, 'cookbook', 'create' );
    push @parts, $cookbook_name               if $cookbook_name && $cookbook_name ne '';
    push @parts, '--copyright', $copyright    if $copyright && $copyright ne '';
    push @parts, '--license', $license        if $license && $license ne '';
    push @parts, '--email', $email            if $email && $email ne '';
    push @parts, '--cookbook-path', $cookbook_path if $cookbook_path && $cookbook_path ne '';
    push @parts, '--readme-format', $readme_format if $readme_format && $readme_format ne '';
    push @parts, $additional_options          if $additional_options && $additional_options ne '';
    push @parts, '-d';    # presumably disables interactive editing — TODO confirm against knife docs
    my $command = join( ' ', @parts );

    # Print out the command to be executed
    print "\nCommand to be executed: \n$command \n\n";

    # Execute via the shell (single-string form preserved intentionally).
    system("$command");

    # system() returns the wait status; shift right by 8 for the exit code.
    my $exitCode = $? >> 8;

    # Set the step outcome (success/failure) from the exit code.
    setOutcomeFromExitCode($ec, $exitCode);
}
main();
| electric-cloud/EC-Chef | src/main/resources/project/drivers/CreateCookbookDriver.pl | Perl | apache-2.0 | 4,248 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% This file is part of VivoMind Prolog Unicode Resources
% SPDX-License-Identifier: CC0-1.0
%
% VivoMind Prolog Unicode Resources is free software distributed using the
% Creative Commons CC0 1.0 Universal (CC0 1.0) - Public Domain Dedication
% license
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Last modified: March 15, 2012
%
% Original Unicode file header comments follow
/*
# PropList-6.1.0.txt
# Date: 2011-11-30, 01:49:54 GMT [MD]
#
# Unicode Character Database
# Copyright (c) 1991-2011 Unicode, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
# For documentation, see http://www.unicode.org/reports/tr44/
*/
% unicode_terminal_punctuation(?CodePoint)
%
% Holds when CodePoint carries the Unicode Terminal_Punctuation property.
% Modes:
%   - unbound argument: enumerate every such code point by expanding each
%     stored Start..End range with between/3;
%   - bound argument: first try first-argument indexing on the range-start
%     facts, then fall back to scanning for a range containing the code point.
unicode_terminal_punctuation(CodePoint) :-
( var(CodePoint) ->
% generate code point pairs
unicode_terminal_punctuation(CodePointStart, CodePointEnd),
between(CodePointStart, CodePointEnd, CodePoint)
; % try first-argument indexing first
unicode_terminal_punctuation(CodePoint, _) ->
true
; % look for a code point range that includes the given code point
unicode_terminal_punctuation(CodePointStart, CodePointEnd),
between(CodePointStart, CodePointEnd, CodePoint) ->
true
).
% ================================================
unicode_terminal_punctuation(0x0021, 0x0021). % Terminal_Punctuation # Po EXCLAMATION MARK
unicode_terminal_punctuation(0x002C, 0x002C). % Terminal_Punctuation # Po COMMA
unicode_terminal_punctuation(0x002E, 0x002E). % Terminal_Punctuation # Po FULL STOP
unicode_terminal_punctuation(0x003A, 0x003B). % Terminal_Punctuation # Po [2] COLON..SEMICOLON
unicode_terminal_punctuation(0x003F, 0x003F). % Terminal_Punctuation # Po QUESTION MARK
unicode_terminal_punctuation(0x037E, 0x037E). % Terminal_Punctuation # Po GREEK QUESTION MARK
unicode_terminal_punctuation(0x0387, 0x0387). % Terminal_Punctuation # Po GREEK ANO TELEIA
unicode_terminal_punctuation(0x0589, 0x0589). % Terminal_Punctuation # Po ARMENIAN FULL STOP
unicode_terminal_punctuation(0x05C3, 0x05C3). % Terminal_Punctuation # Po HEBREW PUNCTUATION SOF PASUQ
unicode_terminal_punctuation(0x060C, 0x060C). % Terminal_Punctuation # Po ARABIC COMMA
unicode_terminal_punctuation(0x061B, 0x061B). % Terminal_Punctuation # Po ARABIC SEMICOLON
unicode_terminal_punctuation(0x061F, 0x061F). % Terminal_Punctuation # Po ARABIC QUESTION MARK
unicode_terminal_punctuation(0x06D4, 0x06D4). % Terminal_Punctuation # Po ARABIC FULL STOP
unicode_terminal_punctuation(0x0700, 0x070A). % Terminal_Punctuation # Po [11] SYRIAC END OF PARAGRAPH..SYRIAC CONTRACTION
unicode_terminal_punctuation(0x070C, 0x070C). % Terminal_Punctuation # Po SYRIAC HARKLEAN METOBELUS
unicode_terminal_punctuation(0x07F8, 0x07F9). % Terminal_Punctuation # Po [2] NKO COMMA..NKO EXCLAMATION MARK
unicode_terminal_punctuation(0x0830, 0x083E). % Terminal_Punctuation # Po [15] SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUNCTUATION ANNAAU
unicode_terminal_punctuation(0x085E, 0x085E). % Terminal_Punctuation # Po MANDAIC PUNCTUATION
unicode_terminal_punctuation(0x0964, 0x0965). % Terminal_Punctuation # Po [2] DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA
unicode_terminal_punctuation(0x0E5A, 0x0E5B). % Terminal_Punctuation # Po [2] THAI CHARACTER ANGKHANKHU..THAI CHARACTER KHOMUT
unicode_terminal_punctuation(0x0F08, 0x0F08). % Terminal_Punctuation # Po TIBETAN MARK SBRUL SHAD
unicode_terminal_punctuation(0x0F0D, 0x0F12). % Terminal_Punctuation # Po [6] TIBETAN MARK SHAD..TIBETAN MARK RGYA GRAM SHAD
unicode_terminal_punctuation(0x104A, 0x104B). % Terminal_Punctuation # Po [2] MYANMAR SIGN LITTLE SECTION..MYANMAR SIGN SECTION
unicode_terminal_punctuation(0x1361, 0x1368). % Terminal_Punctuation # Po [8] ETHIOPIC WORDSPACE..ETHIOPIC PARAGRAPH SEPARATOR
unicode_terminal_punctuation(0x166D, 0x166E). % Terminal_Punctuation # Po [2] CANADIAN SYLLABICS CHI SIGN..CANADIAN SYLLABICS FULL STOP
unicode_terminal_punctuation(0x16EB, 0x16ED). % Terminal_Punctuation # Po [3] RUNIC SINGLE PUNCTUATION..RUNIC CROSS PUNCTUATION
unicode_terminal_punctuation(0x17D4, 0x17D6). % Terminal_Punctuation # Po [3] KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH
unicode_terminal_punctuation(0x17DA, 0x17DA). % Terminal_Punctuation # Po KHMER SIGN KOOMUUT
unicode_terminal_punctuation(0x1802, 0x1805). % Terminal_Punctuation # Po [4] MONGOLIAN COMMA..MONGOLIAN FOUR DOTS
unicode_terminal_punctuation(0x1808, 0x1809). % Terminal_Punctuation # Po [2] MONGOLIAN MANCHU COMMA..MONGOLIAN MANCHU FULL STOP
unicode_terminal_punctuation(0x1944, 0x1945). % Terminal_Punctuation # Po [2] LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK
unicode_terminal_punctuation(0x1AA8, 0x1AAB). % Terminal_Punctuation # Po [4] TAI THAM SIGN KAAN..TAI THAM SIGN SATKAANKUU
unicode_terminal_punctuation(0x1B5A, 0x1B5B). % Terminal_Punctuation # Po [2] BALINESE PANTI..BALINESE PAMADA
unicode_terminal_punctuation(0x1B5D, 0x1B5F). % Terminal_Punctuation # Po [3] BALINESE CARIK PAMUNGKAH..BALINESE CARIK PAREREN
unicode_terminal_punctuation(0x1C3B, 0x1C3F). % Terminal_Punctuation # Po [5] LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATION TSHOOK
unicode_terminal_punctuation(0x1C7E, 0x1C7F). % Terminal_Punctuation # Po [2] OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTUATION DOUBLE MUCAAD
unicode_terminal_punctuation(0x203C, 0x203D). % Terminal_Punctuation # Po [2] DOUBLE EXCLAMATION MARK..INTERROBANG
unicode_terminal_punctuation(0x2047, 0x2049). % Terminal_Punctuation # Po [3] DOUBLE QUESTION MARK..EXCLAMATION QUESTION MARK
unicode_terminal_punctuation(0x2E2E, 0x2E2E). % Terminal_Punctuation # Po REVERSED QUESTION MARK
unicode_terminal_punctuation(0x3001, 0x3002). % Terminal_Punctuation # Po [2] IDEOGRAPHIC COMMA..IDEOGRAPHIC FULL STOP
unicode_terminal_punctuation(0xA4FE, 0xA4FF). % Terminal_Punctuation # Po [2] LISU PUNCTUATION COMMA..LISU PUNCTUATION FULL STOP
unicode_terminal_punctuation(0xA60D, 0xA60F). % Terminal_Punctuation # Po [3] VAI COMMA..VAI QUESTION MARK
unicode_terminal_punctuation(0xA6F3, 0xA6F7). % Terminal_Punctuation # Po [5] BAMUM FULL STOP..BAMUM QUESTION MARK
unicode_terminal_punctuation(0xA876, 0xA877). % Terminal_Punctuation # Po [2] PHAGS-PA MARK SHAD..PHAGS-PA MARK DOUBLE SHAD
unicode_terminal_punctuation(0xA8CE, 0xA8CF). % Terminal_Punctuation # Po [2] SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA
unicode_terminal_punctuation(0xA92F, 0xA92F). % Terminal_Punctuation # Po KAYAH LI SIGN SHYA
unicode_terminal_punctuation(0xA9C7, 0xA9C9). % Terminal_Punctuation # Po [3] JAVANESE PADA PANGKAT..JAVANESE PADA LUNGSI
unicode_terminal_punctuation(0xAA5D, 0xAA5F). % Terminal_Punctuation # Po [3] CHAM PUNCTUATION DANDA..CHAM PUNCTUATION TRIPLE DANDA
unicode_terminal_punctuation(0xAADF, 0xAADF). % Terminal_Punctuation # Po TAI VIET SYMBOL KOI KOI
unicode_terminal_punctuation(0xAAF0, 0xAAF1). % Terminal_Punctuation # Po [2] MEETEI MAYEK CHEIKHAN..MEETEI MAYEK AHANG KHUDAM
unicode_terminal_punctuation(0xABEB, 0xABEB). % Terminal_Punctuation # Po MEETEI MAYEK CHEIKHEI
unicode_terminal_punctuation(0xFE50, 0xFE52). % Terminal_Punctuation # Po [3] SMALL COMMA..SMALL FULL STOP
unicode_terminal_punctuation(0xFE54, 0xFE57). % Terminal_Punctuation # Po [4] SMALL SEMICOLON..SMALL EXCLAMATION MARK
unicode_terminal_punctuation(0xFF01, 0xFF01). % Terminal_Punctuation # Po FULLWIDTH EXCLAMATION MARK
unicode_terminal_punctuation(0xFF0C, 0xFF0C). % Terminal_Punctuation # Po FULLWIDTH COMMA
unicode_terminal_punctuation(0xFF0E, 0xFF0E). % Terminal_Punctuation # Po FULLWIDTH FULL STOP
unicode_terminal_punctuation(0xFF1A, 0xFF1B). % Terminal_Punctuation # Po [2] FULLWIDTH COLON..FULLWIDTH SEMICOLON
unicode_terminal_punctuation(0xFF1F, 0xFF1F). % Terminal_Punctuation # Po FULLWIDTH QUESTION MARK
unicode_terminal_punctuation(0xFF61, 0xFF61). % Terminal_Punctuation # Po HALFWIDTH IDEOGRAPHIC FULL STOP
unicode_terminal_punctuation(0xFF64, 0xFF64). % Terminal_Punctuation # Po HALFWIDTH IDEOGRAPHIC COMMA
unicode_terminal_punctuation(0x1039F, 0x1039F). % Terminal_Punctuation # Po UGARITIC WORD DIVIDER
unicode_terminal_punctuation(0x103D0, 0x103D0). % Terminal_Punctuation # Po OLD PERSIAN WORD DIVIDER
unicode_terminal_punctuation(0x10857, 0x10857). % Terminal_Punctuation # Po IMPERIAL ARAMAIC SECTION SIGN
unicode_terminal_punctuation(0x1091F, 0x1091F). % Terminal_Punctuation # Po PHOENICIAN WORD SEPARATOR
unicode_terminal_punctuation(0x10B3A, 0x10B3F). % Terminal_Punctuation # Po [6] TINY TWO DOTS OVER ONE DOT PUNCTUATION..LARGE ONE RING OVER TWO RINGS PUNCTUATION
unicode_terminal_punctuation(0x11047, 0x1104D). % Terminal_Punctuation # Po [7] BRAHMI DANDA..BRAHMI PUNCTUATION LOTUS
unicode_terminal_punctuation(0x110BE, 0x110C1). % Terminal_Punctuation # Po [4] KAITHI SECTION MARK..KAITHI DOUBLE DANDA
unicode_terminal_punctuation(0x11141, 0x11143). % Terminal_Punctuation # Po [3] CHAKMA DANDA..CHAKMA QUESTION MARK
unicode_terminal_punctuation(0x111C5, 0x111C6). % Terminal_Punctuation # Po [2] SHARADA DANDA..SHARADA DOUBLE DANDA
unicode_terminal_punctuation(0x12470, 0x12473). % Terminal_Punctuation # Po [4] CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD DIVIDER..CUNEIFORM PUNCTUATION SIGN DIAGONAL TRICOLON
% Total code points: 176
| LogtalkDotOrg/logtalk3 | library/unicode_data/unicode_prop_list/unicode_terminal_punctuation.pl | Perl | apache-2.0 | 9,424 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::devices::abb::cms700::snmp::mode::listsensors;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor. Registers the mode's command-line options.
#
# Fix: manage_selection() reads $self->{option_results}->{filter_name}, but
# the original passed an empty arguments hash to add_options(), so the
# --filter-name option could never be supplied. Register it here using the
# standard centreon-plugins option idiom.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'filter-name:s' => { name => 'filter_name' }
    });

    return $self;
}
# Validate parsed options; delegates entirely to the base mode class.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);
}
# SNMP OIDs under the ABB CMS-700 enterprise tree (.1.3.6.1.4.1.51055).
my $oid_GroupName = '.1.3.6.1.4.1.51055.1.20';      # table of group labels
my $oid_BranchNamesens = '.1.3.6.1.4.1.51055.1.19'; # table of sensor (branch) names
my $mapping = {
    Phasesens => { oid => '.1.3.6.1.4.1.51055.1.21' }, # per-sensor phase value (0 = unset)
    Groupsens => { oid => '.1.3.6.1.4.1.51055.1.22' }, # index into the group-name table
};
# Collect all sensors over SNMP into $self->{sensors}, keyed by instance,
# each entry holding {name, group, phase}. Three passes: group labels,
# sensor names (with optional name filtering), then phase/group values.
sub manage_selection {
    my ($self, %options) = @_;

    # Pass 1: remember every non-empty group label, keyed by its SNMP
    # instance (used below to resolve Groupsens indexes into labels).
    my %groups;
    my $snmp_result = $options{snmp}->get_table(oid => $oid_GroupName);
    foreach my $oid (keys %$snmp_result) {
        next if ($oid !~ /^$oid_GroupName\.(.*)/);
        next if ($snmp_result->{$oid} eq '');
        $groups{$1} = $snmp_result->{$oid};
    }
    # Pass 2: collect sensor (branch) names, skipping empty entries and
    # names rejected by the --filter-name regexp.
    my %sensors;
    $snmp_result = $options{snmp}->get_table(oid => $oid_BranchNamesens);
    foreach my $oid (keys %$snmp_result) {
        next if ($oid !~ /^$oid_BranchNamesens\.(.*)/);
        next if ($snmp_result->{$oid} eq '');
        my $instance = $1;
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $snmp_result->{$oid} !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping sensor '" . $snmp_result->{$oid} . "'.", debug => 1);
            next;
        }
        $sensors{$instance} = $snmp_result->{$oid};
    }
    # Pass 3: fetch phase and group index for every retained instance in a
    # single SNMP request.
    $options{snmp}->load(
        oids => [
            $mapping->{Phasesens}->{oid},
            $mapping->{Groupsens}->{oid},
        ],
        instances => [ keys %sensors ],
        instance_regexp => '^(.*)$'
    );
    my $snmp_result_data = $options{snmp}->get_leef(nothing_quit => 1);
    foreach my $oid (keys %$snmp_result_data) {
        # Anchor on the Phasesens column only, so each sensor is visited once.
        next if ($oid !~ /^$mapping->{Phasesens}->{oid}\.(.*)/);
        my $instance = $1;
        my $result = $options{snmp}->map_instance(
            mapping => $mapping,
            results => $snmp_result_data,
            instance => $instance
        );
        # '-' stands in for an unset phase (0) or an unresolvable group index.
        $self->{sensors}->{$instance}->{name} = $sensors{$instance};
        $self->{sensors}->{$instance}->{phase} = ($result->{Phasesens} != 0) ? $result->{Phasesens} : '-';
        $self->{sensors}->{$instance}->{group} =
            (defined($groups{$result->{Groupsens}})) ? $groups{$result->{Groupsens}} : '-';
    }
}
# Mode entry point: list every discovered sensor as one long-output line.
sub run {
    my ($self, %options) = @_;

    $self->manage_selection(%options);

    # One detail line per sensor, ordered by SNMP instance.
    for my $id (sort keys %{$self->{sensors}}) {
        my $entry = $self->{sensors}->{$id};
        $self->{output}->output_add(
            long_msg => sprintf(
                "[name = %s] [group = %s] [phase = %s]",
                $entry->{name}, $entry->{group}, $entry->{phase}
            )
        );
    }

    $self->{output}->output_add(severity => 'OK', short_msg => 'List sensors:');
    $self->{output}->display(nolabel => 1, force_ignore_perfdata => 1, force_long_output => 1);
    $self->{output}->exit();
}
# Declare the fields emitted for each service-discovery entry.
sub disco_format {
    my ($self, %options) = @_;
    $self->{output}->add_disco_format(elements => ['name', 'group', 'phase']);
}
# Emit one service-discovery entry per collected sensor.
sub disco_show {
    my ($self, %options) = @_;

    $self->manage_selection(%options);
    for my $id (sort keys %{$self->{sensors}}) {
        my $sensor = $self->{sensors}->{$id};
        $self->{output}->add_disco_entry(
            name  => $sensor->{name},
            group => $sensor->{group},
            phase => $sensor->{phase},
        );
    }
}
1;
__END__
=head1 MODE
List sensors.
=over 8
=back
=cut
| centreon/centreon-plugins | hardware/devices/abb/cms700/snmp/mode/listsensors.pm | Perl | apache-2.0 | 4,610 |
package VMOMI::HostTargetTransport;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Class metadata consumed by the VMOMI (de)serialization machinery.
# This type adds no members of its own beyond DynamicData.
our @class_ancestors = ('DynamicData');
our @class_members   = ();

# Return this type's ancestor chain.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return inherited member definitions followed by this class's own (none).
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/HostTargetTransport.pm | Perl | apache-2.0 | 389 |
#!/usr/bin/env perl
use strict;
use warnings;
use utf8;

# Render a two-line status string for dzen2:
#   line 1: "artist--title[ state ]<progress bar>"
#   line 2: " [desktop] {focused window} [HH:MM] Vol: <volume>"
# External tools required: mpc, dzen2-dbar, xdotool, wmctrl.
# Fixes vs. original: guards against missing mpc output (no more undef
# warnings when mpd is down) and zero-pads minutes via sprintf.

my @mpc    = split /\n/, `mpc -f '%artist%--%title%'`;
my @status = split / /, ( $mpc[1] // '' );

# Map the mpc play state to a compact glyph.
my $play = $status[0] // '';
if ( $play =~ /playing/ ) {
    $play = '>';
}
elsif ( $play =~ /paused/ ) {
    $play = '||';
}
else {
    $play = '[]';
}

# Field 5 holds the elapsed percentage as "(NN%)"; strip the parentheses
# and feed it through dzen2-dbar to obtain a textual progress bar.
my $percent = '';
if ( defined $status[5] ) {
    $status[5] =~ s/\((.+)\)/$1/g;
    $percent = `echo $status[5] | dzen2-dbar`;
    chomp($percent);
}

my @vol    = split / /, ( $mpc[2] // '' );
my $volume = $vol[1] // '';

# Only minutes and hours are needed from localtime (returns sec,min,hour,...).
my ( undef, $min, $hour ) = localtime(time);
$min = sprintf '%02d', $min;    # zero-pad, e.g. "07"

my $active = `xdotool getwindowfocus getwindowname`;
chomp($active);

# wmctrl marks the current desktop with '*'; its first column is the index.
my @cur  = split / /, `wmctrl -d | grep \\\*`;
my $page = ( $cur[0] // 0 ) + 1;

my $music_string = ( $mpc[0] // '' ) . "[ $play ]" . $percent . "\n";
$music_string .= " [" . $page . "] ";
$music_string .= "{$active}";
$music_string .= " [$hour:$min]";
$music_string .= " Vol: $volume";

print " " . $music_string;
| Difrex/scripts | dzen2/panel.pl | Perl | bsd-3-clause | 949 |
#!/usr/local/bin/perl -w
=head1 NAME
get_ensembl_data.pl -- this program provides methods to extract data from
an ensembl database for the maize ftp site.
=head1 SYNOPSIS
perl get_ensembl_data.pl [options]
Options:
-h --help
-m --man
-r --registry_file
-s --species
-o --output
=head1 OPTIONS
gets data from an ensembl database as specified
B<-h --help>
Print a brief help message and exits.
B<-m --man>
Print man page and exit
B<-r --registry_file>
Use this Ensembl registry file for database connection info.
B<-s --species>
Use this species entry from the registry file [REQUIRED].
B<-s --output>
a location to place output for the switches specified.
=head1 DESCRIPTION
B<This program>
Extracts information from an ensembl database as specified by the user
=cut
use strict;
use warnings;
use Getopt::Long;
use Pod::Usage;
use Data::Dumper qw(Dumper); # For debug
use DBI;
use FindBin qw($Bin) ;
use File::Basename qw(dirname);
use vars qw($BASEDIR);
BEGIN{
# Set the perl libraries
$BASEDIR = dirname($Bin);
unshift @INC, $BASEDIR.'/ensembl-live/ensembl/modules';
unshift @INC, $BASEDIR.'/bioperl-live';
}
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::SimpleFeature;
use Bio::Seq;
use Bio::SeqIO;
use vars qw($I $ENS_DBA);
my $help=0;
my $man=0;
my($species, $file, $outdir);
GetOptions
(
"help|?" => \$help,
"man" => \$man,
"species=s" => \$species,
"registry_file=s" => \$file,
"output=s" => \$outdir,
) or pod2usage(2);
pod2usage(-verbose => 2) if $man;
pod2usage(1) if $help;
# Validate file paths: default to the repo-relative registry file, then
# check it exists, is readable, is a plain file and is non-empty.
# (Original used map{} in void context purely for side effects; a for
# loop states the intent.)
$file ||= $BASEDIR.'/conf/ensembl.registry';
for my $path ($file) {
    -e $path || ( warn( "File $path does not exist\n" ) && pod2usage(1) );
    -r $path || ( warn( "Cannot read $path\n" ) && pod2usage(1) );
    -f $path || ( warn( "File $path is not plain-text\n" ) && pod2usage(1) );
    -s $path || ( warn( "File $path is empty\n" ) && pod2usage(1) );
}
####MAIN####
# Load the ensembl registry file
$species || ( warn( "Need a --species\n" ) && pod2usage(1) );
Bio::EnsEMBL::Registry->load_all( $file );
# Fetch the core database adaptor for the requested species; bail out with
# usage text if the registry has no core DB entry for it.
$ENS_DBA = Bio::EnsEMBL::Registry->get_DBAdaptor( $species, 'core' );
$ENS_DBA || ( warn( "No core DB for $species set in $file\n" ) &&
pod2usage(1) );
# Determine the current clone set once, then dump BACs and contigs
# (the contigs pass also writes the gene and translation FASTA files).
my $slice_adaptor = $ENS_DBA->get_adaptor('Slice');
my $clones = latest_clones ($slice_adaptor);
bacs ($slice_adaptor, $clones);
contigs ($slice_adaptor, $clones); # contains the genes and proteins methods
####SUBS####
# Return a hashref whose keys are the seq-region names of every clone slice
# carrying a 'current-version' attribute (i.e. the latest clone versions).
sub latest_clones {
    my ($adaptor) = @_;

    my %current;
    for my $slice ( @{ $adaptor->fetch_all('clone') } ) {
        # Only clones flagged as the current version are of interest.
        next unless @{ $slice->get_all_Attributes('current-version') };
        $current{ $slice->seq_region_name() } = 1;
    }
    return \%current;
}
# Dump the full sequence of every current clone (BAC) to $outdir/BACS.fasta,
# forcing each slice onto the forward strand first.
# Args: Slice adaptor, hashref of current clone names (from latest_clones).
sub bacs {
    my $slice_adaptor = shift;
    my $clones        = shift;

    # get all the clones
    my @slices = @{ $slice_adaptor->fetch_all('clone') };

    # Lexical handle, three-arg open and error check (the original used an
    # unchecked two-arg open on a bareword handle).
    open my $bacs_fh, '>', "$outdir/BACS.fasta"
        or die "Cannot write $outdir/BACS.fasta: $!";

    foreach my $slice (@slices) {
        my $seq_region = $slice->seq_region_name();
        next unless exists $clones->{$seq_region};

        warn "Working on $seq_region\n";

        # force the forward direction
        $slice = $slice->invert if $slice->strand() < 1;

        my $sequence = $slice->seq();
        print {$bacs_fh} ">$seq_region\n$sequence\n";
    }
    # Check close so buffered write errors are not silently dropped.
    close $bacs_fh or die "Cannot close $outdir/BACS.fasta: $!";
}
# Dump contig sequences plus gene and translation FASTA files for every
# current clone. Output files (all in $outdir):
#   BAC_contigs.fasta                                      (truncated anew)
#   ALL_GENES / TE-LIKE_GENES / NON-TE-LIKE_GENES .fasta   (append mode)
#   ALL_TRANSLATIONS / TE-LIKE_TRANSLATIONS /
#   NON-TE-LIKE_TRANSLATIONS .fasta                        (append mode)
# Genes are split on biotype eq 'transposon_pseudogene'. Translations
# containing stop codons are reported to STDERR and skipped.
# Fixes vs. original: lexical handles, three-arg open, error-checked
# open/close (the original used unchecked two-arg opens on bareword handles).
sub contigs {
    my $slice_adaptor = shift;
    my $clones        = shift;

    # get all contig slices
    my @slices = @{ $slice_adaptor->fetch_all('contig') };

    open my $contigs_fh, '>', "$outdir/BAC_contigs.fasta"
        or die "Cannot write $outdir/BAC_contigs.fasta: $!";

    # cycle through the slices and dump the files of interest
    foreach my $slice (@slices) {
        my $seq_region = $slice->seq_region_name();
        # Contig names look like "<accession>-<contig>"; keep only contigs
        # belonging to a current clone.
        my ($accession, $contig) = split(/\-/, $seq_region);
        next unless exists $clones->{$accession};

        warn "Working on $seq_region\n";

        # force the forward direction
        $slice = $slice->invert if $slice->strand() < 1;

        my $sequence = $slice->seq();
        print {$contigs_fh} ">$seq_region\n$sequence\n";

        # Re-open the aggregate files in append mode once per slice, as the
        # original code did, so repeated runs accumulate into the same files.
        my %out;
        for my $base (qw(ALL_GENES ALL_TRANSLATIONS TE-LIKE_GENES
                         NON-TE-LIKE_GENES TE-LIKE_TRANSLATIONS
                         NON-TE-LIKE_TRANSLATIONS)) {
            open $out{$base}, '>>', "$outdir/$base.fasta"
                or die "Cannot append $outdir/$base.fasta: $!";
        }

        foreach my $gene (@{ $slice->get_all_Genes }) {
            my $stable_id   = $gene->stable_id();
            my $gene_region = $gene->slice->seq_region_name();
            my $start       = $gene->start();
            my $end         = $gene->end();
            my $strand      = $gene->strand();
            my $biotype     = $gene->biotype();
            my $gene_seq    = $gene->seq();
            my $is_te       = ($biotype eq "transposon_pseudogene");
            warn "$stable_id\n";

            my $record = ">$stable_id:$gene_region:$start-$end:$strand:$biotype\n$gene_seq\n";
            print { $out{'ALL_GENES'} } $record;
            print { $out{ $is_te ? 'TE-LIKE_GENES' : 'NON-TE-LIKE_GENES' } } $record;

            # get the translations
            foreach my $trans (@{ $gene->get_all_Transcripts }) {
                next if (!$trans->translation);
                my $identifier = $trans->stable_id;
                # Skip translations that still contain stop codons.
                my $aa = $trans->translate()->seq;
                if ($aa =~ /\*/) {
                    print STDERR "Translation of $identifier has stop codons ",
                        "- Skipping! (in ", $trans->slice->name(), ")\n";
                    next;
                }
                my $pep = ">$identifier\n$aa\n";
                print { $out{'ALL_TRANSLATIONS'} } $pep;
                print { $out{ $is_te ? 'TE-LIKE_TRANSLATIONS' : 'NON-TE-LIKE_TRANSLATIONS' } } $pep;
            }
        }

        # Close per-slice append handles; surfaces buffered write errors early.
        for my $base (keys %out) {
            close $out{$base} or die "Cannot close $outdir/$base.fasta: $!";
        }
    }
    close $contigs_fh or die "Cannot close $outdir/BAC_contigs.fasta: $!";
}
| warelab/gramene-ensembl | maize/load-scripts/get_maize_ftp_data.pl | Perl | mit | 6,764 |
'Should work #1':
{|xml||
<att num="123" str="randString" id="5" />
|}.
'Should work #2':
{|xml||
<att str="randString" num="123" id="5" />
|}.
'Should work #3':
{|xml||
<att str="randString" id="5" />
|}.
'Should work #4':
{|xml||
<att id="5" />
|}.
'Invalid Type'(fail):
{|xml||
<att num="asd" id="5" />
|}.
'Required missing'(fail):
{|xml||
<att num="asd" />
|}.
'Unspecified attribute'(fail):
{|xml||
<att xx="true" id="5" />
|}.
'Prohibited attribute'(fail):
{|xml||
<att id="5" never="aloha" />
|}. | jonakalkus/xml-validate | test/validation/attribute_2.pl | Perl | mit | 519 |
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::sensors::serverscheck::sensorgateway::snmp::mode::components::sensors;
use strict;
use warnings;
# Root OID of the ServersCheck control subtree; fetched as one table.
my $oid_control = '.1.3.6.1.4.1.17095.3';
# Sensor slot -> offset of its name OID under $oid_control; the matching
# reading lives at offset + 1 (see check() in this file).
my $list_oids = {
    1 => 1,
    2 => 5,
    3 => 9,
    4 => 13,
    5 => 17,
};
# Queue the whole control subtree so the caller fetches it in one request.
sub load {
    my (%options) = @_;
    my $requests = $options{request};
    push @$requests, { oid => $oid_control };
}
# Check the five fixed sensor slots: read each slot's name and numeric value
# from the pre-fetched SNMP results, apply per-instance numeric thresholds
# and emit output/perfdata. Honours section/instance exclusions.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking sensors");
    $self->{components}->{sensors} = {name => 'sensors', total => 0, skip => 0};
    return if ($self->check_exclude(section => 'sensors'));

    # $list_oids maps slot -> offset of the name OID; the reading is at
    # offset + 1 (both suffixed with '.0' as scalar instances).
    foreach my $i (sort keys %{$list_oids}) {
        # Skip slots whose reading is absent or not numeric.
        if (!defined($self->{results}->{$oid_control}->{'.1.3.6.1.4.1.17095.3.' . ($list_oids->{$i} + 1) . '.0'}) ||
            $self->{results}->{$oid_control}->{'.1.3.6.1.4.1.17095.3.' . ($list_oids->{$i} + 1) . '.0'} !~ /([0-9\.]+)/) {
            $self->{output}->output_add(long_msg => sprintf("skip sensor '%s': no values",
                                                             $i));
            next;
        }

        my $name = $self->{results}->{$oid_control}->{'.1.3.6.1.4.1.17095.3.' . ($list_oids->{$i}) . '.0'};
        my $value = $self->{results}->{$oid_control}->{'.1.3.6.1.4.1.17095.3.' . ($list_oids->{$i} + 1) . '.0'};
        next if ($self->check_exclude(section => 'sensors', instance => $name));

        $self->{components}->{sensors}->{total}++;
        $self->{output}->output_add(long_msg => sprintf("sensor '%s' value is %s.",
                                    $name, $value));
        # Compare the reading against warning/critical numeric thresholds
        # configured for this instance.
        my ($exit, $warn, $crit, $checked) = $self->get_severity_numeric(section => 'sensors', instance => $name, value => $value);
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("sensor '%s' value is %s",
                                                             $name, $value));
        }
        $self->{output}->perfdata_add(label => $name,
                                      value => $value,
                                      warning => $warn,
                                      critical => $crit);
    }
}
1; | bcournaud/centreon-plugins | hardware/sensors/serverscheck/sensorgateway/snmp/mode/components/sensors.pm | Perl | apache-2.0 | 3,052 |
#!/usr/bin/perl -w
use strict;

## Converts a FASTQ file to FASTA: keeps each record's header (with the
## leading '@' replaced by '>') and sequence line, discarding the quality
## header and quality lines.
## Usage: relocaTE_fq2fa.pl <in.fq|in.fastq> <out.fa>  (called by relocaTE.pl)

# Fix: 'defined(@array)' is deprecated and a fatal error on modern Perls;
# test the array for emptiness instead.
if ( !@ARGV ) {
  die "Do not run directly, to be called by relocaTE.pl\n";
}
my ( $infq, $outfa ) = ( $ARGV[0], $ARGV[1] );
if ( $infq !~ /\.(fq|fastq)$/ ) {
  die
"Was expecting a fq file to convert to a fa file, but I have this instead $infq\n";
}
# Lexical handles, three-arg open (original used unchecked bareword handles
# and a two-arg open for the output file).
open my $in_fh, '<', $infq or die "Can't open $infq $!";
open my $out_fh, '>', $outfa or die $!;
while ( my $header = <$in_fh> ) {
  my $seq         = <$in_fh>;
  my $qual_header = <$in_fh>;
  my $qual        = <$in_fh>;
  # A truncated final record would otherwise print undef warnings.
  defined $qual or die "ERROR: truncated FASTQ record at $header";
  if ( substr( $header, 0, 1 ) ne '@' ) {
    die "ERROR: expected \'\@\' but saw $header";
  }
  print {$out_fh} ">", substr( $header, 1 );
  print {$out_fh} $seq;
}
close $in_fh;
# Check the write handle so buffered write errors are reported.
close $out_fh or die "Can't close $outfa $!";
| stajichlab/RelocaTE | scripts/relocaTE_fq2fa.pl | Perl | bsd-3-clause | 697 |
=pod
=head1 NAME
EVP_CIPHER_meth_new, EVP_CIPHER_meth_dup, EVP_CIPHER_meth_free,
EVP_CIPHER_meth_set_iv_length, EVP_CIPHER_meth_set_flags,
EVP_CIPHER_meth_set_impl_ctx_size, EVP_CIPHER_meth_set_init,
EVP_CIPHER_meth_set_do_cipher, EVP_CIPHER_meth_set_cleanup,
EVP_CIPHER_meth_set_set_asn1_params, EVP_CIPHER_meth_set_get_asn1_params,
EVP_CIPHER_meth_set_ctrl, EVP_CIPHER_meth_get_init,
EVP_CIPHER_meth_get_do_cipher, EVP_CIPHER_meth_get_cleanup,
EVP_CIPHER_meth_get_set_asn1_params, EVP_CIPHER_meth_get_get_asn1_params,
EVP_CIPHER_meth_get_ctrl
- Routines to build up EVP_CIPHER methods
=head1 SYNOPSIS
#include <openssl/evp.h>
Deprecated since OpenSSL 3.0, can be hidden entirely by defining
B<OPENSSL_API_COMPAT> with a suitable version value, see
L<openssl_user_macros(7)>:
EVP_CIPHER *EVP_CIPHER_meth_new(int cipher_type, int block_size, int key_len);
EVP_CIPHER *EVP_CIPHER_meth_dup(const EVP_CIPHER *cipher);
void EVP_CIPHER_meth_free(EVP_CIPHER *cipher);
int EVP_CIPHER_meth_set_iv_length(EVP_CIPHER *cipher, int iv_len);
int EVP_CIPHER_meth_set_flags(EVP_CIPHER *cipher, unsigned long flags);
int EVP_CIPHER_meth_set_impl_ctx_size(EVP_CIPHER *cipher, int ctx_size);
int EVP_CIPHER_meth_set_init(EVP_CIPHER *cipher,
int (*init)(EVP_CIPHER_CTX *ctx,
const unsigned char *key,
const unsigned char *iv,
int enc));
int EVP_CIPHER_meth_set_do_cipher(EVP_CIPHER *cipher,
int (*do_cipher)(EVP_CIPHER_CTX *ctx,
unsigned char *out,
const unsigned char *in,
size_t inl));
int EVP_CIPHER_meth_set_cleanup(EVP_CIPHER *cipher,
int (*cleanup)(EVP_CIPHER_CTX *));
int EVP_CIPHER_meth_set_set_asn1_params(EVP_CIPHER *cipher,
int (*set_asn1_parameters)(EVP_CIPHER_CTX *,
ASN1_TYPE *));
int EVP_CIPHER_meth_set_get_asn1_params(EVP_CIPHER *cipher,
int (*get_asn1_parameters)(EVP_CIPHER_CTX *,
ASN1_TYPE *));
int EVP_CIPHER_meth_set_ctrl(EVP_CIPHER *cipher,
int (*ctrl)(EVP_CIPHER_CTX *, int type,
int arg, void *ptr));
int (*EVP_CIPHER_meth_get_init(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *ctx,
const unsigned char *key,
const unsigned char *iv,
int enc);
int (*EVP_CIPHER_meth_get_do_cipher(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *ctx,
unsigned char *out,
const unsigned char *in,
size_t inl);
int (*EVP_CIPHER_meth_get_cleanup(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *);
int (*EVP_CIPHER_meth_get_set_asn1_params(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *,
ASN1_TYPE *);
int (*EVP_CIPHER_meth_get_get_asn1_params(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *,
ASN1_TYPE *);
int (*EVP_CIPHER_meth_get_ctrl(const EVP_CIPHER *cipher))(EVP_CIPHER_CTX *,
int type, int arg,
void *ptr);
=head1 DESCRIPTION
All of the functions described on this page are deprecated.
Applications should instead use the OSSL_PROVIDER APIs.
The B<EVP_CIPHER> type is a structure for symmetric cipher method
implementation.
EVP_CIPHER_meth_new() creates a new B<EVP_CIPHER> structure.
EVP_CIPHER_meth_dup() creates a copy of B<cipher>.
EVP_CIPHER_meth_free() destroys a B<EVP_CIPHER> structure.
EVP_CIPHER_meth_set_iv_length() sets the length of the IV.
This is only needed when the implemented cipher mode requires it.
EVP_CIPHER_meth_set_flags() sets the flags to describe optional
behaviours in the particular B<cipher>.
With the exception of cipher modes, of which only one may be present,
several flags can be or'd together.
The available flags are:
=over 4
=item EVP_CIPH_STREAM_CIPHER, EVP_CIPH_ECB_MODE, EVP_CIPH_CBC_MODE,
EVP_CIPH_CFB_MODE, EVP_CIPH_OFB_MODE, EVP_CIPH_CTR_MODE, EVP_CIPH_GCM_MODE,
EVP_CIPH_CCM_MODE, EVP_CIPH_XTS_MODE, EVP_CIPH_WRAP_MODE,
EVP_CIPH_OCB_MODE, EVP_CIPH_SIV_MODE
The cipher mode.
=item EVP_CIPH_VARIABLE_LENGTH
This cipher is of variable length.
=item EVP_CIPH_CUSTOM_IV
Storing and initialising the IV is left entirely to the
implementation.
=item EVP_CIPH_ALWAYS_CALL_INIT
Set this if the implementation's init() function should be called even
if B<key> is B<NULL>.
=item EVP_CIPH_CTRL_INIT
Set this to have the implementation's ctrl() function called with
command code B<EVP_CTRL_INIT> early in its setup.
=item EVP_CIPH_CUSTOM_KEY_LENGTH
Checking and setting the key length after creating the B<EVP_CIPHER>
is left to the implementation.
Whenever someone uses EVP_CIPHER_CTX_set_key_length() on a
B<EVP_CIPHER> with this flag set, the implementation's ctrl() function
will be called with the control code B<EVP_CTRL_SET_KEY_LENGTH> and
the key length in B<arg>.
=item EVP_CIPH_NO_PADDING
Don't use standard block padding.
=item EVP_CIPH_RAND_KEY
Making a key with random content is left to the implementation.
This is done by calling the implementation's ctrl() function with the
control code B<EVP_CTRL_RAND_KEY> and the pointer to the key memory
storage in B<ptr>.
=item EVP_CIPH_CUSTOM_COPY
Set this to have the implementation's ctrl() function called with
command code B<EVP_CTRL_COPY> at the end of EVP_CIPHER_CTX_copy().
The intended use is for further things to deal with after the
implementation specific data block has been copied.
The destination B<EVP_CIPHER_CTX> is passed to the control with the
B<ptr> parameter.
The implementation specific data block is reached with
EVP_CIPHER_CTX_get_cipher_data().
=item EVP_CIPH_FLAG_DEFAULT_ASN1
Use the default EVP routines to pass IV to and from ASN.1.
=item EVP_CIPH_FLAG_LENGTH_BITS
Signals that the length of the input buffer for encryption /
decryption is to be understood as the number of bits instead of
bytes for this implementation.
This is only useful for CFB1 ciphers.
=item EVP_CIPH_FLAG_CTS
Indicates that the cipher uses ciphertext stealing. This is currently
used to indicate that the cipher is a one shot that only allows a single call to
EVP_CipherUpdate().
=item EVP_CIPH_FLAG_CUSTOM_CIPHER
This indicates that the implementation takes care of everything,
including padding, buffering and finalization.
The EVP routines will simply give them control and do nothing more.
=item EVP_CIPH_FLAG_AEAD_CIPHER
This indicates that this is an AEAD cipher implementation.
=item EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
Allow interleaving of crypto blocks, a particular optimization only applicable
to certain TLS ciphers.
=back
EVP_CIPHER_meth_set_impl_ctx_size() sets the size of the EVP_CIPHER's
implementation context so that it can be automatically allocated.
EVP_CIPHER_meth_set_init() sets the cipher init function for
B<cipher>.
The cipher init function is called by EVP_CipherInit(),
EVP_CipherInit_ex(), EVP_EncryptInit(), EVP_EncryptInit_ex(),
EVP_DecryptInit(), EVP_DecryptInit_ex().
EVP_CIPHER_meth_set_do_cipher() sets the cipher function for
B<cipher>.
The cipher function is called by EVP_CipherUpdate(),
EVP_EncryptUpdate(), EVP_DecryptUpdate(), EVP_CipherFinal(),
EVP_EncryptFinal(), EVP_EncryptFinal_ex(), EVP_DecryptFinal() and
EVP_DecryptFinal_ex().
EVP_CIPHER_meth_set_cleanup() sets the function for B<cipher> to do
extra cleanup before the method's private data structure is cleaned
out and freed.
Note that the cleanup function is passed a B<EVP_CIPHER_CTX *>, the
private data structure is then available with
EVP_CIPHER_CTX_get_cipher_data().
This cleanup function is called by EVP_CIPHER_CTX_reset() and
EVP_CIPHER_CTX_free().
EVP_CIPHER_meth_set_set_asn1_params() sets the function for B<cipher>
to set the AlgorithmIdentifier "parameter" based on the passed cipher.
This function is called by EVP_CIPHER_param_to_asn1().
EVP_CIPHER_meth_set_get_asn1_params() sets the function for B<cipher>
that sets the cipher parameters based on an ASN.1 AlgorithmIdentifier
"parameter".
Both these functions are needed when there is a need for custom data
(more or other than the cipher IV).
They are called by EVP_CIPHER_param_to_asn1() and
EVP_CIPHER_asn1_to_param() respectively if defined.
EVP_CIPHER_meth_set_ctrl() sets the control function for B<cipher>.
EVP_CIPHER_meth_get_init(), EVP_CIPHER_meth_get_do_cipher(),
EVP_CIPHER_meth_get_cleanup(), EVP_CIPHER_meth_get_set_asn1_params(),
EVP_CIPHER_meth_get_get_asn1_params() and EVP_CIPHER_meth_get_ctrl()
are all used to retrieve the method data given with the
EVP_CIPHER_meth_set_*() functions above.
=head1 RETURN VALUES
EVP_CIPHER_meth_new() and EVP_CIPHER_meth_dup() return a pointer to a
newly created B<EVP_CIPHER>, or NULL on failure.
All EVP_CIPHER_meth_set_*() functions return 1.
All EVP_CIPHER_meth_get_*() functions return pointers to their
respective B<cipher> function.
=head1 SEE ALSO
L<EVP_EncryptInit(3)>
=head1 HISTORY
All of these functions were deprecated in OpenSSL 3.0.
The functions described here were added in OpenSSL 1.1.0.
The B<EVP_CIPHER> structure created with these functions became reference
counted in OpenSSL 3.0.
=head1 COPYRIGHT
Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/EVP_CIPHER_meth_new.pod | Perl | bsd-3-clause | 10,306 |
=pod
=head1 NAME
DH_meth_new, DH_meth_free, DH_meth_dup, DH_meth_get0_name, DH_meth_set1_name,
DH_meth_get_flags, DH_meth_set_flags, DH_meth_get0_app_data,
DH_meth_set0_app_data, DH_meth_get_generate_key, DH_meth_set_generate_key,
DH_meth_get_compute_key, DH_meth_set_compute_key, DH_meth_get_bn_mod_exp,
DH_meth_set_bn_mod_exp, DH_meth_get_init, DH_meth_set_init, DH_meth_get_finish,
DH_meth_set_finish, DH_meth_get_generate_params,
DH_meth_set_generate_params - Routines to build up DH methods
=head1 SYNOPSIS
#include <openssl/dh.h>
Deprecated since OpenSSL 3.0, can be hidden entirely by defining
B<OPENSSL_API_COMPAT> with a suitable version value, see
L<openssl_user_macros(7)>:
DH_METHOD *DH_meth_new(const char *name, int flags);
void DH_meth_free(DH_METHOD *dhm);
DH_METHOD *DH_meth_dup(const DH_METHOD *dhm);
const char *DH_meth_get0_name(const DH_METHOD *dhm);
int DH_meth_set1_name(DH_METHOD *dhm, const char *name);
int DH_meth_get_flags(const DH_METHOD *dhm);
int DH_meth_set_flags(DH_METHOD *dhm, int flags);
void *DH_meth_get0_app_data(const DH_METHOD *dhm);
int DH_meth_set0_app_data(DH_METHOD *dhm, void *app_data);
int (*DH_meth_get_generate_key(const DH_METHOD *dhm))(DH *);
int DH_meth_set_generate_key(DH_METHOD *dhm, int (*generate_key)(DH *));
int (*DH_meth_get_compute_key(const DH_METHOD *dhm))
(unsigned char *key, const BIGNUM *pub_key, DH *dh);
int DH_meth_set_compute_key(DH_METHOD *dhm,
int (*compute_key)(unsigned char *key, const BIGNUM *pub_key, DH *dh));
int (*DH_meth_get_bn_mod_exp(const DH_METHOD *dhm))
(const DH *dh, BIGNUM *r, const BIGNUM *a, const BIGNUM *p,
const BIGNUM *m, BN_CTX *ctx, BN_MONT_CTX *m_ctx);
int DH_meth_set_bn_mod_exp(DH_METHOD *dhm,
int (*bn_mod_exp)(const DH *dh, BIGNUM *r, const BIGNUM *a,
const BIGNUM *p, const BIGNUM *m, BN_CTX *ctx,
BN_MONT_CTX *m_ctx));
int (*DH_meth_get_init(const DH_METHOD *dhm))(DH *);
int DH_meth_set_init(DH_METHOD *dhm, int (*init)(DH *));
int (*DH_meth_get_finish(const DH_METHOD *dhm))(DH *);
int DH_meth_set_finish(DH_METHOD *dhm, int (*finish)(DH *));
int (*DH_meth_get_generate_params(const DH_METHOD *dhm))
(DH *, int, int, BN_GENCB *);
int DH_meth_set_generate_params(DH_METHOD *dhm,
int (*generate_params)(DH *, int, int, BN_GENCB *));
=head1 DESCRIPTION
All of the functions described on this page are deprecated.
Applications should instead use the provider APIs.
The B<DH_METHOD> type is a structure used for the provision of custom DH
implementations. It provides a set of functions used by OpenSSL for the
implementation of the various DH capabilities.
DH_meth_new() creates a new B<DH_METHOD> structure. It should be given a
unique B<name> and a set of B<flags>. The B<name> should be a NULL terminated
string, which will be duplicated and stored in the B<DH_METHOD> object. It is
the caller's responsibility to free the original string. The flags will be used
during the construction of a new B<DH> object based on this B<DH_METHOD>. Any
new B<DH> object will have those flags set by default.
DH_meth_dup() creates a duplicate copy of the B<DH_METHOD> object passed as a
parameter. This might be useful for creating a new B<DH_METHOD> based on an
existing one, but with some differences.
DH_meth_free() destroys a B<DH_METHOD> structure and frees up any memory
associated with it.
DH_meth_get0_name() will return a pointer to the name of this DH_METHOD. This
is a pointer to the internal name string and so should not be freed by the
caller. DH_meth_set1_name() sets the name of the DH_METHOD to B<name>. The
string is duplicated and the copy is stored in the DH_METHOD structure, so the
caller remains responsible for freeing the memory associated with the name.
DH_meth_get_flags() returns the current value of the flags associated with this
DH_METHOD. DH_meth_set_flags() provides the ability to set these flags.
The functions DH_meth_get0_app_data() and DH_meth_set0_app_data() provide the
ability to associate implementation specific data with the DH_METHOD. It is
the application's responsibility to free this data before the DH_METHOD is
freed via a call to DH_meth_free().
DH_meth_get_generate_key() and DH_meth_set_generate_key() get and set the
function used for generating a new DH key pair respectively. This function will
be called in response to the application calling DH_generate_key(). The
parameter for the function has the same meaning as for DH_generate_key().
DH_meth_get_compute_key() and DH_meth_set_compute_key() get and set the
function used for computing a new DH shared secret respectively. This function
will be called in response to the application calling DH_compute_key(). The
parameters for the function have the same meaning as for DH_compute_key().
DH_meth_get_bn_mod_exp() and DH_meth_set_bn_mod_exp() get and set the function
used for computing the following value:
r = a ^ p mod m
This function will be called by the default OpenSSL function for
DH_generate_key(). The result is stored in the B<r> parameter. This function
may be NULL unless using the default generate key function, in which case it
must be present.
DH_meth_get_init() and DH_meth_set_init() get and set the function used
for creating a new DH instance respectively. This function will be
called in response to the application calling DH_new() (if the current default
DH_METHOD is this one) or DH_new_method(). The DH_new() and DH_new_method()
functions will allocate the memory for the new DH object, and a pointer to this
newly allocated structure will be passed as a parameter to the function. This
function may be NULL.
DH_meth_get_finish() and DH_meth_set_finish() get and set the function used
for destroying an instance of a DH object respectively. This function will be
called in response to the application calling DH_free(). A pointer to the DH
to be destroyed is passed as a parameter. The destroy function should be used
for DH implementation specific clean up. The memory for the DH itself should
not be freed by this function. This function may be NULL.
DH_meth_get_generate_params() and DH_meth_set_generate_params() get and set the
function used for generating DH parameters respectively. This function will be
called in response to the application calling DH_generate_parameters_ex() (or
DH_generate_parameters()). The parameters for the function have the same
meaning as for DH_generate_parameters_ex(). This function may be NULL.
=head1 RETURN VALUES
DH_meth_new() and DH_meth_dup() return the newly allocated DH_METHOD object
or NULL on failure.
DH_meth_get0_name() and DH_meth_get_flags() return the name and flags
associated with the DH_METHOD respectively.
All other DH_meth_get_*() functions return the appropriate function pointer
that has been set in the DH_METHOD, or NULL if no such pointer has yet been
set.
DH_meth_set1_name() and all DH_meth_set_*() functions return 1 on success or
0 on failure.
=head1 SEE ALSO
L<DH_new(3)>, L<DH_generate_parameters(3)>, L<DH_generate_key(3)>,
L<DH_set_method(3)>, L<DH_size(3)>, L<DH_get0_pqg(3)>
=head1 HISTORY
All of these functions were deprecated in OpenSSL 3.0.
The functions described here were added in OpenSSL 1.1.0.
=head1 COPYRIGHT
Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/DH_meth_new.pod | Perl | bsd-3-clause | 7,610 |
package fastaOO;
# Random-access reader for multi-sequence FASTA files.  Approximate byte
# offsets of chromosome headers are cached in @chromPos as they are
# discovered, so later lookups can seek() near the right record instead
# of rescanning the whole file.
#
# NOTE(review): the file handle, file name and offset cache below are
# file-scoped lexicals, not per-object attributes -- every instance of
# this class shares them, so only one FASTA file can be usefully open at
# a time.  Confirm this is intentional before creating multiple instances.
use strict;
use warnings;
# NOTE(review): @EXPORT is declared but the module neither inherits from
# Exporter nor defines an import(), so nothing is actually exported; the
# module is only usable through its OO interface (fastaOO->new(...)).
our @EXPORT = qw ( destroy getFH getChrom getPos );
# When set, getPos() shifts its coordinate window one position left
# (see $diff there).  Always 0 here -- presumably a switch for 1-based
# DNA coordinates; confirm before enabling.
my $dna = 0;
my $fh;        # handle of the currently open FASTA file (shared state)
my $fileName;  # path of the currently open FASTA file (shared state)
my @chromPos;  # cached seek offsets just before each chromosome header
# Self-test hook: "perl fastaOO.pm test".
# NOTE(review): selftest() below is entirely commented out, so taking
# this branch would die with "Undefined subroutine &main::selftest".
if (($ARGV[0]) && ( $ARGV[0] eq "test" ))
{
    &selftest();
}
#sub selftest
#{
#    print "SELF TESTING\n";
#    my $inFile = '/var/rolf/input/Debaryomyces_hansenii_CBS767_CHROMOSSOMES.fasta';
#    my @pairs;
#    $pairs[0] = [0, 1, 10];
#    $pairs[1] = [1, 1, 11];
#    $pairs[2] = [2, 50, 30];
#    $pairs[3] = [0, 170, 20];
#
#    use lib './';
#    use fastaOO;
#    my $fasta = fastaOO->new($inFile);
#
#    foreach my $pair (@pairs)
#    {
#        my $chrom = $pair->[0];
#        my $pos = $pair->[1];
#        my $length = $pair->[2];
#        print "GETTING CHROM $chrom POS $pos LENGTH $length\n";
#        my $seq = $fasta->getPos($chrom, $pos, $length);
#        print $seq, "\n";
#    }
#
#
#}
# Constructor: record the FASTA path in the shared module state, verify
# the file exists, open it for reading and return a blessed (empty)
# hash reference.  Dies when the file is missing or cannot be opened.
sub new {
    my ($class, $path) = @_;

    $fileName = $path;
    unless ( -f $fileName ) {
        die "FILE $fileName DOESNT EXISTS. PLEASE CHECK.";
    }
    #print "OPENNING $fileName\n";
    open ($fh, "<" , "$fileName") or die "COULD NOT OPEN $fileName: $!\n";

    my $self = bless {}, $class;
    return $self;
}
# Destructor: close the shared file handle.
# NOTE(review): because $fh is module-level state shared by all
# instances, destroying any one instance closes the handle out from
# under every other live instance.
sub DESTROY
{
    my $self = shift;
    close $fh;
}

# Accessor: return the raw (shared) FASTA file handle.
sub getFH
{
    my $self = shift;
    return $fh;
}
# Return the full sequence of chromosome $chromNum (0-based, in the order
# '>' headers appear in the file) as one concatenated string with
# newlines removed.  The header line itself is not included.
# Side effect: caches approximate header offsets in @chromPos.
# Returns '' when the chromosome index is past the last header.
sub getChrom
{
    my $self = shift;
    my $chromNum = $_[0];
    #print "GETTING CHROM $chromNum\n";
    my $chromCount = -1;   # index of the most recent header seen (-1 = none yet)
    my $on = 0;            # true while reading lines of the wanted chromosome
    my $seq = '';
    my $start = 0;
    # If this chromosome's header offset is already cached, seek close to
    # it instead of scanning from the top of the file.
    if (defined $chromPos[$chromNum])
    {
        $start = $chromPos[$chromNum];
        $chromCount = $chromNum - 1;
    }
    seek($fh, $start, 0);
    while (my $line = <$fh>)
    {
        chomp $line;
        if (substr($line, 0, 1) eq ">")
        {
            $chromCount++;
            # Cache a position 200 bytes before the end of the header line
            # so a later seek() lands in front of the '>' itself
            # (assumes header lines are shorter than 200 bytes -- TODO confirm).
            my $tell = (tell($fh) - 200);
            $tell = 0 if ($tell < 0);
            $chromPos[$chromCount] = $tell;
            if ($on)
            {
                # Hit the next header: the wanted chromosome is complete.
                $on = 0;
                last;
            }
        }
        if ($on)
        {
            $seq .= $line;
        }
        # $on is switched on only AFTER the append check above, so the
        # header line of the wanted chromosome is never appended.
        if ($chromCount == $chromNum)
        {
            $on = 1;
        }
    }
    return $seq;
}
# Return $length bases of chromosome $chromNum starting at offset $begin
# (offsets appear to be 0-based when $dna is false -- TODO confirm with
# callers).  Dies with "CHROMOSSOME NOT FOUND" when the chromosome index
# does not exist, and with "SEQUENCE NOT FOUND" when the window falls
# entirely outside the chromosome.
# Side effect: caches approximate header offsets in @chromPos.
sub getPos
{
    my $self = shift;
    my $chromNum = $_[0];
    my $begin = $_[1];
    my $length = $_[2];
    my $end = $begin + $length;   # exclusive right edge of the window
    my $diff = 0;
    # DNA mode: shift the window one position left (1-based coordinates,
    # presumably -- $dna is always 0 in this file).
    if ($dna)
    {
        $end -= 1;
        $diff = 1;
    }
    #print "GETTING CHROM $chromNum BEGIN $begin LENGTH $length END $end\n";
    #print "KNOWN CHROM POS: ",join(",", @{$chromPos}),"\n";
    my $chromCount = -1;   # index of the most recent header seen
    my $on = 0;            # true while inside the wanted chromosome
    my $seq = undef;       # stays undef if no base falls inside the window
    my $start = 0;
    my $pos = 0;           # running base offset within the chromosome
    my $foundChrom = 0;
    my $foundPos = 0;      # NOTE(review): assigned nowhere else -- unused
    my @chroms;            # headers seen, for the error diagnostic below
    # Seek near the cached header position when available.
    if (defined $chromPos[$chromNum])
    {
        $start = $chromPos[$chromNum];
        $chromCount = $chromNum - 1;
    }
    seek($fh, $start, 0);
    while (my $line = <$fh>)
    {
        chomp $line;
        if (substr($line, 0, 1) eq ">")
        {
            $chromCount++;
            # Cache a point 200 bytes back so later seeks land before the
            # '>' (assumes headers shorter than 200 bytes -- TODO confirm).
            my $tell = (tell($fh) - 200);
            $tell = 0 if ($tell < 0);
            $chromPos[$chromCount] = $tell;
            push(@chroms, $line);
            if ($on)
            {
                # Next header reached: wanted chromosome fully scanned.
                $on = 0;
                last;
            }
        }
        if ($on)
        {
            my $lengthLine = length($line);
            my $terminal = $pos + $lengthLine;   # offset just past this line
            if ($terminal <= ($begin - $diff))
            {
                # Whole line is before the window: skip it cheaply.
                #print "IF1: BEGIN $begin END $end POS $pos LENGTHLINE $lengthLine TERMINAL $terminal\n";
                $pos = $terminal;
                next;
            }
            elsif ($pos > $end)
            {
                # Whole line is past the window: done.
                #print "IF2: BEGIN $begin END $end POS $pos LENGTHLINE $lengthLine TERMINAL $terminal\n";
                last;
            }
            else
            {
                # Line overlaps the window: copy the in-window characters.
                #print "IF3: BEGIN $begin END $end POS $pos LENGTHLINE $lengthLine TERMINAL $terminal\n";
                for (my $p = 0; $p < $lengthLine; $p++)
                {
                    my $posP = $pos + $p;
                    #print "\tPOS $pos P $p POSP $posP";
                    if (($posP >= ($begin - $diff)) && ($posP < $end))
                    {
                        #print " JOIN";
                        $seq .= substr($line, $p, 1);
                    }
                    #print "\n";
                }
                $pos = $terminal;
            }
        }
        # Switched on only AFTER the extraction block, so the header line
        # itself never contributes to the sequence.
        if ($chromCount == $chromNum)
        {
            $foundChrom = 1;
            $on = 1;
        }
    }
    die "CHROMOSSOME NOT FOUND" if ( ! $foundChrom );
    if ( ! defined $seq )
    {
        # Diagnostic dump before dying: requested window and headers seen.
        print "GETTING CHROM $chromNum BEGIN $begin LENGTH $length END $end POS $pos\n";
        print join("\t\n", @chroms), "\n";
        die "SEQUENCE NOT FOUND" ;
    }
    return $seq;
}
1;
| sauloal/projects | probes/hadoop2/filters/fastaOO.pm | Perl | mit | 5,184 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::mysql::mode::uptime;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use POSIX;
# Constructor: build this mode object on top of the framework base class
# and declare the command-line options the mode understands
# (--warning, --critical, --seconds).
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    my %arguments = (
        "warning:s"  => { name => 'warning', },
        "critical:s" => { name => 'critical', },
        "seconds"    => { name => 'seconds', },
    );
    $options{options}->add_options(arguments => \%arguments);

    return $self;
}
# Validate the --warning/--critical thresholds after base-class option
# parsing; abort with an explanatory message when either threshold
# cannot be parsed by the framework.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    foreach my $label ('warning', 'critical') {
        my $value = $self->{option_results}->{$label};
        next if (($self->{perfdata}->threshold_validate(label => $label, value => $value)) != 0);
        $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $value . "'.");
        $self->{output}->option_exit();
    }
}
# Execute the check: connect, verify the server is MySQL 5.x or later,
# read the 'Uptime' status counter (seconds), compare it against the
# configured thresholds and emit the status line plus an 'uptime'
# perfdata metric.  Exits through the framework's output object.
sub run {
    my ($self, %options) = @_;
    # $options{sql} = sqlmode object
    $self->{sql} = $options{sql};

    $self->{sql}->connect();

    # SHOW GLOBAL STATUS syntax used below requires MySQL >= 5.
    if (!($self->{sql}->is_version_minimum(version => '5'))) {
        $self->{output}->add_option_msg(short_msg => "MySQL version '" . $self->{sql}->{version} . "' is not supported (need version >= '5.x').");
        $self->{output}->option_exit();
    }

    # /*!50000 global */ makes 'global' apply only on servers >= 5.0.
    $self->{sql}->query(query => q{SHOW /*!50000 global */ STATUS LIKE 'Uptime'});
    # Row is (Variable_name, Value); only the value is of interest.
    my ($dummy, $result) = $self->{sql}->fetchrow_array();

    if (!defined($result)) {
        $self->{output}->add_option_msg(short_msg => "Cannot get uptime.");
        $self->{output}->option_exit();
    }

    my $value = $result;
    my $exit_code = $self->{perfdata}->threshold_check(value => $value, threshold => [ { label => 'critical', exit_litteral => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
    # Human-readable message: days by default, raw seconds with --seconds.
    my $msg = sprintf("database is up since %d days", floor($value / 86400));
    if (defined($self->{option_results}->{seconds})) {
        $msg = sprintf("database is up since %d seconds", $value);
    }

    $self->{output}->output_add(severity => $exit_code,
                                short_msg => $msg);
    # Perfdata is always reported in seconds regardless of display mode.
    $self->{output}->perfdata_add(label => 'uptime', unit => 's',
                                  value => $value,
                                  warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
                                  critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
                                  min => 0);

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check MySQL uptime.
=over 8
=item B<--warning>
Threshold warning.
=item B<--critical>
Threshold critical.
=item B<--seconds>
Display uptime in seconds.
=back
=cut
| maksimatveev/centreon-plugins | database/mysql/mode/uptime.pm | Perl | apache-2.0 | 4,024 |
#!/usr/bin/perl
## --------------------------------------------------------------------------
##
## Copyright 1996-2020 The NASM Authors - All Rights Reserved
## See the file AUTHORS included with the NASM distribution for
## the specific copyright holders.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following
## conditions are met:
##
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following
## disclaimer in the documentation and/or other materials provided
## with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
## CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
## INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
## NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
## EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## --------------------------------------------------------------------------
#
# insns.pl
#
# Parse insns.dat and produce generated source code files
# Shared instruction-flag helpers (also used by the generated code).
require 'x86/insns-iflags.ph';

# Opcode prefixes which need their own opcode tables
# LONGER PREFIXES FIRST!
@disasm_prefixes = qw(0F24 0F25 0F38 0F3A 0F7A 0FA6 0FA7 0F);

# This should match MAX_OPERANDS from nasm.h
$MAX_OPERANDS = 5;

# Add VEX/XOP prefixes
# Build every "<class><mm><p>" pseudo-prefix name: 3 encoding classes x
# 32 map selectors x 4 SIMD prefix values.
@vex_class = ( 'vex', 'xop', 'evex' );
$vex_classes = scalar(@vex_class);
@vexlist = ();
%vexmap = ();
for ($c = 0; $c < $vex_classes; $c++) {
    $vexmap{$vex_class[$c]} = $c;
    for ($m = 0; $m < 32; $m++) {
        for ($p = 0; $p < 4; $p++) {
            push(@vexlist, sprintf("%s%02X%01X", $vex_class[$c], $m, $p));
        }
    }
}
@disasm_prefixes = (@vexlist, @disasm_prefixes);

# Per-bytecode usage statistics, printed into the -b output as a comment.
@bytecode_count = (0) x 256;

print STDERR "Reading insns.dat...\n";

# Command line: exactly one option selecting the output kind
# (-a -b -d -i -n -fh -fc) plus input and output file names.
@args = ();
undef $output;
foreach $arg ( @ARGV ) {
    if ( $arg =~ /^\-/ ) {
        if ( $arg =~ /^\-([abdin]|f[hc])$/ ) {
            $output = $1;
        } else {
            die "$0: Unknown option: ${arg}\n";
        }
    } else {
        push (@args, $arg);
    }
}

die if (scalar(@args) != 2); # input output
($fname, $oname) = @args;

open(F, '<', $fname) || die "unable to open $fname";
# Main parse loop: read insns.dat line by line, expand "relaxed" operand
# forms (operands marked with '*' may be omitted), format each resulting
# template, and index templates by opcode name and by disassembly start
# sequence.
%dinstables = ();
@bytecode_list = ();

$line = 0;
$insns = 0;
$n_opcodes = $n_opcodes_cc = 0;
while (<F>) {
    $line++;
    chomp;
    next if ( /^\s*(\;.*|)$/ ); # comments or blank lines

    # Each data line has exactly four fields:
    # name, operands, code string (or [bracketed] form), flags.
    unless (/^\s*(\S+)\s+(\S+)\s+(\S+|\[.*\])\s+(\S+)\s*$/) {
        warn "line $line does not contain four fields\n";
        next;
    }
    @fields = ($1, $2, $3, $4);
    # Fifth element is the relaxed-operand bitmask (0 = full form).
    @field_list = ([@fields, 0]);

    if ($fields[1] =~ /\*/) {
        # This instruction has relaxed form(s)
        if ($fields[2] !~ /^\[/) {
            warn "line $line has an * operand but uses raw bytecodes\n";
            next;
        }

        # Collect a bitmask of which operands are optional.
        $opmask = 0;
        @ops = split(/,/, $fields[1]);
        for ($oi = 0; $oi < scalar @ops; $oi++) {
            if ($ops[$oi] =~ /\*$/) {
                if ($oi == 0) {
                    warn "line $line has a first operand with a *\n";
                    # NOTE(review): this 'next' advances the inner for
                    # loop only -- unlike the sibling warn branches above
                    # it does NOT skip the input line.  Confirm whether a
                    # labeled 'next LINE' was intended.
                    next;
                }
                $opmask |= 1 << $oi;
            }
        }

        # Enumerate every non-empty subset of the optional operands and
        # emit one reduced form per subset.
        for ($oi = 1; $oi < (1 << scalar @ops); $oi++) {
            if (($oi & ~$opmask) == 0) {
                my @xops = ();
                my $omask = ~$oi;
                for ($oj = 0; $oj < scalar(@ops); $oj++) {
                    if ($omask & 1) {
                        push(@xops, $ops[$oj]);
                    }
                    $omask >>= 1;
                }
                push(@field_list, [$fields[0], join(',', @xops),
                                   $fields[2], $fields[3], $oi]);
            }
        }
    }

    foreach $fptr (@field_list) {
        @fields = @$fptr;
        ($formatted, $nd) = format_insn(@fields);
        if ($formatted) {
            $insns++;
            # Symbolic-reference trick: templates for opcode FOO are
            # accumulated in the package array @aa_FOO.
            $aname = "aa_$fields[0]";
            push @$aname, $formatted;
        }
        if ( $fields[0] =~ /cc$/ ) {
            # Conditional instruction
            if (!defined($k_opcodes_cc{$fields[0]})) {
                $k_opcodes_cc{$fields[0]} = $n_opcodes_cc++;
            }
        } else {
            # Unconditional instruction
            if (!defined($k_opcodes{$fields[0]})) {
                $k_opcodes{$fields[0]} = $n_opcodes++;
            }
        }
        # Templates not marked ND (no-disassemble) also go into the big
        # disassembler table, indexed by every possible start byte sequence.
        if ($formatted && !$nd) {
            push @big, $formatted;
            my @sseq = startseq($fields[2], $fields[4]);
            foreach $i (@sseq) {
                if (!defined($dinstables{$i})) {
                    $dinstables{$i} = [];
                }
                push(@{$dinstables{$i}}, $#big);
            }
        }
    }
}

close F;
#
# Generate the bytecode array. At this point, @bytecode_list contains
# the full set of bytecodes.
#

# Sort by descending length
@bytecode_list = sort { scalar(@$b) <=> scalar(@$a) } @bytecode_list;
@bytecode_array = ();
%bytecode_pos = ();
$bytecode_next = 0;
foreach $bl (@bytecode_list) {
    my $h = hexstr(@$bl);
    # Suffix sharing: every suffix of an already-stored sequence was
    # recorded below, so a sequence whose hex string is known needs no
    # new storage.  Longest-first sorting maximizes such reuse.
    next if (defined($bytecode_pos{$h}));

    push(@bytecode_array, $bl);
    # Record the offset of this sequence AND of every one of its
    # suffixes, so shorter sequences can point into the same bytes.
    while ($h ne '') {
        $bytecode_pos{$h} = $bytecode_next;
        $h = substr($h, 2);
        $bytecode_next++;
    }
}
undef @bytecode_list;

# Opcode name lists in first-seen (enum) order.
@opcodes = sort { $k_opcodes{$a} <=> $k_opcodes{$b} } keys(%k_opcodes);
@opcodes_cc = sort { $k_opcodes_cc{$a} <=> $k_opcodes_cc{$b} } keys(%k_opcodes_cc);
# -b output: the shared bytecode byte array (nasm_bytecodes) plus a
# frequency table comment built from @bytecode_count.
if ( $output eq 'b') {
    print STDERR "Writing $oname...\n";

    open(B, '>', $oname);

    print B "/* This file auto-generated from insns.dat by insns.pl" .
        " - don't edit it */\n\n";

    print B "#include \"nasm.h\"\n";
    print B "#include \"insns.h\"\n\n";

    print B "const uint8_t nasm_bytecodes[$bytecode_next] = {\n";

    $p = 0;
    foreach $bl (@bytecode_array) {
        # One comment per stored sequence giving its starting offset.
        printf B " /* %5d */ ", $p;
        foreach $d (@$bl) {
            printf B "%#o,", $d;   # bytecodes are conventionally octal
            $p++;
        }
        printf B "\n";
    }
    print B "};\n";

    print B "\n";
    print B "/*\n";
    print B " * Bytecode frequencies (including reuse):\n";
    print B " *\n";

    # 32 rows x 8 columns covering all 256 bytecode values.
    for ($i = 0; $i < 32; $i++) {
        print B " *";
        for ($j = 0; $j < 256; $j += 32) {
            print B " |" if ($j);
            printf B " %3o:%4d", $i+$j, $bytecode_count[$i+$j];
        }
        print B "\n";
    }
    print B " */\n";

    close B;
}
# -a output: assembler tables -- one static itemplate array per opcode
# (instrux_FOO) plus the master nasm_instructions[] index over them.
if ( $output eq 'a' ) {
    print STDERR "Writing $oname...\n";

    open(A, '>', $oname);

    print A "/* This file auto-generated from insns.dat by insns.pl" .
        " - don't edit it */\n\n";

    print A "#include \"nasm.h\"\n";
    print A "#include \"insns.h\"\n\n";

    foreach $i (@opcodes, @opcodes_cc) {
        print A "static const struct itemplate instrux_${i}[] = {\n";
        # @aa_FOO was filled via symbolic reference in the parse loop.
        $aname = "aa_$i";
        foreach $j (@$aname) {
            print A "    ", codesubst($j), "\n";
        }
        print A "    ITEMPLATE_END\n};\n\n";
    }
    print A "const struct itemplate * const nasm_instructions[] = {\n";
    foreach $i (@opcodes, @opcodes_cc) {
        print A "    instrux_${i},\n";
    }
    print A "};\n";

    close A;
}
# -d output: disassembler tables -- the flat instrux[] template array,
# one itable_<hexseq>[] per start byte sequence, 256-entry dispatch
# tables per prefix, and the three-dimensional VEX/XOP/EVEX index.
if ( $output eq 'd' ) {
    print STDERR "Writing $oname...\n";

    open(D, '>', $oname);

    print D "/* This file auto-generated from insns.dat by insns.pl" .
        " - don't edit it */\n\n";

    print D "#include \"nasm.h\"\n";
    print D "#include \"insns.h\"\n\n";

    print D "static const struct itemplate instrux[] = {\n";
    $n = 0;
    foreach $j (@big) {
        printf D " /* %4d */ %s\n", $n++, codesubst($j);
    }
    print D "};\n";

    # One pointer array per distinct start byte sequence.
    foreach $h (sort(keys(%dinstables))) {
        next if ($h eq ''); # Skip pseudo-instructions
        print D "\nstatic const struct itemplate * const itable_${h}[] = {\n";
        foreach $j (@{$dinstables{$h}}) {
            print D " instrux + $j,\n";
        }
        print D "};\n";
    }

    # A prefix needs a dispatch table if any of its 256 possible next
    # bytes leads to either a longer prefix or an instruction table.
    # '' is the root (no-prefix) table.
    @prefix_list = ();
    foreach $h (@disasm_prefixes, '') {
        for ($c = 0; $c < 256; $c++) {
            $nn = sprintf("%s%02X", $h, $c);
            if ($is_prefix{$nn} || defined($dinstables{$nn})) {
                # At least one entry in this prefix table
                push(@prefix_list, $h);
                $is_prefix{$h} = 1;
                last;
            }
        }
    }

    foreach $h (@prefix_list) {
        print D "\n";
        print D "static " unless ($h eq '');   # root table is public
        print D "const struct disasm_index ";
        print D ($h eq '') ? 'itable' : "itable_$h";
        print D "[256] = {\n";

        for ($c = 0; $c < 256; $c++) {
            $nn = sprintf("%s%02X", $h, $c);
            if ($is_prefix{$nn}) {
                # A byte cannot be both a further prefix and a complete
                # instruction start; -1 marks "descend into sub-table".
                die "$fname:$line: ambiguous decoding of $nn\n"
                    if (defined($dinstables{$nn}));
                printf D " /* 0x%02x */ { itable_%s, -1 },\n", $c, $nn;
            } elsif (defined($dinstables{$nn})) {
                printf D " /* 0x%02x */ { itable_%s, %u },\n", $c,
                    $nn, scalar(@{$dinstables{$nn}});
            } else {
                printf D " /* 0x%02x */ { NULL, 0 },\n", $c;
            }
        }
        print D "};\n";
    }

    # [class][map][pp] index into the per-VEX dispatch tables.
    printf D "\nconst struct disasm_index * const itable_vex[NASM_VEX_CLASSES][32][4] =\n";
    print D "{\n";

    for ($c = 0; $c < $vex_classes; $c++) {
        print D " {\n";
        for ($m = 0; $m < 32; $m++) {
            print D " { ";
            for ($p = 0; $p < 4; $p++) {
                $vp = sprintf("%s%02X%01X", $vex_class[$c], $m, $p);
                printf D "%-15s",
                    ($is_prefix{$vp} ? sprintf("itable_%s,", $vp) : 'NULL,');
            }
            print D "},\n";
        }
        print D " },\n";
    }
    print D "};\n";

    close D;
}
# -i output: the opcode enum header (insnsi.h) with MAX_INSLEN and
# related constants.
if ( $output eq 'i' ) {
    print STDERR "Writing $oname...\n";

    open(I, '>', $oname);

    print I "/* This file is auto-generated from insns.dat by insns.pl" .
        " - don't edit it */\n\n";
    # NOTE(review): the emitted text below says "in included" -- a typo
    # in the generated header comment (runtime string, left unchanged).
    print I "/* This file in included by nasm.h */\n\n";

    print I "/* Instruction names */\n\n";
    print I "#ifndef NASM_INSNSI_H\n";
    print I "#define NASM_INSNSI_H 1\n\n";
    print I "enum opcode {\n";

    # Track the longest instruction name while emitting the enum.
    $maxlen = 0;
    foreach $i (@opcodes, @opcodes_cc) {
        print I "\tI_${i},\n";
        $len = length($i);
        $len++ if ( $i =~ /cc$/ ); # Condition codes can be 3 characters long
        $maxlen = $len if ( $len > $maxlen );
    }
    print I "\tI_none = -1\n";
    print I "};\n\n";
    print I "#define MAX_INSLEN ", $maxlen, "\n";
    print I "#define NASM_VEX_CLASSES ", $vex_classes, "\n";
    print I "#define NO_DECORATOR\t{", join(',',(0) x $MAX_OPERANDS), "}\n";
    print I "#define FIRST_COND_OPCODE I_", $opcodes_cc[0], "\n\n";
    print I "#endif /* NASM_INSNSI_H */\n";

    close I;
}

# -n output: the lowercase instruction-name string table, in the same
# order as the opcode enum.
if ( $output eq 'n' ) {
    print STDERR "Writing $oname...\n";

    open(N, '>', $oname);

    print N "/* This file is auto-generated from insns.dat by insns.pl" .
        " - don't edit it */\n\n";

    print N "#include \"tables.h\"\n\n";

    print N "const char * const nasm_insn_names[] = {";
    $first = 1;
    foreach $i (@opcodes, @opcodes_cc) {
        print N "," if ( !$first );
        $first = 0;
        $ilower = $i;
        $ilower =~ s/cc$//;      # Remove conditional cc suffix
        $ilower =~ tr/A-Z/a-z/;  # Change to lower case (Perl 4 compatible)
        print N "\n\t\"${ilower}\"";
    }
    print N "\n};\n";
    close N;
}

# -fh / -fc outputs: instruction-flag header/source, delegated to
# helpers from x86/insns-iflags.ph.
if ( $output eq 'fh') {
    write_iflaggen_h();
}

if ( $output eq 'fc') {
    write_iflag_c();
}

printf STDERR "Done: %d instructions\n", $insns;
# Tally into @bytecode_count how often each primary bytecode occurs.
# Bytes that are operands of a preceding bytecode (literal byte runs,
# VEX/EVEX descriptors, etc.) are skipped rather than counted, so the
# statistics reflect opcodes only.
sub count_bytecodes(@) {
    my @pending = @_;
    while (@pending) {
        my $code = shift @pending;
        $bytecode_count[$code]++;

        # How many of the following bytes are operand data of $code
        # rather than bytecodes in their own right?
        my $operands = 0;
        if ($code >= 01 && $code <= 04) {
            # Literal byte sequence: the bytecode value is its length.
            $operands = $code;
        } elsif (($code & ~03) == 010 ||
                 ($code & ~013) == 0144 ||
                 $code == 0172 || $code == 0173 ||
                 $code == 0330) {
            $operands = 1;
        } elsif (($code & ~3) == 0260 || $code == 0270) { # VEX
            $operands = 2;
        } elsif (($code & ~3) == 0240 || $code == 0250) { # EVEX
            $operands = 3;
        }
        splice(@pending, 0, $operands);
    }
    return;
}
# Format one instruction template from insns.dat into the C initializer
# string used for the instruction table.
#
# Arguments: opcode name, operand string, code string, flag string, and
# the "relax" bitmask of optional operands.
# Returns a two-element list (C initializer string, ND flag), or
# (undef, undef) if the operand field is "ignore".
sub format_insn($$$$$) {
    my ($opcode, $operands, $codes, $flags, $relax) = @_;
    my $nd = 0;
    my ($num, $flagsindex);
    my @bytecode;
    my ($op, @ops, @opsize, $opp, @opx, @oppx, @decos, @opevex);

    return (undef, undef) if $operands eq "ignore";

    # format the operands
    $operands =~ s/\*//g;
    $operands =~ s/:/|colon,/g;
    @ops = ();
    @opsize = ();
    @decos = ();
    if ($operands ne 'void') {
        foreach $op (split(/,/, $operands)) {
            my $opsz = 0;
            @opx = ();
            @opevex = ();
            foreach $opp (split(/\|/, $op)) {
                @oppx = ();
                # EVEX decorators: broadcast, masking, zeroing, rounding, sae
                if ($opp =~ s/^(b(32|64)|mask|z|er|sae)$//) {
                    push(@opevex, $1);
                }
                # Trailing operand size in bits becomes a bitsNN qualifier
                if ($opp =~ s/(?<!\d)(8|16|32|64|80|128|256|512)$//) {
                    push(@oppx, "bits$1");
                    $opsz = $1 + 0;
                }
                # Canonicalize shorthand operand class names
                $opp =~ s/^mem$/memory/;
                $opp =~ s/^memory_offs$/mem_offs/;
                $opp =~ s/^imm$/immediate/;
                $opp =~ s/^([a-z]+)rm$/rm_$1/;
                $opp =~ s/^rm$/rm_gpr/;
                $opp =~ s/^reg$/reg_gpr/;
                # only for evex insns, high-16 regs are allowed
                if ($codes !~ /(^|\s)evex\./) {
                    $opp =~ s/^(rm_[xyz]mm)$/$1_l16/;
                    $opp =~ s/^([xyz]mm)reg$/$1_l16/;
                }
                push(@opx, $opp, @oppx) if $opp;
            }
            $op = join('|', @opx);
            push(@ops, $op);
            push(@opsize, $opsz);
            push(@decos, (@opevex ? join('|', @opevex) : '0'));
        }
    }

    $num = scalar(@ops);
    # Pad operand/size/decorator lists out to MAX_OPERANDS entries
    while (scalar(@ops) < $MAX_OPERANDS) {
        push(@ops, '0');
        push(@opsize, 0);
        push(@decos, '0');
    }
    $operands = join(',', @ops);
    $operands =~ tr/a-z/A-Z/;

    $decorators = "{" . join(',', @decos) . "}";
    # All-zero decorator lists collapse to the shared NO_DECORATOR macro
    if ($decorators =~ /^{(0,)+0}$/) {
        $decorators = "NO_DECORATOR";
    }
    $decorators =~ tr/a-z/A-Z/;

    # Remember if we have an ARx flag
    my $arx = undef;

    # expand and uniqify the flags
    my %flags;
    foreach my $flag (split(',', $flags)) {
        next if ($flag eq '');

        if ($flag eq 'ND') {
            # ND is returned separately, not stored in the flag set
            $nd = 1;
        } else {
            $flags{$flag}++;
        }

        if ($flag eq 'NEVER' || $flag eq 'NOP') {
            # These flags imply OBSOLETE
            $flags{'OBSOLETE'}++;
        }

        if ($flag =~ /^AR([0-9]+)$/) {
            $arx = $1+0;
        }
    }

    # Derive VEX/EVEX flags from the code string itself
    if ($codes =~ /evex\./) {
        $flags{'EVEX'}++;
    } elsif ($codes =~ /(vex|xop)\./) {
        $flags{'VEX'}++;
    }

    # Look for SM flags clearly inconsistent with operand bitsizes
    if ($flags{'SM'} || $flags{'SM2'}) {
        my $ssize = 0;
        my $e = $flags{'SM2'} ? 2 : $MAX_OPERANDS;
        for (my $i = 0; $i < $e; $i++) {
            next if (!$opsize[$i]);
            if (!$ssize) {
                $ssize = $opsize[$i];
            } elsif ($opsize[$i] != $ssize) {
                die "$fname:$line: inconsistent SM flag for argument $i\n";
            }
        }
    }

    # Look for Sx flags that can never match operand bitsizes. If the
    # intent is to never match (require explicit sizes), use the SX flag.
    # This doesn't apply to registers that pre-define specific sizes;
    # this should really be derived from include/opflags.h...
    my %sflags = ( 'SB' => 8, 'SW' => 16, 'SD' => 32, 'SQ' => 64,
                   'SO' => 128, 'SY' => 256, 'SZ' => 512 );
    my $s = defined($arx) ? $arx : 0;
    my $e = defined($arx) ? $arx : $MAX_OPERANDS - 1;
    foreach my $sf (keys(%sflags)) {
        next if (!$flags{$sf});
        for (my $i = $s; $i <= $e; $i++) {
            if ($opsize[$i] && $ops[$i] !~ /\breg_(gpr|[cdts]reg)\b/) {
                die "$fname:$line: inconsistent $sf flag for argument $i ($ops[$i])\n"
                    if ($opsize[$i] != $sflags{$sf});
            }
        }
    }

    $flagsindex = insns_flag_index(keys %flags);
    die "$fname:$line: error in flags $flags\n" unless (defined($flagsindex));

    # Compile the code string; the trailing 0 terminates the sequence
    @bytecode = (decodify($codes, $relax), 0);
    push(@bytecode_list, [@bytecode]);
    $codes = hexstr(@bytecode);
    count_bytecodes(@bytecode);

    # @@CODES-xxx@@ is patched to a real offset later by codesubst()
    ("{I_$opcode, $num, {$operands}, $decorators, \@\@CODES-$codes\@\@, $flagsindex},", $nd);
}
#
# Expand every @@CODES-xxx@@ placeholder in a string into the matching
# offset into the nasm_bytecodes array (as assigned in %bytecode_pos).
#
sub codesubst($) {
    my ($text) = @_;

    # Each hex key must already have been assigned a position; an
    # unassigned key is a fatal inconsistency in the generated tables.
    $text =~ s{\@\@CODES-([0-9A-F]+)\@\@}{
        defined($bytecode_pos{$1})
            ? 'nasm_bytecodes+' . $bytecode_pos{$1}
            : die "$fname:$line: no position assigned to byte code $1\n"
    }eg;

    return $text;
}
# Prepend a string prefix to each value in a list, formatting every
# value as a two-digit upper-case hex byte.
sub addprefix ($@) {
    my ($prefix, @values) = @_;
    return map { sprintf('%s%02X', $prefix, $_) } @values;
}
#
# Turn a code string into a sequence of bytes.
#
# By convention these C-syntax strings contain only octal escapes (for
# directives) and hexadecimal escapes (for verbatim bytes).  A string of
# the form "[...]" is handed to the bytecode compiler instead, and the
# literal string 'ignore' yields an empty sequence.
#
sub decodify($$) {
    my ($codestr, $relax) = @_;

    # Bracketed form: compile it with byte_code_compile()
    if ($codestr =~ /^\s*\[([^\]]*)\]\s*$/) {
        return byte_code_compile($1, $relax);
    }

    my @bytes = ();
    return @bytes if ($codestr eq 'ignore');

    my $rest = $codestr;
    while ($rest ne '') {
        if ($rest =~ /^\\x([0-9a-f]+)(.*)$/i) {
            # Hexadecimal escape: a verbatim byte
            push(@bytes, hex $1);
            $rest = $2;
        } elsif ($rest =~ /^\\([0-7]{1,3})(.*)$/) {
            # Octal escape: a directive bytecode
            push(@bytes, oct $1);
            $rest = $2;
        } else {
            die "$fname:$line: unknown code format in \"$codestr\"\n";
        }
    }
    return @bytes;
}
# Turn a numeric list into a string of two-digit upper-case hex bytes.
sub hexstr(@) {
    return join('', map { sprintf('%02X', $_) } @_);
}
# Here we determine the range of possible starting bytes for a given
# instruction. We need only consider the codes:
# \[1234]      mean literal bytes, of course
# \1[0123]     mean byte plus register value
# \330         means byte plus condition code
# \0 or \340   mean give up and return empty set
# \34[4567]    mean PUSH/POP of segment registers: special case
# \17[23]      skip is4 control byte
# \26x \270    skip VEX control bytes
# \24x \250    skip EVEX control bytes (including tuple type)
#
# Returns a list of possible starting byte strings (hex, with any
# recognized disassembly prefix prepended), or just the prefix when no
# starting byte can be determined.
sub startseq($$) {
    my ($codestr, $relax) = @_;
    my @codes = ();
    my ($c0, $c1);
    my $prefix = '';

    @codes = decodify($codestr, $relax);

    while (defined($c0 = shift(@codes))) {
        $c1 = $codes[0];
        if ($c0 >= 01 && $c0 <= 04) {
            # Fixed byte string: collect all consecutive literal bytes
            my $fbs = $prefix;
            while (defined($c0)) {
                if ($c0 >= 01 && $c0 <= 04) {
                    while ($c0--) {
                        $fbs .= sprintf("%02X", shift(@codes));
                    }
                } else {
                    last;
                }
                $c0 = shift(@codes);
            }

            # Strip a known disassembly prefix, if present, and remember it
            foreach my $pfx (@disasm_prefixes) {
                if (substr($fbs, 0, length($pfx)) eq $pfx) {
                    $prefix = $pfx;
                    $fbs = substr($fbs, length($pfx));
                    last;
                }
            }

            if ($fbs ne '') {
                # First non-prefix literal byte determines the start
                return ($prefix.substr($fbs,0,2));
            }

            # Re-examine the non-literal code that ended the run
            unshift(@codes, $c0);
        } elsif ($c0 >= 010 && $c0 <= 013) {
            # Byte plus register value: eight consecutive opcodes
            return addprefix($prefix, $c1..($c1+7));
        } elsif (($c0 & ~013) == 0144) {
            # PUSH/POP of segment registers
            return addprefix($prefix, $c1, $c1|2);
        } elsif ($c0 == 0330) {
            # Byte plus condition code: sixteen consecutive opcodes
            return addprefix($prefix, $c1..($c1+15));
        } elsif ($c0 == 0 || $c0 == 0340) {
            return $prefix;
        } elsif (($c0 & ~3) == 0260 || $c0 == 0270 ||
                 ($c0 & ~3) == 0240 || $c0 == 0250) {
            # VEX/EVEX prefix: encode class, map and wlp into the prefix
            my($c,$m,$wlp);
            $m = shift(@codes);
            $wlp = shift(@codes);
            $c = ($m >> 6);
            $m = $m & 31;
            $prefix .= sprintf('%s%02X%01X', $vex_class[$c], $m, $wlp & 3);
            if ($c0 < 0260) {
                # EVEX only: consume (and ignore) the tuple-type byte
                my $tuple = shift(@codes);
            }
        } elsif ($c0 >= 0172 && $c0 <= 0173) {
            # BUGFIX: was "$c0 <= 173" (decimal 173 = octal 0255), which
            # wrongly swallowed an argument byte for bytecodes 0174..0255.
            shift(@codes);      # Skip is4 control byte
        } else {
            # We really need to be able to distinguish "forbidden"
            # and "ignorable" codes here
        }
    }
    return $prefix;
}
# EVEX tuple types are encoded with an offset of 0300, so e.g. the
# full-vector (fv) tuple becomes 0301.  Dies on an unknown tuple name.
sub tupletype($) {
    my ($tuplestr) = @_;

    # Tuple-type mnemonic => code (the 0300 offset is added on return)
    my %code_of = (
        ''      => 000,
        'fv'    => 001,
        'hv'    => 002,
        'fvm'   => 003,
        't1s8'  => 004,
        't1s16' => 005,
        't1s'   => 006,
        't1f32' => 007,
        't1f64' => 010,
        't2'    => 011,
        't4'    => 012,
        't8'    => 013,
        'hvm'   => 014,
        'qvm'   => 015,
        'ovm'   => 016,
        'm128'  => 017,
        'dup'   => 020,
    );

    defined($code_of{$tuplestr})
        or die "$fname:$line: undefined tuple type : $tuplestr\n";
    return 0300 + $code_of{$tuplestr};
}
#
# This function takes a series of byte codes in a format which is more
# typical of the Intel documentation, and encode it.
#
# The format looks like:
#
# [operands: opcodes]
#
# The operands word lists the order of the operands:
#
# r = register field in the modr/m
# m = modr/m
# v = VEX "v" field
# i = immediate
# s = register field of is4/imz2 field
# - = implicit (unencoded) operand
# x = indeX register of mib. 014..017 bytecodes are used.
#
# For an operand that should be filled into more than one field,
# enter it as e.g. "r+v".
#
# Returns the compiled bytecode list; dies on any malformed input.
sub byte_code_compile($$) {
    my($str, $relax) = @_;
    my $opr;
    my $opc;
    my @codes = ();
    my $litix = undef;    # Index in @codes of the current literal run header
    my %oppos = ();       # Encoding position of each operand letter
    my $i;
    my ($op, $oq);
    my $opex;

    # Immediate/relative operand bytecodes, indexed by mnemonic
    my %imm_codes = (
        'ib' => 020,       # imm8
        'ib,u' => 024,     # Unsigned imm8
        'iw' => 030,       # imm16
        'ib,s' => 0274,    # imm8 sign-extended to opsize or bits
        'iwd' => 034,      # imm16 or imm32, depending on opsize
        'id' => 040,       # imm32
        'id,s' => 0254,    # imm32 sign-extended to 64 bits
        'iwdq' => 044,     # imm16/32/64, depending on addrsize
        'rel8' => 050,
        'iq' => 054,
        'rel16' => 060,
        'rel' => 064,      # 16 or 32 bit relative operand
        'rel32' => 070,
        'seg' => 074,
    );
    # Argument-less directive bytecodes, indexed by mnemonic
    my %plain_codes = (
        'o16' => 0320,     # 16-bit operand size
        'o32' => 0321,     # 32-bit operand size
        'odf' => 0322,     # Operand size is default
        'o64' => 0324,     # 64-bit operand size requiring REX.W
        'o64nw' => 0323,   # Implied 64-bit operand size (no REX.W)
        'a16' => 0310,
        'a32' => 0311,
        'adf' => 0312,     # Address size is default
        'a64' => 0313,
        '!osp' => 0364,
        '!asp' => 0365,
        'f2i' => 0332,     # F2 prefix, but 66 for operand size is OK
        'f3i' => 0333,     # F3 prefix, but 66 for operand size is OK
        'mustrep' => 0336,
        'mustrepne' => 0337,
        'rex.l' => 0334,
        'norexb' => 0314,
        'norexx' => 0315,
        'norexr' => 0316,
        'norexw' => 0317,
        'repe' => 0335,
        'nohi' => 0325,    # Use spl/bpl/sil/dil even without REX
        'nof3' => 0326,    # No REP 0xF3 prefix permitted
        'norep' => 0331,   # No REP prefix permitted
        'wait' => 0341,    # Needs a wait prefix
        'resb' => 0340,
        'np' => 0360,      # No prefix
        'jcc8' => 0370,    # Match only if Jcc possible with single byte
        'jmp8' => 0371,    # Match only if JMP possible with single byte
        'jlen' => 0373,    # Length of jump
        'hlexr' => 0271,
        'hlenl' => 0272,
        'hle' => 0273,

        # This instruction takes XMM VSIB
        'vsibx' => 0374,
        'vm32x' => 0374,
        'vm64x' => 0374,

        # This instruction takes YMM VSIB
        'vsiby' => 0375,
        'vm32y' => 0375,
        'vm64y' => 0375,

        # This instruction takes ZMM VSIB
        'vsibz' => 0376,
        'vm32z' => 0376,
        'vm64z' => 0376,
    );

    # Split "[operands:tuple: opcodes]" into its three parts
    unless ($str =~ /^(([^\s:]*)\:*([^\s:]*)\:|)\s*(.*\S)\s*$/) {
        die "$fname:$line: cannot parse: [$str]\n";
    }
    $opr = lc($2);
    $tuple = lc($3);    # Tuple type for AVX512
    $opc = lc($4);

    # Assign an encoding position to each operand letter.  A '+' makes
    # the following letter share the previous operand's position; a set
    # bit in $relax appears to make the corresponding operand optional
    # (it does not advance the position) -- confirm against insns.dat use.
    $op = 0;
    for ($i = 0; $i < length($opr); $i++) {
        my $c = substr($opr,$i,1);
        if ($c eq '+') {
            $op--;
        } else {
            if ($relax & 1) {
                $op--;
            }
            $relax >>= 1;
            $oppos{$c} = $op++;
        }
    }
    $tup = tupletype($tuple);

    my $last_imm = 'h';   # Incremented to 'i', 'j' for successive immediates
    my $prefix_ok = 1;    # 66/f2/f3 may act as opcode extensions only at the start
    foreach $op (split(/\s*(?:\s|(?=[\/\\]))/, $opc)) {
        my $pc = $plain_codes{$op};

        if (defined $pc) {
            # Plain code
            push(@codes, $pc);
        } elsif ($prefix_ok && $op =~ /^(66|f2|f3)$/) {
            # 66/F2/F3 prefix used as an opcode extension
            if ($op eq '66') {
                push(@codes, 0361);
            } elsif ($op eq 'f2') {
                push(@codes, 0332);
            } else {
                push(@codes, 0333);
            }
        } elsif ($op =~ /^[0-9a-f]{2}$/) {
            # Literal opcode byte: extend the current 01..04 literal run
            # if it immediately precedes us and has room, else start one
            if (defined($litix) && $litix+$codes[$litix]+1 == scalar @codes &&
                $codes[$litix] < 4) {
                $codes[$litix]++;
                push(@codes, hex $op);
            } else {
                $litix = scalar(@codes);
                push(@codes, 01, hex $op);
            }
            $prefix_ok = 0;
        } elsif ($op eq '/r') {
            # ModR/M with both register and memory operands
            if (!defined($oppos{'r'}) || !defined($oppos{'m'})) {
                die "$fname:$line: $op requires r and m operands\n";
            }
            $opex = (($oppos{'m'} & 4) ? 06 : 0) |
                (($oppos{'r'} & 4) ? 05 : 0);
            push(@codes, $opex) if ($opex);
            # if mib is composed with two separate operands - ICC style
            push(@codes, 014 + ($oppos{'x'} & 3)) if (defined($oppos{'x'}));
            push(@codes, 0100 + (($oppos{'m'} & 3) << 3) + ($oppos{'r'} & 3));
            $prefix_ok = 0;
        } elsif ($op =~ m:^/([0-7])$:) {
            # ModR/M with a fixed register-field digit
            if (!defined($oppos{'m'})) {
                die "$fname:$line: $op requires an m operand\n";
            }
            push(@codes, 06) if ($oppos{'m'} & 4);
            push(@codes, 0200 + (($oppos{'m'} & 3) << 3) + $1);
            $prefix_ok = 0;
        } elsif ($op =~ m:^/([0-3]?)r([0-7])$:) {
            # Register operand encoded in a fixed mod/rm combination
            if (!defined($oppos{'r'})) {
                die "$fname:$line: $op requires an r operand\n";
            }
            push(@codes, 05) if ($oppos{'r'} & 4);
            push(@codes, 0171);
            push(@codes, (($1+0) << 6) + (($oppos{'r'} & 3) << 3) + $2);
            $prefix_ok = 0;
        } elsif ($op =~ /^(vex|xop)(|\..*)$/) {
            # VEX/XOP prefix specification, e.g. vex.nds.128.66.0f
            my $vexname = $1;
            my $c = $vexmap{$vexname};
            my ($m,$w,$l,$p) = (undef,2,undef,0);
            my $has_nds = 0;
            my @subops = split(/\./, $op);
            shift @subops;      # Drop prefix
            foreach $oq (@subops) {
                if ($oq eq '128' || $oq eq 'l0' || $oq eq 'lz') {
                    $l = 0;
                } elsif ($oq eq '256' || $oq eq 'l1') {
                    $l = 1;
                } elsif ($oq eq 'lig') {
                    $l = 2;
                } elsif ($oq eq 'w0') {
                    $w = 0;
                } elsif ($oq eq 'w1') {
                    $w = 1;
                } elsif ($oq eq 'wig') {
                    $w = 2;
                } elsif ($oq eq 'ww') {
                    $w = 3;
                } elsif ($oq eq 'np' || $oq eq 'p0') {
                    $p = 0;
                } elsif ($oq eq '66' || $oq eq 'p1') {
                    $p = 1;
                } elsif ($oq eq 'f3' || $oq eq 'p2') {
                    $p = 2;
                } elsif ($oq eq 'f2' || $oq eq 'p3') {
                    $p = 3;
                } elsif ($oq eq '0f') {
                    $m = 1;
                } elsif ($oq eq '0f38') {
                    $m = 2;
                } elsif ($oq eq '0f3a') {
                    $m = 3;
                } elsif ($oq =~ /^m([0-9]+)$/) {
                    $m = $1+0;
                } elsif ($oq eq 'nds' || $oq eq 'ndd' || $oq eq 'dds') {
                    if (!defined($oppos{'v'})) {
                        die "$fname:$line: $vexname.$oq without 'v' operand\n";
                    }
                    $has_nds = 1;
                } else {
                    die "$fname:$line: undefined \U$vexname\E subcode: $oq\n";
                }
            }
            if (!defined($m) || !defined($w) || !defined($l) || !defined($p)) {
                die "$fname:$line: missing fields in \U$vexname\E specification\n";
            }
            my $minmap = ($c == 1) ? 8 : 0; # 0-31 for VEX, 8-31 for XOP
            if ($m < $minmap || $m > 31) {
                die "$fname:$line: Only maps ${minmap}-31 are valid for \U${vexname}\n";
            }
            push(@codes, defined($oppos{'v'}) ? 0260+($oppos{'v'} & 3) : 0270,
                 ($c << 6)+$m, ($w << 4)+($l << 2)+$p);
            $prefix_ok = 0;
        } elsif ($op =~ /^(evex)(|\..*)$/) {
            # EVEX prefix specification, e.g. evex.nds.512.66.0f.w1
            my $c = $vexmap{$1};
            my ($m,$w,$l,$p) = (undef,2,undef,0);
            my $has_nds = 0;
            my @subops = split(/\./, $op);
            shift @subops;      # Drop prefix
            foreach $oq (@subops) {
                if ($oq eq '128' || $oq eq 'l0' || $oq eq 'lz' || $oq eq 'lig') {
                    $l = 0;
                } elsif ($oq eq '256' || $oq eq 'l1') {
                    $l = 1;
                } elsif ($oq eq '512' || $oq eq 'l2') {
                    $l = 2;
                } elsif ($oq eq 'w0') {
                    $w = 0;
                } elsif ($oq eq 'w1') {
                    $w = 1;
                } elsif ($oq eq 'wig') {
                    $w = 2;
                } elsif ($oq eq 'ww') {
                    $w = 3;
                } elsif ($oq eq 'np' || $oq eq 'p0') {
                    $p = 0;
                } elsif ($oq eq '66' || $oq eq 'p1') {
                    $p = 1;
                } elsif ($oq eq 'f3' || $oq eq 'p2') {
                    $p = 2;
                } elsif ($oq eq 'f2' || $oq eq 'p3') {
                    $p = 3;
                } elsif ($oq eq '0f') {
                    $m = 1;
                } elsif ($oq eq '0f38') {
                    $m = 2;
                } elsif ($oq eq '0f3a') {
                    $m = 3;
                } elsif ($oq =~ /^m([0-9]+)$/) {
                    $m = $1+0;
                } elsif ($oq eq 'nds' || $oq eq 'ndd' || $oq eq 'dds') {
                    if (!defined($oppos{'v'})) {
                        die "$fname:$line: evex.$oq without 'v' operand\n";
                    }
                    $has_nds = 1;
                } else {
                    die "$fname:$line: undefined EVEX subcode: $oq\n";
                }
            }
            if (!defined($m) || !defined($w) || !defined($l) || !defined($p)) {
                die "$fname:$line: missing fields in EVEX specification\n";
            }
            if ($m > 15) {
                die "$fname:$line: Only maps 0-15 are valid for EVEX\n";
            }
            push(@codes, defined($oppos{'v'}) ? 0240+($oppos{'v'} & 3) : 0250,
                 ($c << 6)+$m, ($w << 4)+($l << 2)+$p, $tup);
            $prefix_ok = 0;
        } elsif (defined $imm_codes{$op}) {
            # Immediate/relative operand; consumes the next operand letter
            # ('i', then 'j'); 'seg' reuses the previous immediate's slot
            if ($op eq 'seg') {
                if ($last_imm lt 'i') {
                    die "$fname:$line: seg without an immediate operand\n";
                }
            } else {
                $last_imm++;
                if ($last_imm gt 'j') {
                    die "$fname:$line: too many immediate operands\n";
                }
            }
            if (!defined($oppos{$last_imm})) {
                die "$fname:$line: $op without '$last_imm' operand\n";
            }
            push(@codes, 05) if ($oppos{$last_imm} & 4);
            push(@codes, $imm_codes{$op} + ($oppos{$last_imm} & 3));
            $prefix_ok = 0;
        } elsif ($op eq '/is4') {
            # is4 byte: register in top nibble, optional immediate in bottom
            if (!defined($oppos{'s'})) {
                die "$fname:$line: $op without 's' operand\n";
            }
            if (defined($oppos{'i'})) {
                push(@codes, 0172, ($oppos{'s'} << 3)+$oppos{'i'});
            } else {
                push(@codes, 05) if ($oppos{'s'} & 4);
                push(@codes, 0174+($oppos{'s'} & 3));
            }
            $prefix_ok = 0;
        } elsif ($op =~ /^\/is4\=([0-9]+)$/) {
            # is4 byte with a hard-coded immediate value 0..15
            my $imm = $1;
            if (!defined($oppos{'s'})) {
                die "$fname:$line: $op without 's' operand\n";
            }
            if ($imm < 0 || $imm > 15) {
                die "$fname:$line: invalid imm4 value for $op: $imm\n";
            }
            push(@codes, 0173, ($oppos{'s'} << 4) + $imm);
            $prefix_ok = 0;
        } elsif ($op =~ /^([0-9a-f]{2})\+c$/) {
            # Opcode byte plus condition code
            push(@codes, 0330, hex $1);
            $prefix_ok = 0;
        } elsif ($op =~ /^([0-9a-f]{2})\+r$/) {
            # Opcode byte plus register value
            if (!defined($oppos{'r'})) {
                die "$fname:$line: $op without 'r' operand\n";
            }
            push(@codes, 05) if ($oppos{'r'} & 4);
            push(@codes, 010 + ($oppos{'r'} & 3), hex $1);
            $prefix_ok = 0;
        } elsif ($op =~ /^\\([0-7]+|x[0-9a-f]{2})$/) {
            # Escape to enter literal bytecodes
            push(@codes, oct $1);
        } else {
            die "$fname:$line: unknown operation: $op\n";
        }
    }

    return @codes;
}
| techkey/nasm | x86/insns.pl | Perl | bsd-2-clause | 35,177 |
#!/usr/bin/perl
#
# Copyright (C) 2003 by Virtusa Corporation
# http://www.virtusa.com
#
# Anuradha Ratnaweera
# http://www.linux.lk/~anuradha/
#
# We use addstr() instead of printw()
#
# Demo: show a centred prompt, read a line from the user, echo it back
# near the bottom of the screen, and wait for a keypress before exiting.

use Curses;

$mesg = "Enter a string: ";                  # Prompt text

initscr();                                   # Enter curses mode
getmaxyx($row, $col);                        # Screen size in rows/columns

# Centre the prompt.  NOTE(review): the coordinates may be fractional;
# presumably the underlying C curses call truncates them -- confirm if
# exact placement matters.
addstr($row / 2, ($col - length($mesg)) / 2, $mesg);

getstr($str);                                # Read the user's input line
addstr($LINES - 2, 0, "You Entered: $str");  # Echo near the bottom
getch();                                     # Wait for any keypress
endwin();                                    # Restore the terminal
| cpressey/ncurses_programs | perl/04.pl | Perl | mit | 392 |
#!/usr/bin/env perl
#Usage: perl generate_macros_xml.pl > macros.xml
#Note, this script uses einfo.py to get database info. It also uses manually compiled data stored at the bottom of this script that is based on: https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
#The data in the table on that page was manipulated to replace nulls with 'none', remove duplicates, and add missing formats based on correspondence with MLN.
##
## use einfo to retrieve all the valid databases
##
print STDERR "Retrieving database list\n";
# BUGFIX: the '@' signs must be backslash-escaped inside backticks,
# otherwise Perl interpolates the (empty) arrays @galaxyproject and @bx
# and silently mangles the e-mail addresses (cf. the correctly escaped
# einfo.py call in the per-database loop below).
my $dbxml = `python einfo.py --user_email "planemo\@galaxyproject.org" --admin_email "planemo\@galaxyproject.org;test\@bx.psu.edu"`;
my(@dblist);
my $dbs = {};      # Set of database names seen (keys only)
my $dbfroms = {};  # db => 1 if links exist *from* that database
my $dbnames = {};  # db => human-readable display name
foreach(split(/\n/,$dbxml))
{
if(/<DbName>(.+)<\/DbName>/)
{
my $db = $1;
push(@dblist,$db);
$dbs->{$db} = 0;
$dbfroms->{$db} = 0;
$dbnames->{$db} = $_;
}
}
##
## Use einfo to retrieve all the valid links for each database (Note: some databases are not linked)
##
my $h = {};        # $h->{from_db}->{to_db} = [[link name, menu name], ...]
foreach my $db (sort {$dbnames->{$a} cmp $dbnames->{$b}} @dblist)
{
sleep(2);          # Be polite to the NCBI servers between requests
print STDERR "Retrieving info for $db\n";
my $response = `python einfo.py --db $db --user_email "planemo\@galaxyproject.org" --admin_email "planemo\@galaxyproject.org;test\@bx.psu.edu"`;
my $dolinks = 0;
my $link = "";
my $name = "";
foreach(split(/\n/,$response))
{
if(/<LinkList>/)
{
$dolinks = 1;
#Save whether there exist links from this database
$dbfroms->{$db} = 1;
}
elsif(!$dolinks)
{
if(/<MenuName>(.+)<\/MenuName>/)
{$dbnames->{$db} = "$1 ($db)"}
}
elsif($dolinks)
{
if(/<Name>(.+)<\/Name>/)
{$link=$1}
elsif(/<Menu>(.*)<\/Menu>/)
{$name=$1}
elsif(/<DbTo>(.+)<\/DbTo>/)
{
$dbto=$1;
push(@{$h->{$db}->{$dbto}},[$link,$name]);
$link="";
$name="";
}
}
}
}
# Sort the database list once by display name for all subsequent menus.
my @sorted_dblist = sort {$dbnames->{$a} cmp $dbnames->{$b}} @dblist;
##
## Generate XML to govern the valid databases to use with efetch
##
my $efetch_dbhash = {}; #->{efetch-compatible-db}->{rettype-retmode-galaxy_format} = format_name (galaxy_format)
# The __DATA__ section at the bottom of this script holds the manually
# compiled efetch format table (tab-separated, one row per format).
while(<DATA>)
{
chomp;
my($db,$galaxy_format,$retmode,$rettype,$format_name) = split(/\t/,$_);
$efetch_dbhash->{$db}->{"$rettype-$retmode-$galaxy_format"} =
"$format_name ($galaxy_format)";
}
#EFetch database select list
print << 'EOXML';
<xml name="dbselect_efetch" token_name="db_select" token_label="NCBI Database to Query">
<param name="@NAME@" type="select" label="@LABEL@">
EOXML
foreach my $db (grep {exists($dbs->{$_})}
sort {$dbnames->{$a} cmp $dbnames->{$b}}
keys(%$efetch_dbhash))
{
my $selected = '';
if($db eq 'pubmed')
{$selected = ' selected="True"'}
print << "  EOXML";
<option value="$db"$selected>$dbnames->{$db}</option>
  EOXML
}
print << 'EOXML';
</param>
</xml>
EOXML
#EFetch output formats
print << 'EOXML';
<xml name="efetchdb">
<conditional name="db">
<expand macro="dbselect_efetch" />
EOXML
foreach my $db (grep {exists($dbs->{$_})}
sort {$dbnames->{$a} cmp $dbnames->{$b}}
keys(%$efetch_dbhash))
{
print << "  EOXML";
<when value="$db">
<param name="output_format" type="select" label="Output Format">
  EOXML
foreach my $eutils_format (sort {$efetch_dbhash->{$db}->{$a} cmp
$efetch_dbhash->{$db}->{$b}}
keys(%{$efetch_dbhash->{$db}}))
{
print << "  EOXML";
<option value="$eutils_format">$efetch_dbhash->{$db}->{$eutils_format}</option>
  EOXML
}
print << "  EOXML";
</param>
</when>
  EOXML
}
print << 'EOXML';
</conditional>
</xml>
EOXML
##
## Create a select list for the databases linked *from*
##
print << 'EOXML';
<xml name="dbselect" token_name="db_select" token_label="NCBI Database to Query">
<param name="@NAME@" type="select" label="@LABEL@">
EOXML
foreach my $from (@sorted_dblist)
{
print << "  EOXML";
<option value="$from">$dbnames->{$from}</option>
  EOXML
}
print << 'EOXML';
</param>
</xml>
EOXML
##
## Create a select list for the databases linked *to*
##
print << 'EOXML';
<xml name="dbselect_linked" token_name="db_select_linked" token_label="NCBI Database to Use">
<param name="@NAME@" type="select" label="@LABEL@">
EOXML
foreach my $from (grep {$dbfroms->{$_}} @sorted_dblist)
{
print << "  EOXML";
<option value="$from">$dbnames->{$from}</option>
  EOXML
}
print << 'EOXML';
</param>
</xml>
EOXML
##
## Create empty entries for commands that take no *to* database or link
##
print << 'EOXML';
<xml name="none_link_macro">
<conditional name="db_to">
<param name="db_select_to" type="select" label="To NCBI Database (n/a)">
<option value="n/a">Not applicable</option>
</param>
<when value="n/a">
<param name="linkname" type="select" label="Link Name (n/a)">
<option value="n/a">Not applicable</option>
</param>
</when>
</conditional>
</xml>
<xml name="db_link_macro">
<conditional name="db_from_link">
<expand macro="dbselect_linked" name="db_select_from_link" label="From NCBI Database" />
EOXML
foreach(grep {$dbfroms->{$_}} @sorted_dblist)
{
print << "  EOXML";
<when value="$_">
<expand macro="none_link_macro" name="db_select_none" label="To NCBI Database" />
</when>
  EOXML
}
print << 'EOXML';
</conditional>
</xml>
EOXML
##
## This is the master macro for the command selection
##
# One static heredoc: the elink command-select conditional with a
# per-command database/link sub-conditional and output format list.
print << 'EOXML';
<xml name="linkmacro">
<conditional name="cmd">
<param name="cmd_select" type="select" label="Link Method" help="Fetch UIDs from the 'To' Database that are linked to supplied UIDs in the 'From' database">
<option value="neighbor" selected="true">Neighbor (neighbor)</option>
<option value="neighbor_history">Neighbor, save result in history server (neighbor_history)</option>
<option value="neighbor_score">Neighbor Score (neighbor_score)</option>
<option value="acheck">Show available links to any database (acheck)</option>
<option value="ncheck">Show available links within the same database (ncheck)</option>
<option value="lcheck">Show available links to external sources (LinkOuts) (lcheck)</option>
<option value="llinks">Show available URLs and attributes for non-library LinkOut providers (llinks)</option>
<option value="llinkslib">Show available URLs and attributes for all LinkOut Providers (llinkslib)</option>
<option value="prlinks">Show available primary LinkOut Provider Links (prlinks)</option>
</param>
<when value="neighbor">
<expand macro="db_db_link_macro" name="link_select" label="Link name" />
<param name="output_format" type="select" label="Output Format">
<option value="xml">ID File (xml)</option>
<option value="json">ID File (json)</option>
<option value="text" selected="true">ID File (tabular)</option>
</param>
</when>
<when value="neighbor_history">
<expand macro="db_db_link_macro" name="link_select" label="Link name" />
<param name="output_format" type="select" label="Output Format">
<option value="json">History File (json)</option>
<option value="xml" selected="true">History File (xml)</option>
</param>
</when>
<when value="neighbor_score">
<expand macro="db_db_link_macro" name="link_select" label="Link name" />
<param name="output_format" type="select" label="Output Format">
<option value="xml">ID File (xml)</option>
<option value="json">ID File (json)</option>
<option value="text" selected="true">ID File (tabular)</option>
</param>
</when>
<when value="acheck">
<expand macro="db_link_macro" name="db_select_from_link" label="From NCBI Database" />
<param name="output_format" type="select" label="Output Format">
<option value="xml" selected="True">Link Description File (xml)</option>
<option value="json">Link Description File (json)</option>
</param>
</when>
<when value="ncheck">
<expand macro="db_link_macro" name="db_select_from_link" label="From NCBI Database" />
<param name="output_format" type="select" label="Output Format">
<option value="xml" selected="True">Link Description File (xml)</option>
<option value="json">Link Description File (json)</option>
</param>
</when>
<when value="lcheck">
<expand macro="db_link_macro" name="db_select_from_link" label="From NCBI Database" />
<param name="output_format" type="select" label="Output Format">
<option value="xml" selected="True">Link Description File (xml)</option>
<option value="json">Link Description File (json)</option>
</param>
</when>
<when value="llinks">
<expand macro="db_link_macro" name="db_select_from_link" label="From NCBI Database" />
<param name="output_format" type="select" label="Output Format">
<option value="xml" selected="True">Link Description File (xml)</option>
<option value="json">Link Description File (json)</option>
</param>
</when>
<when value="llinkslib">
<expand macro="db_link_macro" name="db_select_from_link" label="From NCBI Database" />
<param name="output_format" type="select" label="Output Format">
<option value="xml" selected="true">Link Description File (xml)</option>
<option value="json">Link Description File (json)</option>
</param>
</when>
<when value="prlinks">
<expand macro="db_link_macro" name="db_select_from_link" label="From NCBI Database" />
<param name="output_format" type="select" label="Output Format">
<option value="xml" selected="true">Link Description File (xml)</option>
<option value="json">Link Description File (json)</option>
</param>
</when>
</conditional>
</xml>
EOXML
##
## Create selections for valid links for command types neighbor, neighbor_history, and neighbor_score
##
# For each "from" database with links, emit the valid "to" databases and
# the link names connecting them; databases without outgoing links get a
# not-applicable placeholder.
print << 'EOXML';
<xml name="db_db_link_macro">
<conditional name="db_from_link">
<expand macro="dbselect_linked" name="db_select_from_link" label="From NCBI Database" />
EOXML
foreach my $from (grep {$dbfroms->{$_}} @sorted_dblist)
{
print STDERR ("Creating Links From: $from\n");
print << "  EOXML";
<when value="$from">
<conditional name="db_to">
<param name="db_select_to" type="select" label="To NCBI Database">
  EOXML
my @dbtos = (grep {exists($h->{$from}) && exists($h->{$from}->{$_})}
@sorted_dblist);
foreach(@dbtos)
{
print << "  EOXML";
<option value="$_">$dbnames->{$_}</option>
  EOXML
}
if(scalar(@dbtos) == 0)
{
#Provide an option for a self-link: from->from
print << "  EOXML";
<option value="$from">$dbnames->{$from}</option>
  EOXML
}
print << '  EOXML';
</param>
  EOXML
if(exists($h->{$from}))
{
#There do exist links to invalid(/outdated/non-existant) databases that
#would result in an error if they are selected, so we use the original
#@dblist instead of the keys present in the sub hash of $h->{$from}, and
#then check for existence in the sub-hash
foreach my $to (grep {exists($h->{$from}->{$_})} @sorted_dblist)
{
print STDERR ("\tTo: $to Links: ",
join(',',map {$_->[0]} @{$h->{$from}->{$to}}),
"\n");
print << "  EOXML";
<when value="$to">
<param name="linkname" type="select" label="Link Name">
<option value="None">All Links</option>
  EOXML
foreach(sort {"$a->[1] ($a->[0])" cmp "$b->[1] ($b->[0])"}
@{$h->{$from}->{$to}})
{
print << "  EOXML";
<option value="$_->[0]">$_->[1] ($_->[0])</option>
  EOXML
}
print << "  EOXML";
</param>
</when>
  EOXML
}
}
else
{
##
## Add-on selections for self-links for command types neighbor,
## neighbor_history, and neighbor_score
## Note, I'm not sure this would yield a valid result from elink
##
#This shows $from, but this is the 'when' for db_to conditional
print << "  EOXML";
<when value="$from">
<param name="linkname" type="select" label="Link Name">
<option value="none">All Links</option>
</param>
</when>
  EOXML
}
print << '  EOXML';
</conditional>
</when>
  EOXML
}
##
## Add-on selections for self-links for command types neighbor,
## neighbor_history, and neighbor_score
## Note, I'm not sure this would yield a valid result from elink
##
foreach my $from (grep {!exists($h->{$_})} @sorted_dblist)
{
print << "EOXML";
<when value=\"$from\">
<conditional name=\"db_to\">
<param name=\"db_select_to\" type=\"select\" label=\"To NCBI Database\">
<option value=\"none\">Not applicable</option>
</param>
<when value=\"none\">
<param name=\"linkname\" type=\"select\" label=\"Link Name\">
<option value=\"none\">Not applicable</option>
</param>
</when>
</conditional>
</when>
EOXML
}
##
## This is the corresponding code for using the selections to add the respective command line options
##
print << 'EOXML';
</conditional>
</xml>
EOXML
# Cheetah template token translating the macro selections above into
# elink command-line options.  NOTE(review): the linkname options mix
# 'None' and 'none' values, but only 'None' is excluded here -- verify
# the self-link 'none' value is intended to be passed through.
print << 'EOXML';
<token name="@LINK_TOKEN@">
<![CDATA[
#if $cmd.db_from_link.db_to.db_select_to == 'n/a':
none
#else:
$cmd.db_from_link.db_to.db_select_to
#end if
$cmd.db_from_link.db_select_from_link
$cmd.cmd_select
#if $cmd.output_format == 'json':
--retmode json
#elif $cmd.output_format == 'text':
--retmode uilist
#else:
--retmode xml
#end if
#if $cmd.db_from_link.db_to.linkname != 'None' and $cmd.cmd_select in ('neighbor', 'neighbor_history', 'neighbor_score'):
--linkname $cmd.db_from_link.db_to.linkname
#end if
]]>
</token>
EOXML
sub startXML
{
print << ' EOXML';
<?xml version="1.0"?>
<macros>
<token name="@PROFILE@">18.01</token>
<token name="@WRAPPER_VERSION@">1.70</token>
<token name="@EMAIL_ARGUMENTS@">
--user_email "$__user_email__"
#set admin_emails = ';'.join(str($__admin_users__).split(','))
--admin_email "$admin_emails"
</token>
<!-- TODO: citation -->
<token name="@REFERENCES@"><![CDATA[
]]></token>
<token name="@DISCLAIMER@"><![CDATA[
Usage Guidelines and Requirements
=================================
Frequency, Timing, and Registration of E-utility URL Requests
-------------------------------------------------------------
In order not to overload the E-utility servers, NCBI recommends that users
limit large jobs to either weekends or between 9:00 PM and 5:00 AM Eastern time
during weekdays. Failure to comply with this policy may result in an IP address
being blocked from accessing NCBI.
Minimizing the Number of Requests
---------------------------------
If a task requires searching for and/or downloading a large number of
records, it is much more efficient to use the Entrez History to upload
and/or retrieve these records in batches rather than using separate
requests for each record. Please refer to Application 3 in Chapter 3
for an example. Many thousands of IDs can be uploaded using a single
EPost request, and several hundred records can be downloaded using one
EFetch request.
Disclaimer and Copyright Issues
-------------------------------
In accordance with requirements of NCBI's E-Utilities, we must provide
the following disclaimer:
Please note that abstracts in PubMed may incorporate material that may
be protected by U.S. and foreign copyright laws. All persons
reproducing, redistributing, or making commercial use of this
information are expected to adhere to the terms and conditions asserted
by the copyright holder. Transmission or reproduction of protected
items beyond that allowed by fair use (PDF) as defined in the copyright
laws requires the written permission of the copyright owners. NLM
provides no legal advice concerning distribution of copyrighted
materials. Please consult your legal counsel. If you wish to do a large
data mining project on PubMed data, you can enter into a licensing
agreement and lease the data for free from NLM. For more information on
this please see `https://www.nlm.nih.gov/databases/download/data_distrib_main.html <https://www.nlm.nih.gov/databases/download/data_distrib_main.html>`__
The `full disclaimer <https://www.ncbi.nlm.nih.gov/home/about/policies/>`__ is available on
their website
Liability
~~~~~~~~~
For documents and software available from this server, the
U.S. Government does not warrant or assume any legal liability or
responsibility for the accuracy, completeness, or usefulness of any
information, apparatus, product, or process disclosed.
Endorsement
~~~~~~~~~~~
NCBI does not endorse or recommend any commercial
products, processes, or services. The views and opinions of authors
expressed on NCBI's Web sites do not necessarily state or reflect those
of the U.S. Government, and they may not be used for advertising or
product endorsement purposes.
External Links
~~~~~~~~~~~~~~
Some NCBI Web pages may provide links to other Internet
sites for the convenience of users. NCBI is not responsible for the
availability or content of these external sites, nor does NCBI endorse,
warrant, or guarantee the products, services, or information described
or offered at these other Internet sites. Users cannot assume that the
external sites will abide by the same Privacy Policy to which NCBI
adheres. It is the responsibility of the user to examine the copyright
and licensing restrictions of linked pages and to secure all necessary
permissions.
]]></token>
<token name="@LIST_OR_HIST@">
#if $query_source.qss == "history_json":
--history_file $query_source.history_file
#else if $query_source.qss == "history_xml":
--history_xml $query_source.history_xml
#else if $query_source.qss == "id_file":
--id_list $query_source.id_file
#else if $query_source.qss == "id_list":
--id $query_source.id_list
#else if $query_source.qss == "id_xml":
--id_xml $query_source.id_xml
#else if $query_source.qss == "id_json":
--id_json $query_source.id_json
#end if
</token>
<xml name="list_or_hist">
<conditional name="query_source">
<param name="qss" type="select" label="Enter Query IDs by..." help="Files output by ELink or ESearch are acceptable. Query IDs in an ELink result are ignored.">
<option value="history_json">History File (JSON)</option>
<option value="history_xml">History File (XML)</option>
<option value="id_file" selected="True">ID file (Tabular)</option>
<option value="id_xml">ID File (XML)</option>
<option value="id_json">ID File (JSON)</option>
<option value="id_list">Paste IDs</option>
</param>
<when value="history_json">
<param label="History File (JSON)" name="history_file" type="data" format="json" help="A JSON file containing the WebEnv ID and Query Key referencing the search on the NCBI history server"/>
</when>
<when value="history_xml">
<param label="History File (XML)" name="history_xml" type="data" format="xml" help="An XML file containing the WebEnv ID and Query Key referencing the search on the NCBI history server"/>
</when>
<when value="id_file">
<param label="ID File (Text)" name="id_file" type="data" format="text,tabular" help="A Text file containing one ID per line"/>
</when>
<when value="id_xml">
<param label="ID File (XML)" name="id_xml" type="data" format="xml" help="ESearch or ELink Result XML file"/>
</when>
<when value="id_json">
<param label="ID File (JSON)" name="id_json" type="data" format="json" help="ESearch or ELink Result JSON file"/>
</when>
<when value="id_list">
<param label="Paste ID List" name="id_list" type="text" area="true" help="Newline/Comma separated list of IDs"/>
</when>
</conditional>
</xml>
<xml name="citations">
<citations>
<citation type="bibtex">@Book{ncbiEutils,
author = {Eric Sayers},
title = {Entrez Programming Utilities Help},
year = {2010},
publisher = {National Center for Biotechnology Information, Bethesda, Maryland},
note = {https://www.ncbi.nlm.nih.gov/books/NBK25500/}
}</citation>
</citations>
</xml>
<xml name="requirements">
<requirements>
<requirement type="package" version="1.70">biopython</requirement>
</requirements>
</xml>
<token name="@EFETCH_FORMAT_TOKEN@">
<![CDATA[
## This token must go at the end of the efetch command
#set rettype, retmode, format = str($db.output_format).split('-')
#if retmode != "none":
--retmode $retmode
#end if
## Otherwise, defaults to a None/empty which implies 'default' to NCBI
#if rettype != "none":
--rettype $rettype
#end if
--galaxy_format $format
]]>
</token>
EOXML
}
# endXML: print the closing </macros> tag, terminating the XML document that
# startXML() opened.  NOTE(review): the heredoc delimiter ' EOXML' carries a
# leading space, so the terminator line must match it byte-for-byte (one
# leading space); whitespace here may have been mangled upstream — verify
# against the original file.
sub endXML
{
print << ' EOXML';
</macros>
 EOXML
}
# Emit the XML prologue as soon as the script is compiled and the closing
# tag at process exit, so everything printed by the main body (the macro
# tokens above) ends up wrapped inside a single <macros> document.
BEGIN {startXML()}
END {endXML()}
##
## Output formats for efetch mapped to galaxy formats
##
#Based on:
#https://www.ncbi.nlm.nih.gov/books/NBK25499/table/chapter4.T._valid_values_of__retmode_and/?report=objectonly
#Note: While json works for esearch and elink, the only database that supports
#json (according to an NLM support ticket I have about this) is snp
#The output_format param value for these will be "rettype-retmode-format"
#db galaxy retmode rettype format_name
__DATA__
bioproject tabular text uilist List of UIDs
bioproject xml xml docsum Document summary
bioproject xml xml uilist List of UIDs
bioproject xml xml xml Full record
biosample tabular text uilist List of UIDs
biosample txt text full Full record
biosample xml xml docsum Document summary
biosample xml xml full Full record
biosample xml xml uilist List of UIDs
biosystems tabular text uilist List of UIDs
biosystems xml xml docsum Document summary
biosystems xml xml uilist List of UIDs
biosystems xml xml xml Full record
clinvar tabular text uilist List of UIDs
clinvar xml xml clinvarset ClinVar Set
clinvar xml xml docsum Document summary
clinvar xml xml uilist List of UIDs
clinvar xml none none Full
gds tabular text uilist List of UIDs
gds txt text summary Summary
gds xml xml docsum Document summary
gds xml xml uilist List of UIDs
gds xml none none Full
gene txt text gene_table Gene table
gene tabular text uilist List of UIDs
gene txt asn.1 none text ASN.1
gene xml xml docsum Document summary
gene xml xml none Full
gene xml xml uilist List of UIDs
gtr tabular text uilist List of UIDs
gtr xml xml docsum Document summary
gtr xml xml gtracc GTR Test Report
gtr xml xml uilist List of UIDs
gtr xml none none Full
homologene fasta text fasta FASTA
homologene tabular text alignmentscores Alignment scores
homologene tabular text uilist List of UIDs
homologene txt asn.1 none text ASN.1
homologene txt text homologene HomoloGene
homologene xml xml docsum Document summary
homologene xml xml none Full
homologene xml xml uilist List of UIDs
mesh tabular text uilist List of UIDs
mesh txt text full Full record
mesh xml xml docsum Document summary
mesh xml xml uilist List of UIDs
nlmcatalog tabular text uilist List of UIDs
nlmcatalog txt text none Full record
nlmcatalog xml xml docsum Document summary
nlmcatalog xml xml none Full
nlmcatalog xml xml uilist List of UIDs
nuccore binary asn.1 none binary ASN.1
nuccore fasta text fasta FASTA
nuccore fasta text fasta_cds_aa CDS protein FASTA
nuccore fasta text fasta_cds_na CDS nucleotide FASTA
nuccore genbank text gb GenBank flat file
nuccore genbank text gbwithparts GenBank flat file with full sequence (contigs)
nuccore tabular text acc Accession number(s)
nuccore txt text ft Feature table
nuccore tabular text seqid SeqID string
nuccore tabular text uilist List of UIDs
nuccore txt text none text ASN.1
nuccore xml xml docsum Document summary
nuccore xml xml fasta TinySeq
nuccore xml xml gb GBSeq
nuccore xml xml gbc INSDSeq
nuccore xml xml native Full record
nuccore xml xml uilist List of UIDs
nucest binary asn.1 none binary ASN.1
nucest fasta text fasta FASTA
nucest genbank text gb GenBank flat file
nucest tabular text acc Accession number(s)
nucest tabular text seqid SeqID string
nucest tabular text uilist List of UIDs
nucest txt text est EST report
nucest txt text none text ASN.1
nucest xml xml docsum Document summary
nucest xml xml fasta TinySeq
nucest xml xml gb GBSeq
nucest xml xml gbc INSDSeq
nucest xml xml native Full record
nucest xml xml uilist List of UIDs
nucgss binary asn.1 none binary ASN.1
nucgss fasta text fasta FASTA
nucgss genbank text gb GenBank flat file
nucgss tabular text acc Accession number(s)
nucgss tabular text seqid SeqID string
nucgss tabular text uilist List of UIDs
nucgss txt text gss GSS report
nucgss txt text none text ASN.1
nucgss xml xml docsum Document summary
nucgss xml xml fasta TinySeq
nucgss xml xml gb GBSeq
nucgss xml xml gbc INSDSeq
nucgss xml xml native Full record
nucgss xml xml uilist List of UIDs
pmc tabular text uilist List of UIDs
pmc txt text medline MEDLINE
pmc xml xml docsum Document summary
pmc xml xml none FULL
pmc xml xml uilist List of UIDs
popset binary asn.1 none binary ASN.1
popset fasta text fasta FASTA
popset genbank text gb GenBank flat file
popset tabular text acc Accession number(s)
popset tabular text seqid SeqID string
popset tabular text uilist List of UIDs
popset txt text none text ASN.1
popset xml xml docsum Document summary
popset xml xml fasta TinySeq
popset xml xml gb GBSeq
popset xml xml gbc INSDSeq
popset xml xml native Full record
popset xml xml uilist List of UIDs
protein binary asn.1 none binary ASN.1
protein fasta text fasta FASTA
protein tabular text acc Accession number(s)
protein txt text ft Feature table
protein tabular text seqid SeqID string
protein tabular text uilist List of UIDs
protein txt text gp GenPept flat file
protein txt text none text ASN.1
protein xml xml docsum Document summary
protein xml xml fasta TinySeq
protein xml xml gp GBSeq
protein xml xml gpc INSDSeq
protein xml xml ipg Identical Protein
protein xml xml native Full record
protein xml xml uilist List of UIDs
pubmed tabular text uilist List of UIDs
pubmed txt asn.1 none text ASN.1
pubmed txt text abstract Abstract
pubmed txt text medline MEDLINE
pubmed xml xml docsum Document summary
pubmed xml xml none Full
pubmed xml xml uilist List of UIDs
sequences fasta text fasta FASTA
sequences tabular text acc Accession number(s)
sequences tabular text seqid SeqID string
sequences tabular text uilist List of UIDs
sequences txt text none text ASN.1
sequences xml xml docsum Document summary
sequences xml xml uilist List of UIDs
sequences xml none none Full
snp fasta text fasta FASTA
snp json json docsum Document summary
snp json json uilist List of UIDs
snp tabular text ssexemplar SS Exemplar list
snp tabular text uilist List of UIDs
snp txt asn.1 none text ASN.1
snp txt text chr Chromosome report
snp txt text docset Summary
snp txt text flt Flat file
snp txt text rsr RS Cluster report
snp xml xml docsum Document summary
snp xml xml none XML
snp xml xml uilist List of UIDs
sra tabular text uilist List of UIDs
sra xml xml docsum Document summary
sra xml xml full Full
taxonomy tabular text uilist List of UIDs
taxonomy xml xml none Full
taxonomy xml xml docsum Document summary
taxonomy xml xml uilist List of UIDs
| loraine-gueguen/tools-iuc | tools/ncbi_entrez_eutils/generate_macros_xml.pl | Perl | mit | 28,827 |
package Class::Inspector::Functions;

# Function-based wrapper around the method interface of Class::Inspector.
# Each exported function simply forwards to the Class::Inspector class
# method of the same name.

use 5.006;
use strict;
use warnings;

use Exporter         ();
use Class::Inspector ();

# 'use vars' is obsolete; 'our' (available since 5.6, the declared minimum)
# declares the same package globals under strict.
our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);

BEGIN {
    $VERSION = '1.27';
    @ISA     = 'Exporter';

    # Exported by default.
    @EXPORT = qw(
        installed
        loaded
        filename
        functions
        methods
        subclasses
    );

    # Exported only on request.
    @EXPORT_OK = qw(
        resolved_filename
        loaded_filename
        function_refs
        function_exists
    );
    # children / recursive_children are intentionally not exposed here.

    %EXPORT_TAGS = ( ALL => [ @EXPORT_OK, @EXPORT ] );

    # Generate one wrapper function per exportable name, each delegating
    # to the corresponding Class::Inspector class method.
    foreach my $meth (@EXPORT, @EXPORT_OK) {
        my $sub = Class::Inspector->can($meth);
        no strict 'refs';
        *{$meth} = sub { &$sub('Class::Inspector', @_) };
    }
}

1;
__END__
=pod
=head1 NAME
Class::Inspector::Functions - Get information about a class and its structure
=head1 SYNOPSIS
use Class::Inspector::Functions;
# Class::Inspector provides a non-polluting,
# method based interface!
# Is a class installed and/or loaded
installed( 'Foo::Class' );
loaded( 'Foo::Class' );
# Filename related information
filename( 'Foo::Class' );
resolved_filename( 'Foo::Class' );
# Get subroutine related information
functions( 'Foo::Class' );
function_refs( 'Foo::Class' );
function_exists( 'Foo::Class', 'bar' );
methods( 'Foo::Class', 'full', 'public' );
# Find all loaded subclasses or something
subclasses( 'Foo::Class' );
=head1 DESCRIPTION
Class::Inspector::Functions is a function-based interface to
L<Class::Inspector>. For thorough documentation of the available
functions, please consult the manual for the main module.
=head2 Exports
The following functions are exported by default.
installed
loaded
filename
functions
methods
subclasses
The following functions are exported only by request.
resolved_filename
loaded_filename
function_refs
function_exists
All the functions may be imported using the C<:ALL> tag.
=head1 SUPPORT
Bugs should be reported via the CPAN bug tracker
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Class-Inspector>
For other issues, or commercial enhancement or support, contact the author.
=head1 AUTHOR
Adam Kennedy E<lt>adamk@cpan.orgE<gt>
Steffen Mueller E<lt>smueller@cpan.orgE<gt>
=head1 SEE ALSO
L<http://ali.as/>, L<Class::Handle>
=head1 COPYRIGHT
Copyright 2002 - 2012 Adam Kennedy.
Class::Inspector::Functions copyright 2008 - 2009 Steffen Mueller.
This program is free software; you can redistribute
it and/or modify it under the same terms as Perl itself.
The full text of the license can be found in the
LICENSE file included with this module.
=cut
| leighpauls/k2cro4 | third_party/perl/perl/vendor/lib/Class/Inspector/Functions.pm | Perl | bsd-3-clause | 2,603 |
###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite locale
# generator (0.05). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate-from-cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file sh_CS.xml
# The source file version number was 1.48, generated on
# 2009/05/05 23:06:40.
#
# Do not edit this file directly.
#
###########################################################################
# Auto-generated CLDR locale module (see header above: do not edit directly);
# all actual locale data is inherited from sr_Latn_RS.
package DateTime::Locale::sh_CS;

use strict;
use warnings;
use utf8;

use base 'DateTime::Locale::sr_Latn_RS';

# Version of the CLDR data set from which this module was generated.
sub cldr_version { return "1\.7\.1" }

{
    # "1" is Monday in CLDR/ISO-8601 day numbering; the closure keeps the
    # value private to the accessor below.
    my $first_day_of_week = "1";
    sub first_day_of_week { return $first_day_of_week }
}

1;
__END__
=pod
=encoding utf8
=head1 NAME
DateTime::Locale::sh_CS
=head1 SYNOPSIS
use DateTime;
my $dt = DateTime->now( locale => 'sh_CS' );
print $dt->month_name();
=head1 DESCRIPTION
This is the DateTime locale package for Serbo-Croatian Serbia and Montenegro.
=head1 DATA
This locale inherits from the L<DateTime::Locale::sr_Latn_RS> locale.
It contains the following data.
=head2 Days
=head3 Wide (format)
ponedeljak
utorak
sreda
četvrtak
petak
subota
nedelja
=head3 Abbreviated (format)
pon
uto
sre
čet
pet
sub
ned
=head3 Narrow (format)
p
u
s
č
p
s
n
=head3 Wide (stand-alone)
ponedeljak
utorak
sreda
četvrtak
petak
subota
nedelja
=head3 Abbreviated (stand-alone)
pon
uto
sre
čet
pet
sub
ned
=head3 Narrow (stand-alone)
p
u
s
č
p
s
n
=head2 Months
=head3 Wide (format)
januar
februar
mart
april
maj
jun
jul
avgust
septembar
oktobar
novembar
decembar
=head3 Abbreviated (format)
jan
feb
mar
apr
maj
jun
jul
avg
sep
okt
nov
dec
=head3 Narrow (format)
j
f
m
a
m
j
j
a
s
o
n
d
=head3 Wide (stand-alone)
januar
februar
mart
april
maj
jun
jul
avgust
septembar
oktobar
novembar
decembar
=head3 Abbreviated (stand-alone)
jan
feb
mar
apr
maj
jun
jul
avg
sep
okt
nov
dec
=head3 Narrow (stand-alone)
j
f
m
a
m
j
j
a
s
o
n
d
=head2 Quarters
=head3 Wide (format)
1. kvartal
2. kvartal
3. kvartal
4. kvartal
=head3 Abbreviated (format)
Q1
Q2
Q3
Q4
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
1. kvartal
2. kvartal
3. kvartal
4. kvartal
=head3 Abbreviated (stand-alone)
Q1
Q2
Q3
Q4
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide
Pre nove ere
Nove ere
=head3 Abbreviated
p. n. e.
n. e
=head3 Narrow
p. n. e.
n. e
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = utorak, 05. februar 2008.
1995-12-22T09:05:02 = petak, 22. decembar 1995.
-0010-09-15T04:44:23 = subota, 15. septembar -10.
=head3 Long
2008-02-05T18:30:30 = 05. februar 2008.
1995-12-22T09:05:02 = 22. decembar 1995.
-0010-09-15T04:44:23 = 15. septembar -10.
=head3 Medium
2008-02-05T18:30:30 = 05.02.2008.
1995-12-22T09:05:02 = 22.12.1995.
-0010-09-15T04:44:23 = 15.09.-10.
=head3 Short
2008-02-05T18:30:30 = 5.2.08.
1995-12-22T09:05:02 = 22.12.95.
-0010-09-15T04:44:23 = 15.9.-10.
=head3 Default
2008-02-05T18:30:30 = 05.02.2008.
1995-12-22T09:05:02 = 22.12.1995.
-0010-09-15T04:44:23 = 15.09.-10.
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 18.30.30 UTC
1995-12-22T09:05:02 = 09.05.02 UTC
-0010-09-15T04:44:23 = 04.44.23 UTC
=head3 Long
2008-02-05T18:30:30 = 18.30.30 UTC
1995-12-22T09:05:02 = 09.05.02 UTC
-0010-09-15T04:44:23 = 04.44.23 UTC
=head3 Medium
2008-02-05T18:30:30 = 18.30.30
1995-12-22T09:05:02 = 09.05.02
-0010-09-15T04:44:23 = 04.44.23
=head3 Short
2008-02-05T18:30:30 = 18.30
1995-12-22T09:05:02 = 09.05
-0010-09-15T04:44:23 = 04.44
=head3 Default
2008-02-05T18:30:30 = 18.30.30
1995-12-22T09:05:02 = 09.05.02
-0010-09-15T04:44:23 = 04.44.23
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = utorak, 05. februar 2008. 18.30.30 UTC
1995-12-22T09:05:02 = petak, 22. decembar 1995. 09.05.02 UTC
-0010-09-15T04:44:23 = subota, 15. septembar -10. 04.44.23 UTC
=head3 Long
2008-02-05T18:30:30 = 05. februar 2008. 18.30.30 UTC
1995-12-22T09:05:02 = 22. decembar 1995. 09.05.02 UTC
-0010-09-15T04:44:23 = 15. septembar -10. 04.44.23 UTC
=head3 Medium
2008-02-05T18:30:30 = 05.02.2008. 18.30.30
1995-12-22T09:05:02 = 22.12.1995. 09.05.02
-0010-09-15T04:44:23 = 15.09.-10. 04.44.23
=head3 Short
2008-02-05T18:30:30 = 5.2.08. 18.30
1995-12-22T09:05:02 = 22.12.95. 09.05
-0010-09-15T04:44:23 = 15.9.-10. 04.44
=head3 Default
2008-02-05T18:30:30 = 05.02.2008. 18.30.30
1995-12-22T09:05:02 = 22.12.1995. 09.05.02
-0010-09-15T04:44:23 = 15.09.-10. 04.44.23
=head2 Available Formats
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 Ed (E d.)
2008-02-05T18:30:30 = uto 5.
1995-12-22T09:05:02 = pet 22.
-0010-09-15T04:44:23 = sub 15.
=head3 EEEd (d EEE)
2008-02-05T18:30:30 = 5 uto
1995-12-22T09:05:02 = 22 pet
-0010-09-15T04:44:23 = 15 sub
=head3 hhmm (hh.mm a)
2008-02-05T18:30:30 = 06.30 popodne
1995-12-22T09:05:02 = 09.05 pre podne
-0010-09-15T04:44:23 = 04.44 pre podne
=head3 hhmmss (hh.mm.ss a)
2008-02-05T18:30:30 = 06.30.30 popodne
1995-12-22T09:05:02 = 09.05.02 pre podne
-0010-09-15T04:44:23 = 04.44.23 pre podne
=head3 Hm (H.mm)
2008-02-05T18:30:30 = 18.30
1995-12-22T09:05:02 = 9.05
-0010-09-15T04:44:23 = 4.44
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 popodne
1995-12-22T09:05:02 = 9:05 pre podne
-0010-09-15T04:44:23 = 4:44 pre podne
=head3 Hms (H:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 9:05:02
-0010-09-15T04:44:23 = 4:44:23
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 popodne
1995-12-22T09:05:02 = 9:05:02 pre podne
-0010-09-15T04:44:23 = 4:44:23 pre podne
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 Md (d/M)
2008-02-05T18:30:30 = 5/2
1995-12-22T09:05:02 = 22/12
-0010-09-15T04:44:23 = 15/9
=head3 MEd (E, M-d)
2008-02-05T18:30:30 = uto, 2-5
1995-12-22T09:05:02 = pet, 12-22
-0010-09-15T04:44:23 = sub, 9-15
=head3 MMdd (MM-dd)
2008-02-05T18:30:30 = 02-05
1995-12-22T09:05:02 = 12-22
-0010-09-15T04:44:23 = 09-15
=head3 MMM (LLL)
2008-02-05T18:30:30 = feb
1995-12-22T09:05:02 = dec
-0010-09-15T04:44:23 = sep
=head3 MMMd (MMM d.)
2008-02-05T18:30:30 = feb 5.
1995-12-22T09:05:02 = dec 22.
-0010-09-15T04:44:23 = sep 15.
=head3 MMMdd (dd.MMM)
2008-02-05T18:30:30 = 05.feb
1995-12-22T09:05:02 = 22.dec
-0010-09-15T04:44:23 = 15.sep
=head3 MMMEd (E d. MMM)
2008-02-05T18:30:30 = uto 5. feb
1995-12-22T09:05:02 = pet 22. dec
-0010-09-15T04:44:23 = sub 15. sep
=head3 MMMMd (MMMM d.)
2008-02-05T18:30:30 = februar 5.
1995-12-22T09:05:02 = decembar 22.
-0010-09-15T04:44:23 = septembar 15.
=head3 MMMMdd (dd. MMMM)
2008-02-05T18:30:30 = 05. februar
1995-12-22T09:05:02 = 22. decembar
-0010-09-15T04:44:23 = 15. septembar
=head3 MMMMEd (E MMMM d)
2008-02-05T18:30:30 = uto februar 5
1995-12-22T09:05:02 = pet decembar 22
-0010-09-15T04:44:23 = sub septembar 15
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y.)
2008-02-05T18:30:30 = 2008.
1995-12-22T09:05:02 = 1995.
-0010-09-15T04:44:23 = -10.
=head3 yM (y-M)
2008-02-05T18:30:30 = 2008-2
1995-12-22T09:05:02 = 1995-12
-0010-09-15T04:44:23 = -10-9
=head3 yMEd (EEE, d. M. yyyy.)
2008-02-05T18:30:30 = uto, 5. 2. 2008.
1995-12-22T09:05:02 = pet, 22. 12. 1995.
-0010-09-15T04:44:23 = sub, 15. 9. -010.
=head3 yMMM (y MMM)
2008-02-05T18:30:30 = 2008 feb
1995-12-22T09:05:02 = 1995 dec
-0010-09-15T04:44:23 = -10 sep
=head3 yMMMEd (EEE, d. MMM y.)
2008-02-05T18:30:30 = uto, 5. feb 2008.
1995-12-22T09:05:02 = pet, 22. dec 1995.
-0010-09-15T04:44:23 = sub, 15. sep -10.
=head3 yMMMM (y MMMM)
2008-02-05T18:30:30 = 2008 februar
1995-12-22T09:05:02 = 1995 decembar
-0010-09-15T04:44:23 = -10 septembar
=head3 yQ (y Q)
2008-02-05T18:30:30 = 2008 1
1995-12-22T09:05:02 = 1995 4
-0010-09-15T04:44:23 = -10 3
=head3 yQQQ (y QQQ)
2008-02-05T18:30:30 = 2008 Q1
1995-12-22T09:05:02 = 1995 Q4
-0010-09-15T04:44:23 = -10 Q3
=head3 yyMM (MM.yy)
2008-02-05T18:30:30 = 02.08
1995-12-22T09:05:02 = 12.95
-0010-09-15T04:44:23 = 09.-10
=head3 yyMMdd (dd.MM.yy)
2008-02-05T18:30:30 = 05.02.08
1995-12-22T09:05:02 = 22.12.95
-0010-09-15T04:44:23 = 15.09.-10
=head3 yyMMMd (d. MMM yy.)
2008-02-05T18:30:30 = 5. feb 08.
1995-12-22T09:05:02 = 22. dec 95.
-0010-09-15T04:44:23 = 15. sep -10.
=head3 yyQ (Q yy)
2008-02-05T18:30:30 = 1 08
1995-12-22T09:05:02 = 4 95
-0010-09-15T04:44:23 = 3 -10
=head3 yyQQQQ (QQQQ yy)
2008-02-05T18:30:30 = 1. kvartal 08
1995-12-22T09:05:02 = 4. kvartal 95
-0010-09-15T04:44:23 = 3. kvartal -10
=head3 yyyy (y.)
2008-02-05T18:30:30 = 2008.
1995-12-22T09:05:02 = 1995.
-0010-09-15T04:44:23 = -10.
=head3 yyyyMM (yyyy-MM)
2008-02-05T18:30:30 = 2008-02
1995-12-22T09:05:02 = 1995-12
-0010-09-15T04:44:23 = -010-09
=head3 yyyyMMMM (MMMM y.)
2008-02-05T18:30:30 = februar 2008.
1995-12-22T09:05:02 = decembar 1995.
-0010-09-15T04:44:23 = septembar -10.
=head2 Miscellaneous
=head3 Prefers 24 hour time?
Yes
=head3 Local first day of the week
ponedeljak
=head1 SUPPORT
See L<DateTime::Locale>.
=head1 AUTHOR
Dave Rolsky <autarch@urth.org>
=head1 COPYRIGHT
Copyright (c) 2008 David Rolsky. All rights reserved. This program is
free software; you can redistribute it and/or modify it under the same
terms as Perl itself.
This module was generated from data provided by the CLDR project, see
the LICENSE.cldr in this distribution for details on the CLDR data's
license.
=cut
| liuyangning/WX_web | xampp/perl/vendor/lib/DateTime/Locale/sh_CS.pm | Perl | mit | 10,360 |
package Alatar::Model::Refs::SqlArgumentReference;

# A reference pointing at a procedure/function argument.  On top of the
# owner and name handled by SqlReference, it records the name of the
# referenced object (the argument itself).

use strict;
use warnings;

use Alatar::Model::SqlObject;
use Alatar::Model::Refs::SqlReference;
our @ISA = qw(Alatar::Model::Refs::SqlReference);

# Constructor: ($owner, $name) go to the SqlReference base class, plus the
# name of the referenced argument.
sub new {
    my ($class, $owner, $name, $objectName) = @_;
    my $this = $class->SUPER::new($owner, $name);
    $this->{_objectName} = $objectName;
    # NOTE(review): SUPER::new presumably already blesses into $class; the
    # re-bless is kept for byte-compatible behaviour — confirm and simplify.
    bless($this, $class);
    return $this;
}

# Type predicate: always true for this class (the base class presumably
# answers false — confirm in SqlReference).
sub isSqlArgumentReference {
    my ($this) = @_;
    return 1;
}

# Human-readable type tag used by the rest of the model.
sub getObjectType {
    my ($this) = @_;
    return 'SqlArgumentReference';
}

# --- accessors ----------------------------------------------------------

# Set the name of the referenced argument.
sub setObjectName {
    my ($this, $objectName) = @_;
    $this->{_objectName} = $objectName;
}

# Get the name of the referenced argument.
sub getObjectName {
    my ($this) = @_;
    return $this->{_objectName};
}

1;
# Owner & Copyrights: Vance King Saxbe. A.
# (review) The line above was originally a C-style /*...*/ comment glued in
# front of the package statement; that is not valid Perl (it parses as a
# regex with a leading quantifier) and broke compilation, so it has been
# preserved as a proper '#' comment instead.
package AutoLoader;

use strict;
use 5.006_001;

our ($VERSION, $AUTOLOAD);

# Platform flags, computed once at compile time so find_filename() can
# build autoload-file paths with the right filesystem conventions.
my $is_dosish;
my $is_epoc;
my $is_vms;
my $is_macos;

BEGIN {
    $is_dosish = $^O eq 'dos' || $^O eq 'os2' || $^O eq 'MSWin32' || $^O eq 'NetWare';
    $is_epoc   = $^O eq 'epoc';
    $is_vms    = $^O eq 'VMS';
    $is_macos  = $^O eq 'MacOS';
    $VERSION   = '5.71';
}
# The deferred loader installed into client packages.  Maps the undefined
# sub named in $AUTOLOAD to its autosplit .al file, requires it, and then
# jumps to the now-defined sub with goto so the call frame is transparent
# to the caller.  DESTROY is special-cased: a missing DESTROY gets a no-op
# stub instead of an error.
AUTOLOAD {
    my $sub = $AUTOLOAD;
    my $filename = AutoLoader::find_filename( $sub );
    # Preserve the caller's $@ across our own evals; restored before goto.
    my $save = $@;
    local $!; # Do not munge the value.
    eval { local $SIG{__DIE__}; require $filename };
    if ($@) {
        if (substr($sub,-9) eq '::DESTROY') {
            no strict 'refs';
            # Install a permanent empty DESTROY so the lookup never repeats.
            *$sub = sub {};
            $@ = undef;
        } elsif ($@ =~ /^Can't locate/) {
            # The load might just have failed because the filename was too
            # long for some old SVR3 systems which treat long names as errors.
            # If we can successfully truncate a long name then it's worth a go.
            # There is a slight risk that we could pick up the wrong file here
            # but autosplit should have warned about that when splitting.
            if ($filename =~ s/(\w{12,})\.al$/substr($1,0,11).".al"/e){
                eval { local $SIG{__DIE__}; require $filename };
            }
        }
        if ($@){
            # Strip the " at FILE line N" suffix so croak re-reports from
            # the caller's perspective.
            $@ =~ s/ at .*\n//;
            my $error = $@;
            require Carp;
            Carp::croak($error);
        }
    }
    $@ = $save;
    # Tail-jump so the loaded sub sees the original @_ and caller().
    goto &$sub;
}
# Map a fully-qualified sub name (e.g. "Getopt::Long::GetOptions") to the
# path of its autosplit .al file.  Prefers an absolute/anchored path derived
# from %INC; otherwise falls back to a relative "auto/..." path so require()
# performs the @INC search itself.
sub find_filename {
    my $sub = shift;
    my $filename;
    # Braces used to preserve $1 et al.
    {
        # Try to find the autoloaded file from the package-qualified
        # name of the sub. e.g., if the sub needed is
        # Getopt::Long::GetOptions(), then $INC{Getopt/Long.pm} is
        # something like '/usr/lib/perl5/Getopt/Long.pm', and the
        # autoload file is '/usr/lib/perl5/auto/Getopt/Long/GetOptions.al'.
        #
        # However, if @INC is a relative path, this might not work. If,
        # for example, @INC = ('lib'), then $INC{Getopt/Long.pm} is
        # 'lib/Getopt/Long.pm', and we want to require
        # 'auto/Getopt/Long/GetOptions.al' (without the leading 'lib').
        # In this case, we simple prepend the 'auto/' and let the
        # C<require> take care of the searching for us.
        my ($pkg,$func) = ($sub =~ /(.*)::([^:]+)$/);
        $pkg =~ s#::#/#g;
        if (defined($filename = $INC{"$pkg.pm"})) {
            if ($is_macos) {
                # Classic MacOS uses ':' as the path separator.
                $pkg =~ tr#/#:#;
                $filename = undef
                    unless $filename =~ s#^(.*)$pkg\.pm\z#$1auto:$pkg:$func.al#s;
            } else {
                $filename = undef
                    unless $filename =~ s#^(.*)$pkg\.pm\z#$1auto/$pkg/$func.al#s;
            }
            # if the file exists, then make sure that it is a
            # a fully anchored path (i.e either '/usr/lib/auto/foo/bar.al',
            # or './lib/auto/foo/bar.al'. This avoids C<require> searching
            # (and failing) to find the 'lib/auto/foo/bar.al' because it
            # looked for 'lib/lib/auto/foo/bar.al', given @INC = ('lib').
            if (defined $filename and -r $filename) {
                unless ($filename =~ m|^/|s) {
                    if ($is_dosish) {
                        # Drive-letter or UNC-style paths count as anchored.
                        unless ($filename =~ m{^([a-z]:)?[\\/]}is) {
                            if ($^O ne 'NetWare') {
                                $filename = "./$filename";
                            } else {
                                $filename = "$filename";
                            }
                        }
                    }
                    elsif ($is_epoc) {
                        unless ($filename =~ m{^([a-z?]:)?[\\/]}is) {
                            $filename = "./$filename";
                        }
                    }
                    elsif ($is_vms) {
                        # XXX todo by VMSmiths
                        $filename = "./$filename";
                    }
                    elsif (!$is_macos) {
                        $filename = "./$filename";
                    }
                }
            }
            else {
                # Derived path is unreadable; fall through to @INC search.
                $filename = undef;
            }
        }
        unless (defined $filename) {
            # let C<require> do the searching
            $filename = "auto/$sub.al";
            $filename =~ s#::#/#g;
        }
    }
    return $filename;
}
# Called on 'use AutoLoader' (or 'use AutoLoader "AUTOLOAD"').  Optionally
# exports the AUTOLOAD sub into the caller, then loads the caller's
# autosplit index file (auto/<pkg>/autosplit.ix) so forward declarations
# of the split subs are in place.
sub import {
    my $pkg = shift;
    my $callpkg = caller;
    #
    # Export symbols, but not by accident of inheritance.
    #
    if ($pkg eq 'AutoLoader') {
        if ( @_ and $_[0] =~ /^&?AUTOLOAD$/ ) {
            no strict 'refs';
            # Alias the caller's AUTOLOAD directly to ours.
            *{ $callpkg . '::AUTOLOAD' } = \&AUTOLOAD;
        }
    }
    #
    # Try to find the autosplit index file. Eg., if the call package
    # is POSIX, then $INC{POSIX.pm} is something like
    # '/usr/local/lib/perl5/POSIX.pm', and the autosplit index file is in
    # '/usr/local/lib/perl5/auto/POSIX/autosplit.ix', so we require that.
    #
    # However, if @INC is a relative path, this might not work. If,
    # for example, @INC = ('lib'), then
    # $INC{POSIX.pm} is 'lib/POSIX.pm', and we want to require
    # 'auto/POSIX/autosplit.ix' (without the leading 'lib').
    #
    (my $calldir = $callpkg) =~ s#::#/#g;
    my $path = $INC{$calldir . '.pm'};
    if (defined($path)) {
        # Try absolute path name, but only eval it if the
        # transformation from module path to autosplit.ix path
        # succeeded!
        my $replaced_okay;
        if ($is_macos) {
            (my $malldir = $calldir) =~ tr#/#:#;
            $replaced_okay = ($path =~ s#^(.*)$malldir\.pm\z#$1auto:$malldir:autosplit.ix#s);
        } else {
            $replaced_okay = ($path =~ s#^(.*)$calldir\.pm\z#$1auto/$calldir/autosplit.ix#);
        }
        eval { require $path; } if $replaced_okay;
        # If that failed, try relative path with normal @INC searching.
        if (!$replaced_okay or $@) {
            $path ="auto/$calldir/autosplit.ix";
            eval { require $path; };
        }
        if ($@) {
            # A missing index is only a warning: the caller may simply
            # not have been autosplit yet.
            my $error = $@;
            require Carp;
            Carp::carp($error);
        }
    }
}
# Undo what import() did: remove the AUTOLOAD alias from the calling
# package (only if it still points at ours), then re-vivify the symbol so
# later references find a defined glob entry.
sub unimport {
    my $client = caller;

    no strict 'refs';

    foreach my $name (qw( AUTOLOAD )) {
        my $qualified = $client . '::' . $name;

        # Wipe the glob only when the installed sub is still our alias.
        if ( \&{ $qualified } == \&{ $name } ) {
            undef *{ $qualified };
        }

        *{ $qualified } = \&{ $qualified };
    }
}
1;
__END__
=head1 NAME
AutoLoader - load subroutines only on demand
=head1 SYNOPSIS
package Foo;
use AutoLoader 'AUTOLOAD'; # import the default AUTOLOAD subroutine
package Bar;
use AutoLoader; # don't import AUTOLOAD, define our own
sub AUTOLOAD {
...
$AutoLoader::AUTOLOAD = "...";
goto &AutoLoader::AUTOLOAD;
}
=head1 DESCRIPTION
The B<AutoLoader> module works with the B<AutoSplit> module and the
C<__END__> token to defer the loading of some subroutines until they are
used rather than loading them all at once.
To use B<AutoLoader>, the author of a module has to place the
definitions of subroutines to be autoloaded after an C<__END__> token.
(See L<perldata>.) The B<AutoSplit> module can then be run manually to
extract the definitions into individual files F<auto/funcname.al>.
B<AutoLoader> implements an AUTOLOAD subroutine. When an undefined
subroutine in is called in a client module of B<AutoLoader>,
B<AutoLoader>'s AUTOLOAD subroutine attempts to locate the subroutine in a
file with a name related to the location of the file from which the
client module was read. As an example, if F<POSIX.pm> is located in
F</usr/local/lib/perl5/POSIX.pm>, B<AutoLoader> will look for perl
subroutines B<POSIX> in F</usr/local/lib/perl5/auto/POSIX/*.al>, where
the C<.al> file has the same name as the subroutine, sans package. If
such a file exists, AUTOLOAD will read and evaluate it,
thus (presumably) defining the needed subroutine. AUTOLOAD will then
C<goto> the newly defined subroutine.
Once this process completes for a given function, it is defined, so
future calls to the subroutine will bypass the AUTOLOAD mechanism.
=head2 Subroutine Stubs
In order for object method lookup and/or prototype checking to operate
correctly even when methods have not yet been defined it is necessary to
"forward declare" each subroutine (as in C<sub NAME;>). See
L<perlsub/"SYNOPSIS">. Such forward declaration creates "subroutine
stubs", which are place holders with no code.
The AutoSplit and B<AutoLoader> modules automate the creation of forward
declarations. The AutoSplit module creates an 'index' file containing
forward declarations of all the AutoSplit subroutines. When the
AutoLoader module is 'use'd it loads these declarations into its caller's
package.
Because of this mechanism it is important that B<AutoLoader> is always
C<use>d and not C<require>d.
=head2 Using B<AutoLoader>'s AUTOLOAD Subroutine
In order to use B<AutoLoader>'s AUTOLOAD subroutine you I<must>
explicitly import it:
use AutoLoader 'AUTOLOAD';
=head2 Overriding B<AutoLoader>'s AUTOLOAD Subroutine
Some modules, mainly extensions, provide their own AUTOLOAD subroutines.
They typically need to check for some special cases (such as constants)
and then fallback to B<AutoLoader>'s AUTOLOAD for the rest.
Such modules should I<not> import B<AutoLoader>'s AUTOLOAD subroutine.
Instead, they should define their own AUTOLOAD subroutines along these
lines:
use AutoLoader;
use Carp;
sub AUTOLOAD {
my $sub = $AUTOLOAD;
(my $constname = $sub) =~ s/.*:://;
my $val = constant($constname, @_ ? $_[0] : 0);
if ($! != 0) {
if ($! =~ /Invalid/ || $!{EINVAL}) {
$AutoLoader::AUTOLOAD = $sub;
goto &AutoLoader::AUTOLOAD;
}
else {
croak "Your vendor has not defined constant $constname";
}
}
*$sub = sub { $val }; # same as: eval "sub $sub { $val }";
goto &$sub;
}
If any module's own AUTOLOAD subroutine has no need to fallback to the
AutoLoader's AUTOLOAD subroutine (because it doesn't have any AutoSplit
subroutines), then that module should not use B<AutoLoader> at all.
=head2 Package Lexicals
Package lexicals declared with C<my> in the main block of a package
using B<AutoLoader> will not be visible to auto-loaded subroutines, due to
the fact that the given scope ends at the C<__END__> marker. A module
using such variables as package globals will not work properly under the
B<AutoLoader>.
The C<vars> pragma (see L<perlmod/"vars">) may be used in such
situations as an alternative to explicitly qualifying all globals with
the package namespace. Variables pre-declared with this pragma will be
visible to any autoloaded routines (but will not be invisible outside
the package, unfortunately).
=head2 Not Using AutoLoader
You can stop using AutoLoader by simply
no AutoLoader;
=head2 B<AutoLoader> vs. B<SelfLoader>
The B<AutoLoader> is similar in purpose to B<SelfLoader>: both delay the
loading of subroutines.
B<SelfLoader> uses the C<__DATA__> marker rather than C<__END__>.
While this avoids the use of a hierarchy of disk files and the
associated open/close for each routine loaded, B<SelfLoader> suffers a
startup speed disadvantage in the one-time parsing of the lines after
C<__DATA__>, after which routines are cached. B<SelfLoader> can also
handle multiple packages in a file.
B<AutoLoader> only reads code as it is requested, and in many cases
should be faster, but requires a mechanism like B<AutoSplit> be used to
create the individual files. L<ExtUtils::MakeMaker> will invoke
B<AutoSplit> automatically if B<AutoLoader> is used in a module source
file.
=head1 CAVEATS
AutoLoaders prior to Perl 5.002 had a slightly different interface. Any
old modules which use B<AutoLoader> should be changed to the new calling
style. Typically this just means changing a require to a use, adding
the explicit C<'AUTOLOAD'> import if needed, and removing B<AutoLoader>
from C<@ISA>.
On systems with restrictions on file name length, the file corresponding
to a subroutine may have a shorter name than the routine itself.  This
can lead to conflicting file names. The I<AutoSplit> package warns of
these potential conflicts when used to split a module.
AutoLoader may fail to find the autosplit files (or even find the wrong
ones) in cases where C<@INC> contains relative paths, B<and> the program
does C<chdir>.
=head1 SEE ALSO
L<SelfLoader> - an autoloader that doesn't use external files.
=head1 AUTHOR
C<AutoLoader> is maintained by the perl5-porters. Please direct
any questions to the canonical mailing list. Anything that
is applicable to the CPAN release can be sent to its maintainer,
though.
Author and Maintainer: The Perl5-Porters <perl5-porters@perl.org>
Maintainer of the CPAN release: Steffen Mueller <smueller@cpan.org>
=head1 COPYRIGHT AND LICENSE
This package has been part of the perl core since the first release
of perl5. It has been released separately to CPAN so older installations
can benefit from bug fixes.
This package has the same copyright and license as the perl core:
Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
by Larry Wall and others
All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of either:
a) the GNU General Public License as published by the Free
Software Foundation; either version 1, or (at your option) any
later version, or
b) the "Artistic License" which comes with this Kit.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See either
the GNU General Public License or the Artistic License for more details.
You should have received a copy of the Artistic License with this
Kit, in the file named "Artistic". If not, I'll be glad to provide one.
You should also have received a copy of the GNU General Public License
along with this program in the file named "Copying". If not, write to the
Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307, USA or visit their web page on the internet at
http://www.gnu.org/copyleft/gpl.html.
For those of you that choose to use the GNU General Public License,
my interpretation of the GNU General Public License is that no Perl
script falls under the terms of the GPL unless you explicitly put
said script under the terms of the GPL yourself. Furthermore, any
object code linked with perl does not automatically fall under the
terms of the GPL, provided such object code only adds definitions
of subroutines and variables, and does not otherwise impair the
resulting interpreter from executing any standard Perl script. I
consider linking in C subroutines in this manner to be the moral
equivalent of defining subroutines in the Perl language itself. You
may sell such an object file as proprietary provided that you provide
or offer to provide the Perl source, as specified by the GNU General
Public License. (This is merely an alternate way of specifying input
to the program.) You may also sell a binary produced by the dumping of
a running Perl script that belongs to you, provided that you provide or
offer to provide the Perl source as specified by the GPL. (The
fact that a Perl interpreter and your code are in the same binary file
is, in this case, a form of mere aggregation.) This is my interpretation
of the GPL. If you still have concerns or difficulties understanding
my intent, feel free to contact me. Of course, the Artistic License
spells all this out for your protection, so you may prefer to use that.
=cut
/*email to provide support at vancekingsaxbe@powerdominionenterprise.com, businessaffairs@powerdominionenterprise.com, For donations please write to fundraising@powerdominionenterprise.com*/////// | VanceKingSaxbeA/ASX-Engine | App/vdrive/.sys/miniperl/lib/Autoloader.pm | Perl | mit | 15,243 |
# 1PIF converter
#
# Copyright 2015 Mike Cappella (mike@cappella.us)
package Converters::Onepif 1.03;
our @ISA = qw(Exporter);
our @EXPORT = qw(do_init do_import do_export);
our @EXPORT_OK = qw();
use v5.14;
use utf8;
use strict;
use warnings;
#use diagnostics;
binmode STDOUT, ":utf8";
binmode STDERR, ":utf8";
use Utils::PIF;
use Utils::Utils;
use Utils::Normalize;
use JSON::PP;
use XML::Simple;
use XML::LibXSLT;
use XML::LibXML;
use Time::Piece;
# Title string handed to the formatters to label the generated output.
my $header = qq/'1password data'/;

# Table of supported 1Password categories.  The per-type 'textname' and
# 'fields' entries are deliberately empty placeholders: 1PIF records are
# already typed, so this converter needs no text-matching field specs.
my %card_field_specs = (
    bankacct => { textname => '', fields => [ ]},
    creditcard => { textname => '', fields => [ ]},
    database => { textname => '', fields => [ ]},
    driverslicense => { textname => '', fields => [ ]},
    email => { textname => '', fields => [ ]},
    identity => { textname => '', fields => [ ]},
    login => { textname => '', fields => [ ]},
    membership => { textname => '', fields => [ ]},
    note => { textname => '', fields => [ ]},
    outdoorlicense => { textname => '', fields => [ ]},
    passport => { textname => '', fields => [ ]},
    password => { textname => '', fields => [ ]},
    rewards => { textname => '', fields => [ ]},
    server => { textname => '', fields => [ ]},
    socialsecurity => { textname => '', fields => [ ]},
    software => { textname => '', fields => [ ]},
    wireless => { textname => '', fields => [ ]},
);
$DB::single = 1; # triggers breakpoint when debugging
# do_init - describe this converter's capabilities to the driver.
#
# Returns a hashref with the category spec table ('specs'), the set of
# importable types ('imptypes', undef meaning all), and the converter's
# extra command-line options ('opts').
sub do_init {
    my %capabilities = (
        'specs'    => \%card_field_specs,
        'imptypes' => undef,
        'opts'     => [
            [ q{ --format <formatter> # use specified output formatter (see Formatters folder) },
              'format=s' ],
            [ q{ --percategory # create one file per category },
              'percategory' ],
            [ q{ --encodekey <key> # shift encode password using key },
              'encodekey=s' ],
        ],
    );
    return \%capabilities;
}
# Sort comparator: all 'webforms.WebForm' (Login) items first, everything
# else following in alphabetical typeName order.
#
# Fix: the original returned -1 whenever $a was a WebForm, so when BOTH
# operands were WebForms the comparator was inconsistent (compare(a,b) and
# compare(b,a) both returned -1).  Rank WebForm-ness first, then fall back
# to a name comparison, giving a proper total order.
sub bycategory {
    my $a_rank = $a->{'typeName'} eq 'webforms.WebForm' ? 0 : 1;
    my $b_rank = $b->{'typeName'} eq 'webforms.WebForm' ? 0 : 1;
    return $a_rank <=> $b_rank
        || $a->{'typeName'} cmp $b->{'typeName'};
}
my %exported;
my ($formatter, $proctype, $output_suffix);
# do_import - read a 1PIF export, filter the items by type, normalize their
# 'linked items' sections, and render everything through the selected
# formatter (an XSL stylesheet or a Perl formatter module).
#
# Args:    $file     - path to the 1PIF export file
#          $imptypes - optional hashref restricting which type keys to import
# Returns: rendered output (scalar ref, or hashref of category => text when
#          the formatter produces per-category output)
sub do_import {
    my ($file, $imptypes) = @_;

    # open the formatter to process the items
    #
    my @formatters = glob join '/', 'Formatters', ($main::opts{'format'} // 'html_compact') . '.*';
    if (@formatters == 0) {
        # No formatter matched: list the available ones for the error message.
        # (fix: the original line read "@formatters = @formatters = map ..." -
        # a harmless but wrong duplicated assignment)
        @formatters = map { /Formatters\/(.*)\.[^.]+$/; " $1" } glob join '/', 'Formatters', '*';
        bail "No such formatter matches '$main::opts{'format'}'; the available formatters are:\n", join "\n", @formatters;
    }
    bail "More than one style formatter matches '$main::opts{'format'}'; specify one more precisely." if @formatters > 1;
    # formatter name and processing type come from "<name>.<xsl|pm>"
    ($formatter, $proctype) = split '\.', (split '/', $formatters[0])[1];
    $output_suffix = (split '_', $formatter)[0];

    my $itemsref = get_items_from_1pif $file;

    # Imptypes / exptypes filtering - types are one to one in this converter
    # Also, tally exports by type
    my (@newlist, $n);
    for (@$itemsref) {
        # skip 1Password system types (folders, saved searches, ...)
        next if $_->{'typeName'} =~ /^system\.folder\./;
        my $typekey = typename_to_typekey($_->{'typeName'});
        if (! defined $typekey) {
            say "Unknown typename: $_->{'typeName'}";
            $typekey = 'UNKNOWN';
            $n++;
        }
        else {
            next if $imptypes and ! exists $imptypes->{$typekey};
            $n++;
            next if exists $main::opts{'exptypes'} and ! exists $main::opts{'exptypes'}->{$typekey};
        }
        $exported{$typekey}++;
        push @newlist, $_;
    }
    $itemsref = \@newlist;

    my @items = sort bycategory @$itemsref;

    # Fixup the 'linked items' sections; remove the section, and combine the 't' values (the target record's title) into
    # an array in the secureContents area under a new key named "Linked_Items".  This simplifies processing for formatters,
    # since it can be treated just like "tags".
    for my $item (@items) {
        my (@sections, @linked_items);
        if (exists $item->{'secureContents'} and exists $item->{'secureContents'}{'sections'}) {
            for my $section (@{$item->{'secureContents'}{'sections'}}) {
                if ($section->{'name'} ne 'linked items') {
                    push @sections, $section;
                }
                else {
                    next unless exists $section->{'fields'};
                    for my $field (@{$section->{'fields'}}) {
                        push @linked_items, $field->{'t'} if $field->{'t'} ne '' and exists $field->{'v'};
                    }
                    if (@linked_items) {
                        $item->{'secureContents'}{'Linked_Items'} = \@linked_items;
                    }
                }
            }
            delete $item->{'secureContents'}{'sections'};
            $item->{'secureContents'}{'sections'} = \@sections if @sections;
        }
    }

    # open the formatter and perform the transformation
    #
    my $output;
    if ($proctype eq 'xsl') {
        my $xsl_file = $formatters[0];
        my $xsimple = XML::Simple->new();
        debug "Creating XML...\n";
        my $xml_str = $xsimple->XMLout(\@items,
            NoAttr => 1,
            XMLDecl => '<?xml version="1.0" encoding="UTF-8"?>');
        my $xml_parser = XML::LibXML->new;
        my $xslt_parser = XML::LibXSLT->new;
        # expose Perl helpers to the stylesheet under the urn:perlfuncs namespace
        $xslt_parser->register_function("urn:perlfuncs", "epoch2date", \&epoch2date);
        $xslt_parser->register_function("urn:perlfuncs", "monthYear", \&monthYear);
        $xslt_parser->register_function("urn:perlfuncs", "address2str", \&address2str);
        $xslt_parser->register_function("urn:perlfuncs", "encodepassword", \&encodepassword);
        my $xml = eval { $xml_parser->parse_string($xml_str); }; die "XML parse failed: $@" if $@;
        my $xsl = eval { $xml_parser->parse_file($xsl_file); }; die "XSL file parse failed: $@" if $@;
        my $stylesheet = $xslt_parser->parse_stylesheet($xsl);
        my $results = $stylesheet->transform($xml, header => $header);
        $output = \($stylesheet->output_as_chars($results));
    }
    elsif ($proctype eq 'pm') {
        my $module = ($formatters[0] =~ s/\//::/r);
        $module =~ s/\.pm$//;
        eval {
            require $formatters[0];
            $module->import();
            1;
        } or do {
            my $error = $@;
            main::Usage(1, "Error: failed to load style formatter module '$formatter'\n$error");
        };
        $output = $module->do_process(\@items);
    }
    else {
        bail "Unsupported style formatter type: $proctype";
    }

    #debug "\n", $output; # needs to be updated to $$output or iterate through $output hash
    debug "Done\n";
    summarize_import('item', $n);
    return $output;
}
# do_export - write the rendered output produced by do_import to disk and
# report per-type and total export counts.
#
# Arg: $output - scalar ref (single file) or hashref of category => text
#                (one file per category, when --percategory was used).
sub do_export {
    my $output = shift;
    my $ntotal = 0;
    my @files;
    if (%exported) {
        # One output file per category for hash output; a single unnamed
        # "category" otherwise.
        my @categories = ref $output eq 'HASH' ? keys %$output : ( '' );
        for (@categories) {
            my $file = $main::opts{'outfile'};
            my $catname = lc ($_ =~ s/ /_/gr);
            # Derive the real output name from the default "1P_import.1pif"
            # placeholder, embedding category name and formatter suffix.
            $file =~ s/([\\\/]1P)_import\.1pif$/myjoin('_', $1, 'converted', $catname) . ".$output_suffix"/e;
            debug "Output file: ", $file;
            push @files, $file;
            open my $io, ">:encoding(utf8)", $file
                or bail "Unable to open 1PIF file: $file\n$!";
            print $io ref($output) eq 'HASH' ? $output->{$_} : $$output;
            close $io;
        }
        # Tally and report per-type counts accumulated during do_import.
        for my $type (keys %exported) {
            $ntotal += $exported{$type};
            verbose "Exported $exported{$type} $type ", pluralize('item', $exported{$type});
        }
    }
    verbose "Exported $ntotal total ", pluralize('item', $ntotal);
    if ($ntotal) {
        if (@files > 1) {
            verbose "The following files are ready to use:";
            verbose "\t$_" for @files;
        }
        else {
            verbose "Your output file is $files[0]";
        }
    }
}
# functions used by XML formatters
#
# epoch2date - XSL helper: convert an epoch-seconds text node into a local
# date string.  With a true second argument only "YYYY-MM-DD" is returned;
# otherwise "YYYY-MM-DD HH:MM:SS".
sub epoch2date {
    my ($nodes, $date_only) = @_;
    # Time::Piece's exported localtime() returns a Time::Piece object.
    my $tp = localtime($nodes->[0]->textContent);
    return $tp->ymd if $date_only;
    return join ' ', $tp->ymd, $tp->hms;
}
# monthYear - XSL helper: reformat a 6-digit "YYYYMM" text node value as
# "YYYY-MM".  Any value not matching exactly six digits is returned as-is.
sub monthYear {
    my ($nodes) = @_;
    my $raw = $nodes->[0]->textContent;
    # /r returns the (possibly unchanged) transformed copy.
    return $raw =~ s/^(\d{4})(\d{2})$/$1-$2/r;
}
# address2str - XSL helper: flatten an address node into a single display
# string, e.g. "street: 1 Main St, city: Springfield".  Components are
# emitted in canonical order, skipping any that are absent or empty.
# Returns '' when no components are present.
#
# Fix: removed the original's dead statement
#     my $val = $_[0][0]->textContent;
# whose value was never used.
sub address2str {
    my @addrs;
    for (qw/street city state zip country/) {
        if (my $found = $_[0][0]->find($_)->[0]) {
            push @addrs, [ $_, $found->textContent ] if defined $found->textContent and $found->textContent ne '';
        }
    }
    return @addrs ? ( myjoin ", ", map { join ': ', $_->[0], $_->[1] } @addrs) : '';
}
# encodepassword - XSL helper: obfuscate a password text node with a simple
# per-byte shift cipher keyed by --encodekey, returning lowercase hex.
# Each password byte is shifted by (key_byte - ord('1') - 1), cycling
# through the key; results below 32 wrap up by 95 to stay printable.
sub encodepassword {
    my $plain     = $_[0][0]->textContent;
    my @pw_bytes  = unpack("C*", $plain);
    my @key_bytes = unpack("C*", $main::opts{'encodekey'});
    my @offsets   = map { $_ - ord('1') - 1 } @key_bytes;
    my $encoded   = '';
    for my $i (0 .. $#pw_bytes) {
        my $shifted = $pw_bytes[$i] - $offsets[$i % @offsets];
        $shifted += 95 if $shifted < 32;        # wrap over non-printing chars
        $encoded .= sprintf "%02x", $shifted;
    }
    debug $encoded;
    return $encoded;
}
1;
| mikecappella/onepassword-utilities | convert_to_1p4/Converters/Onepif.pm | Perl | mit | 8,518 |
#!/usr/bin/perl
# Program 'stringAndLists'
# Given: A string s of length at most 200 letters and four integers
# a, b, c, d
# Return: The slice of this string from indices a through b and c through
# d (with space in between), inclusively
# Date: 2015 Sept 18
# Version: 5.16
use strict;
use warnings;

# Fixes over the original: lexical filehandles, three-arg open, and an
# error check on the output open (previously unchecked, 2-arg form).
open( my $in, '<', $ARGV[0] ) || die "input error.\n";
my $message = <$in>;            # first line is one string
chomp $message;                 # remove newline character
my @str2 = split " ", <$in>;    # second line has numbers to be split by spaces
close $in;

# find lengths of strings to extract in next step
my $one = $str2[1] - $str2[0] + 1;  # Plus one for 1 based numbering
my $two = $str2[3] - $str2[2] + 1;  # for lists

# find actual words between indices indicated in input file
# substr STRING, BEGIN, LENGTH
my $word1 = substr $message, $str2[0], $one;
my $word2 = substr $message, $str2[2], $two;

# save answer in file
open( my $out, '>', 'out.txt' ) || die "cannot open out.txt: $!\n";
print $out "$word1 $word2";
close( $out ) || die "cannot close out.txt: $!\n";
| erictleung/rosalind-programming | python-village/INI3_Strings_and_Lists/stringAndLists.pl | Perl | mit | 988 |
# Interface role for Bailador template engines.  Concrete engine classes
# must provide render(); the yada-yada body makes this a required stub.
role Bailador::Template {
    # Render $template with @params and return the resulting text.
    method render($template, @params) { ... }
}
| sergot/Bailador | lib/Bailador/Template.pm | Perl | mit | 74 |
#!/usr/bin/env perl
use strict;
use warnings;
use File::Copy;
use File::Spec;
use File::Which;
use File::Basename;
use Getopt::Long;
# ---- option parsing -------------------------------------------------------
my %options = (
    code_root => '~mh12/bin/MetaFinisherSC-5.1/srcRefactor/',
);

my $ops_ok = GetOptions(
    \%options,
    'code_root|c=s',
    'noclean',
);

# Exactly three positional arguments are required after option parsing.
if ($#ARGV != 2 or !($ops_ok)) {
    print STDERR "usage: $0 <reads.fasta> <assemble.fasta> <output directory>
Wrapper script to run BIGMAC. Assumes nucmer is in your path.
Options:
-c, --code_root
Path to the srcRefactor/ directory [$options{code_root}]
--noclean
Do not clean intermediate files. By default,
almost everthing is deleted
";
    exit(1);
}

$options{code_root} = File::Spec->rel2abs(glob($options{code_root}));
my $reads = File::Spec->rel2abs($ARGV[0]);
my $input_contigs = File::Spec->rel2abs($ARGV[1]);
my $outdir = File::Spec->rel2abs($ARGV[2]);

# find MUMmer directory
print "Finding MUMmer directory...\n";
my $nucmer = which('nucmer');
die "nucmer not found in path. Cannot continue" unless defined($nucmer);
# Fix: readlink() returns undef when its argument is NOT a symlink, which
# made dirname() below fail for a plain nucmer binary.  Only resolve the
# link when one actually exists.
$nucmer = readlink($nucmer) if -l $nucmer;
print "nucmer after following symlinks: $nucmer\n";
my $mummer_dir = dirname($nucmer);
print "MUMmer dir: $mummer_dir\n";

# setup data directory and symlinks to run bigmac
mkdir $outdir or die $!;
chdir $outdir or die $!;
mkdir 'data' or die $!;
# BIGMAC expects sequences renamed "Seg1", "Seg2", ... in data/LR|LC.fasta
system_call(q~perl -pe 's/>[^\$]*$/">Seg" . ++$n ."\n"/ge'~ . " $reads > data/LR.fasta");
system_call(q~perl -pe 's/>[^\$]*$/">Seg" . ++$n ."\n"/ge'~ . " $input_contigs > data/LC.fasta");
symlink "$options{code_root}/", 'srcRefactor' or die $!;
print "\nMade symlinks in output directory $outdir\n\n";

# run bigmac: misassembly fixer, then repeat phaser
system_call("python -m srcRefactor.misassemblyFixerLib.mFixer data $mummer_dir");
system_call("python -m srcRefactor.repeatPhaserLib.aSplitter data $mummer_dir");

if ($options{noclean}) {
    print "--noclean used, so keeping all files. Symlinking to final contigs file\n";
    symlink 'data/abun.fasta', 'contigs_out.fasta' or die $!;
}
else {
    print "Tidying up temporary files\n";
    rename 'data/abun.fasta', 'contigs_out.fasta' or die $!;
    system_call("rm -fr data srcRefactor");
}
# system_call - run a shell command, logging it before and after.  If the
# command exits non-zero the whole program aborts with exit status 1.
sub system_call {
    my ($cmd) = @_;
    print "Running command: $cmd\n";
    my $status = system($cmd);
    if ($status != 0) {
        print STDERR "Error in system call:\n$cmd\n";
        exit(1);
    }
    print "Command finished: $cmd\n";
    print "_____________________________________________________________________\n\n";
}
| martinghunt/bioinf-scripts | perl/bigmac_wrapper.pl | Perl | mit | 2,442 |
#!/usr/bin/env perl
=head1 NAME
FixTrialPluralTypes.pm
=head1 SYNOPSIS
mx-run FixTrialPluralTypes [options] -H hostname -D dbname -u username [-F]
this is a subclass of L<CXGN::Metadata::Dbpatch>
see the perldoc of parent class for more details.
=head1 DESCRIPTION
This patch fixes the odd "Preliminary Yield Trials" and "Advanced Yeld Trials" and "Advanced Yield Trials" terms that should be singular terms
This subclass uses L<Moose>. The parent class uses L<MooseX::Runnable>
=head1 AUTHOR
=head1 COPYRIGHT & LICENSE
Copyright 2010 Boyce Thompson Institute for Plant Research
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
package FixTrialPluralTypes;
use Moose;
use Bio::Chado::Schema;
use Try::Tiny;
use SGN::Model::Cvterm;
extends 'CXGN::Metadata::Dbpatch';
has '+description' => ( default => <<'' );
This patch fixes the odd "Preliminary Yield Trials" and "Advanced Yeld Trials" and "Advanced Yield Trials" terms that should be singular terms
has '+prereq' => (
default => sub {
[],
},
);
# patch - repoint projectprops from the misspelled/plural trial-type cvterms
# ("Advanced Yeld Trials", "Advanced Yield Trials", "Preliminary Yield
# Trials") at the canonical singular terms, then delete the obsolete terms.
sub patch {
    my $self = shift;

    print STDOUT "Executing the patch:\n " . $self->name . ".\n\nDescription:\n ". $self->description . ".\n\nExecuted by:\n " . $self->username . " .";
    print STDOUT "\nChecking if this db_patch was executed before or if previous db_patches have been executed.\n";
    print STDOUT "\nExecuting the SQL commands.\n";

    my $schema = Bio::Chado::Schema->connect( sub { $self->dbh->clone } );

    print STDERR "INSERTING CV TERMS...\n";

    # Canonical (singular) project_type cvterm ids to migrate onto.
    my $ayt_term_id = SGN::Model::Cvterm->get_cvterm_row($schema, 'Advanced Yield Trial', 'project_type')->cvterm_id();
    my $pyt_term_id = SGN::Model::Cvterm->get_cvterm_row($schema, 'Preliminary Yield Trial', 'project_type')->cvterm_id();

    my $dbh = $schema->storage->dbh();

    # Migrate projectprops that still reference the bad AYT spellings.
    my $find_ayt   = $dbh->prepare("SELECT projectprop_id FROM projectprop join cvterm on(type_id=cvterm_id) where name='Advanced Yeld Trials' OR name='Advanced Yield Trials';");
    my $update_ayt = $dbh->prepare("UPDATE projectprop SET type_id = $ayt_term_id WHERE projectprop_id = ?;");
    $find_ayt->execute();
    while (my ($projectprop_id) = $find_ayt->fetchrow_array()) {
        $update_ayt->execute($projectprop_id);
    }

    # Migrate projectprops that reference the plural PYT spelling.
    my $find_pyt   = $dbh->prepare("SELECT projectprop_id FROM projectprop join cvterm on(type_id=cvterm_id) where name='Preliminary Yield Trials';");
    my $update_pyt = $dbh->prepare("UPDATE projectprop SET type_id = $pyt_term_id WHERE projectprop_id = ?;");
    $find_pyt->execute();
    while (my ($projectprop_id) = $find_pyt->fetchrow_array()) {
        $update_pyt->execute($projectprop_id);
    }

    # Finally remove the obsolete cvterm rows themselves.
    my $obsolete_terms = $schema->resultset("Cv::Cvterm")->search({
        name => {-in => ["Advanced Yeld Trials", "Advanced Yield Trials", "Preliminary Yield Trials"]}
    });
    while (my $term = $obsolete_terms->next){
        $term->delete;
    }

    print "You're done!\n";
}
####
1; #
####
| solgenomics/sgn | db/00103/FixTrialPluralTypes.pm | Perl | mit | 3,479 |
package Genesis::Vault;
use strict;
use warnings;
use Genesis;
use Genesis::UI;
use JSON::PP qw/decode_json/;
use UUID::Tiny ();
### Class Variables {{{
my (@all_vaults, $default_vault, $current_vault);
# }}}
### Class Methods {{{
# new - raw instantiation of a vault object {{{
# new - raw constructor; does not contact the vault.
#
# Args: $url, $name  - safe target url and alias
#       $verify      - truthy when TLS certs should be verified
#       $namespace   - optional vault namespace
# The 'id' field gets a random per-object suffix so distinct objects for
# the same target remain distinguishable.
sub new {
	my ($class, $url, $name, $verify, $namespace) = @_;
	my %attrs = (
		url       => $url,
		name      => $name,
		verify    => $verify ? 1 : 0, # normalize JSON::Boolean and other truthy values
		namespace => $namespace,
		id        => sprintf("%s-%06d", $name, rand(1000000)),
	);
	return bless(\%attrs, $class);
}
# }}}
# create - create a new safe target and target it {{{
# create - create a new safe target for $url named $name, register it in the
# class cache, and return it as a Genesis::Vault (does not authenticate).
#
# Options: skip_verify  - pass -k to safe (skip TLS verification)
#          namespace    - vault namespace to target
#          no_strongbox - disable strongbox support on the target
#
# The system's previously-targeted safe (if any) is restored afterwards.
sub create {
	my ($class, $url, $name, %opts) = @_;
	# Remember the current system default so we can restore it below.
	my $default = $class->default(1);
	my @cmd = ('safe', 'target', $url, $name);
	push(@cmd, '-k') if $opts{skip_verify};
	push(@cmd, '-n', $opts{namespace}) if $opts{namespace};
	push(@cmd, '--no-strongbox') if $opts{no_strongbox};
	my ($out,$rc,$err) = run({stderr => 0, env => {VAULT_ADDR => "", SAFE_TARGET => ""}}, @cmd);
	run('safe','target',$default->{name}) if $default; # restore original system target if there was one
	bail(
		"Could not create new Safe target #C{%s} pointing at #M{%s}:\n %s",
		$name, $url, $err
	) if $rc;
	my $vault = $class->new($url, $name, !$opts{skip_verify}, $opts{namespace});
	# Replace an existing cached vault of the same name, otherwise append.
	for (0..scalar(@all_vaults)-1) {
		if ($all_vaults[$_]->{name} eq $name) {
			$all_vaults[$_] = $vault;
			return $vault;
		}
	}
	push(@all_vaults, $vault);
	return $vault;
}
# }}}
# target - builder for vault based on locally available vaults {{{
# target - select a vault from the locally configured safe targets, either
# by the given $target (alias or url) or interactively, then connect to and
# validate it.  Bails when the target is missing or when multiple aliases
# share one url (ambiguous under Genesis's distributed model).
sub target {
	my ($class,$target,%opts) = @_;

	$opts{default_vault} ||= $class->default;

	my $url;
	if ($target) {
		# Resolve alias-or-url to a url plus all aliases pointing at it.
		($url, my @targets) = _get_targets($target);
		if (scalar(@targets) <1) {
			bail "#R{[ERROR]} Safe target \"#M{%s}\" not found.  Please create it\n".
					 "and authorize against it before re-attempting this command.",
					 $target;
		}
		if (scalar(@targets) >1) {
			bail "#R{[ERROR]} Multiple safe targets use url #M{%s}:\n%s\n".
					 "\nYour ~/.saferc file cannot have more than one target for the given url.  Please".
					 "remove any duplicate targets before re-attempting this command.",
					 $url, join("", map {" - #C{$_}\n"} @targets);
		}
	} else {
		# No target given: present an interactive picker (requires a tty).
		die_unless_controlling_terminal
			"#R{[ERROR]} Cannot interactively select vault unless in a controlling terminal - terminating!";

		# Column width for the widest vault name.
		my $w = (sort {$b<=>$a} map {length($_->{name})} $class->find)[0];

		my (%uses,@labels,@choices);
		$uses{$_->{url}}++ for $class->find;
		for ($class->find) {
			# Skip urls shared by multiple aliases - they are ambiguous.
			next unless $uses{$_->{url}} == 1;
			push(@choices, $_->{url});
			push(@labels, [csprintf(
				"#%s{%-*.*s}   #R{%-10.10s} #%s{%s}",
				$_->{name} eq $opts{default_vault}->{name} ? "G" : "-",
				$w, $w, $_->{name},
				$_->{url} =~ /^https/ ? ($_->{verify} ? "" : "(noverify)") : "(insecure)",
				$_->{name} eq $opts{default_vault}->{name} ? "Y" : "-",
				$_->{url}
			),$_->{name}]);
		}

		my $msg = csprintf("#u{Select Vault:}\n");
		my @invalid_urls = grep {$uses{$_} > 1} keys(%uses);

		if (scalar(@invalid_urls)) {
			$msg .= csprintf("\n".
				"#Y{Note:} One or more vault targets have been omitted because they are alias for\n".
				"      the same URL, which is incompatible with Genesis's distributed model.\n".
				"      If you need one of the omitted targets, please ensure there is only one\n".
				"      target alias that uses its URL.\n");
		}

		bail("#R{[ERROR]} There are no valid vault targets found on this system.")
			unless scalar(@choices);

		$url = prompt_for_choice(
			$msg,
			\@choices,
			$uses{$opts{default_vault}->{url}} == 1 ? $opts{default_vault}->{url} : undef,
			\@labels
		)
	}

	my $vault = ($class->find(url => $url))[0];
	return $vault->connect_and_validate()
}
# }}}
# attach - builder for vault based on loaded environment {{{
# attach - build and validate a Vault object from an environment's vault
# settings.  $url and $insecure may each name an environment variable by
# starting with '$' (e.g. '$GENESIS_VAULT'), in which case the value is
# read from %ENV.  Bails if the target is missing, not a url, or matched
# by more than one safe alias.
#
# Fix: guard the substr() env-var expansion with definedness/truthiness
# checks so an undef or empty argument no longer raises uninitialized-value
# warnings before the explicit "No vault target specified" bail.
sub attach {
	my ($class, $url, $insecure) = @_;

	# Allow vault target and insecure to be specified by ENV variables.
	$url = $ENV{substr($url,1)} if $url && substr($url,0,1) eq '$';
	$insecure = $ENV{substr($insecure,1)} if $insecure && substr($insecure,0,1) eq '$';

	bail "#R{[ERROR]} No vault target specified"
		unless $url;
	bail "#R{[ERROR]} Expecting vault target '$url' to be a url"
		unless _target_is_url($url);

	($url, my @targets) = _get_targets($url);
	if (scalar(@targets) <1) {
		bail "#R{[ERROR]} Safe target for #M{%s} not found.  Please run\n\n".
				 "  #C{safe target <name> \"%s\"%s\n\n".
				 "then authenticate against it using the correct auth method before\n".
				 "re-attempting this command.",
				 $url, $url,($insecure?" -k":"");
	}
	if (scalar(@targets) >1) {
		bail "#R{[ERROR]} Multiple safe targets found for #M{%s}:\n%s\n".
				 "\nYour ~/.saferc file cannot have more than one target for the given url.\n" .
				 "Please remove any duplicate targets before re-attempting this command.",
				 $url, join("", map {" - #C{$_}\n"} @targets);
	}

	my $vault = $class->new($url, $targets[0], !$insecure);
	return $vault->connect_and_validate;
}
# }}}
# rebind - builder for rebinding to a previously selected vault (for callbacks) {{{
# rebind - rebuild the current vault inside a kit callback from the
# GENESIS_TARGET_VAULT environment variable (url, or - legacy - a target
# name matching the system default).  Performs less validation than
# target/attach since the parent process already validated the vault.
# Returns the vault (set as current) or nothing when it cannot be resolved.
sub rebind {
	# Special builder with less checking for callback support
	my ($class) = @_;

	bail("Cannot rebind to vault in callback due to missing environment variables!")
		unless $ENV{GENESIS_TARGET_VAULT};

	my $vault;
	if (is_valid_uri($ENV{GENESIS_TARGET_VAULT})) {
		$vault = ($class->find(url => $ENV{GENESIS_TARGET_VAULT}))[0];
		bail("Cannot rebind to vault at address '$ENV{GENESIS_TARGET_VAULT}` - not found in .saferc")
			unless $vault;
		trace "Rebinding to $ENV{GENESIS_TARGET_VAULT}: Matches %s", $vault && $vault->{name} || "<undef>";
	} else {
		# Check if its a named vault and if it matches the default (legacy mode)
		if ($ENV{GENESIS_TARGET_VAULT} eq $class->default->{name}) {
			$vault = $class->default()->ref_by_name();
			trace "Rebinding to default vault `$ENV{GENESIS_TARGET_VAULT}` (legacy mode)";
		}
	}
	return unless $vault;
	return $vault->set_as_current;
}
# }}}
# find - return vaults that match filter (defaults to all) {{{
# find - return the locally configured vaults matching %filter (attribute
# name => exact value); with no filter, all of them.  The target list is
# read from `safe targets --json` once and cached in @all_vaults.
sub find {
	my ($class, %filter) = @_;
	# Populate the cache on first use (sorted by name for stable output).
	@all_vaults = (
		map {Genesis::Vault->new($_->{url},$_->{name},$_->{verify})}
		sort {$a->{name} cmp $b->{name}}
		@{ read_json_from(run({env => {VAULT_ADDR => "", SAFE_TARGET => ""}}, "safe targets --json")) }
	) unless @all_vaults;
	my @matches = @all_vaults;
	# Apply each filter attribute as an exact string match.
	for my $quality (keys %filter) {
		@matches = grep {$_->{$quality} eq $filter{$quality}} @matches;
	}
	return @matches;
}
# }}}
# find_by_target - return all vaults matching url associated with specified target alias or url {{{
# find_by_target - return every cached vault whose alias points at the url
# resolved from $target (itself an alias name or a url).
sub find_by_target {
	my ($class, $target) = @_;
	my ($url, @aliases) = _get_targets($target);
	my @found;
	push(@found, $class->find(name => $_)) for @aliases;
	return @found;
}
# }}}
# default - return the default vault (targeted by system) {{{
# default - the vault currently targeted by the system's safe CLI, cached
# after the first lookup; pass a true $refresh to force a re-read.
sub default {
	my ($class,$refresh) = @_;
	unless ($default_vault && !$refresh) {
		# Ask safe which target is active, then map it to our cached object.
		my $json = read_json_from(run({env => {VAULT_ADDR => "", SAFE_TARGET => ""}},"safe target --json"));
		$default_vault = (Genesis::Vault->find(name => $json->{name}))[0];
	}
	return $default_vault;
}
# }}}
# current - return the last vault returned by attach, target, or rebind {{{
# current - the vault most recently selected via target/attach/rebind
# (i.e. the last one passed to set_as_current); undef if none yet.
sub current {
	return $current_vault
}
# }}}
# clear_all - clear all cached data {{{
# clear_all - drop every piece of cached class state: per-vault env data,
# the vault cache, and the default/current selections.  Returns its first
# argument so it can be chained off the class.
sub clear_all {
	delete($_->{_env}) for @all_vaults;
	@all_vaults    = ();
	$default_vault = undef;
	$current_vault = undef;
	return $_[0]; # chaining Genesis::Vault
}
# }}}
# }}}
### Instance Methods {{{
# public accessors: url, name, verify, tls {{{
sub url    { $_[0]->{url}; }    # base url of the vault, e.g. https://host:8200
sub name   { $_[0]->{name}; }   # safe target alias for this vault
sub verify { $_[0]->{verify}; } # 1 when TLS certificates are verified
sub tls    { $_[0]->{url} =~ "^https://"; } # true when the url uses https
#}}}
# connect_and_validate - connect to the vault and validate that its connected {{{
# connect_and_validate - verify the vault is reachable, authenticated and
# initialized (authenticating from env credentials if needed), then mark it
# as the current vault.  Bails unless the final status is "ok".  Skips the
# checks when this vault is already current.
sub connect_and_validate {
	my ($self) = @_;
	unless ($self->is_current) {
		printf STDERR csprintf("\n#yi{Verifying availability of vault '%s' (%s)...}", $self->name, $self->url)
			unless in_callback || under_test;
		my $status = $self->status;
		if ($status eq 'unauthenticated') {
			# Try env-supplied credentials, then re-derive the status.
			$self->authenticate;
			$status = $self->initialized ? 'ok' : 'uninitialized';
		}
		error("#%s{%s}\n", $status eq "ok"?"G":"R", $status)
			unless in_callback || under_test;
		debug "Vault status: $status";
		bail("#R{[ERROR]} Could not connect to vault%s",
			(in_callback || under_test) ? sprintf(" '%s' (%s): status is %s)", $self->name, $self->url,$status):""
		) unless $status eq "ok";
	}
	return $self->set_as_current;
}
# }}}
# authenticate - attempt to log in with credentials available in environment variables {{{
# authenticate - log in to this vault via `safe auth`, trying each supported
# method whose environment variables are all present (AppRole, token,
# userpass, github - in that order).  Returns $self once authenticated;
# bails with a help message listing the supported variables otherwise.
sub authenticate {
	my $self = shift;
	my $ref = $self->ref();

	my $auth_types = [
		{method => 'approle',  label => "AppRole",                     vars => [qw/VAULT_ROLE_ID VAULT_SECRET_ID/]},
		{method => 'token',    label => "Vault Token",                 vars => [qw/VAULT_AUTH_TOKEN/]},
		{method => 'userpass', label => "Username/Password",           vars => [qw/VAULT_USERNAME VAULT_PASSWORD/]},
		{method => 'github',   label => "Github Peronal Access Token", vars => [qw/VAULT_GITHUB_TOKEN/]},
	];

	return $self if $self->authenticated;

	my %failed;  # methods attempted but rejected, for the error report
	for my $auth (@$auth_types) {
		my @vars = @{$auth->{vars}};
		# Only attempt a method when every one of its env vars is set.
		if (scalar(grep {$ENV{$_}} @vars) == scalar(@vars)) {
			debug "Attempting to authenticate with $auth->{label} to #M{$ref} vault";
			# Credentials are fed to safe on stdin via process substitution.
			my ($out, $rc) = $self->query(
				'safe auth ${1} < <(echo "$2")', $auth->{method}, join("\n", map {$ENV{$_}} @vars)
			);
			return $self if $self->authenticated;
			debug "Authentication with $auth->{label} to #M{$ref} vault failed!";
			$failed{$auth->{method}} = 1;
		}
	}

	# Last chance, check if we're already authenticated; otherwise bail.
	# This also forces a update to the token, so we don't have to explicitly do that here.
	return $self if $self->authenticated;
	bail(
		"#R{[ERROR]} Could not successfully authenticate against #M{$ref} vault with #C{safe}.\n\n".
		"        Genesis can automatically authenticate with safe in the following ways:\n".
		join("", map {
			my $a=$_;
			sprintf(
				"          - #G{%s}, supplied by %s%s\n",
				$a->{label},
				join(' and ', map {"#y{\$$_}"} @{$a->{vars}}),
				($failed{$a->{method}}) ? " #R{[present, but failed]}" : ""
			)
		} @{$auth_types})
	);
}
# }}}
# authenticated - returns true if authenticated {{{
# authenticated - true when safe reports a valid auth token for this vault.
# Clears the cached env first so the token is re-read fresh.
sub authenticated {
	my $self = shift;
	delete($self->{_env}); # Force a fresh token retrieval
	return unless $self->token;
	my ($auth,$rc,$err) = read_json_from($self->query({stderr => '/dev/null'},'safe auth status --json'));
	return $rc == 0 && $auth->{valid};
}
# }}}
# initialized - returns true if initialized for Genesis {{{
# initialized - true when the vault carries a Genesis handshake marker,
# either under the configured secrets mount or the legacy /secret/ mount.
sub initialized {
	my ($self) = @_;
	my $secrets_mount = $ENV{GENESIS_SECRETS_MOUNT} || "/secret/";
	return $self->has($secrets_mount.'handshake')
	    || $self->has('/secret/handshake');
}
# }}}
# query - make safe calls against this vault {{{
# query - run a safe command against this vault (prepending 'safe' when
# absent) and return whatever run() returns.  An optional leading hashref
# supplies run() options; SAFE_TARGET is forced to this vault unless the
# caller overrode it.
sub query {
	my $self = shift;
	my $opts = ref($_[0]) eq "HASH" ? shift : {};
	my @cmd = @_;
	unshift(@cmd, 'safe') unless $cmd[0] eq 'safe' || $cmd[0] =~ /^safe /;
	$opts->{env} ||= {};
	$opts->{env}{DEBUG} = "";                # safe DEBUG is disruptive
	$opts->{env}{SAFE_TARGET} = $self->ref unless defined($opts->{env}{SAFE_TARGET});
	return run($opts, @cmd);
}
# }}}
# get - get a key or all keys under for a given path {{{
# get - read a single secret value (when $key is given) or the whole
# key/value hash at $path from this vault.
#
# Returns: the value string, or undef on failure (single key);
#          a hashref of key => value, or {} on failure (whole path).
#
# Fix: the export_version check below read "...{export_version}||0 == 2",
# which Perl parses as "export_version || (0 == 2)" because '==' binds
# tighter than '||' - so ANY truthy version matched.  Parenthesized to
# compare the defaulted value against 2 as intended.
sub get {
	my ($self, $path, $key) = @_;
	if (defined($key)) {
		my ($out,$rc) = $self->query('get', "$path:$key");
		return $out if $rc == 0;
		debug(
			"#R{[ERROR]} Could not read #C{%s:%s} from vault at #M{%s}",
			$path, $key,$self->{url}
		);
		return undef;
	}
	my ($json,$rc,$err) = read_json_from($self->query('export', $path));
	if ($rc || $err) {
		debug(
			"#R{[ERROR]} Could not read all key/value pairs from #C{%s} in vault at #M{%s}:%s\nexit code: %s",
			$path,$self->{url},$err,$rc
		);
		return {};
	}

	$path =~ s/^\///; # Trim leading / as safe doesn't honour it
	return $json->{$path} if (ref($json) eq 'HASH') && defined($json->{$path});

	# Safe 1.1.0 is backwards compatible, but leaving this in for futureproofing
	if (ref($json) eq "ARRAY" and scalar(@$json) == 1) {
		if (($json->[0]{export_version}||0) == 2) {
			return $json->[0]{data}{$path}{versions}[-1]{value};
		}
	}
	bail "Safe version incompatibility - cannot export path $path";
}
# }}}
# set - write a secret to the vault (prompts for value if not given) {{{
# set - write a secret to this vault at $path:$key.  With $value given the
# write is non-interactive; otherwise safe prompts the user (requires a
# controlling terminal).  Returns the stored value; bails on failure.
sub set {
	my ($self, $path, $key, $value) = @_;
	if (defined($value)) {
		my ($out,$rc) = $self->query('set', $path, "${key}=${value}");
		bail(
			"#R{[ERROR]} Could not write #C{%s:%s} to vault at #M{%s}:\n%s",
			$path, $key,$self->{url},$out
		) unless $rc == 0;
		return $value;
	} else {
		# Interactive - you must supply the prompt before hand
		die_unless_controlling_terminal
			"#R{[ERROR]} Cannot interactively provide secrets unless in a controlling terminal - terminating!";
		my ($out,$rc) = $self->query({interactive => 1},'set', $path, $key);
		bail(
			"#R{[ERROR]} Could not write #C{%s:%s} to vault at #M{%s}",
			$path, $key,$self->{url}
		) unless $rc == 0;
		# Read the value back so the caller gets what was actually stored.
		return $self->get($path,$key);
	}
}
# }}}
# has - return true if vault has given key {{{
# has - true when the vault contains $path (or, with $key, the specific
# $path:$key entry), via `safe exists` in pass/fail mode.
sub has {
	my ($self, $path, $key) = @_;
	my $spec = defined($key) ? "$path:$key" : $path;
	return $self->query({ passfail => 1 }, 'exists', $spec);
}
# }}}
# paths - return all paths found under the given prefixes (or all if no prefix given) {{{
# paths - list all secret paths under the given prefixes (all paths when no
# prefix is given).  Works around `safe paths <prefix>` echoing the prefix
# itself for non-existent paths by double-checking with has().
sub paths {
	my ($self, @prefixes) = @_;

	# TODO: Once safe stops returning invalid pathts, the following will work:
	# return lines($self->query('paths', @prefixes));
	# instead, we have to do this less efficient routine
	return lines($self->query('paths')) unless scalar(@prefixes);

	my @all_paths=();
	for my $prefix (@prefixes) {
		my @paths = lines($self->query('paths', $prefix));
		if (scalar(@paths) == 1 && $paths[0] eq $prefix) {
			# A single echo of the prefix may be a phantom - verify it exists.
			next unless $self->has($prefix);
		}
		push(@all_paths, @paths);
	}
	return @all_paths;
}
# }}}
# keys - return all path:key pairs under the given prefixes (or all if no prefix given) {{{
# keys - list all path:key pairs under the given prefixes (all when no
# prefix is given).  NOTE: named like the CORE::keys builtin but only ever
# invoked as a method ($vault->keys), so the builtin is unaffected.
sub keys {
	my ($self, @prefixes) = @_;
	return lines($self->query('paths','--keys')) unless scalar(@prefixes);

	my @all_paths=();
	for my $prefix (@prefixes) {
		my @paths = lines($self->query('paths', '--keys', $prefix));
		# A lone echo of the prefix means nothing was found beneath it.
		next if (scalar(@paths) == 1 && $paths[0] eq $prefix);
		push(@all_paths, @paths);
	}
	return @all_paths;
}
# }}}
# status - returns status of vault: sealed, unreachable, unauthenticated, uninitialized or ok {{{
# status - probe this vault and classify it as one of: "unreachable - ...",
# "sealed", "unreachable", "unauthenticated", "uninitialized", or "ok".
#
# Fix: the sealed-check used $1 from an *unchecked* regex match, so when
# `vault status` output didn't contain "exit status N", a stale or undef
# $1 was compared (uninitialized warning / wrong result).  The capture is
# now only consulted when the match succeeds.
sub status {
	my $self = shift;

	# See if the url is reachable to start with
	$self->url =~ qr(^http(s?)://(.*?)(?::([0-9]*))?$) or
		bail("Invalid vault target URL #C{%s}: expecting http(s)://ip-or-domain(:port)", $self->url);
	my $ip = $2;
	my $port = $3 || ($1 eq "s" ? 443 : 80);
	my $status = tcp_listening($ip,$port);
	return "unreachable - $status" unless $status eq 'ok';

	my ($out,$rc) = $self->query({stderr => "&1"}, "vault", "status");
	if ($rc != 0) {
		# safe reports the wrapped vault CLI's exit status; 2 means sealed.
		return "sealed" if $out =~ /exit status ([0-9])/ && $1 == 2;
		return "unreachable";
	}

	return "unauthenticated" unless $self->authenticated;
	return "uninitialized" unless $self->initialized;
	return "ok"
}
# }}}
# env - return the environment variables needed to directly access the vault {{{
# env - the environment variables (VAULT_ADDR, VAULT_TOKEN, ...) needed to
# talk to this vault directly, as reported by `safe env --json`; cached on
# the object after the first call.
sub env {
	my $self = shift;
	unless (defined $self->{_env}) {
		$self->{_env} = read_json_from(
			run({
					stderr =>'/dev/null',
					env => {SAFE_TARGET => $self->ref }
				},'safe', 'env', '--json')
		);
		# Normalize so callers can rely on the key existing.
		$self->{_env}{VAULT_SKIP_VERIFY} ||= "";
		# die on missing VAULT_ADDR env?
	}
	return $self->{_env};
}
# }}}
# token - the authentication token for the active vault {{{
# token - the VAULT_TOKEN for this vault, as reported by `safe env`.
sub token {
	my ($self) = @_;
	return $self->env->{VAULT_TOKEN};
}
# }}}
# ref - the reference to be used when identifying the vault (name or url) {{{
# ref - the identifier used when referring to this vault: its url by
# default, or its name after ref_by_name() has been called (legacy mode).
sub ref {
	my ($self) = @_;
	my $field = $self->{ref_by} || 'url';
	return $self->{$field};
}
# }}}
# ref_by_name - use the name of the vault as its reference (legacy mode) {{{
# ref_by_name - switch this vault to be referenced by its name instead of
# its url (legacy mode).  Returns the vault itself for chaining.
sub ref_by_name {
	my ($self) = @_;
	$self->{ref_by} = 'name';
	return $self;
}
# }}}
# set_as_current - set this vault as the current Genesis vault {{{
# set_as_current - record the given vault as the process-wide active vault
# and return it.
sub set_as_current {
	$current_vault = shift;
}
# is_current - true when this vault object IS the active one (matched by
# the per-object random id, not by url/name).
sub is_current {
	$current_vault && $current_vault->{id} eq $_[0]->{id};
}
# }}}
# process_kit_secret_plans - perform actions on the kit secrets: add,recreate,renew,check,remove {{{
# process_kit_secret_plans - perform one action (add, recreate, renew or
# remove) against every kit-defined secret, reporting progress through the
# $update callback.
#
# Arguments:
#   $action - one of: add, recreate, renew, remove
#   $env    - the environment object whose kit secrets are processed
#   $update - callback invoked with UI events ('wait', 'init', 'start-item',
#             'done-item', 'prompt', 'abort', 'completed', ...)
#   %opts   - invalid (0 = all, 1 = problematic only, 2 = invalid only),
#             paths (path filters), no_prompt, interactive
#
# Returns the result of the final $update call ('completed', 'empty' or
# 'abort'); returns early (void) if the plans contain parse errors.
sub process_kit_secret_plans {
 my ($self, $action, $env, $update, %opts) = @_;
 $opts{invalid} ||= 0;
 bug("#R{[Error]} Unknown action '$action' for processing kit secrets")
  if ($action !~ /^(add|recreate|renew|remove)$/);
 # Build the plan list from the kit metadata and the env's active features
 $update->('wait', msg => "Parsing kit secrets descriptions");
 my @plans = parse_kit_secret_plans(
  $env->dereferenced_kit_metadata,
  [$env->features],
  root_ca_path => $env->root_ca_path,
  paths => $opts{paths});
 # Any plan of type 'error' aborts the run before touching the vault
 my @errors = map {my ($p,$t,$m) = describe_kit_secret_plan(%$_); sprintf "%s: %s", $p, $m} grep {$_->{type} eq 'error'} @plans;
 $update->('wait-done', result => (@errors ? 'error' : 'ok'), msg => join("\n", @errors));
 return if (@errors);
 if ($opts{invalid}) {
  # Narrow the plan list to just the failed (or failed+missing) secrets
  @plans = $self->_get_failed_secret_plans($action, $env, $update, $opts{invalid} == 2, @plans);
  return $update->('empty', msg => sprintf(
   "No %s secrets found%s.",
   ($opts{invalid} == 2) ? "invalid" : "problematic",
   @{$opts{paths}} ? " under the specified paths/filters" : ""
  )
 ) unless scalar(@plans);
 }
 #Filter out any path that has no plan - only x509 has support for renew
 #TODO: make this generalized if other things are supported in the future
 @plans = grep {$_->{type} eq 'x509'} @plans if $action eq 'renew';
 return $update->('empty') unless scalar(@plans);
 # Destructive actions get one up-front confirmation unless suppressed or
 # the user opted into per-item interactive confirmation instead
 if ($action =~ /^(remove|recreate|renew)$/ && !$opts{no_prompt} && !$opts{interactive}) {
  (my $actioned = $action) =~ s/e?$/ed/;
  my $permission = $update->('prompt',
   class => 'warning',
   msg => sprintf(
    "The following secrets will be ${actioned} under path '#C{%s}':\n %s",
    $env->secrets_base,
    join("\n ",
     map {bullet $_, inline => 1}
     map {_get_plan_paths($_)}
     @plans
    )
   ),
   prompt => "Type 'yes' to $action these secrets");
  return $update->('abort', msg => "\nAborted!\n")
   if $permission ne 'yes';
 }
 my ($result, $err, $idx);
 $update->('init', total => scalar(@plans));
 for (@plans) {
  my ($path, $label, $details) = describe_kit_secret_plan(%$_);
  $update->('start-item', path => $path, label => $label, details => $details);
  if ($opts{interactive}) {
   # Per-item confirmation: y = do it, n = skip, q = abort the whole run
   my $confirm = $update->('inline-prompt',
    prompt => sprintf("%s [y/n/q]?", $action),
   );
   if ($confirm ne 'y') {
    $update->('done-item', result => 'skipped');
    return $update->('abort', msg => "#Y{Quit!}\n") if ($confirm eq 'q');
    next;
   }
  }
  my $now_t = Time::Piece->new(); # To prevent clock jitter
  my @command = _generate_secret_command($action, $env->secrets_base, %$_);
  if ($_->{type} eq "provided") {
   # User-provided secrets: skip if already present (for add, or recreate
   # of a fixed secret), then prompt the user for the value(s)
   if ($action eq 'add' || ($action eq 'recreate' && $_->{fixed})) {
    my $path = $env->secrets_base.$_->{path};
    my (undef, $missing) = $self->query('exists',$path);
    if (!$missing) {
     $update->('done-item', result => 'skipped');
     next;
    }
   }
   if (!@command) {
    $update->('done-item', result => 'error', msg => "Cannot prompt for user input from a non-controlling terminal");
    last;
   }
   my $interactive = 1;
   $update->("notify", msg => "#Yi{user input required:\n}");
   # A leading coderef in the command is a pre-command (e.g. multiline
   # prompt) taking the args up to the '--' separator
   if (CORE::ref($command[0]) eq 'CODE') {
    my $precommand = shift @command;
    my @precommand_args;
    while (my $arg = shift @command) {
     last if $arg eq '--';
     push @precommand_args, $arg;
    }
    $interactive = $precommand->(@precommand_args);
   }
   if (@command) {
    $update->('notify', msg=> "\nsaving user input ... ", nonl => 1) if ! $interactive;
    my ($out,$rc) = $self->query({interactive => $interactive}, @command);
    $update->('notify', msg=> "\nsaving user input ... ", nonl => 1) if $interactive;
    $update->('done-item', result => ($rc ? 'error': 'ok'));
    last if $rc;
   }
  } else {
   my ($out, $rc) = $self->query(@command);
   # Strip the generated uuid value from the output so it isn't echoed
   $out = join("\n", grep {
    my (undef, $key) = split(':',$path);
    $_ !~ /^$key: [a-f0-9]{8}(-[a-f0-9]{4}){4}[a-f0-9]{8}$/;
   } split("\n", $out )) if ( $_->{type} eq 'uuid');
   if ($out =~ /refusing to .* as it is already present/ ||
    $out =~ /refusing to .* as the following keys would be clobbered:/) {
    # --no-clobber refused to overwrite an existing secret
    $update->('done-item', result => 'skipped')
   } elsif ( $action eq 'renew' && $out =~ /Renewed x509 cert.*expiry set to (.*)$/) {
    my $expires = $1;
    # Best-effort: compute days-until-expiry; fall back to the raw date
    eval {
     (my $exp_gmt = $1) =~ s/UTC/GMT/;
     my $expires_t = Time::Piece->strptime($exp_gmt, "%b %d %Y %H:%M %Z");
     my $days = sprintf("%.0f",($expires_t - $now_t)->days());
     $update->('done-item', result => 'ok', msg => _checkbox(1)."Expiry updated to $expires ($days days)");
    };
    $update->('done-item', result => 'ok', msg => "Expiry updated to $expires") if $@;
   } elsif ($_->{type} eq 'dhparams' && $out && !$rc) {
    # dhparam generation is chatty even on success; check for its footer
    if ($out =~ /Generating DH parameters.*This is going to take a long time.*\+\+\*\+\+\*\s*$/s) {
     $update->('done-item', result => 'ok')
    } else {
     $update->('done-item', result => 'error', msg => $out);
    }
   } elsif (!$out) {
    $update->('done-item', result => 'ok')
   } else {
    $update->('done-item', result => 'error', msg => $out);
   }
   last if ($rc);
  }
 }
 return $update->('completed');
}
# }}}
# validate_kit_secrets - validate kit secrets {{{
# validate_kit_secrets - check (existence) or validate (existence + content)
# every kit-defined secret for an environment, reporting per-secret results
# through the $update callback.
#
# Arguments:
#   $action - 'check' or 'validate'
#   $env    - the environment object whose secrets are examined
#   $update - callback for UI events ('wait', 'init', 'start-item', ...)
#   %opts   - paths: optional list of path filters
sub validate_kit_secrets {
 my ($self, $action, $env, $update, %opts) = @_;
 $opts{validate} ||= 0;
 bug("#R{[Error]} Unknown action '$action' for checking kit secrets")
  if ($action !~ /^(check|validate)$/);
 # Build the plan list from the kit metadata and active features
 $update->('wait', msg => "Parsing kit secrets descriptions");
 my @plans = parse_kit_secret_plans(
  $env->dereferenced_kit_metadata,
  [$env->features],
  root_ca_path => $env->root_ca_path,
  paths => $opts{paths});
 # Any plan of type 'error' aborts the run before any vault access
 my @errors = map {my ($p,$t,$m) = describe_kit_secret_plan(%$_); sprintf "%s: %s", $p, $m} grep {$_->{type} eq 'error'} @plans;
 $update->('wait-done', result => (@errors ? 'error' : 'ok'), msg => join("\n", @errors));
 return if (@errors);
 # Fetch everything stored under the env in one export call
 $update->('wait', msg => "Retrieving all existing secrets");
 my ($secret_contents,$err) =$self->all_secrets_for($env);
 $update->('wait-done', result => ($err ? 'error' : 'ok'), msg => $err);
 return if $err;
 $update->('init', total => scalar(@plans));
 for my $plan (@plans) {
  my ($path, $label, $details) = describe_kit_secret_plan(%$plan);
  $update->('start-item', path => $path, label => $label, details => $details);
  my ($result, $msg) = _validate_kit_secret($action,$plan,$secret_contents,$env->secrets_base,\@plans);
  # user-provided secrets can only be checked for presence, never validated
  $update->('done-item', result => $result, msg => $msg, action => ($plan->{type} eq 'provided' ? 'check' : $action));
 }
 return $update->('completed');
}
# }}}
# all_secrets_for - return hash of all secrets stored under the environment's secrets base path {{{
# all_secrets_for - fetch every secret stored under the environment's secret
# base path via a single `safe export` call.
#
# Returns ($hashref, $error): $hashref maps each vault path to its key/value
# hash.  An empty hash with no error means nothing is stored there yet; a
# missing root CA or an undecodable export is reported via $error.
sub all_secrets_for {
 my ($self, $env) = @_;
 my ($secret_contents,$err);
 my $root_path = $env->secrets_base;
 debug "Turning off debug and trace output while retrieving secrets";
 # secret values would otherwise leak into the trace/debug streams
 local $ENV{GENESIS_TRACE}='';
 local $ENV{GENESIS_DEBUG}='';
 my @cmd = ('export', $env->secrets_base);
 # Also export the root CA (an absolute path) so cert chains can be checked
 my $root_ca_path = $env->root_ca_path;
 push @cmd, $root_ca_path if $root_ca_path;
 # NOTE(review): query() is called in scalar context here, but in list
 # context elsewhere (my ($out,$rc) = $self->query(...)) -- presumably it
 # returns just the output in scalar context; confirm against query()'s
 # definition.
 my $raw_secrets = $self->query(@cmd);
 return ({}, "Root CA certificate not found")
  if $raw_secrets =~ /^!! no secret exists at path \`$root_ca_path\`/;
 return ({})
  if $raw_secrets =~ /^!! no secret exists at path/;
 eval {
  $secret_contents = decode_json($raw_secrets);
 };
 $err = "Could not retrieve existing secrets for $root_path" if $@;
 return($secret_contents, $err);
}
# }}}
# }}}
### Private Methods {{{
# _expected_kit_secret_keys - list keys expected for a given kit secret {{{
# _expected_kit_secret_keys - list the vault keys a kit secret plan of a
# given type is expected to populate.  Fixed-key types (x509, rsa, ssh,
# dhparams) have a known key set; the colon-keyed types (random, provided,
# uuid) take their key name from the "<path>:<key>" plan path, plus an
# optional formatted copy.  Unknown types yield an empty list.
sub _expected_kit_secret_keys {
 my (%plan) = @_;
 my $type = $plan{type};
 my %fixed_keys = (
  rsa      => [qw(private public)],
  ssh      => [qw(private public fingerprint)],
  dhparams => [qw(dhparam-pem)],
 );
 if ($type eq 'x509') {
  # CAs additionally carry a revocation list and serial counter
  return $plan{is_ca}
   ? qw(certificate combined key crl serial)
   : qw(certificate combined key);
 }
 if ($fixed_keys{$type}) {
  return @{$fixed_keys{$type}};
 }
 if ($type =~ /^(random|provided|uuid)$/) {
  my (undef, $key) = split(":", $plan{path});
  my @keys = ($key);
  # A formatted copy is stored under the explicit destination, or under
  # "<key>-<format>" by default, when a format was requested
  push(@keys, $plan{destination} || "$key-".$plan{format})
   if $plan{format};
  return @keys;
 }
 return ();
}
# }}}
# _get_failed_secret_plans - list the plans for failed secrets {{{
# _get_failed_secret_plans - validate every plan against the stored secrets
# and return only the plans that failed.
#
# Arguments:
#   $scope            - the action being performed; 'recreate' also treats
#                       missing secrets as failures
#   $env              - the environment object
#   $update           - UI event callback
#   $include_warnings - promote 'warn' results to failures when true
#   @plans            - the plans to examine
sub _get_failed_secret_plans {
 my ($self, $scope, $env, $update, $include_warnings, @plans) = @_;
 $update->('wait', msg => "Retrieving all existing secrets");
 my ($secret_contents,$err) =$self->all_secrets_for($env);
 $update->('wait-done', result => ($err ? 'error' : 'ok'), msg => $err);
 return () if $err;
 my @failed;
 my ($total, $idx) = (scalar(@plans), 0);
 $update->('init', action => "Checking for failed".($scope eq 'recreate' ? ' or missing' : '')." secrets", total => scalar(@plans));
 for my $plan (@plans) {
  my ($path, $label, $details) = describe_kit_secret_plan(%$plan);
  $update->('start-item', path => $path, label => $label, details => $details);
  my ($result, $msg) = _validate_kit_secret('validate',$plan,$secret_contents,$env->secrets_base, \@plans);
  # 'missing' only counts as a failure when the caller can recreate it
  if ($result eq 'error' || ($result eq 'warn' && $include_warnings) || ($result eq 'missing' && $scope eq 'recreate')) {
   $update->('done-item', result => $result, action => 'validate', msg => $msg) ;
   push @failed, $plan;
  } else {
   $update->('done-item', result => 'ok', action => 'validate')
  }
 }
 $update->('notify', msg => sprintf("Found %s invalid%s secrets", scalar(@failed), $include_warnings ? " or problematic" : ""));
 return @failed;
}
# }}}
# }}}
### Public helper functions {{{
# parse_kit_secret_plans - get the list of secrets specified by the kit {{{
# parse_kit_secret_plans - build the ordered list of secret plans declared
# by the kit metadata for the active features.
#
# Arguments:
#   $metadata - dereferenced kit metadata (certificates/credentials/provided)
#   $features - arrayref of active feature names
#   %opts     - root_ca_path; validate (run per-type plan validations);
#               paths (list of explicit paths and/or filter expressions:
#               'key=value' / 'key!=value' plan-property filters, '/re/[i]'
#               or '!/re/[i]' path regexes, joinable with '||')
#
# Returns the plans as a list, ordered so signing CAs precede the certs
# they sign; plans that fail to parse come back with type 'error'.
sub parse_kit_secret_plans {
 my ($metadata, $features, %opts) = @_;
 trace "Parsing plans for kit secrets";
 my $plans = _get_kit_secrets($metadata, $features);
 # Sort the plans in order of application (check for cyclical ca relations)
 my $groups = {};
 push(@{$groups->{$plans->{$_}{type}} ||= []}, $_) for (sort(CORE::keys(%$plans)));
 # x509 plans are processed first since their signing order matters
 my @ordered_plans = _process_x509_plans(
  $plans,
  delete($groups->{x509}),
  $opts{root_ca_path},
  $opts{validate});
 # Add in all the other types that don't require prerequesites
 for my $type (sort(CORE::keys %$groups)) {
  for my $path (sort @{$groups->{$type}}) {
   my $ok = 1;
   if ($opts{validate}) {
    # dispatch to _validate_<type>_plan when such a sub exists
    my $validate_sub = "_validate_${type}_plan";
    $ok = (\&{$validate_sub})->($plans,$path,\@ordered_plans) if exists(&{$validate_sub});
   }
   push @ordered_plans, $plans->{$path} if $ok;
  }
 }
 # Apply path filters: explicit paths are always kept; filter expressions
 # are OR'd internally ('||') and AND'd against previous filter results
 if ($opts{paths} && @{$opts{paths}}) {
  my @explicit_paths;
  my @filtered_paths;
  my $filtered = 0;
  for my $filter (@{$opts{paths}}) { #and each filter with previous results
   if (grep {$_->{path} eq $filter} @ordered_plans) { # explicit path
    push @explicit_paths, $filter;
    next;
   }
   my @or_paths;
   @filtered_paths = map {$_->{path}} @ordered_plans # start will all possible paths
    unless $filtered++; # initialize on first use
   while (defined $filter) {
    my @paths;
    ($filter, my $remainder) = $filter =~ /(.*?)(?:\|\|(.*))?$/; # or
    debug "Parsing left half of an or-filter: $filter || $remainder" if $remainder;
    if ($filter =~ /(.*?)(!)?=(.*)$/) { # plan properties
     my ($key,$negate,$value) = ($1,$2,$3);
     @paths = map {$_->{path}} grep {defined($_->{$key}) && ($negate ? $_->{$key} ne $value : $_->{$key} eq $value)} @ordered_plans;
     debug "Parsing plan properties filter: $key = '$value' => ".join(", ",@paths);
    } elsif ($filter =~ m'^(!)?/(.*?)/(i)?$') { # path regex
     my ($match,$pattern,$reopt) = (($1 || '') ne '!', $2, ($3 || ''));
     debug "Parsing plan path regex filter: path %s~ /%s/%s", $match?'=':'!', $pattern, $reopt;
     my $re; eval "\$re = qr/\$pattern/$reopt";
     @paths = map {$_->{path}} grep {$match ? $_->{path} =~ $re : $_->{path} !~ $re} @ordered_plans;
    } else {
     bail "\n#R{[ERROR]} Could not understand path filter of '%s'", $filter;
    }
    @or_paths = uniq(@or_paths, @paths); # join together the results of successive 'or's
    $filter = $remainder;
   }
   my %and_paths = map {($_,1)} @filtered_paths;
   @filtered_paths = grep {$and_paths{$_}} @or_paths; #and together each feature
  }
  my %filter_map = map {($_,1)} (@filtered_paths, @explicit_paths);
  @ordered_plans = grep { $filter_map{$_->{path}} } (@ordered_plans);
 }
 trace "Completed parsing plans for kit secrets";
 return @ordered_plans;
}
# }}}
# describe_kit_secret_plan - get a printable slug for the a kit secret plan {{{
# describe_kit_secret_plan - produce a printable description of a kit secret
# plan.  Returns ($path, $type_label, $features_string) where the features
# string is a comma-joined summary of the plan's notable attributes (empty
# entries are dropped).  Unsupported plan types are reported as "ERROR".
sub describe_kit_secret_plan {
 my (%plan) = @_;
 my ($path,$type,@features);
 $path = $plan{path};
 if ($plan{type} eq 'x509') {
  $type = "X509 certificate";
  @features = (
   $plan{is_ca} ? 'CA' : undef,
   # self_signed == 2 means the kit explicitly asked for self-signing
   $plan{self_signed}
    ? ($plan{self_signed} == 2 ? 'explicitly self-signed' : 'self-signed')
    : ($plan{signed_by} ? "signed by '$plan{signed_by}'" : undef )
  );
 } elsif ($plan{type} eq 'random') {
  $type = "random password";
  @features = (
   $plan{size} . ' bytes',
   $plan{fixed} ? 'fixed' : undef
  )
 } elsif ($plan{type} eq 'uuid') {
  $type = "UUID";
  # Well-known namespaces were stored as NS_<NAME>; show them as @<NAME>
  my $namespace = $plan{namespace} ? "ns:$plan{namespace}" : undef;
  $namespace =~ s/^ns:NS_/ns:@/ if $namespace;
  if ($plan{version} =~ /^(v1|time)/i) {
   @features = ('random:time based (v1)')
  } elsif ($plan{version} =~ /^(v3|md5)/i) {
   @features = (
    'static:md5-hash (v3)',
    "'$plan{name}'",
    $namespace
   );
  } elsif ($plan{version} =~ /^(v4|random)/i) {
   @features = ('random:system RNG based (v4)')
  } elsif ($plan{version} =~ /^(v5|sha1)/i) {
   @features = (
    'static:sha1-hash (v5)',
    "'$plan{name}'",
    $namespace,
   );
  }
  push(@features, 'fixed') if $plan{fixed};
 } elsif ($plan{type} eq 'dhparams') {
  $type = "Diffie-Hellman key exchange parameters";
  @features = (
   $plan{size} . ' bits',
   $plan{fixed} ? 'fixed' : undef
  );
 } elsif ($plan{type} =~ /^(ssh|rsa)$/) {
  $type = uc($plan{type})." public/private keypair";
  @features = (
   $plan{size} . ' bits',
   $plan{fixed} ? 'fixed' : undef
  );
 } elsif ($plan{type} eq 'error') {
  $type = "ERROR";
  @features = (
   $plan{error},
  );
  debug("Error encountered in secret plan $path:");
  dump_var plan => \%plan;
 } elsif ($plan{type} eq 'provided') {
  $type = "user-provided";
  @features = (
   $plan{prompt}
  );
 } else {
  $type = "ERROR";
  @features = (
   "Unsupported secret type '$plan{type}'"
  );
 }
 return ($path,$type,join (", ", grep {$_} @features));
}
# }}}
# }}}
### Private helper functions {{{
# _target_is_url - determine if target is in valid URL form {{{
# _target_is_url - determine if the given target is a valid vault URL of the
# form http(s)://<host>[:<port>], where <host> is either a dotted-quad IPv4
# address or a hostname whose labels each start and end with an alphanumeric
# character.  Comparison is case-insensitive.  Returns 1 or 0.
sub _target_is_url {
 my $target = lc(shift);
 return 0 unless $target =~ qr(^https?://([^:/]+)(?::([0-9]+))?$);
 my ($host, $port) = ($1, $2);
 return 0 if $port && $port > 65535; # max TCP port
 my @comp = split(/\./, $host);
 # IPv4: exactly four octets, each in 0-255
 return 1 if scalar(@comp) == 4 && scalar(grep {$_ =~ /^[0-9]+$/ && $_ >=0 && $_ < 256} @comp) == 4;
 # Hostname: every label must fully match.  The pattern is anchored -- the
 # previous unanchored check matched any label containing one alphanumeric
 # character, letting invalid labels like '-bad-' through.
 return 1 if scalar(grep {$_ !~ /^[a-z0-9]([-_0-9a-z]*[a-z0-9])*$/} @comp) == 0;
 return 0;
}
# }}}
# _get_targets - find all matching safe targets for the provided name or url {{{
# _get_targets - resolve a vault name or URL to its URL plus every safe
# target name that points at that URL.  Returns (undef) when a name was
# given but no matching safe target exists.
sub _get_targets {
 my ($target) = @_;
 if (!_target_is_url($target)) {
  # Treat the argument as a target name and look up its URL
  my ($found) = Genesis::Vault->find(name => $target);
  return (undef) unless $found;
  $target = $found->{url};
 }
 my @names = map {$_->{name}} Genesis::Vault->find(url => $target);
 return ($target, @names);
}
# }}}
# _get_kit_secrets - get the raw secrets from the kit.yml file {{{
# _get_kit_secrets - parse the raw secret declarations from the kit.yml
# metadata (certificates, credentials and provided sections) for the base
# feature plus each active feature, into a hashref of path => plan hash.
# Malformed declarations become plans of type 'error' carrying the message;
# the plan's own path is stored under its 'path' key before returning.
sub _get_kit_secrets {
 my ($meta, $features) = @_;
 my $plans = {};
 for my $feature ('base', @{$features || []}) {
  # --- certificates: nested hashmaps of x509 specs ---
  if ($meta->{certificates}{$feature}) {
   for my $path (CORE::keys %{ $meta->{certificates}{$feature} }) {
    if ($path =~ ':') {
     $plans->{$path} = {type=>'error', error=>"Bad Request:\n- Path cannot contain colons"};
     next;
    }
    my $data = $meta->{certificates}{$feature}{$path};
    if (CORE::ref($data) eq 'HASH') {
     for my $k (CORE::keys %$data) {
      my $ext_path = "$path/$k";
      $plans->{$ext_path} = $data->{$k};
      if (CORE::ref($plans->{$ext_path}) eq 'HASH') {
       $plans->{$ext_path}{type} = "x509";
       $plans->{$ext_path}{base_path} = $path;
      } else {
       $plans->{$ext_path} = {type => 'error', error => "Badly formed x509 request:\nExpecting hash map, got '$plans->{$ext_path}'"};
      }
      # In-the-wild POC conflict fix for cf-genesis-kit v1.8.0-v1.10.x
      $plans->{$ext_path}{signed_by} = "application/certs/ca"
       if ($plans->{$ext_path}{signed_by} || '') eq "base.application/certs.ca";
     }
    } else {
     $plans->{$path} = {type => 'error', error => "Badly formed x509 request:\n- expecting certificate specification in the form of a hash map"};
    }
   }
  }
  # --- credentials: per-key command strings (random/uuid) or path-level
  #     ssh/rsa/dhparams specs ---
  if ($meta->{credentials}{$feature}) {
   for my $path (CORE::keys %{ $meta->{credentials}{$feature} }) {
    if ($path =~ ':') {
     $plans->{$path} = {type=>'error', error=>"Bad credential request:\n- Path cannot contain colons"};
     next;
    }
    my $data = $meta->{credentials}{$feature}{$path};
    if (CORE::ref($data) eq "HASH") {
     for my $k (CORE::keys %$data) {
      if ($k =~ ':') {
       $plans->{"$path:$k"} = {type=>'error', error=>"Bad credential request:\n- Key cannot contain colons"};
       next;
      }
      my $cmd = $data->{$k};
      if ($cmd =~ m/^random\b/) {
       # random <size> [fmt <format> [at <key>]] [allowed-chars <chars>] [fixed]
       if ($cmd =~ m/^random\s+(\d+)(\s+fmt\s+(\S+)(\s+at\s+(\S+))?)?(\s+allowed-chars\s+(\S+))?(\s+fixed)?$/) {
        $plans->{"$path:$k"} = {
         type => 'random',
         size => $1,
         format => $3,
         destination => $5,
         valid_chars => $7,
         fixed => (!!$8)
        };
       } else {
        $plans->{"$path:$k"} = {
         type => "error",
         error => "Bad random password request:\n".
          "- Expected usage: random <size> [fmt <format> [at <key>]] ".
          "[allowed-chars <chars>] [fixed]\n".
          " Got: $cmd"
        };
       }
      } elsif ($cmd =~ m/^uuid\b/) {
       # uuid [version] [namespace <uuid>|dns|url|oid|x500] [name <name>] [fixed]
       if ($cmd =~ m/^uuid(?:\s+(v[1345]|time|md5|random|sha1))?(?:\s+namespace (?:([a-f0-9]{8}-(?:[a-f0-9]{4}-){3}[a-f0-9]{12})|(dns|url|oid|x500)))?(?:\s+name (.*?))?(\s+fixed)?$/i) {
        $plans->{"$path:$k"} = {
         type => 'uuid',
         version => uc($1||"v4"),
         namespace => $2 || ($3 ? "NS_".uc($3) : undef),
         name => $4,
         fixed => (!!$5)
        };
       } else {
        $plans->{"$path:$k"} = {
         type => "error",
         error => "Bad UUID request:\n".
          "- Expected usage: uuid [v1|time|v3|md5|v4|random|v5|sha1] ".
          "[namespace (dns|url|oid|x500|<UUID namespace>] [name <name>] [fixed]\n".
          " Got: $cmd"
        };
       }
      } else {
       $plans->{"$path:$k"} = {type => "error", error => "Bad credential request:\n- Bad generate-password format '$cmd'"};
      }
     }
    } elsif ($data =~ m/^(ssh|rsa)\s+(\d+)(\s+fixed)?$/) {
     $plans->{$path} = {type => $1, size=> $2, fixed => (!!$3) };
    } elsif ($data =~ m/^dhparams?\s+(\d+)(\s+fixed)?$/) {
     $plans->{$path} = {type => 'dhparams', size => $1, fixed => (!!$2) }
    } elsif ($data =~ m/^random .?$/) {
     $plans->{$path} = {type => 'error', error => "Bad credential request:\n- Random password request for a path must be specified per key in a hashmap"};
    } elsif ($data =~ m/^uuid .?$/) {
     $plans->{$path} = {type => 'error', error => "Bad credential request:\n- UUID request for a path must be specified per key in a hashmap"};
    } else {
     $plans->{$path} = {type => 'error', error => "Bad credential request:\n- Unrecognized request '$data'"};
    }
   }
  }
  # --- provided: user-supplied secrets, currently only type 'generic' ---
  if ($meta->{provided}{$feature}) {
   if (CORE::ref($meta->{provided}{$feature}) eq 'HASH') {
    for my $path (CORE::keys %{ $meta->{provided}{$feature} }) {
     if ($path =~ ':') {
      $plans->{$path} = {type=>'error', error=>"Bad provided secret description:\n- Path cannot contain colons"};
      next;
     }
     my $data = $meta->{provided}{$feature}{$path};
     if (CORE::ref($data) eq "HASH") {
      my $type = $data->{type} || 'generic';
      if ($type eq 'generic') {
       if (!defined($data->{keys}) || CORE::ref($data->{keys}) ne 'HASH') {
        $plans->{$path} = {type=>'error', error=>"Bad generic provided secret description:\n- Missing 'keys' hash"};
        next;
       }
       for my $k (CORE::keys %{$data->{keys}}) {
        if ($k =~ ':') {
         $plans->{"$path:$k"} = {type=>'error', error=>"Bad generic provided secret description:\n- Key cannot contain colons"};
         next;
        }
        # sensitive defaults to true; prompt gets a generated fallback
        $plans->{"$path:$k"} = {
         type => 'provided',
         subtype => $data->{keys}{$k}{type},
         sensitive => (defined($data->{keys}{$k}{sensitive}) ? !!$data->{keys}{$k}{sensitive} : 1),
         multiline => (!!$data->{keys}{$k}{multiline}),
         prompt => $data->{keys}{$k}{prompt} || "Value for $path $k",
         fixed => (!!$data->{keys}{$k}{fixed})
        };
       }
      } else {
       $plans->{$path} = {type => 'error', error => "Bad provided secrets description:\n- Unrecognized type '$type'; expecting one of: generic"};
      }
     } elsif (CORE::ref($data)) {
      my $reftype = lc(CORE::ref($data));
      $plans->{$path} = {type => 'error', error => "Bad provided secrets path:\n- Expecting hashmap, got $reftype"};
     } else {
      $plans->{$path} = {type => 'error', error => "Bad provided secrets path:\n- Expecting hashmap, '$data'"};
     }
    }
   } elsif (CORE::ref($meta->{provided}{$feature})) {
    my $reftype = lc(CORE::ref($meta->{provided}{$feature}));
    $plans->{$feature} = {type => 'error', error => "Bad provided secrets feature block:\n- Expecting hashmap of paths, got $reftype"};
   } else {
    $plans->{$feature} = {type => 'error', error => "Bad provided secrets feature block:\n- Expecting hashmap of paths, got '$meta->{provided}{$feature}'"};
   }
  }
 }
 # Each plan records its own path for later reference
 $plans->{$_}{path} = $_ for CORE::keys %$plans;
 return $plans;
}
# }}}
# _generate_secret_command - create safe command list that performs the requested action on the secret endpoint {{{
# _generate_secret_command - build the safe command (as a list) that
# performs $action on the secret described by %plan, rooted at $root_path.
# Returns an empty list when the action is unsupported for the plan type
# (only x509 supports renew) or when user input is needed but there is no
# controlling terminal.  For 'provided' multiline secrets, the first list
# element is a coderef pre-command whose args run up to a '--' separator.
sub _generate_secret_command {
 my ($action,$root_path, %plan) = @_;
 my @cmd;
 if ($action eq 'remove') {
  @cmd = ('rm', '-f', $root_path.$plan{path});
  # formatted copies of random passwords are removed alongside
  if ($plan{type} eq 'random' && $plan{format}) {
   my ($secret_path,$secret_key) = split(":", $plan{path},2);
   my $fmt_path = sprintf("%s:%s", $root_path.$secret_path, $plan{destination} ? $plan{destination} : $secret_key.'-'.$plan{format});
   push @cmd, '--', 'rm', '-f', $fmt_path;
  }
 } elsif ($plan{type} eq 'x509') {
  my %action_map = (add => 'issue',
                    recreate => 'issue',
                    renew => 'renew');
  my @names = @{$plan{names} || []};
  # a CA with no names gets a generated, collision-resistant CN
  push(@names, sprintf("ca.n%09d.%s", rand(1000000000),$plan{base_path})) if $plan{is_ca} && ! scalar(@names);
  @cmd = (
   'x509',
   $action_map{$action},
   $root_path.$plan{path},
   '--ttl', $plan{valid_for} || ($plan{is_ca} ? '10y' : '1y'),
  );
  # signed_by_abs_path means the signer path is already absolute (root CA)
  push(@cmd, '--signed-by', ($plan{signed_by_abs_path} ? '' : $root_path).$plan{signed_by}) if $plan{signed_by};
  if ($action_map{$action} eq 'issue') {
   push(@cmd, '--ca') if $plan{is_ca};
   push(@cmd, '--name', $_) for (@names);
   if (CORE::ref($plan{usage}) eq 'ARRAY') {
    # an explicitly empty usage list is passed to safe as 'no'
    push(@cmd, '--key-usage', $_) for (@{$plan{usage}} ? @{$plan{usage}} : qw/no/);
   }
  } elsif ($action_map{$action} eq 'renew') {
   my ($cert_name) = @names;
   push(@cmd, '--subject', "cn=$cert_name")
    if $cert_name and envset("GENESIS_RENEW_SUBJECT");
   push(@cmd, '--name', $_) for (@names);
   my ($usage) = _get_x509_plan_usage(\%plan);
   if (CORE::ref($usage) eq 'ARRAY') {
    push(@cmd, '--key-usage', $_) for (@{$usage} ? @{$usage} : qw/no/);
   }
  }
 } elsif ($action eq 'renew') {
  # Nothing else supports renew -- return empty action
  debug "No safe command for renew $plan{type}";
  return ();
 } elsif ($plan{type} eq 'random') {
  @cmd = ('gen', $plan{size},);
  my ($path, $key) = split(':',$plan{path});
  push(@cmd, '--policy', $plan{valid_chars}) if $plan{valid_chars};
  push(@cmd, $root_path.$path, $key);
  if ($plan{format}) {
   # also store a formatted copy of the generated password
   my $dest = $plan{destination} || "$key-".$plan{format};
   push(@cmd, '--no-clobber') if $action eq 'add' || ($action eq 'recreate' && $plan{fixed});
   push(@cmd, '--', 'fmt', $plan{format}, $root_path.$path, $key, $dest);
  }
 } elsif ($plan{type} eq 'dhparams') {
  @cmd = ('dhparam', $plan{size}, $root_path.$plan{path});
 } elsif (grep {$_ eq $plan{type}} (qw/ssh rsa/)) {
  @cmd = ($plan{type}, $plan{size}, $root_path.$plan{path});
 } elsif ($plan{type} eq 'provided') {
  # user-supplied value: prompt inline, or via a pre-command for multiline
  if (in_controlling_terminal) {
   if ($plan{multiline}) {
    my $file=workdir().'/secret_contents';
    push (@cmd, sub {use Genesis::UI; print "[2A"; mkfile_or_fail($file,prompt_for_block @_); 0}, $plan{prompt}, '--', 'set', split(':', $root_path.$plan{path}."\@$file", 2))
   } else {
    my $op = $plan{sensitive} ? 'set' : 'ask';
    push (@cmd, 'prompt', $plan{prompt}, '--', $op, split(':', $root_path.$plan{path}));
   }
  }
  # returns here (possibly empty when not in a controlling terminal), so
  # 'provided' never gets the trailing --no-clobber below
  debug "safe command: ".join(" ", @cmd);
  dump_var plan => \%plan;
  return @cmd;
 } elsif ($plan{type} eq 'uuid') {
  # uuids are computed locally and stored with a plain 'set'
  my $version=(\&{"UUID::Tiny::UUID_".$plan{version}})->();
  my $ns=(\&{"UUID::Tiny::UUID_".$plan{namespace}})->() if ($plan{namespace}||'') =~ m/^NS_/;
  $ns ||= $plan{namespace};
  my $uuid = UUID::Tiny::create_uuid_as_string($version, $ns, $plan{name});
  #error "UUID: $uuid ($plan{path})";
  my ($path, $key) = split(':',$plan{path});
  @cmd = ('set', $root_path.$path, "$key=$uuid");
 } else {
  push(@cmd, 'prompt', 'bad request');
  debug "Requested to create safe path for an bad plan";
  dump_var plan => \%plan;
 }
 # add (and recreate of fixed secrets) must never overwrite existing values
 push(@cmd, '--no-clobber') if ($action eq 'add' || ($plan{fixed} && $action eq 'recreate'));
 debug "safe command: ".join(" ", @cmd);
 dump_var plan => \%plan;
 return @cmd;
}
# }}}
# _process_x509_plans - determine signing changes, add defaults and specify build order {{{
# _process_x509_plans - resolve the CA-signing relationships among x509
# plans: assign default signing CAs per base path, sign otherwise-unsigned
# certs with the root CA (or mark them self-signed), then emit the plans in
# dependency order (signers before signees).  Plans whose signer cannot be
# resolved, or that are ambiguous, are converted to type 'error'.
sub _process_x509_plans {
 my ($plans, $paths, $root_ca_path, $validate) = @_;
 my @paths = @{$paths || []};
 my $base_cas = {};
 # Collect the CA(s) declared under each base path; '<base>/ca' or an
 # is_ca flag marks a CA.
 # NOTE(review): `=~ 1` matches any is_ca value *containing* the character
 # '1' (e.g. "10" also matches); presumably a simple truthiness test was
 # intended -- confirm before changing, as kits may rely on it.
 for (grep {$_ =~ /\/ca$/ || ($plans->{$_}{is_ca}||'') =~ 1} @paths) {
  $plans->{$_}{is_ca} = 1;
  push(@{$base_cas->{$plans->{$_}{base_path}} ||= []}, $_);
 }
 for my $base_path (CORE::keys %$base_cas) {
  next unless my $count = scalar(@{$base_cas->{$base_path}});
  my ($base_ca, $err);
  if ($count == 1) {
   # Use the ca for the base path
   $base_ca = $base_cas->{$base_path}[0];
  } elsif (grep {$_ eq "$base_path/ca"} @{$base_cas->{$base_path}}) {
   # Use the default ca if there's more than one
   $base_ca = "$base_path/ca";
  } else {
   # Ambiguous - flag this further down
   $err = "Unspecified/ambiguous signing CA";
  }
  # Non-CA certs under this base path with no explicit signer get the
  # base CA (or an error when the base CA is ambiguous)
  my @signable_certs = grep {!$plans->{$_}{is_ca}
    && $plans->{$_}{base_path} eq $base_path
    && !$plans->{$_}{signed_by}
   } @paths;
  for (@signable_certs) {
   if ($err) {
    $plans->{$_}{type} = "error";
    $plans->{$_}{error} = "Ambiguous or missing signing CA"
   } else {
    $plans->{$_}{signed_by} = $base_ca;
   }
  }
 }
 # Group cert paths by their signer ('' = unsigned)
 my $signers = {};
 for (@paths) {
  my $signer = $plans->{$_}{signed_by} || '';
  push (@{$signers->{$signer} ||= []}, $_);
 }
 $signers->{$_} = [sort @{$signers->{$_}}] for (CORE::keys %$signers);
 _sign_unsigned_x509_plans($signers->{''}, $plans, $root_ca_path );
 # Walk the signer graph, emitting each signer's certs before recursing
 my @ordered_plans;
 my $target = '';
 while (1) {
  _sign_x509_plans($target,$signers,$plans,\@ordered_plans,$validate);
  $target = _next_signer($signers);
  last unless $target;
 }
 # Find unresolved signage paths
 for (grep {$plans->{$_}{type} eq 'x509' && !$plans->{$_}{__processed}} sort(CORE::keys %$plans)) {
  $plans->{$_}{type} = "error";
  $plans->{$_}{error} = "Could not find associated signing CA";
  push(@ordered_plans, $plans->{$_})
 }
 return @ordered_plans;
}
# }}}
# _sign_unsigned_x509_plans - sign unsigned plans with the root CA if present, otherwise self-signed {{{
# _sign_unsigned_x509_plans - every x509 plan in @$cert_paths that has no
# signer is signed by the root CA (an absolute vault path) when one was
# provided, otherwise it is marked self-signed.  Mutates $plans in place.
sub _sign_unsigned_x509_plans {
 my ($cert_paths, $plans, $root_ca) = @_;
 for my $p (@{$cert_paths || []}) {
  next unless $plans->{$p}{type} eq 'x509';
  next if $plans->{$p}{signed_by};
  if ($root_ca) {
   $plans->{$p}{signed_by} = $root_ca;
   # the root CA path is absolute, not relative to the env's secret base
   $plans->{$p}{signed_by_abs_path} = 1;
  } else {
   $plans->{$p}{self_signed} = 1;
  }
 }
}
# }}}
# _sign_x509_plans - process the certs in order of signer {{{
# _sign_x509_plans - emit the certs signed by $signer into @$ordered_plans
# (validating each when $validate is set), then recurse into any certs that
# are themselves signers.  A cert that reappears in @$ordered_plans marks a
# signing cycle and is converted to an error plan.  A signer that signs
# itself is promoted to an explicitly self-signed CA and processed first.
sub _sign_x509_plans {
 my ($signer,$certs_by_signer,$src_plans,$ordered_plans,$validate) = @_;
 if ($signer) {
  if (! grep {$_->{path} eq $signer} (@$ordered_plans)) {
   my ($idx) = grep {$certs_by_signer->{$signer}[$_] eq $signer} ( 0 .. scalar(@{$certs_by_signer->{$signer}})-1);
   if (defined($idx)) {
    # I'm signing myself - must be a CA
    unshift(@{$certs_by_signer->{$signer}}, splice(@{$certs_by_signer->{$signer}}, $idx, 1));
    $src_plans->{$signer}{self_signed} = 2; #explicitly self-signed
    $src_plans->{$signer}{signed_by} = "";
    $src_plans->{$signer}{is_ca} = 1;
   }
  }
 }
 # shift consumes this signer's queue so _next_signer sees it as done
 while (my $cert = shift(@{$certs_by_signer->{$signer}})) {
  if (grep {$_->{path} eq $cert} (@$ordered_plans)) {
   # Cert has been added already - bail
   $src_plans->{$cert} ||= {};
   $src_plans->{$cert}{type} = 'error';
   $src_plans->{$cert}{error} = 'Cyclical CA signage detected';
   return;
  }
  # __processed lets the caller detect plans left unresolved afterwards
  $src_plans->{$cert}{__processed} = 1;
  push(@$ordered_plans, $src_plans->{$cert})
   if ((!$validate) || _validate_x509_plan($src_plans,$cert,$ordered_plans));
  _sign_x509_plans($cert,$certs_by_signer,$src_plans,$ordered_plans,$validate)
   if scalar(@{$certs_by_signer->{$cert} || []});
 }
}
# }}}
# _next_signer - determine next signer so none are orphaned {{{
# _next_signer - pick the next signing CA to process: the first (sorted)
# signer that still has certificates queued AND is not itself waiting to be
# signed by one of the other remaining signers, so no cert gets orphaned.
# Returns undef when no signer qualifies.
sub _next_signer {
 my ($signers) = @_;
 my @pending = grep { scalar(@{$signers->{$_}}) } sort(CORE::keys %$signers);
 for my $i (0 .. $#pending) {
  my $candidate = $pending[$i];
  # certs still queued under the signers after this candidate
  my %queued = map { ($_, 1) } map { @{$signers->{$_}} } @pending[$i+1 .. $#pending];
  return $candidate unless $queued{$candidate};
 }
 return undef;
}
# }}}
# _validate_x509_plan - check the cert plan is valid {{{
# _validate_x509_plan - check that an x509 plan's arguments (valid_for,
# names, usage, is_ca, signed_by) are well formed.  On failure the plan in
# $plans is replaced by an error-type plan (which is also pushed onto
# @$ordered_plans) and undef is returned; on success returns 1.
sub _validate_x509_plan {
 my ($plans,$cert_name, $ordered_plans) = @_;
 my %cert = %{$plans->{$cert_name}};
 my $err = "";
 # valid_for must be <positive integer><y|m|d|h>
 $err .= "\n- Invalid valid_for argument: expecting <positive_number>[ymdh], got $cert{valid_for}"
  unless !$cert{valid_for} || ($cert{valid_for} || '') =~ /^[1-9][0-9]*[ymdh]$/;
 # names must be a non-empty array of non-empty plain strings
 if ($cert{names}) {
  if (CORE::ref($cert{names}) eq 'HASH') {
   $err .= "\n- Invalid names argument: expecting an array of one or more strings, got a hashmap";
  } elsif (CORE::ref($cert{names}) eq '') {
   $err .= "\n- Invalid names argument: expecting an array of one or more strings, got the string '$cert{names}'"
  } elsif (CORE::ref($cert{names}) eq 'ARRAY') {
   if (! scalar @{$cert{names}}) {
    $err .= "\n- Invalid names argument: expecting an array of one or more strings, got an empty list";
   } elsif (grep {!$_} @{$cert{names}}) {
    $err .= "\n- Invalid names argument: cannot have an empty name entry";
   } elsif (grep {CORE::ref($_) ne ""} @{$cert{names}}) {
    $err .= "\n- Invalid names argument: cannot have an entry that is not a string";
   }
  }
 }
 # usage must be an array of known key-usage strings
 if ($cert{usage}) {
  if (CORE::ref($cert{usage}) eq 'ARRAY') {
   my %valid_keys = map {$_, 1} _x509_key_usage();
   my @invalid_keys = grep {!$valid_keys{$_}} @{$cert{usage}};
   $err .= sprintf("\n- Invalid usage argument - unknown usage keys: '%s'\n Valid keys are: '%s'",
    join("', '", sort @invalid_keys), join("', '", sort(CORE::keys %valid_keys)))
    if (@invalid_keys);
  } else {
   $err .= "\n- Invalid usage argument: expecting an array of one or more strings, got ".
    (CORE::ref($cert{usage}) ? lc('a '.CORE::ref($cert{usage})) : "the string '$cert{usage}'");
  }
 }
 $err .= "\n- Invalid is_ca argument: expecting boolean value, got '$cert{is_ca}'"
  unless (!defined($cert{is_ca}) || $cert{is_ca} =~ /^1?$/);
 if ($cert{signed_by}) {
  $err .= "\n- Invalid signed_by argument: expecting relative vault path string, got '$cert{signed_by}'"
   unless ($cert{signed_by} =~ /^[a-z0-9_-]+(\/[a-z0-9_-]+)+$/i);
  # CN conflict check: only applies when the signer plan is a hashmap with
  # names.  FIX: the guard was `CORE::ref(...)||'' eq "HASH"` which parses
  # as `ref(...) || ('' eq "HASH")` since `eq` binds tighter than `||` --
  # that passed for ANY ref type, and a non-hash signer plan then crashed
  # on the hash dereference below.
  $err .= "\n- CA Common Name Conflict - can't share CN '".@{$cert{names}}[0]."' with signing CA"
   if (
    ((CORE::ref($plans->{$cert{signed_by}}) || '') eq "HASH") &&
    $plans->{$cert{signed_by}}{names} &&
    CORE::ref($cert{names}) eq 'ARRAY' &&
    CORE::ref($plans->{$cert{signed_by}}{names}) eq 'ARRAY' &&
    @{$cert{names}}[0] eq @{$plans->{$cert{signed_by}}{names}}[0]
   );
 }
 if ($err) {
  $plans->{$cert_name} = {%cert, type => 'error', error => "Bad X509 certificate request: $err"};
  push @$ordered_plans, $plans->{$cert_name};
  return undef;
 }
 return 1;
}
# }}}
# _validate_ssh_plan - check the ssh plan is valid {{{
# _validate_ssh_plan - ensure an ssh key plan requests a sane key size (an
# integer between 1024 and 16384 bits).  On failure an error-type copy of
# the plan is pushed onto @$ordered_plans and undef is returned; otherwise
# returns 1 (and the caller keeps the plan as-is).
sub _validate_ssh_plan {
 my ($plans, $path, $ordered_plans) = @_;
 my %plan = %{$plans->{$path}};
 my $size = $plan{size};
 my $size_ok = ($size =~ /^\d+$/) && $size >= 1024 && $size <= 16384;
 return 1 if $size_ok;
 push @$ordered_plans, {
  %plan,
  type  => 'error',
  error => "Bad SSH request: \n- Invalid size argument: expecting 1024-16384, got $plan{size}",
 };
 return undef;
}
# }}}
# _validate_uuid_plan - check the uuid plan is valid {{{
# _validate_uuid_plan - check a uuid plan's arguments against its version:
# the name-based versions (v3/md5, v5/sha1) require a name argument, while
# every other version accepts neither name nor namespace.  On failure an
# error-type copy of the plan is pushed onto @$ordered_plans and undef is
# returned; otherwise returns 1.
sub _validate_uuid_plan {
 my ($plans, $path, $ordered_plans) = @_;
 my %plan = %{$plans->{$path}};
 my $version = $plan{version};
 my $err = "";
 if ($version =~ m/^(v3|v5|md5|sha1)$/i) {
  $err .= "\n- $version UUIDs require a name argument to be specified"
   unless defined($plan{name});
 } else {
  # time/random versions: name and namespace are both disallowed
  my @bad_args = grep {defined($plan{$_})} qw(name namespace);
  $err .= "\n- $version UUIDs cannot take ".join(" or ", @bad_args)." argument".(@bad_args > 1 ? 's' : '')
   if @bad_args;
 }
 return 1 unless $err;
 push @$ordered_plans, {%plan, type => 'error', error => "Bad UUID request: $err"};
 return undef;
}
# }}}
# _validate_kit_secret - list keys expected for a given kit secret {{{
# _validate_kit_secret - check one kit secret against the fetched vault
# contents.  Returns ($result[, $message]) where $result is one of
# 'missing', 'error', 'warn' or 'ok'.
#
# Arguments:
#   $scope         - 'check' (existence only) or 'validate' (content too)
#   $plan          - the secret plan being checked
#   $secret_values - hashref of vault path => key/value hash (all secrets)
#   $root_path     - the environment's secrets base path
#   $plans         - arrayref of all plans (for cross-plan validations)
sub _validate_kit_secret {
 my ($scope,$plan,$secret_values,$root_path,$plans) = @_;
 # Existance
 my ($path,$key) = split(':', $root_path.$plan->{path});
 # normalize: strip leading/trailing and collapse duplicate slashes
 $path =~ s#^/?(.*?)/?$#$1#;
 $path =~ s#/{2,}#/#g;
 my $values = $secret_values->{$path};
 return ('missing') unless defined($values)
  && CORE::ref($values) eq 'HASH'
  && (!defined($key) || defined($values->{$key}));
 # All keys the plan's type is expected to have stored must be present
 my @keys = _expected_kit_secret_keys(%$plan);
 return (
  'error',
  sprintf("Cannot process secret type '%s': unknown type",$plan->{type})
 ) unless @keys;
 my $errors = join("\n", map {sprintf("%smissing key ':%s'", _checkbox(0), $_)} grep {! exists($values->{$_})} @keys);
 return ('missing',$errors) if $errors;
 return ('ok') unless $scope eq 'validate';
 # user-provided secrets have no content rules to validate
 return ('ok', '') if $plan->{type} eq 'provided';
 # Dispatch to _validate_<type>_secret when such a sub exists
 my $validate_sub=sprintf("_validate_%s_secret", $plan->{type});
 return ('ok', '') unless (exists(&{$validate_sub}));
 my ($results, @validations) = (\&{$validate_sub})->($path, $key, $plan, $secret_values, $plans, $root_path);
 my $show_all_messages = ! envset("GENESIS_HIDE_PROBLEMATIC_SECRETS");
 # Overall result is the worst individual result; each result entry is a
 # [level, message] pair keyed by validation name, reported in the order
 # given by @validations
 my %priority = ('error' => 0, 'warn' => 1, 'ok' => 2);
 my @results_levels = sort {$priority{$a}<=>$priority{$b}}
  uniq('ok', map {$_ ? ($_ =~ /^(error|warn)$/ ? $_ : 'ok') : 'error'}
   map {$_->[0]}
   values %$results);
 return (
  $results_levels[0],
  join("\n", map {_checkbox($_->[0]).$_->[1]}
   grep {$show_all_messages || $priority{$_->[0]} <= $priority{$results_levels[0]}}
   map {$results->{$_}}
   grep {exists $results->{$_}}
   @validations));
}
# }}}
# _validate_x509_secret - validate an x509 secret value {{{
#
# Deep-checks an x509 certificate/key pair stored in the vault against its
# plan: CN, SANs, CA-ness, signing chain, key/cert modulus agreement,
# validity window and key usage.  Shells out to openssl and scrapes its
# text output.
#
# Returns (\%results, @order) as consumed by _validate_kit_secret: each
# results entry is [status, message] where status is 'ok'/'warn'/'error'
# or a plain boolean (true => ok, false => error).
sub _validate_x509_secret {
    my ($path, $path_key, $plan, $all_secrets, $all_plans, $root_path) = @_;
    my $values = $all_secrets->{$path};
    my %results;

    # Get Cert Info -- parse the openssl text dump with anchored regexes.
    my $key = $values->{key};
    my $cert = $values->{certificate};
    my ($keyModulus) = run('openssl rsa -in <(echo "$1") -modulus -noout', $key) =~ /Modulus=(\S*)/;
    my $certInfo = run('openssl x509 -in <(echo "$1") -text -fingerprint -modulus -noout', $cert);
    my ($issuerCN, $since, $expires, $subjectCN, $fingerprint, $modulus) =
        $certInfo =~ /Issuer: CN\s*=\s*(\S*).*Not Before: ([^\n]*).*Not After : ([^\n]*).*Subject: CN\s*=\s*([^\r\n]+?)\s*[\r\n]+.*Fingerprint=(\S*).*Modulus=(\S*)/ms;
    my $is_ca = $certInfo =~ /X509v3 Basic Constraints:.*(CA:TRUE).*Signature Algorithm/ms;
    # The SAN stanza's entries are indented deeper than its header; the
    # backreference to the captured indent keeps us within the stanza.
    my (undef, $sanInfo) = $certInfo =~ /\n( *)X509v3 Subject Alternative Name:\s*?((?:[\n\r]+\1.*)+)/;
    my @SANs = ($sanInfo || '') =~ /(?:IP Address|DNS):([^,\n\r]+)/g;
    @SANs = map {s/\s*$//; $_} @SANs;

    # Validate CN if kit requests on explicitly (first entry of names).
    my $cn_str = ${$plan->{names}}[0];
    if ($cn_str) {
        my $match = $subjectCN eq $cn_str;
        $results{cn} = [
            $match ? 'ok' : 'warn',
            sprintf("Subject Name '%s'%s", $cn_str, $match ? '' : " (found '$subjectCN')")
        ];
    }

    # Validate SAN: compare the found SANs (excluding an implicit CN-only SAN
    # when no names were requested) against the plan's requested names.
    my (%sans,%desired_sans);
    @sans{grep {@{$plan->{names}} || $_ ne $subjectCN} @SANs}=();
    @desired_sans{ @{$plan->{names}} }=();
    my @extra_sans = sort(grep {!exists $desired_sans{$_}} CORE::keys %sans);
    my @missing_sans = sort(grep {!exists $sans{$_}} CORE::keys %desired_sans);
    if (!scalar(@extra_sans) && !scalar(@missing_sans)) {
        # Only report when there are SANs to report on.
        $results{san} = ['ok', 'Subject Alt Names: '.(@SANs ? join(", ",map {"'$_'"} @{$plan->{names}}) : '#i{none}')]
            if scalar(%sans);
    } else {
        $results{san} = ['warn', 'Subject Alt Names ('. join('; ',(
            @missing_sans ? "missing: ".join(", ", @missing_sans):(),
            @extra_sans? "extra: ".join(", ", @extra_sans) : ()
        )).")"];
    }

    # Signage and Modulus Agreement
    if ($plan->{is_ca}) {
        # Boolean status: true => ok, false => error (per _validate_kit_secret).
        $results{is_ca} = [ !!$is_ca, "CA Certificate" ];
    } else {
        # Being an unexpected CA is only a warning.
        $results{is_ca} = [ !$is_ca ? 'ok' : 'warn', 'Not a CA Certificate' ];
    }
    my ($subjectKeyID) = $certInfo =~ /X509v3 Subject Key Identifier: *[\n\r]+\s+([A-F0-9:]+)\s*$/m;
    my ($authKeyID) = $certInfo =~ /X509v3 Authority Key Identifier: *[\n\r]+\s+keyid:([A-F0-9:]+)\s*$/m;
    my $signed_by_str;
    my $self_signed = (!$plan->{signed_by} || $plan->{signed_by} eq $plan->{path});
    if ($self_signed) {
        # Prefer key-identifier comparison; fall back to CN equality.
        $results{self_signed} = [
            ($subjectKeyID && $authKeyID) ? $subjectKeyID eq $authKeyID : $issuerCN eq $subjectCN,
            "Self-Signed"
        ];
    } else {
        my $signer_path = $plan->{signed_by_abs_path} ? $plan->{signed_by} : $root_path.$plan->{signed_by};
        $signer_path =~ s#^/##;
        my $ca_cert = $all_secrets->{$signer_path}{certificate};
        if ($ca_cert) {
            my $caSubjectKeyID;
            if ($authKeyID) {
                # Try to use the subject and authority key identifiers if they exist
                my $caInfo = run('openssl x509 -in <(echo "$1") -text -noout', $ca_cert);
                ($caSubjectKeyID) = $caInfo =~ /X509v3 Subject Key Identifier: *[\r\n]+\s+([A-F0-9:]+)\s*$/m;
            }
            if ($caSubjectKeyID) {
                $results{signed} = [
                    $authKeyID eq $caSubjectKeyID,
                    "Signed by ".$plan->{signed_by}
                ];
            } else {
                # Otherwise try to validate the full chain if we have access all the certs:
                # walk up the signing chain via the plans, concatenating CA certs.
                my $ca_plan;
                my $full_cert_chain='';
                while (1) {
                    last unless $signer_path && defined($all_secrets->{$signer_path});
                    $full_cert_chain = $all_secrets->{$signer_path}{certificate}.$full_cert_chain;
                    ($ca_plan) = grep {$root_path.$_->{path} eq '/'.$signer_path} @$all_plans;
                    last unless ($ca_plan && $ca_plan->{signed_by});
                    ($signer_path = $ca_plan->{signed_by_abs_path}
                        ? $ca_plan->{signed_by}
                        : $root_path.$ca_plan->{signed_by}
                    ) =~ s#^/##
                }
                my $out = run(
                    'openssl verify -verbose -CAfile <(echo "$1") <(echo "$2")',
                    $full_cert_chain, $values->{certificate}
                );
                my $signed;
                if ($out =~ /error \d+ at \d+ depth lookup/) {
                    #fine, we'll check via safe itself - last resort because it takes time
                    my $signer_path = $plan->{signed_by_abs_path} ? $plan->{signed_by} : $root_path.$plan->{signed_by};
                    $signer_path =~ s#^/##;
                    my ($safe_out,$rc) = Genesis::Vault::current->query('x509','validate','--signed-by', $signer_path, $root_path.$plan->{path});
                    $signed = $rc == 0 && $safe_out =~ qr/$plan->{path} checks out/;
                } else {
                    $signed = $out =~ /: OK$/;
                }
                $results{signed} = [
                    $signed,
                    sprintf("Signed by %s%s", $plan->{signed_by}, $signed ? '' : (
                        $subjectCN eq $issuerCN ? " (maybe self-signed?)" : " (signed by CN '$issuerCN')"
                    ))
                ];
            }
        } else {
            # The plan names a signing CA we have no certificate for.
            $results{signed} = [
                'error',
                sprintf("Signed by %s (specified CA not found - %s)", $plan->{signed_by},
                    ($subjectCN eq $issuerCN ? "maybe self-signed?" : "found signed by CN '$issuerCN'")
                )
            ];
        }
    }
    # The private key and certificate must share an RSA modulus.
    $results{modulus_agreement} = [$modulus eq $keyModulus, "Modulus Agreement"];

    # Validate TTL: inside the validity window, and warn within 30 days of expiry.
    my $now_t = Time::Piece->new();
    my $since_t = Time::Piece->strptime($since, "%b %d %H:%M:%S %Y %Z");
    my $expires_t = Time::Piece->strptime($expires, "%b %d %H:%M:%S %Y %Z");
    my $valid_str;
    my $days_left;
    if ($since_t < $now_t) {
        if ($now_t < $expires_t) {
            $days_left = ($expires_t - $now_t)->days();
            $valid_str = sprintf("expires in %.0f days (%s)", ($expires_t - $now_t)->days(), $expires);
        } else {
            $valid_str = sprintf("expired %.0f days ago (%s)", ($now_t - $expires_t)->days(), $expires);
        }
    } else {
        $valid_str = "not yet valid (starts $since)";
    }
    $results{valid} = [$valid_str =~ /^expires/ ? ($days_left > 30 ? 'ok' : 'warn') : 'error', "Valid: ".$valid_str];

    # Validate Usage against the plan's explicit or default usage list.
    my ($usage, $usage_str, $usage_type) = _get_x509_plan_usage($plan);
    my $usage_results = _x509_key_usage($certInfo,$usage);
    $usage_type = 'warn' unless ($usage_results->{found}); # no enforcement if no keys specified
    if (!defined($usage_results->{extra}) && !defined($usage_results->{missing})) {
        $results{usage} = [
            'ok',
            $usage_str . (@$usage ? ": ".join(", ", @$usage) : '')
        ];
    } else {
        my @extra_usage = @{$usage_results->{extra}||[]};
        my @missing_usage = @{$usage_results->{missing}||[]};
        my $usage_err_str = " (". join('; ',(
            @missing_usage ? "missing: ".join(", ", @missing_usage):(),
            @extra_usage ? "extra: " .join(", ", @extra_usage ):()
        )).")";
        $results{usage} = [
            $usage_type,
            $usage_str . $usage_err_str
        ];
    }
    return (\%results, qw/is_ca self_signed signed valid modulus_agreement cn san usage/);
}
# }}}
# _validate_dhparams_secret - validate a Diffie-Hellman parameters secret value {{{
#
# The stored PEM must pass `openssl dhparam -check` and its bit size must
# match the plan's requested size.
#
# Returns (\%results, @order) as consumed by _validate_kit_secret.
sub _validate_dhparams_secret {
    my ($path, $path_key, $plan, $all_secrets, $all_plans, $root_path) = @_;
    my $values = $all_secrets->{$path};
    my $pem = $values->{'dhparam-pem'};
    my $pemInfo = run('openssl dhparam -in <(echo "$1") -text -check -noout', $pem);
    my ($size) = $pemInfo =~ /DH Parameters: \((\d+) bit\)/;
    my $pem_ok = $pemInfo =~ /DH parameters appear to be ok\./;
    # Guard against unparsable openssl output: previously an undef $size
    # raised an uninitialized-value warning and printed "(found  bits)".
    my $size_ok = defined($size) && $size == $plan->{size};
    return ({
        valid => [$pem_ok, "Valid"],
        size => [$size_ok, sprintf("%s bits%s", $plan->{size},
            $size_ok ? '' : (defined($size) ? " (found $size bits)" : " (could not read size)"))]
    }, qw/valid size/);
}
# }}}
# _validate_ssh_secret - validate an SSH secret value {{{
#
# Checks that the private and public keys each parse under ssh-keygen,
# that the public key agrees with the one derived from the private key,
# and that the key size matches the plan.
#
# Returns (\%results, @order); 'agree' and 'size' are only present when
# the corresponding prerequisite key parsed successfully.
sub _validate_ssh_secret {
    my ($path, $path_key, $plan, $all_secrets, $all_plans, $root_path) = @_;
    my $values = $all_secrets->{$path};
    my %results;

    # Derive the public key from the private one; non-zero rc => unreadable.
    my ($rendered_public,$priv_rc) = run('ssh-keygen -y -f /dev/stdin <<<"$1"', $values->{private});
    $results{priv} = [
        !$priv_rc,
        "Valid private key"
    ];
    # The bubblebabble digest (-B) doubles as a parse check and as a
    # fingerprint for the agreement comparison below.
    my ($pub_sig,$pub_rc) = run('ssh-keygen -B -f /dev/stdin <<<"$1"', $values->{public});
    $results{pub} = [
        !$pub_rc,
        "Valid public key"
    ];
    if (!$priv_rc) {
        # NOTE(review): if the stored public key failed to parse, $pub_sig may
        # be undef/empty here, so this comparison can warn -- confirm intended.
        my ($rendered_sig,$rendered_rc) = run('ssh-keygen -B -f /dev/stdin <<<"$1"', $rendered_public);
        $results{agree} = [
            $rendered_sig eq $pub_sig,
            "Public/Private key Agreement"
        ];
    }
    if (!$pub_rc) {
        # The digest line begins with the key's bit size.
        my ($bits) = $pub_sig =~ /^\s*([0-9]*)/;
        $results{size} = [
            $bits == $plan->{size} ? 'ok' : 'warn',
            sprintf("%s bits%s", $plan->{size}, ($bits == $plan->{size}) ? '' : " (found $bits bits)" )
        ];
    }
    return (\%results, qw/priv pub agree size/)
}
# }}}
# _validate_RSA_secret - validate an RSA secret value {{{
#
# Checks that both PEM keys parse under openssl, that the public key's
# size matches the plan, and that the private and public moduli agree.
#
# Returns (\%results, @order); 'size' and 'agree' are only present when
# the public key (and for 'agree', also the private key) parsed.
sub _validate_rsa_secret {
    my ($path, $path_key, $plan, $all_secrets, $all_plans, $root_path) = @_;
    my $values = $all_secrets->{$path};
    my %results;
    my ($priv_modulus,$priv_rc) = run('openssl rsa -noout -modulus -in <(echo "$1")', $values->{private});
    $results{priv} = [
        !$priv_rc,
        "Valid private key"
    ];
    my ($pub_modulus,$pub_rc) = run('openssl rsa -noout -modulus -in <(echo "$1") -pubin', $values->{public});
    $results{pub} = [
        !$pub_rc,
        "Valid public key"
    ];
    if (!$pub_rc) {
        # Read the bit size from the public key's text dump; a failed dump
        # or unparsable output yields "(could not read size)".
        my ($pub_info, $pub_rc2) = run('openssl rsa -noout -text -inform PEM -in <(echo "$1") -pubin', $values->{public});
        my ($bits) = ($pub_rc2) ? () : $pub_info =~ /Key:\s*\(([0-9]*) bit\)/;
        my $size_ok = ($bits || 0) == $plan->{size};
        $results{size} = [
            $size_ok ? 'ok' : 'warn',
            sprintf("%s bit%s", $plan->{size}, $size_ok ? '' : ($bits ? " (found $bits bits)" : " (could not read size)"))
        ];
        if (!$priv_rc) {
            # Matching moduli prove the pair belongs together.
            $results{agree} = [
                $priv_modulus eq $pub_modulus,
                "Public/Private key agreement"
            ];
        }
    }
    return (\%results, qw/priv pub agree size/)
}
# }}}
# _validate_random_secret - validate randomly generated string secret value {{{
#
# Checks the stored string's length, optionally its character set, and
# optionally that its formatted companion key (e.g. a crypt/base64
# rendering) exists alongside it.
#
# Returns (\%results, @order) as consumed by _validate_kit_secret.
sub _validate_random_secret {
    my ($path, $key, $plan, $all_secrets, $all_plans, $root_path) = @_;
    my $values = $all_secrets->{$path};
    my %results;
    my $length_ok = $plan->{size} == length($values->{$key});
    $results{length} = [
        $length_ok ? 'ok' : 'warn',
        sprintf("%s characters%s", $plan->{size}, $length_ok ? '' : " - got ". length($values->{$key}))
    ];
    if ($plan->{valid_chars}) {
        # Escape a leading '^' so it cannot negate the character class below.
        (my $valid_chars = $plan->{valid_chars}) =~ s/^\^/\\^/;
        my $valid_chars_ok = $values->{$key} =~ /^[$valid_chars]*$/;
        $results{valid_chars} = [
            $valid_chars_ok ? 'ok' : 'warn',
            sprintf("Only uses characters '%s'%s", $valid_chars,
                $valid_chars_ok ? '' : " (found invalid characters in '$values->{$key}')"
            )
        ];
    }
    if ($plan->{format}) {
        # The formatted value lives under an explicit destination key, or
        # defaults to "<key>-<format>" on the same path.
        my ($secret_path,$secret_key) = split(":", $plan->{path},2);
        my $fmt_key = $plan->{destination} ? $plan->{destination} : $secret_key.'-'.$plan->{format};
        $results{formatted} = [
            exists($values->{$fmt_key}),
            sprintf("Formatted as %s in ':%s'%s", $plan->{format}, $fmt_key,
                exists($values->{$fmt_key}) ? '' : " ( not found )"
            )
        ];
    }
    return (\%results, qw/length valid_chars formatted/);
}
# }}}
# _validate_uuid_secret - validate UUID secret value {{{
#
# The stored value must be a well-formed UUID string; for the name-based
# versions (v3/md5, v5/sha1) it must additionally equal the UUID derived
# from the plan's name and namespace (those versions are deterministic).
#
# Returns (\%results, @order) as consumed by _validate_kit_secret.
sub _validate_uuid_secret {
    my ($path, $key, $plan, $all_secrets, $all_plans, $root_path) = @_;
    my $values = $all_secrets->{$path};
    my %results;
    my @validations = qw/valid/;
    my $version = $plan->{version};
    if (UUID::Tiny::is_uuid_string $values->{$key}) {
        $results{valid} = ['ok', "Valid UUID string"];
        if ($version =~ m/^(v3|md5|v5|sha1)$/i) {
            # Re-derive the name-based UUID and compare it to the stored one.
            my $v=(\&{"UUID::Tiny::UUID_$version"})->();
            # Resolve a symbolic NS_* namespace constant; otherwise use the
            # plan's namespace value as-is.  (Declaration is split from the
            # conditional assignment: `my $x = ... if COND;` has undefined
            # behavior in Perl.)
            my $ns;
            $ns = (\&{"UUID::Tiny::UUID_".$plan->{namespace}})->() if ($plan->{namespace}||'') =~ m/^NS_/;
            $ns ||= $plan->{namespace};
            my $uuid = UUID::Tiny::create_uuid_as_string($v, $ns, $plan->{name});
            $results{hash} = [
                $uuid eq $values->{$key},
                "Correct for given name and namespace".($uuid eq $values->{$key} ? '' : ": expected $uuid, got $values->{$key}")
            ];
            push @validations, 'hash';
        }
    } else {
        $results{valid} = ['error', "valid UUID: expecting xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx, got ".$values->{$key}];
    }
    return (\%results, @validations);
}
# }}}
# _x509_key_usage - specify allowed usage values, and map openssl identifiers to tokens {{{
#
# Three calling modes:
#   - no $openssl_text: returns the full list of recognized usage tokens;
#   - $check is not an arrayref: returns the tokens found in the text;
#   - otherwise: returns a hashref with 'extra', 'missing' and 'found'
#     arrayrefs (each undef when empty) comparing found vs requested usage.
sub _x509_key_usage {
    my ($openssl_text, $check) = @_;
    # Map openssl's display names to our canonical usage tokens.
    my %keyUsageLookup = (
        "Digital Signature" => "digital_signature",
        "Non Repudiation" => "non_repudiation",
        "Content Commitment" => "content_commitment", #Newer version of non_repudiation
        "Key Encipherment" => "key_encipherment",
        "Data Encipherment" => "data_encipherment",
        "Key Agreement" => "key_agreement",
        "Certificate Sign" => "key_cert_sign",
        "CRL Sign" => "crl_sign",
        "Encipher Only" => "encipher_only",
        "Decipher Only" => "decipher_only",
    );
    my %extendedKeyUsageLookup = (
        "TLS Web Client Authentication" => "client_auth",
        "TLS Web Server Authentication" => "server_auth",
        "Code Signing" => "code_signing",
        "E-mail Protection" => "email_protection",
        "Time Stamping" => "timestamping"
    );
    return uniq(values %keyUsageLookup, values %extendedKeyUsageLookup)
        unless defined($openssl_text);
    # Collect the usage tokens present in the certificate's text dump.
    my %found = ();
    my ($specified_keys) = $openssl_text =~ /X509v3 Key Usage:.*[\n\r]+\s*([^\n\r]+)/;
    my ($specified_ext) = $openssl_text =~ /X509v3 Extended Key Usage:.*[\n\r]\s*+([^\n\r]+)/;
    if ($specified_keys) {
        my @keys = split(/,\s+/,$specified_keys);
        chomp @keys;
        $found{$_} = 1 for (grep {$_} map {$keyUsageLookup{$_}} @keys);
    }
    if ($specified_ext) {
        my @keys = split(/,\s+/,$specified_ext);
        chomp @keys;
        $found{$_} = 1 for (grep {$_} map {$extendedKeyUsageLookup{$_}} @keys);
    }
    my @found = sort(grep {$found{$_}} CORE::keys %found);
    return CORE::keys(%found) unless (CORE::ref($check) eq "ARRAY");
    # Decrement per requested token: positive => found but not requested
    # (extra), negative => requested but not found (missing).
    $found{$_}-- for uniq(@$check);
    if ( exists($found{non_repudiation}) && exists($found{content_commitment}) &&
        (abs($found{non_repudiation} + $found{content_commitment}) < 1)) {
        # if both non_repudiation and content_commitment are found and/or requested,
        # then as long is the total sum is less than |1|, it is considered requested
        # and found (ie not both requested and none found or both found and none requested)
        $found{non_repudiation} = $found{content_commitment} = 0;
    }
    my @extra = sort(grep {$found{$_} > 0} CORE::keys %found);
    my @missing = sort(grep {$found{$_} < 0} CORE::keys %found);
    return {
        extra => (@extra ? \@extra : undef),
        missing => (@missing ? \@missing : undef),
        found => (@found ? \@found : undef)
    }
}
#}}}
# _get_plan_paths - list all paths for the given plan {{{
#
# Most plans own a single vault path, but formatted random secrets also
# write their alternate rendering under a sibling key on the same node.
sub _get_plan_paths {
    my ($plan) = @_;
    my @found = ($plan->{path});
    if ($plan->{type} eq 'random' && $plan->{format}) {
        my ($node, $secret_key) = split(':', $plan->{path}, 2);
        my $alt_key = $plan->{destination} ? $plan->{destination} : $secret_key.'-'.$plan->{format};
        push @found, sprintf("%s:%s (paired with %s)", $node, $alt_key, $plan->{path});
    }
    return @found;
}
#}}}
# _get_x509_plan_usage - get the usage and its description for a given x509 plan {{{
#
# Returns ($usage_arrayref, $description, $severity): the usage list the
# certificate is expected to carry, a human-readable label for it, and
# the severity to report on mismatch.
sub _get_x509_plan_usage {
    my ($plan) = @_;
    # Severity is 'warn' for all cases at present; switch to 'error' to
    # enforce explicit usage mismatches.
    my $severity = 'warn';
    if (defined($plan->{usage})) {
        my $wanted = $plan->{usage};
        my $label  = scalar(@$wanted) ? "Specified key usage" : "No key usage";
        return ($wanted, $label, $severity);
    }
    return ([qw/server_auth client_auth crl_sign key_cert_sign/], "Default CA key usage", $severity)
        if $plan->{is_ca};
    return ([qw/server_auth client_auth/], "Default key usage", $severity);
}
# }}}
# _checkbox - make a checkbox {{{
#
# Render a status checkbox for report output: 'warn' => warning bullet,
# any other true value except 'error' => good bullet, false or 'error'
# => bad bullet.
# NOTE(review): assumes $_[0] is defined; callers in this file pass
# booleans or status strings -- an undef would warn under `eq`.
sub _checkbox {
    return bullet($_[0] eq 'warn' ? 'warn' : ($_[0] && $_[0] ne 'error' ? 'good' : 'bad'), '', box => 1, inline => 1, indent => 0);
}
# }}}
# }}}
1;
=head1 NAME
Genesis::Vault
=head1 DESCRIPTION
This module provides utilities for interacting with a Vault through safe.
=head1 Class Methods
=head2 new($url,$name,$verify)
Returns a blessed Genesis::Vault object based on the URL, target name and TLS verify values provided.
B<NOTE:> This should not be called directly, as it provides no error checking or validations.
=head2 target($target, %opts)
Returns a C<Genesis::Vault> object representing the vault at the given target
or presents the user with an interactive prompt to specify a target. This is
intended to be used when setting up a deployment repo for the first time, or
selecting a new vault for an existing deployment repo.
In the case that the target is passed in, the target will be validated to
ensure that it is known, a url or alias and that its url is unique (not being
used by any other aliases); A C<Genesis::Vault> object for that target is
returned if it is valid, otherwise, an error will be raised.
In the case that the target is not passed in, all unique-url aliases will be
presented for selection, with the current system target being shown as a
default selection. If there are aliases that share urls, a warning will be
presented to the user that some invalid targets are not shown due to that.
The user then enters the number corresponding to the desired target, and a
C<Genesis::Vault> object corresponding to that selection is returned. This
requires that the caller is in a controlling terminal, otherwise the program
will terminate.
C<%opts> can be the following values:
=over
=item default_vault
A C<Genesis::Vault> that will be used as the default
vault selection in the interactive prompt. If not provided, the current system
target vault will be used. Has no effect when not in interactive mode.
=back
In either case, the target will be validated that it is reachable, authorized
and ready to be used, and will set that vault as the C<current> vault for the
class.
=head2 attach($url, $insecure)
Returns a C<Genesis::Vault> object for the given url according to the user's
.saferc file.
This will result in an error if the url is not known in the .saferc or if it
is not unique to a single alias, as well as if the url is not a valid url.
The C<insecure> does not matter for the attach, but does change the error
output for describing how to add the target to the local safe configuration if
it is missing.
=head2 rebind
This is used to rebind to the previous vault when in a callback from a Genesis-
run hook. It uses the C<GENESIS_TARGET_VAULT> environment variable that is set
prior to running a hook, and only ensures that the vault is known to the system.
=head2 find(%conditions)
Without any conditions, this will return all system-defined safe targets as
Genesis::Vault objects. Specifying hash elemements of the property => value
filters the selection to those that have that property value (compared as string)
Valid properties are C<url>, C<name>, C<tls> and C<verify>.
=head2 find_by_target($alias_or_url)
This will return all Vaults that use the same url as the given alias or url.
=head2 default
This will return the Vault that is the set target of the system, or null if
there is no current system target.
=head2 current
This will return the Vault that was the last Vault targeted by Genesis::Vault
methods of target, attach or rebind, or by the explicit set_as_current method
on a Vault object.
=head2 clear_all
This method removes all cached Vault objects and the C<current> and C<default>
values. Though mainly used for providing a clean slate for testing, it could
also be useful if the system's safe configuration changes and those changes need
to be picked up by Genesis during a run.
=head1 Instance Methods
Each C<Genesis::Vault> object is composed of the properties of url, its name
(alias) as it is known on the local system, and its verify (binary opposite of
skip-ssl-validation). While these properties can be queried directly, it is
better to use the accessor methods by the same name
=head2 url
Returns the url for the Vault object, in the form of:
C<schema://host_name_or_ip:port>
The :port is optional, and is understood to be 80 for http schema or 443 for
https.
=head2 name
Returns the name (aka alias) of the vault as it is known on the local system.
Because the same Vault target url may be known by a different name on each
system, the use of the alias is not considered a precise identifier for a
Vault, and only used for convenience in display output or specifying a target
initially.
=head2 verify
Returns a boolean true if the vault target's certificate will be validated
when it is connected, or false if not. Only applicable to https urls, though
http will default to true.
=head2 tls
Convenience method to check if using https (true) or http (false) rather than
having to substring or regex the url.
=head2 query
Allows caller to pass a generic query to the selected vault. The user can
specify anything that would normally come after `safe ...` on the command line,
but note that the -T <target> option will NOT have any effect.
This can take the same arguments and returns the same structure that a
C<Genesis::run> method would, with two caveats:
=over
=item *
Setting the environment variable SAFE_TARGET will get overwritten with the url
of the Vault object being operated on.
=item *
Setting the DEBUG environment variable will get unset because it is disruptive
to the call. If you want to see the call being made so you can debug it, run
the Genesis command with -T or set the GENESIS_TRACE variable to 1
=back
=head2 get($path[, $key])
Return the string of the given path and key, or return the entire content under
the given path if no key is given. The path does not have to be an end node
that contains keys; it can be a branch path, in which case all the sub-paths
and their key:value pairs will be returned.
=head2 set($path, $key[, $value])
If a value is specified, it will set that value (as a string) to the given key
on the specified path. If no value is provided, an interactive mode will be
started where the user will be prompted to enter the value. This will be
'dotted' out on the screen, and the user will have to enter the same value
again to confirm the correctness of their entry.
=head2 has($path[, $key])
Returns true if the vault contains the path and optionally the key if given.
Equivalent to C<safe exists $path> or C<safe exists $path:$key> as appropriate.
=head2 paths([@prefixes])
Returns a list of all paths in the vault if no prefix was specified, or all
paths that can be found under the specified prefixes. If you ask for
overlapping prefixes, paths that match multiple prefixes will be returned
multiple times.
Note that this will only return node paths (paths that contain keys on their
last path segment, so if a vault only contains
B<secret/this/is/my/long/path:key> and you asked for paths, it would only
return that entry, not each partial path.
=head2 keys
Similar to C<paths> above, but also includes the B<:key> suffix for each key
under the matching paths.
=head2 status
Returns the status of the vault. This is a string value that can be one of the
following:
=over
=item unreachable
This means that the vault url or port is not responding to connection attempts.
This may be because the C<vault> executable has stopped working, or due to
networking issue (e.g.: VPN not connected)
=item unauthenticated
This means that the vault is responding, but the local safe token has expired
or not been set. Run C<safe auth ...> to connect, then try the command again.
=item sealed
The vault is sealed, and must be unsealed by the administrator before you can
access it.
=item uninitialized
The vault is responding and authenticated, but does not look like it was
correctly initialized with safe.
This may be a basic vault that was stood up manually -- to resolve this, simply
run `safe set secret/handshake knock=knock` once you're sure you're talking to
the correct vault. If you are using a different secret mount in your
environments, replace '/secret/' with the same mount that your environments
use.
=item ok
The vault is operating normally and the user is authenticated.
=back
=head2 env
This returns a hash of the environment variable names and values for
configuring the vault for things that use the basic Hashicorp vault environment
variables to target a vault, such as C<spruce>. This can be fed directly into
the C<Genesis::run> commands C<env> option.
=head2 token
The authentication token for the vault, as stored in the C<.saferc> file.
=head2 set_as_current
Set the vault object as the current vault object used by this run of Genesis.
This is sometimes needed when dealing with legacy aspects of genesis
(pipelines, params from kit.yml) where there is no passing in of the C<Env> or
C<Top> object.
This is automatically called by C<target>, C<attach> and C<rebind> and
generally doesn't need to be manually set, but there are a few circumstances
that it may be necessary, so this was exposed as a public method.
=cut
# vim: fdm=marker:foldlevel=1:noet
| starkandwayne/genesis | lib/Genesis/Vault.pm | Perl | mit | 76,611 |
# Text::Aligner - Align text in columns
package Text::Aligner;

use strict;
use warnings;
use 5.008;

use Exporter ();

# Package interface variables declared with `our` instead of the obsolete
# `use vars`; the BEGIN wrapper was unnecessary since these assignments run
# at load time, before any caller's import() is invoked.
our $VERSION     = '0.13';
our @ISA         = qw (Exporter);
our @EXPORT      = qw ();
our @EXPORT_OK   = qw ( align);
our %EXPORT_TAGS = ();
# this is a non-method, and currently the only user interface
#
# align($spec, @strings) - justify strings to a common alignment field.
#
# In list context, returns justified copies of the arguments; in scalar
# context, returns them joined with newlines (including a trailing one);
# in void context, justifies the arguments in place (they must be
# lvalues).  Scalar references are dereferenced one level, so \$str
# aligns $str itself.
sub align ($@) {
    my $ali = Text::Aligner->new( shift);
    # First pass: record the space every string requires.
    $ali->_alloc( map ref eq 'SCALAR' ? $$_ : $_, @_);
    if ( defined wantarray ) {
        my @just = map $ali->_justify( ref eq 'SCALAR' ? $$_ : $_), @_;
        return @just if wantarray;
        return join "\n", @just, '';
    } else {
        # Void context: write the justified text back through the
        # arguments (or through the scalars they reference).
        for ( @_ ) {
            $_ = $ali->_justify( $_) for ref eq 'SCALAR' ? $$_ : $_; # one-shot
        }
    }
}
### class Text::Aligner

# Internal constructor: builds an aligner from a width measurer and a
# position locator (each either a method name or a coderef), plus two
# MaxKeepers that track the widest left and right parts seen so far.
sub _new {
    my ($class, $width, $pos) = @_;
    my %self = (
        width => $width,
        pos   => $pos,
        left  => Text::Aligner::MaxKeeper->new,
        right => Text::Aligner::MaxKeeper->new,
    );
    return bless \%self, $class;
}
# Construct an aligner
#
# Public constructor.  An "auto..." spec delegates to the combined
# numeric/left aligner (with "auto" rewritten to "num"); anything else
# is compiled into width/pos callbacks.  A false or missing spec means
# left alignment.
sub new {
    my ($class, $spec) = @_;
    $spec = 0 unless $spec;   # left alignment is the default
    return Text::Aligner::Auto->_new($spec)
        if !ref($spec) and $spec =~ s/^auto/num/;
    return $class->_new(_compile_alispec($spec));
}
# return left and right field widths for an object
#
# NOTE(review): this looks like a superseded variant of _measure() below
# (it lacks the Term::ANSIColor color-stripping) and appears to be unused
# within this module -- confirm before removing.
sub _measure0 {
    my $al = shift;
    my $obj = shift;
    $obj = '' unless defined $obj;
    my ( $w, $p);
    if ( ref $obj ) {
        # Objects are asked for their own width/pos via the aligner's
        # method names.
        ( $w, $p) = ( $obj->$al->{ width}->(), $obj->$al->{ pos}->() );
    } else {
        # Plain strings go through the aligner's coderefs.
        ( $w, $p) = ( $al->{ width}->( $obj), $al->{ pos}->( $obj) );
    }
    $_ ||= 0 for $w, $p;
    # (chars left of the alignment position, chars right of it)
    ( $p, $w - $p);
}
use Term::ANSIColor 2.02;
# Split an object's width into the part left of its alignment position
# and the part to the right of it.  The aligner's width/pos entries are
# either coderefs (applied to the value) or method names (called on the
# object).  Plain strings are stripped of ANSI color sequences first so
# escape codes don't count as printable columns.
sub _measure {
    my ($aligner, $thing) = @_;
    $thing = '' if !defined $thing;
    my $width_of = $aligner->{width};
    my $pos_of   = $aligner->{pos};
    $thing = Term::ANSIColor::colorstrip($thing) unless ref $thing;
    my $total = ref($width_of) ? $width_of->($thing) : $thing->$width_of;
    my $split = ref($pos_of)   ? $pos_of->($thing)   : $thing->$pos_of;
    $total ||= 0;
    $split ||= 0;
    return ($split, $total - $split);
}
# Return (left_max, right_max) for this aligner, or the empty list when
# nothing has been remembered yet (an "empty" aligner).
sub _status {
    my ($al) = @_;
    my $left  = $al->{left}->max;
    my $right = $al->{right}->max;
    # Both maxima are defined together, unless the MaxKeeper memory was
    # corrupted by forgetting values that were never remembered.
    return unless defined $left and defined $right;
    return ($left, $right);
}
# remember alignment requirements
#
# Record the space each given value needs on either side of its
# alignment position.  Returns the aligner for chaining.
sub _alloc {
    my ($al, @items) = @_;
    for my $item (@items) {
        my ($left, $right) = $al->_measure($item);
        $al->{left}->remember($left);    # space needed left of pos
        $al->{right}->remember($right);  # ...and right of pos
    }
    return $al;
}
# release alignment requirement. it disturbs an aligner deeply to forget
# things it hasn't remembered. the effects may be delayed.
sub _forget {
    my ($al, @items) = @_;
    for my $item (@items) {
        $item = '' unless defined $item;
        my ($left, $right) = $al->_measure($item);
        $al->{left}->forget($left);
        $al->{right}->forget($right);
    }
    return $al;
}
# Produce a run of $count spaces; zero or negative counts yield ''.
sub _spaces {
    my ($count) = @_;
    return '' if $count <= 0;
    return ' ' x $count;
}
# Justify a string within the aligner's field: pad with blanks or trim
# characters at either end, as _padding() dictates.  A string that has
# been _alloc()ed will never be trimmed (that is the point of
# allocation); an empty aligner returns the string unaltered.
sub _justify {
    my ($al, $str) = @_;
    $str .= '';   # stringify objects, numbers, and undef
    my ($left_pad, $right_pad) = $al->_padding($str);
    # Negative padding means the string overshoots the field: trim.
    substr($str, 0, -$left_pad) = '' if $left_pad < 0;
    substr($str, $right_pad)    = '' if $right_pad < 0;
    # Positive padding fills with spaces; _spaces() ignores negatives.
    return _spaces($left_pad) . $str . _spaces($right_pad);
}
# return two numbers that indicate how many blanks are needed on each side
# of a string to justify it. Negative values mean trim that many characters.
# an empty aligner returns ( 0, 0), so doesn't change anything.
sub _padding {
    my $al = shift;
    my $str = shift;
    my ( $this_l, $this_r) = $al->_measure( $str);
    # Hoisted: the original evaluated _status twice (once as the truth
    # test, once for the values); one call is sufficient and cheaper.
    my @max = $al->_status;
    return ( 0, 0) unless @max;
    return ( $max[0] - $this_l, $max[1] - $this_r);
}
# _compile_alispec() returns positioners according to specification. In
# effect, it is the interpreter for alignment specifications.
#
# Returns a ($width, $pos) pair of coderefs: $width measures a string's
# total width, $pos locates the alignment point within it.
sub _compile_alispec { # it's a dirty job...
    my $width = sub { length shift }; # this is always so for string aligners
    my $pos; # the positioner we actually compile
    local $_ = shift || ''; # alignment specification
    if ( ref() eq 'Regexp' ) {
        my $regex = $_; # lexical copy!
        $pos = sub {
            local $_ = shift;
            # align at the start of the match; non-matching strings align
            # as if the match were just past their end
            return m/$regex/ ? $-[ 0] : length; # assume match after string
        };
    } else {
        # translate the symbolic names into numeric proportions
        s/^left/0/;
        s/^center/0.5/;
        s/^right/1/;
        if ( _is_number( $_) ) {
            my $proportion = $_; # use lexical copy
            $pos = sub { int( $proportion*length shift) };
        } elsif ( $_ =~ /^(?:num|point)(?:\((.*))?/ ) {
            # "num(<sep>)" aligns on <sep>, defaulting to '.'; appending the
            # separator before index() makes separator-less values align at
            # their end (as whole numbers should).
            my $point = defined $1 ? $1 : '';
            $point =~ s/\)$//; # ignore trailing paren, if present
            length $point or $point = '.';
            $pos = sub { index( shift() . $point, $point) }
        } else {
            # unrecognized spec: fall back to left alignment
            $pos = sub { 0 };
        }
    }
    ( $width, $pos);
}
# decide if a string is a number (see perlfaq4).  ANSI color sequences
# are stripped before the final check so colorized numbers qualify too.
sub _is_number {
    my ($candidate) = @_;
    return 0 if !defined $candidate;
    return 0 unless $candidate =~ /\d/;
    my $plain_number = qr/^-?\d+\.?\d*$/;
    return 1 if $candidate =~ $plain_number;
    return Term::ANSIColor::colorstrip($candidate) =~ $plain_number;
}
package Text::Aligner::Auto;
# Combined numeric and left alignment.  Numbers are aligned among
# themselves numerically; all other strings are left-aligned.  The two
# resulting columns are interleaved flush left and filled on the right
# when necessary.

# Internal constructor, reached only through Text::Aligner->new('auto...').
sub _new {
    my ($class, $numspec) = @_;   # $numspec is currently ignored
    my %self = (
        num   => Text::Aligner->new('num'),  # aligns the numeric strings
        other => Text::Aligner->new,         # left-aligns everything else
    );
    return bless \%self, $class;
}

# Record space requirements, routing numbers and non-numbers to their
# respective sub-aligners.
sub _alloc {
    my ($aa, @items) = @_;
    $aa->{num}->_alloc(grep { _is_number($_) } @items);
    $aa->{other}->_alloc(grep { !_is_number($_) } @items);
    return $aa;
}

# Release previously recorded requirements, using the same routing.
sub _forget {
    my ($aa, @items) = @_;
    $aa->{num}->_forget(grep { _is_number($_) } @items);
    $aa->{other}->_forget(grep { !_is_number($_) } @items);
    return $aa;
}

# Justify as required
#
# Justify $str according to its type, then left-align the result within
# a field wide enough for either sub-aligner's output.
sub _justify {
    my ($aa, $str) = @_;
    # align according to type
    my $which = _is_number($str) ? 'num' : 'other';
    $str = $aa->{$which}->_justify($str);
    # Left-justify the pre-aligned string.  Seed the combining aligner
    # from each non-empty sub-aligner's field width; an empty sub-aligner
    # must not contribute.
    my $combi = Text::Aligner->new;
    $combi->_alloc($aa->{num}->_justify(''))   if $aa->{num}->_status;
    $combi->_alloc($aa->{other}->_justify('')) if $aa->{other}->_status;
    return $combi->_justify($str);
}

# for convenience
BEGIN { # import _is_number()
    *_is_number = \ &Text::Aligner::_is_number;
}
package Text::Aligner::MaxKeeper;
# Keep the maximum of a dynamic multiset of numbers.  Optimized for a
# relatively small range of values that recur often: a histogram of seen
# values plus a cached maximum that is only recomputed when the last
# occurrence of the current maximum is forgotten.

sub new {
    my ($class) = @_;
    return bless { max => undef, seen => {} }, $class;
}

# Current maximum, or undef when the keeper is empty.
sub max { $_[ 0]->{ max} }

# Add $val to the multiset, raising the cached maximum as needed.
sub remember {
    my ($mk, $val) = @_;
    _to_max($mk->{max}, $val);   # hash slot is passed as an alias
    $mk->{seen}{$val}++;
    return $mk;
}

# Remove one occurrence of $val.  When the final occurrence of the
# cached maximum disappears, rescan the histogram for the new maximum.
# Forgetting a value that was never remembered is silently ignored
# (though, per the class comment, doing so can corrupt the memory).
sub forget {
    my ($mk, $val) = @_;
    my $seen = $mk->{seen};
    if (exists $seen->{$val}) {
        if (--$seen->{$val} == 0) {
            delete $seen->{$val};
            if ($mk->{max} == $val) {
                # lost the maximum; recalculate from what remains
                undef $mk->{max};
                _to_max($mk->{max}, keys %$seen);
            }
        }
    }
    return $mk;
}

# Raise the first argument (aliased, so the caller's variable is written
# through) to the maximum of itself and the remaining arguments; undefs
# among the candidates are skipped.
sub _to_max {
    my $target = \ shift;
    for my $candidate (@_) {
        next unless defined $candidate;
        $$target = $candidate if !defined($$target) || $$target < $candidate;
    }
    return $$target;
}
########################################### main pod documentation begin ##
1; #this line is important and will help the module return a true value
__END__
=pod
=encoding UTF-8
=head1 NAME
Text::Aligner
=head1 VERSION
version 0.13
=head1 SYNOPSIS
use Text::Aligner qw( align );
# Print the words "just a test!" right-justified each on a line:
my @lines = align( 'right', qw( just a test! ) );
print "$_\n" for @lines;
=head1 DESCRIPTION
Text::Aligner exports a single function, align(), which is
used to justify strings to various alignment styles. The
alignment specification is the first argument, followed by
any number of scalars which are subject to alignment.
The operation depends on context. In list context, a list of
the justified scalars is returned. In scalar context, the
justified arguments are joined into a single string with newlines
appended. The original arguments remain unchanged. In void
context, in-place justification is attempted. In this case, all
arguments must be lvalues.
Align() also does one level of scalar dereferencing. That is,
whenever one of the arguments is a scalar reference, the scalar
pointed to is aligned instead. Other references are simply stringified.
An undefined argument is interpreted as an empty string without
complaint.
Alignment respects colorizing escape sequences a la L<Term::ANSIColor>
which means it knows that these sequences don't take up space on
the screen.
=head1 NAME
Text::Aligner - module to align text.
=head1 ALIGNMENT
The first argument of the align() function is an alignment style, a
single scalar.
It can be one of the strings "left", "right", "center", "num", "point",
or "auto", or a regular expression (qr/.../), or a coderef.
A default style of "left" is assumed for every other value, including
"" and undef.
"left", "right" and "center" have the obvious meanings. These can
also be given as numbers 0, 1, and 0.5 respectively. (Other numbers
are also possible, but probably not very useful).
"num", and its synonym "point", specify that the decimal points be
aligned (assumed on the right, unless present). Arbitrary (non-numeric)
strings are also aligned in this manner, so they end up one column left
of the (possibly assumed) decimal point, flush right with any integers.
For the occasional string like "inf", or "-" for missing values, this
may be the right place. A string-only column ends up right-aligned
(unless there are points present).
The "auto" style separates numeric strings (that are composed of
"-", ".", and digits in the usual manner) and aligns them numerically.
Other strings are left aligned with the number that sticks out
farthest to the left. This gives left alignment for string-only
columns and numeric alignment for columns of numbers. In mixed
columns, strings are reasonably placed to serve as column headings
or intermediate titles.
With "num" (and "point") it is possible to specify another character
for the decimal point in the form "num(,)". In fact, you can specify
any string after a leading "(", and the closing ")" is optional.
"point(=>)" could be used to align certain pieces of Perl code. This
option is currently not available with "auto" alignment (because
recognition of numbers is Anglo-centric).
If a regular expression is specified, the points are aligned where
the first match of the regex starts. A match is assumed immediately
after the string if it doesn't match.
A regular expression is a powerful way of alignment specification. It
can replace most others easily, except center alignment and, of course,
the double action of "auto".
=head1 POSITIONERS
For entirely self-defined forms of alignment, a coderef, also known
as a positioner, can be given instead of an alignment style. This
code will be called once or more times with the string to be aligned
as its argument. It must return two numbers, a width and a position,
that describe how to align a string with other strings.
The width should normally be the length of the string. The position
defines a point relative to the beginning of the string, which is
aligned with the positions given for other strings.
A zero position for all strings results in left alignment, positioning
to the end of the string results in right alignment, and returning
half the length gives center alignment. "num" alignment is realized
by marking the position of the decimal point.
Note that the position you return is a relative measure. Adding a
constant value to all positions results in no change in alignment.
It doesn't have to point inside the string (as in right alignment,
where it points one character past the end of the string).
The first return value of a positioner should almost always be the
length of the given string. However, it may be useful to lie about
the string length if the string contains escape sequences that occupy
no place on screen.
=head1 SUBROUTINES
=head2 align($style, $str)
See above.
=head2 new(...)
For internal use.
=head1 USAGE
use Text::Aligner qw( align );
align( $style, $str, ...);
$style must be given and must be an alignment specification.
Any number of scalars can follow. An argument that contains a
scalar reference is dereferenced before it is used. In scalar
and list context, the aligned strings are returned. In void
context, the values are aligned in place and must be lvalues.
=head1 BUGS
None known as of release, but...
=head1 AUTHOR
Anno Siegel
CPAN ID: ANNO
=head1 COPYRIGHT
Copyright (c) 2002 Anno Siegel. All rights reserved.
This program is free software; you can redistribute
it and/or modify it under the terms of the ISC license.
(This program had been licensed under the same terms as Perl itself up to
version 1.118 released on 2011, and was relicensed by permission of its
originator).
The full text of the license can be found in the
LICENSE file included with this module.
=head1 SEE ALSO
perl(1)
L<Text::Table> .
=head1 AUTHOR
Shlomi Fish <shlomif@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2002 by Anno Siegel.
This is free software, licensed under:
The MIT (X11) License
=cut
| 4ureliek/TEanalysis | Lib/Text/Aligner.pm | Perl | mit | 14,666 |
package Feeds;
use strict;
use warnings;
use XML::RSS::Parser;
use utf8;
use Aquareader::Login qw (get_current_user);
use Aquareader::News qw (insert_news_if_necessary delete_news_for_feed);
my $db = $CreateDB::dbh;
# Takes a feed's url.
# Looks up the feed with that url belonging to the currently
# logged-in user and returns its id from the feeds table, or 0 when
# no such feed exists (so callers can test the result as a boolean).
sub get_feed_id_by_url ($) {
    my ($feed_url) = @_;
    my $current_user = Login::get_current_user();
    my $sth = $db->prepare("SELECT id FROM feeds WHERE url = ? AND user_id = ?")
        or die "Can't prepare statement: $DBI::errstr";
    $sth->execute($feed_url, $current_user)
        or die "Can't execute statement: $DBI::errstr";
    my ($feed_id) = $sth->fetchrow_array();
    $sth->finish();
    return defined($feed_id) ? $feed_id : 0;
}
# Takes a feed's id.
# Returns the url of the feed with that id (undef if the id is unknown).
sub get_feed_url_by_id ($) {
    my ($feed_id) = @_;
    my $sth = $db->prepare("SELECT url FROM feeds WHERE id = ?")
        or die "Can't prepare statement: $DBI::errstr";
    $sth->execute($feed_id)
        or die "Can't execute statement: $DBI::errstr";
    my ($feed_url) = $sth->fetchrow_array();
    $sth->finish();
    return $feed_url;
}
# Takes a feed's url and a category id.
# Parses the feed, inserts it into the feeds table for the current
# user (using the feed's channel title as its name), then pulls in
# the feed's news items.
# NOTE(review): despite the original description, nothing useful is
# returned and any error inside the eval is silently discarded --
# callers should run validate_insert_feed() first.
sub insert_feed ($ $) {
my ($feed_url, $category_id) = @_;
my $user_id = Login::get_current_user();
my $rss_parser = new XML::RSS::Parser;
my $feed = $rss_parser->parse_uri($feed_url);
# The actual inserting is wrapped in eval{} in case there isn't a connection or some other error occurs
eval {
my $feed_name = $feed->query('/channel/title');
my $stins = $db->prepare("INSERT INTO feeds(user_id, name, category_id, url) VALUES (?, ?, ?, ?)")
or die "Can't prepare statement: $DBI::errstr";
$stins->execute($user_id, $feed_name->text_content, $category_id, $feed_url)
or die "Can't execute statement: $DBI::errstr";
$stins->finish();
# Inserting the news from this feed.
News::insert_news_if_necessary(get_feed_id_by_url($feed_url));
};
}
# Takes a feed url and validates it before insertion.
# Returns an error message describing the first problem found, or an
# empty string when the feed url is acceptable.
sub validate_insert_feed ($) {
    my ($feed_url) = @_;
    # Reject blank / whitespace-only input.
    if ($feed_url =~ /^\s*$/) {
        return "Please fill in the blank.";
    }
    # Checks if this feed has already been added for the current user
    # (the old comment wrongly talked about duplicate usernames).
    if (get_feed_id_by_url($feed_url)) {
        return 'This feed has already been added.';
    }
    # Make sure the url actually points at a parsable RSS feed.
    # Direct method call instead of indirect object syntax
    # ("new XML::RSS::Parser"), which is ambiguous to the parser.
    my $rss_parser = XML::RSS::Parser->new;
    my $feed = $rss_parser->parse_uri($feed_url);
    if (! defined ($feed)) {
        return "Error in parsing the feed url.";
    }
    return "";
}
# Takes a feed's id and a category's id.
# Moves the feed into the given category.
sub change_feed_category ($ $) {
    my ($feed_id, $new_category_id) = @_;
    my $sth = $db->prepare("UPDATE feeds SET category_id = ? WHERE id = ?")
        or die "Can't prepare statement: $DBI::errstr";
    $sth->execute($new_category_id, $feed_id)
        or die "Can't execute statement: $DBI::errstr";
    $sth->finish();
}
# Takes a feed's id.
# Deletes the feed's news items first, then the feed row itself.
sub delete_feed ($) {
    my ($feed_id) = @_;
    # The news rows belong to this feed, so they are removed first.
    News::delete_news_for_feed($feed_id);
    my $sth = $db->prepare ("DELETE FROM feeds WHERE id = ?")
        or die "Can't prepare statement: $DBI::errstr";
    $sth->execute($feed_id)
        or die "Can't execute statement: $DBI::errstr";
    $sth->finish();
}
# Takes a category's id.
# Returns a reference to an array of { "id", "name" } hash references,
# one per feed in that category, ordered by feed id.
sub get_feeds_for_category($) {
    my ($category_id) = @_;
    my $sth = $db->prepare ("SELECT id, name FROM feeds WHERE category_id = ? ORDER BY id ASC")
        or die "Can't prepare statement: $DBI::errstr";
    $sth->execute($category_id)
        or die "Can't execute statement: $DBI::errstr";
    my @feeds;
    while (my ($feed_id, $feed_name) = $sth->fetchrow_array()) {
        push @feeds, { "id" => $feed_id, "name" => $feed_name };
    }
    $sth->finish();
    return \@feeds;
}
# Returns a hash reference mapping feed ids to the corresponding feed
# names for the currently logged-in user.
sub get_feeds_names {
    my $user_id = Login::get_current_user();
    my $stget = $db->prepare("SELECT id, name FROM feeds WHERE user_id = ?")
        or die "Can't prepare statement: $DBI::errstr";
    # Check execute() like every other query in this module does;
    # previously a failure here was silently ignored.
    $stget->execute($user_id)
        or die "Can't execute statement: $DBI::errstr";
    my %hash_feeds;
    while (my ($id, $name) = $stget->fetchrow_array()) {
        $hash_feeds{$id} = $name;
    }
    $stget->finish();
    return \%hash_feeds;
}
# Returns the id of the first feed (ordered by category) belonging to
# the currently logged-in user, or undef when the user has no feeds.
sub get_first_feed_id {
    my $user_id = Login::get_current_user();
    my $sth = $db->prepare("SELECT id FROM feeds WHERE user_id = ? ORDER BY category_id ASC LIMIT 1 ")
        or die "Can't prepare statement: $DBI::errstr";
    $sth->execute($user_id)
        or die "Can't execute statement: $DBI::errstr";
    my ($first_id) = $sth->fetchrow_array();
    $sth->finish();
    return $first_id;
}
1; | milanov/Aquareader | Aquareader/Feeds.pm | Perl | mit | 5,036 |
#!/usr/bin/perl
# Scrapes the Moscow metro station-codes table and prints the station
# names as a PHP-style associative array literal on STDOUT.
use strict;
use warnings;
use Data::Dumper;
use LWP::Simple;
use HTML::TreeBuilder;
use Encode;   # Encode::_utf8_on() below needs Encode loaded explicitly
binmode STDOUT, ':utf8';
my $c = get('http://www.metro.ru/stations/codes/');
my $t = HTML::TreeBuilder->new_from_content( $c );
my @table = $t->look_down(class => 'data station-codes');
my $table = $table[0];
my @tds = $table->look_down(_tag => 'td');
my $metro = { };
my $i = 0;
# Each table row has three cells; only the first cell of every row
# holds the station name we want to keep.
foreach my $td (@tds) {
    my $text = $td->as_text;
    Encode::_utf8_on( $text );
    if ($i == 0) {
        $metro->{ $text } = 1;
    }
    $i++;
    $i = 0 if ($i == 3);
}
print "array(";
# "keys $metro" (autodereference of a hashref) was experimental and
# became a fatal error in Perl 5.24 -- dereference explicitly.
foreach my $m (sort { $a cmp $b } keys %$metro) {
    print "\n\t'$m' => '$m',";
}
print "\n)";
| oachkatzlschwoaf/EVENTS | bin/util/get_metro.pl | Perl | mit | 682 |
#!/usr/bin/perl -w
# vim:set tw=80 sts=2 sw=2 et:
use strict;
use warnings;
use Irssi;
our $VERSION = '0.1.0';
our %IRSSI = (
authors => 'Donald King',
contact => 'chronos@chronos-tachyon.net',
name => 'irssi-libnotify',
description => 'Uses notify-send(1) to alert you when someone is talking to you.',
url => 'https://github.com/chronos-tachyon/irssi-libnotify',
license => 'GNU General Public License',
changed => '2014-04-29 15:46-07:00',
);
# Raise a desktop notification via notify-send(1), titled
# "<target>: <message>".
sub notify ($$) {
    my ($target, $msg) = @_;
    # List-form system() bypasses the shell, so $target/$msg need no
    # quoting or escaping.
    system(
        'notify-send',
        '--icon=user-available',
        '--category=im.received',
        "${target}: ${msg}",
    );
}
# Handler for Irssi's 'message private' signal: raise a notification
# showing the sender's nick and the message text.
sub priv_msg ($$$$$) {
my ($server, $msg, $nick, $address, $target) = @_;
notify($nick, $msg);
}
# Handler for Irssi's 'print text' signal: raise a notification only
# when the destination carries the hilight flag, using the
# color-stripped text.
sub hilight ($$$) {
my ($dest, $text, $stripped) = @_;
if ($dest->{level} & MSGLEVEL_HILIGHT) {
notify($dest->{target}, $stripped);
}
}
# Register our handlers to run after Irssi's built-in ones
# (signal_add_last).
Irssi::signal_add_last('message private', \&priv_msg);
Irssi::signal_add_last('print text', \&hilight);
| chronos-tachyon/irssi-libnotify | libnotify.pl | Perl | mit | 1,033 |
package Diversion::Lookup;
use v5.18;
use Moo;
with "Diversion::Service";
has what => (
is => "ro",
required => 1,
);
# Given an arrayref of ids, return a hashref mapping each id to its
# value in the lookup_<what> table.  Ids not found are inserted, and
# the loop repeats until every requested id resolves.
# NOTE(review): the INSERT pushes the *ids* into the `$what` value
# column (copy-paste from bulk_lookup?).  Since the inserted rows get
# fresh auto-increment ids rather than the missing ids, a genuinely
# unknown id may never resolve and this loop could spin forever --
# verify the intended semantics before relying on this method.
sub bulk_lookup_by_id {
my ($self, $vals) = @_;
my @missing = @$vals;
my %lookup;
while (@missing) {
my $question_marks = join(',' , map { '?' } @missing);
my $what = $self->what;
my $table = "lookup_${what}";
my $sql_lookup = qq{ SELECT `id`,`${what}` FROM `${table}` WHERE `id` IN ($question_marks) };
$self->db_open(
"lookup",
sub {
my ($dbh) = @_;
my $rows = $dbh->selectall_arrayref($sql_lookup, {}, @missing);
for (@$rows) {
# Map id => value.
$lookup{ $_->[0] } = $_->[1];
}
}
);
my @still_missing = grep { !defined($lookup{$_}) } @missing;
if (@still_missing) {
$self->db_open(
"lookup",
sub {
my ($dbh) = @_;
my $placeholder = join(', ' , map { '(?)' } @still_missing);
# ON DUPLICATE KEY UPDATE id=id makes the insert a no-op for rows
# that appeared concurrently.
$dbh->do(qq{ INSERT INTO $table (`$what`) VALUES ${placeholder} ON DUPLICATE KEY UPDATE id=id }, undef, @still_missing);
}
);
}
@missing = @still_missing;
}
return \%lookup;
}
# Given an arrayref of values, return a hashref mapping each value to
# its id in the lookup_<what> table.  Values not present yet are
# inserted, then the loop repeats so the freshly assigned ids are
# picked up on the next pass.
sub bulk_lookup {
my ($self, $vals) = @_;
my @missing = @$vals;
my %lookup;
while (@missing) {
my $question_marks = join(',' , map { '?' } @missing);
my $what = $self->what;
my $table = "lookup_${what}";
my $sql_lookup = qq{ SELECT `id`,`${what}` FROM `${table}` WHERE `${what}` IN ($question_marks) };
$self->db_open(
"lookup",
sub {
my ($dbh) = @_;
my $rows = $dbh->selectall_arrayref($sql_lookup, {}, @missing);
for (@$rows) {
# Map value => id.
$lookup{ $_->[1] } = $_->[0];
}
}
);
my @still_missing = grep { !defined($lookup{$_}) } @missing;
if (@still_missing) {
$self->db_open(
"lookup",
sub {
my ($dbh) = @_;
my $placeholder = join(', ' , map { '(?)' } @still_missing);
# ON DUPLICATE KEY UPDATE id=id makes the insert a no-op for rows
# inserted concurrently by another process.
$dbh->do(qq{ INSERT INTO $table (`$what`) VALUES ${placeholder} ON DUPLICATE KEY UPDATE id=id }, undef, @still_missing);
}
);
}
@missing = @still_missing;
}
return \%lookup;
}
# Return the id for a single value in the lookup_<what> table,
# inserting the value first when it is not present yet.
# Returns undef when the value still cannot be resolved after the
# insert attempt.
sub lookup {
    my ($self, $val) = @_;
    my $what = $self->what;
    my $table = "lookup_${what}";
    my $dbh = $self->db_open("lookup");
    my $sql_lookup = qq{ SELECT `id` FROM `${table}` WHERE `${what}` = ? LIMIT 1 };
    my $ret = $dbh->selectcol_arrayref($sql_lookup, {}, $val);
    # selectcol_arrayref returns undef on error.  Report the error
    # *message* ($DBI::errstr), not the numeric code ($DBI::err).
    defined($ret) or die $DBI::errstr;
    return $ret->[0] if defined($ret->[0]);
    # Not found: insert, then look it up again.
    $dbh->do(qq{ INSERT INTO $table (`$what`) VALUES (?) }, {}, $val);
    $ret = $dbh->selectcol_arrayref($sql_lookup, {}, $val);
    defined($ret) or die $DBI::errstr;
    return $ret->[0] if defined($ret->[0]);
    # Explicit "not found" instead of falling off the end of the sub.
    return;
}
no Moo;
1;
| gugod/Diversion | lib/Diversion/Lookup.pm | Perl | cc0-1.0 | 3,121 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::DBSQL::OntologyTermAdaptor
=head1 SYNOPSIS
my $goa =
$registry->get_adaptor( 'Multi', 'Ontology', 'OntologyTerm' );
my $term = $goa->fetch_by_accession('GO:0010885');
my @children = @{ $goa->fetch_all_by_parent_term($term) };
my @descendants = @{ $goa->fetch_all_by_ancestor_term($term) };
my @parents = @{ $goa->fetch_all_by_child_term($term) };
my @ancestors = @{ $goa->fetch_all_by_descendant_term($term) };
my %ancestor_chart = %{ $goa->_fetch_ancestor_chart($term) };
=head1 DESCRIPTION
An abstract adaptor class for fetching ontology
terms, creates Bio::EnsEMBL::OntologyTerm objects.
=head1 METHODS
=cut
package Bio::EnsEMBL::DBSQL::OntologyTermAdaptor;
use strict;
use warnings;
use DBI qw( :sql_types );
use Bio::EnsEMBL::Utils::Exception qw( throw );
use Bio::EnsEMBL::Utils::Scalar qw( assert_ref );
use Bio::EnsEMBL::OntologyTerm;
use base qw( Bio::EnsEMBL::DBSQL::BaseAdaptor );
=head2 fetch_all_by_name
Arg [1] : String, name of term, or SQL pattern
Arg [2] : (optional) String, name of ontology
Arg [3] : (optional) Boolean, search through obsolete terms as well
Description : Fetches ontology term(s) given a name, a synonym, or a
SQL pattern like "%splice_site%"
Example :
my ($term) =
@{ $ot_adaptor->fetch_all_by_name( 'DNA_binding_site', 'SO' ) };
# Will find terms in both SO and GO:
my @terms = @{ $ot_adaptor->fetch_all_by_name('%splice_site%') };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Fetch all non-obsolete terms (unless $include_obsolete is true)
# whose name *or* synonym matches $pattern (SQL LIKE semantics),
# optionally restricted to one ontology.  Returns a listref of
# Bio::EnsEMBL::OntologyTerm objects.
sub fetch_all_by_name {
my ( $this, $pattern, $ontology, $include_obsolete ) = @_;
my $statement = q(
SELECT DISTINCT
term.term_id,
term.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id)
LEFT JOIN synonym USING (term_id)
WHERE ( term.name LIKE ? OR synonym.name LIKE ? ));
if ( defined($ontology) ) {
$statement .= " AND ontology.name = ?";
}
$statement .= " AND term.is_obsolete = 0" unless $include_obsolete;
my $sth = $this->prepare($statement);
# The same pattern is bound twice: once for term.name, once for
# synonym.name.
$sth->bind_param( 1, $pattern, SQL_VARCHAR );
$sth->bind_param( 2, $pattern, SQL_VARCHAR );
if ( defined($ontology) ) {
$sth->bind_param( 3, $ontology, SQL_VARCHAR );
}
$sth->execute();
# Note: $ontology (the input parameter) is deliberately reused below
# as the bind target for the ontology.name result column.
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $namespace, $ontology_version );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version ) );
my @terms;
while ( $sth->fetch() ) {
# subsets may be NULL in the database.
$subsets ||= '';
push @terms,
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
);
}
return \@terms;
} ## end sub fetch_all_by_name
=head2 fetch_by_accession
Arg [1] : String
Arg [2] : (optional) Boolean, search through obsolete terms as well
Description : Fetches an ontology term given an accession.
Example :
my $term = $ot_adaptor->fetch_by_accession('GO:0030326');
Return type : Bio::EnsEMBL::OntologyTerm
=cut
# Fetch a single term by its primary accession.  Obsolete terms are
# excluded unless $include_obsolete is true.  When no term has this
# primary accession, falls back to fetch_by_alt_id() in case it is a
# secondary (alt_id) accession.
sub fetch_by_accession {
my ( $this, $accession, $include_obsolete ) = @_;
my $statement = q(
SELECT term.term_id,
term.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id)
WHERE term.accession = ?);
$statement .= " AND term.is_obsolete = 0" unless $include_obsolete;
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $accession, SQL_VARCHAR );
$sth->execute();
my ( $dbid, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version ) );
# Only a single row is expected (accession is the primary key lookup).
$sth->fetch();
$sth->finish();
my $term;
if (!$dbid) {
# Not found as a primary accession; try the alt_id table.
$term = $this->fetch_by_alt_id($accession);
} else {
# subsets may be NULL in the database.
$subsets ||= '';
$term =
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete'=> $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
);
}
return $term;
} ## end sub fetch_by_accession
=head2 fetch_by_alt_id
Arg [1] : String
Description : Fetches an ontology term given an alt_id.
Example :
my $term = $ot_adaptor->fetch_by_alt_id('GO:0019952');
Return type : Bio::EnsEMBL::OntologyTerm
=cut
# Fetch a term by one of its secondary (alt_id) accessions.  Returns
# the matching Bio::EnsEMBL::OntologyTerm, or undef (empty list in
# list context) when the accession is unknown.
sub fetch_by_alt_id {
my ( $this, $accession ) = @_;
my $statement = q(
SELECT term.term_id,
alt_id.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id)
JOIN alt_id USING (term_id)
WHERE alt_id.accession = ?);
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $accession, SQL_VARCHAR );
$sth->execute();
# $accession is reused below as the bind target for alt_id.accession.
my ( $dbid, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version ) );
$sth->fetch();
# early exit in the event of bad $accession
unless ($dbid) {return;}
# subsets may be NULL in the database.
$subsets ||= '';
my $term =
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
# Pass is_root/is_obsolete like every other fetch method in this
# adaptor; they were selected and bound above but previously
# dropped from the constructor call.
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
);
$sth->finish();
return $term;
} ## end sub fetch_by_alt_id
=head2 fetch_all_by_parent_term
Arg [1] : Bio::EnsEMBL::OntologyTerm
The term whose children terms should be fetched.
Description : Given a parent ontology term, returns a list of
its immediate children terms.
Example :
my @children =
@{ $ot_adaptor->fetch_all_by_parent_term($term) };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Return the immediate child terms of $term as a listref, caching the
# result (grouped by relation type) on the term object itself.
# NOTE(review): the cache key is only the boolean
# 'child_terms_fetched', so a second call with a different $ontology
# or $include_obsolete argument returns the first call's cached
# children -- verify this is acceptable to callers.
sub fetch_all_by_parent_term {
my ( $this, $term, $ontology, $include_obsolete ) = @_;
assert_ref( $term, 'Bio::EnsEMBL::OntologyTerm' );
my @terms;
if ( !$term->{'child_terms_fetched'} ) {
my $statement = q(
SELECT child_term.term_id,
child_term.accession,
child_term.name,
child_term.definition,
child_term.subsets,
child_term.is_root,
child_term.is_obsolete,
rt.name,
ontology.data_version
FROM term child_term
JOIN relation ON (relation.child_term_id = child_term.term_id)
JOIN relation_type rt USING (relation_type_id)
JOIN ontology ON (ontology.ontology_id = relation.ontology_id)
WHERE relation.parent_term_id = ?
AND ontology.name = ?);
$statement .= " AND child_term.is_obsolete = 0" unless $include_obsolete;
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $term->dbID(), SQL_INTEGER );
# Default to the parent term's own ontology when none was given.
if (!defined $ontology) {
$ontology = $term->{'ontology'};
}
$sth->bind_param( 2, $ontology, SQL_VARCHAR );
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $relation, $ontology_version );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $relation, $ontology_version ) );
while ( $sth->fetch() ) {
# subsets may be NULL in the database.
$subsets ||= '';
my $child_term =
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $term->{'ontology'},
'-ontology_version' => $ontology_version,
'-namespace' => $term->{'namespace'},
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
);
push( @terms, $child_term );
# Cache each child on the parent term, grouped by relation type.
push( @{ $term->{'children'}{$relation} }, $child_term );
}
$term->{'child_terms_fetched'} = 1;
} else {
# Serve the flattened child list from the per-relation cache.
foreach my $relation ( values( %{ $term->{'children'} } ) ) {
push( @terms, @{$relation} );
}
}
return \@terms;
} ## end sub fetch_all_by_parent_term
=head2 fetch_all_by_ancestor_term
Arg [1] : Bio::EnsEMBL::OntologyTerm
The term whose descendant terms should be fetched.
Description : Given a parent ontology term, returns a list of
all its descendant terms, down to and including
any leaf terms. Relations of the type 'is_a' and
'part_of' are followed.
Example :
my @descendants =
@{ $ot_adaptor->fetch_all_by_ancestor_term($term) };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Return all descendants of $term (any depth, excluding the term
# itself via "distance > 0") as a listref, using the precomputed
# closure table, ordered by distance then accession.  No caching is
# done here, unlike fetch_all_by_parent_term.
sub fetch_all_by_ancestor_term {
my ( $this, $term, $ontology ) = @_;
assert_ref( $term, 'Bio::EnsEMBL::OntologyTerm' );
my $statement = q(
SELECT DISTINCT
child_term.term_id,
child_term.accession,
child_term.name,
child_term.definition,
child_term.subsets,
child_term.is_root,
child_term.is_obsolete,
ontology.data_version,
closure.distance
FROM term child_term
JOIN closure ON (closure.child_term_id = child_term.term_id)
JOIN ontology ON (closure.ontology_id = ontology.ontology_id)
WHERE closure.parent_term_id = ?
AND closure.distance > 0
AND closure.ontology_id = child_term.ontology_id
AND ontology.name = ?
ORDER BY closure.distance, child_term.accession);
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $term->dbID(), SQL_INTEGER );
# Default to the ancestor term's own ontology when none was given.
if (!defined $ontology) {
$ontology = $term->{'ontology'};
}
$sth->bind_param( 2, $ontology, SQL_VARCHAR );
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology_version, $closure_distance );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology_version, $closure_distance ) );
my @terms;
while ( $sth->fetch() ) {
# subsets may be NULL in the database.
$subsets ||= '';
push( @terms,
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $term->{'ontology'},
'-ontology_version' => $ontology_version,
'-namespace' => $term->{'namespace'},
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
) );
}
return \@terms;
} ## end sub fetch_all_by_ancestor_term
=head2 fetch_all_by_child_term
Arg [1] : Bio::EnsEMBL::OntologyTerm
The term whose parent terms should be fetched.
Description : Given a child ontology term, returns a list of
its immediate parent terms.
Example :
my @parents = @{ $ot_adaptor->fetch_all_by_child_term($term) };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Return the immediate parent terms of $term as a listref, caching
# the result (grouped by relation type) on the term object itself.
# NOTE(review): as with fetch_all_by_parent_term, the cache ignores
# the $ontology argument on subsequent calls -- verify this is
# acceptable to callers.
sub fetch_all_by_child_term {
my ( $this, $term, $ontology ) = @_;
assert_ref( $term, 'Bio::EnsEMBL::OntologyTerm' );
my @terms;
if ( !$term->{'parent_terms_fetched'} ) {
my $statement = q(
SELECT parent_term.term_id,
parent_term.accession,
parent_term.name,
parent_term.definition,
parent_term.subsets,
parent_term.is_root,
parent_term.is_obsolete,
rt.name,
ontology.data_version
FROM term parent_term
JOIN relation ON (relation.parent_term_id = parent_term.term_id)
JOIN relation_type rt USING (relation_type_id)
JOIN ontology ON (ontology.ontology_id = relation.ontology_id)
WHERE relation.child_term_id = ?
AND ontology.name = ?);
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $term->dbID(), SQL_INTEGER );
# Default to the child term's own ontology when none was given.
if (!defined $ontology) {
$ontology = $term->{'ontology'};
}
$sth->bind_param( 2, $ontology, SQL_VARCHAR );
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $relation, $ontology_version );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $relation, $ontology_version ) );
while ( $sth->fetch() ) {
# subsets may be NULL in the database.
$subsets ||= '';
my $parent_term =
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $term->{'ontology'},
'-ontology_version' => $ontology_version,
'-namespace' => $term->{'namespace'},
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
);
push( @terms, $parent_term );
# Cache each parent on the child term, grouped by relation type.
push( @{ $term->{'parents'}{$relation} }, $parent_term );
}
$term->{'parent_terms_fetched'} = 1;
} else {
# Serve the flattened parent list from the per-relation cache.
foreach my $relation ( values( %{ $term->{'parents'} } ) ) {
push( @terms, @{$relation} );
}
}
return \@terms;
} ## end sub fetch_all_by_child_term
=head2 fetch_all_by_descendant_term
Arg [1] : Bio::EnsEMBL::OntologyTerm
The term whose ancestor terms should be fetched.
Arg [2] : (optional) String
The subset within the ontolgy to which the query
should be restricted. The subset may be specified as
a SQL pattern, e.g., "%goslim%" (but "goslim%" might
not do what you expect), or as a specific subset name,
e.g., "goslim_generic".
Arg [3] : (optional) Boolean
If true (non-zero), only return the closest
term(s). If this argument is true, and the
previous argument is left undefined, this method
will return the parent(s) of the given term.
Arg [4] : (optional) Boolean
If true we will allow the retrieval of terms whose distance
to the current term is 0. If false then we will only return
those which are above the current term in the ontology
Description : Given a child ontology term, returns a list of
all its ancestor terms, up to and including any
root term. Relations of the type 'is_a' and
'part_of' are followed. Optionally, only terms in
a given subset of the ontology may be returned,
and additionally one may ask to only get the
closest term(s) to the given child term.
Example :
my @ancestors =
@{ $ot_adaptor->fetch_all_by_descendant_term($term) };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
sub fetch_all_by_descendant_term {
# Given a child ontology term, fetch all of its ancestor terms by walking
# the pre-computed transitive 'closure' table upwards, optionally
# restricted to a subset of the ontology and/or to the closest ancestors.
#
# Args:
#   $term                - Bio::EnsEMBL::OntologyTerm whose ancestors to fetch
#   $subset              - optional subset name, or SQL LIKE pattern when it
#                          contains a '%' wildcard
#   $closest_only        - optional boolean; keep only terms at the minimum
#                          closure distance
#   $allow_zero_distance - optional boolean; also admit rows at distance 0
#   $ontology            - optional ontology name (defaults to the term's own)
#
# Returns: listref of Bio::EnsEMBL::OntologyTerm
my ( $this, $term, $subset, $closest_only, $allow_zero_distance, $ontology ) = @_;
assert_ref( $term, 'Bio::EnsEMBL::OntologyTerm' );
$closest_only ||= 0;
my $statement = q(
SELECT DISTINCT
parent_term.term_id,
parent_term.accession,
parent_term.name,
parent_term.definition,
parent_term.subsets,
parent_term.is_root,
parent_term.is_obsolete,
closure.distance,
ontology.data_version
FROM term parent_term
JOIN closure ON (closure.parent_term_id = parent_term.term_id)
JOIN ontology ON (closure.ontology_id = ontology.ontology_id)
WHERE closure.child_term_id = ?
AND closure.distance > ?
AND closure.ontology_id = parent_term.ontology_id
AND ontology.name = ?);
# Restrict to a subset either by pattern (LIKE) or by exact membership in
# the comma-separated 'subsets' column.
if ( defined($subset) ) {
if ( index( $subset, '%' ) != -1 ) {
$statement .= q(
AND parent_term.subsets LIKE ?);
} else {
$statement .= q(
AND FIND_IN_SET(?, parent_term.subsets) > 0);
}
}
# Ordering by distance guarantees the first fetched row carries the
# minimum distance; the closest-only logic below relies on this.
$statement .= q(
ORDER BY closure.distance, parent_term.accession);
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $term->dbID(), SQL_INTEGER );
# 'distance > -1' admits distance 0 (the term itself); 'distance > 0'
# excludes it.
my $query_distance = ($allow_zero_distance) ? -1 : 0;
$sth->bind_param( 2, $query_distance, SQL_INTEGER );
if (!defined $ontology) {
$ontology = $term->{'ontology'};
}
$sth->bind_param( 3, $ontology, SQL_VARCHAR );
if ( defined($subset) ) {
$sth->bind_param( 4, $subset, SQL_VARCHAR );
}
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $distance, $ontology_version );
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $distance, $ontology_version ) );
my @terms;
my $min_distance;
while ( $sth->fetch() ) {
$subsets ||= '';
# First row has the smallest distance (see ORDER BY above).
$min_distance ||= $distance;
if ( !$closest_only || $distance == $min_distance ) {
push( @terms,
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $term->{'ontology'},
'-ontology_version' => $ontology_version,
'-namespace' => $term->{'namespace'},
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
) );
} else {
# Past the closest tier: stop fetching and release the handle early.
$sth->finish();
last;
}
}
return \@terms;
} ## end sub fetch_all_by_descendant_term
sub _fetch_synonyms_by_dbID {
    # Fetch every synonym name recorded for the term with the given internal
    # database ID.  Returns a reference to a (possibly empty) list of strings.
    my ( $this, $dbID ) = @_;

    my $statement = q(
SELECT synonym.name
FROM synonym
WHERE synonym.term_id = ?);

    my $sth = $this->prepare($statement);
    $sth->bind_param( 1, $dbID, SQL_INTEGER );
    $sth->execute();

    my @names;
    while ( my ($synonym_name) = $sth->fetchrow_array() ) {
        push @names, $synonym_name;
    }

    return \@names;
}
=head2 _fetch_ancestor_chart
Arg [1] : Bio::EnsEMBL::OntologyTerm
The term whose ancestor terms should be fetched.
Description : Given a child ontology term, returns a hash
structure containing its ancestor terms, up to and
including any root term. Relations of the type
'is_a' and 'part_of' are included.
Example :
my %chart = %{ $ot_adaptor->_fetch_ancestor_chart($term) };
Return type : A reference to a hash structure like this:
{
'GO:XXXXXXX' => {
'term' => # ref to Bio::EnsEMBL::OntologyTerm object
'is_a' => [...], # listref of Bio::EnsEMBL::OntologyTerm
'part_of' => [...], # listref of Bio::EnsEMBL::OntologyTerm
},
'GO:YYYYYYY' => {
# Similarly for all ancestors,
# and including the query term itself.
}
}
=cut
sub _fetch_ancestor_chart {
# Build a per-accession chart of this term's ancestors: for each ancestor
# (and the term itself) record the term object plus, per relation type
# (e.g. 'is_a', 'part_of'), the list of directly related parent terms.
# See the POD above for the exact structure of the returned hashref.
#
# Args:
#   $term     - Bio::EnsEMBL::OntologyTerm to chart upwards from
#   $ontology - optional ontology name (defaults to the term's own)
my ( $this, $term, $ontology ) = @_;
assert_ref( $term, 'Bio::EnsEMBL::OntologyTerm' );
# For every closure row, pick up the direct relation edge
# (subparent -> parent) that lies on a path from the query term upward.
my $statement = q(
SELECT subparent_term.term_id,
parent_term.term_id,
relation_type.name
FROM closure
JOIN relation
ON (relation.parent_term_id = closure.parent_term_id
AND relation.child_term_id = closure.subparent_term_id
AND closure.ontology_id = relation.ontology_id)
JOIN relation_type USING (relation_type_id)
JOIN term subparent_term
ON (subparent_term.term_id = closure.subparent_term_id)
JOIN term parent_term ON (parent_term.term_id = closure.parent_term_id)
JOIN ontology ON (ontology.ontology_id = closure.ontology_id)
WHERE closure.child_term_id = ?
AND ontology.name = ?
ORDER BY closure.distance);
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $term->dbID(), SQL_INTEGER );
if (!defined $ontology) {
$ontology = $term->{'ontology'};
}
$sth->bind_param( 2, $ontology, SQL_VARCHAR );
$sth->execute();
my ( $subparent_id, $parent_id, $relation );
$sth->bind_columns( \( $subparent_id, $parent_id, $relation ) );
# First pass: collect the edge lists keyed by internal dbID.
my %id_chart;
my %acc_chart;
while ( $sth->fetch() ) {
# Ensure even leaf-of-the-chart parents get an entry of their own.
if ( !exists( $id_chart{$parent_id} ) ) {
$id_chart{$parent_id} = {};
}
push( @{ $id_chart{$subparent_id}{$relation} }, $parent_id );
}
# Materialize all involved terms in one query.
my @terms = @{ $this->fetch_all_by_dbID_list( [ keys(%id_chart) ] ) };
foreach my $term (@terms) {
$id_chart{ $term->dbID() }{'term'} = $term;
$acc_chart{ $term->accession() }{'term'} = $term;
}
# Second pass: translate the dbID-keyed edges into the accession-keyed
# chart, replacing parent IDs with the term objects fetched above.
foreach my $term (@terms) {
my $accession = $term->accession();
my $dbID = $term->dbID();
foreach my $relation ( keys( %{ $id_chart{$dbID} } ) ) {
if ( $relation eq 'term' ) { next }
foreach my $id ( @{ $id_chart{$dbID}{$relation} } ) {
push( @{ $acc_chart{$accession}{$relation} },
$id_chart{$id}{'term'} );
}
}
}
return \%acc_chart;
} ## end sub _fetch_ancestor_chart
#-----------------------------------------------------------------------
# Useful public methods that implement functionality not properly
# provided by the parent class Bio::EnsEMBL::DBSQL::BaseAdaptor.
sub fetch_by_dbID {
# Fetch one ontology term by its internal database ID.
#
# Args:
#   $dbid             - internal term_id
#   $include_obsolete - optional boolean; when false (default) obsolete
#                       terms are filtered out
#
# Returns: a Bio::EnsEMBL::OntologyTerm, or nothing (undef / empty list)
# when no matching row exists.
my ( $this, $dbid, $include_obsolete ) = @_;
my $statement = q(
SELECT term.term_id,
term.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id)
WHERE term.term_id = ?);
$statement .= " AND term.is_obsolete = 0" unless $include_obsolete;
my $sth = $this->prepare($statement);
$sth->bind_param( 1, $dbid, SQL_INTEGER );
$sth->execute();
my ( $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology,
$namespace, $ontology_version );
# Note: $dbid is re-bound as an output column (it simply keeps its value).
$sth->bind_columns(
\( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version
) );
$sth->fetch();
# No matching row (accession stayed undef): return empty-handed.
unless ($accession) {return;}
$subsets ||= '';
my $term =
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
);
# Only one row is expected; release the handle explicitly.
$sth->finish();
return $term;
} ## end sub fetch_by_dbID
sub fetch_all_by_dbID_list {
# Fetch ontology terms for a list of internal database IDs.
#
# Args:
#   $dbids            - listref of internal term_id values
#   $include_obsolete - optional boolean; when false (default) obsolete
#                       terms are filtered out
#
# Returns: listref of Bio::EnsEMBL::OntologyTerm (empty for empty input).
my ( $this, $dbids, $include_obsolete ) = @_;
if ( !@{$dbids} ) { return [] }
my $stmt = q(
SELECT term.term_id,
term.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id)
WHERE term.term_id IN (%s));
# IDs cannot be bound as placeholders in an IN list of variable length,
# so quote each one through the driver before interpolating.
my $statement = sprintf(
$stmt,
join(
',',
map {
$this->dbc()->db_handle()->quote( $_, SQL_INTEGER )
} @{$dbids} ) );
$statement .= " AND term.is_obsolete = 0" unless $include_obsolete;
my $sth = $this->prepare($statement);
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology,
$namespace, $ontology_version );
$sth->bind_columns( \( $dbid, $accession, $name, $definition,
$subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version ) );
my @terms;
while ( $sth->fetch() ) {
$subsets ||= '';
push( @terms,
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
) );
}
return \@terms;
} ## end sub fetch_all_by_dbID_list
=head2 fetch_all_alt_ids
Arg [1] : String
Description : Fetches all alt_ids for a given ontology term
Example :
my ($accessions) =
@{ $ot_adaptor->fetch_all_alt_ids( 'GO:0000003' ) };
Return type : listref of accessions
=cut
sub fetch_all_alt_ids {
    # Look up every alternate ID (alt_id accession) registered for the
    # ontology term identified by $accession.
    # Returns a reference to a (possibly empty) list of accession strings.
    my ($this, $accession) = @_;

    my $statement = q(
SELECT alt_id.accession
FROM term
JOIN alt_id USING (term_id)
WHERE term.accession = ?);

    my $sth = $this->prepare($statement);
    $sth->bind_param(1, $accession, SQL_VARCHAR);
    $sth->execute();

    my @alt_accessions;
    while ( my ($alt_accession) = $sth->fetchrow_array() ) {
        push @alt_accessions, $alt_accession;
    }

    return \@alt_accessions;
}
=head2 fetch_all_roots
Arg [1] : (optional) String, name of ontology
Description : Fetches all roots for all ontologies
Optionally, can be restricted to a given ontology
Example :
my ($terms) =
@{ $ot_adaptor->fetch_all_roots( 'SO' ) };
# Will find terms in EFO, SO and GO:
my @terms = @{ $ot_adaptor->fetch_all_roots() };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
sub fetch_all_roots {
# Fetch all root terms (is_root = 1), optionally restricted to one
# ontology by name.
#
# Args:
#   $ontology_name - optional ontology name (e.g. 'GO', 'SO')
#
# Returns: listref of Bio::EnsEMBL::OntologyTerm
my ($this, $ontology_name) = @_;
my $statement = q(
SELECT term.term_id,
term.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id)
WHERE is_root = 1);
# The ontology filter is appended (and its placeholder bound) only when
# a name was supplied.
if (defined $ontology_name) {
$statement .= " AND ontology.name = ?";
}
my $sth = $this->prepare($statement);
if (defined $ontology_name) {
$sth->bind_param( 1, $ontology_name, SQL_VARCHAR );
}
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology,
$namespace, $ontology_version );
$sth->bind_columns( \( $dbid, $accession, $name, $definition,
$subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version ) );
my @terms;
while ( $sth->fetch() ) {
$subsets ||= '';
push( @terms,
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
));
}
return \@terms;
}
sub fetch_all {
# Fetch every term across all ontologies.
#
# Args:
#   $include_obsolete - optional boolean; when false (default) obsolete
#                       terms are filtered out
#
# Returns: listref of Bio::EnsEMBL::OntologyTerm
my ($this, $include_obsolete) = @_;
my $statement = q(
SELECT term.term_id,
term.accession,
term.name,
term.definition,
term.subsets,
term.is_root,
term.is_obsolete,
ontology.name,
ontology.namespace,
ontology.data_version
FROM ontology
JOIN term USING (ontology_id));
$statement .= " WHERE term.is_obsolete = 0" unless $include_obsolete;
my $sth = $this->prepare($statement);
$sth->execute();
my ( $dbid, $accession, $name, $definition, $subsets, $is_root, $is_obsolete, $ontology,
$namespace, $ontology_version );
$sth->bind_columns( \( $dbid, $accession, $name, $definition,
$subsets, $is_root, $is_obsolete, $ontology, $namespace, $ontology_version ) );
my @terms;
while ( $sth->fetch() ) {
$subsets ||= '';
push( @terms,
Bio::EnsEMBL::OntologyTerm->new(
'-dbid' => $dbid,
'-adaptor' => $this,
'-accession' => $accession,
'-is_root' => $is_root,
'-is_obsolete' => $is_obsolete,
'-ontology' => $ontology,
'-ontology_version' => $ontology_version,
'-namespace' => $namespace,
'-subsets' => [ split( /,/, $subsets ) ],
'-name' => $name,
'-definition' => $definition,
'-synonyms' => $this->_fetch_synonyms_by_dbID($dbid)
) );
}
return \@terms;
} ## end sub fetch_all
1;
| james-monkeyshines/ensembl | modules/Bio/EnsEMBL/DBSQL/OntologyTermAdaptor.pm | Perl | apache-2.0 | 33,978 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2015,2016 by Delphix. All rights reserved.
#
# Program Name : dx_upgrade_db.pl
# Description : Upgrade a DB
# Author : Marcin Przepiorowski
# Created: 13 Apr 2015 (v2.0.0)
#
#
use strict;
use warnings;
use JSON;
use Getopt::Long qw(:config no_ignore_case no_auto_abbrev); #avoids conflicts with ex host and help
use File::Basename;
use Pod::Usage;
use FindBin;
use Data::Dumper;
my $abspath = $FindBin::Bin;
use lib '../lib';
use Databases;
use Engine;
use Group_obj;
use Toolkit_helpers;
my $version = $Toolkit_helpers::version;
# Parse command-line options.  Each option is declared inline with \(my $x)
# so the variables come into existence scoped to the rest of the script.
GetOptions(
'help|?' => \(my $help),
'd|engine=s' => \(my $dx_host),
'envinst=s' => \(my $envinst),
'name=s' => \(my $dbname),
'type=s' => \(my $type),
'group=s' => \(my $group),
'host=s' => \(my $host),
'envname=s' => \(my $envname),
'reponame=s' => \(my $repositoryname),
'debug:n' => \(my $debug),
'dever=s' => \(my $dever),
'all' => (\my $all),
'version' => \(my $print_version),
'configfile|c=s' => \(my $config_file)
) or pod2usage(-verbose => 1, -input=>\*DATA);
# POD lives after __DATA__, hence -input => \*DATA.
pod2usage(-verbose => 2, -input=>\*DATA) && exit if $help;
die "$version\n" if $print_version;
# Load the engine definitions from the dxtools.conf configuration file.
my $engine_obj = new Engine ($dever, $debug);
$engine_obj->load_config($config_file);
# -all and -d select engines in mutually exclusive ways.
if (defined($all) && defined($dx_host)) {
print "Option all (-all) and engine (-d|engine) are mutually exclusive \n";
pod2usage(-verbose => 1, -input=>\*DATA);
exit (1);
}
# -envinst (target Oracle home / MSSQL instance) is mandatory.
if ( ! ( defined($envinst) ) ) {
print "Options -envinst is required. \n";
pod2usage(-verbose => 1, -input=>\*DATA);
exit (1);
}
# Validate the database-selection filters (at least one is required).
Toolkit_helpers::check_filer_options (1,$type, $group, $host, $dbname, $envname);
# Accumulated exit code: incremented once per failed engine or database.
my $ret = 0;
# this array will have all engines to go through (if -d is specified it will be only one engine)
my $engine_list = Toolkit_helpers::get_engine_list($all, $dx_host, $engine_obj);
# Main loop: connect to every selected Delphix engine and run the upgrade
# on each database matching the filters.  $ret counts failures.
for my $engine ( sort (@{$engine_list}) ) {
  # dlpx_connect() returns non-zero on failure.
  if ($engine_obj->dlpx_connect($engine)) {
    # BUG FIX: report the engine we actually tried to reach ($dx_host is
    # undefined when -all is used), and fix the "Dephix" typo.
    print "Can't connect to Delphix Engine $engine\n\n";
    $ret = $ret + 1;
    next;
  };

  # Load the database and group objects for the current engine.
  # (Direct-call syntax instead of the ambiguous indirect "new Class" form.)
  my $databases = Databases->new( $engine_obj, $debug );
  my $groups = Group_obj->new( $engine_obj, $debug );

  # Apply the command-line filters to select the databases to upgrade.
  my $db_list = Toolkit_helpers::get_dblist_from_filter($type, $group, $host, $dbname, $databases, $groups, $envname, undef, undef, undef, undef, undef, $repositoryname, $debug);

  if (! defined($db_list)) {
    print "There is no DB selected to process on $engine . Please check filter definitions. \n";
    $ret = $ret + 1;
    next;
  }

  # Upgrade every selected database; upgradeVDB() returns 0 on success.
  for my $dbitem ( @{$db_list} ) {
    my $dbobj = $databases->getDB($dbitem);
    $ret = $ret + $dbobj->upgradeVDB($envinst);
  }
}

exit $ret;
__DATA__
=head1 SYNOPSIS
dx_upgrade_db [ -engine|d <delphix identifier> | -all ] [ -configfile file ]
< -envinst OracleHome/MSSQLinstance >
< -group group_name | -name db_name | -host host_name | -type dsource|vdb | -envname name >
[ -help]
[ -debug]
=head1 DESCRIPTION
Upgrade a DB specified by filter parameter using home/instance defined in envinst parameter
=head1 ARGUMENTS
=head2 Delphix Engine selection - if not specified a default host(s) from dxtools.conf will be used.
=over 10
=item B<-engine|d>
Specify Delphix Engine name from dxtools.conf file
=item B<-all>
Run the upgrade on all Delphix appliances
=item B<-configfile file>
Location of the configuration file.
A config file search order is as follow:
- configfile parameter
- DXTOOLKIT_CONF variable
- dxtools.conf from dxtoolkit location
=back
=head2 Filters
Filter databases using one of the following filters
=over 4
=item B<-group name>
Group Name
=item B<-name name>
Database Name
=item B<-host name>
Host Name
=item B<-type type>
Type (dsource|vdb)
=item B<-envname name>
Environment name
=item B<-reponame name>
Filter using reponame
=back
=head1 OPTIONS
=over 2
=item B<-help>
Print this screen
=item B<-debug>
Turn on debugging
=back
=head1 EXAMPLES
Upgrade of MS SQL ( including enabling and disabling VDB)
dx_ctl_db -d Landshark43 -name autotest -action disable
Disabling database autotest.
Starting job JOB-830 for database autotest.
0 - 5 - 10 - 20 - 30 - 100
Job JOB-830 finised with state: COMPLETED
dx_upgrade_db -d Landshark43 -name autotest -envinst MSSQL2012
Waiting for all actions to complete. Parent action is ACTION-1698
Upgrade completed with success.
dx_ctl_db -d Landshark43 -name autotest -action enable
Enabling database autotest.
Starting job JOB-831 for database autotest.
0 - 25 - 75 - 100
Job JOB-831 finised with state: COMPLETED
=cut
| delphix/dxtoolkit | bin/dx_upgrade_db.pl | Perl | apache-2.0 | 5,274 |
package Paws::EC2::LoadPermissionModifications;
# Moose value object for the EC2 API describing a set of load-permission
# changes as two lists of LoadPermissionRequest entries (see POD below).
use Moose;
# The load permissions to add.
has Add => (is => 'ro', isa => 'ArrayRef[Paws::EC2::LoadPermissionRequest]');
# The load permissions to remove.
has Remove => (is => 'ro', isa => 'ArrayRef[Paws::EC2::LoadPermissionRequest]');
1;
### main pod documentation begin ###
=head1 NAME
Paws::EC2::LoadPermissionModifications
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::EC2::LoadPermissionModifications object:
$service_obj->Method(Att1 => { Add => $value, ..., Remove => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::EC2::LoadPermissionModifications object:
$result = $service_obj->Method(...);
$result->Att1->Add
=head1 DESCRIPTION
This class has no description
=head1 ATTRIBUTES
=head2 Add => ArrayRef[L<Paws::EC2::LoadPermissionRequest>]
The load permissions to add.
=head2 Remove => ArrayRef[L<Paws::EC2::LoadPermissionRequest>]
The load permissions to remove.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::EC2>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/EC2/LoadPermissionModifications.pm | Perl | apache-2.0 | 1,532 |
#!/usr/bin/perl -w
# Copyright 2012 Arnab Ghoshal
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# This script normalizes the TIMIT phonetic transcripts that have been
# extracted in a format where each line contains an utterance ID followed by
# the transcript, e.g.:
# fcke0_si1111 h# hh ah dx ux w iy dcl d ix f ay n ih q h#
# Usage/help text printed when mandatory arguments are missing or invalid.
my $usage = "Usage: timit_norm_trans.pl -i transcript -m phone_map -from [60|48] -to [48|39] > normalized\n
Normalizes phonetic transcriptions for TIMIT, by mapping the phones to a
smaller set defined by the -m option. This script assumes that the mapping is
done in the \"standard\" fashion, i.e. to 48 or 39 phones. The input is
assumed to have 60 phones (+1 for glottal stop, which is deleted), but that can
be changed using the -from option. The input format is assumed to be utterance
ID followed by transcript on the same line.\n";

use strict;
use Getopt::Long;

die "$usage" unless(@ARGV >= 1);

my ($in_trans, $phone_map, $num_phones_out);
my $num_phones_in = 60;

GetOptions ("i=s" => \$in_trans,        # Input transcription
            "m=s" => \$phone_map,       # File containing phone mappings
            "from=i" => \$num_phones_in,    # Input #phones: must be 60 or 48
            "to=i" => \$num_phones_out );   # Output #phones: must be 48 or 39

die $usage unless(defined($in_trans) && defined($phone_map) &&
                  defined($num_phones_out));

# Validate the requested phone-set sizes: mappings only exist for
# 60->48, 60->39 and 48->39.  (Grammar fixed: "Can only used" -> "use".)
if ($num_phones_in != 60 && $num_phones_in != 48) {
  die "Can only use 60 or 48 for -from (used $num_phones_in)."
}
if ($num_phones_out != 48 && $num_phones_out != 39) {
  die "Can only use 48 or 39 for -to (used $num_phones_out)."
}
unless ($num_phones_out < $num_phones_in) {
  die "Argument to -from ($num_phones_in) must be greater than that to -to ($num_phones_out)."
}

# Read the 3-column mapping file (60-phone, 48-phone, 39-phone per line)
# and build the map for the requested direction.  Glottal-stop lines are
# skipped because 'q' is deleted from transcripts entirely.
open(my $map_fh, '<', $phone_map)
  or die "Cannot open mappings file '$phone_map': $!";
my (%phonemap, %seen_phones);
my $num_seen_phones = 0;
while (<$map_fh>) {
  chomp;
  next if ($_ =~ /^q\s*.*$/);  # Ignore glottal stops.
  m:^(\S+)\s+(\S+)\s+(\S+)$: or die "Bad line: $_";
  my $mapped_from = ($num_phones_in == 60)? $1 : $2;
  my $mapped_to = ($num_phones_out == 48)? $2 : $3;
  if (!defined($seen_phones{$mapped_to})) {
    $seen_phones{$mapped_to} = 1;
    $num_seen_phones += 1;
  }
  $phonemap{$mapped_from} = $mapped_to;
}
close $map_fh;

# Sanity check: the mapping must cover exactly the target phone set.
if ($num_seen_phones != $num_phones_out) {
  die "Trying to map to $num_phones_out phones, but seen only $num_seen_phones";
}

# Map every transcript line, removing glottal stops and normalizing spaces.
open(my $trans_fh, '<', $in_trans)
  or die "Cannot open transcription file '$in_trans': $!";
while (<$trans_fh>) {
  chomp;
  $_ =~ m:^(\S+)\s+(.+): or die "Bad line: $_";
  my $utt_id = $1;
  my $trans = $2;
  $trans =~ s/q//g;  # Remove glottal stops.
  $trans =~ s/^\s*//; $trans =~ s/\s*$//;  # Normalize spaces
  print $utt_id;
  for my $phone (split(/\s+/, $trans)) {
    # BUG FIX: fail loudly on phones absent from the mapping instead of
    # printing an empty field with an uninitialized-value warning.
    defined($phonemap{$phone})
      or die "Phone '$phone' in utterance '$utt_id' has no mapping";
    print " $phonemap{$phone}";
  }
  print "\n";
}
close $trans_fh;
| chagge/Kaldi-timit | s5/local/timit_norm_trans.pl | Perl | apache-2.0 | 3,361 |
package Forge::Model::R::Oos::MarketPlaceSetting;
# Rose::DB::Object model for the 'market_place_setting' table: a named,
# optionally-valued setting attached to a market place row.
use strict;
use base qw(Forge::Model::R::Oos);
# Declarative column/key/relationship setup (Rose::DB::Object::Metadata).
__PACKAGE__->meta->setup(
table => 'market_place_setting',
columns => [
id => { type => 'bigint', not_null => 1 },
version => { type => 'bigint', not_null => 1 },
active => { type => 'boolean', not_null => 1 },
code => { type => 'varchar', length => 255, not_null => 1 },
description => { type => 'varchar', length => 4000 },
market_place_id => { type => 'bigint', not_null => 1 },
value => { type => 'varchar', length => 255 },
],
primary_key_columns => [ 'id' ],
foreign_keys => [
# Each setting belongs to exactly one market place.
market_place => {
class => 'Forge::Model::R::Oos::MarketPlace',
key_columns => { market_place_id => 'id' },
},
],
);
# Generate the companion Manager class for bulk queries on this table.
__PACKAGE__->meta->make_manager_class('market_place_setting');
1;
| ant-i/db-crud | lib/Forge/Model/R/Oos/MarketPlaceSetting.pm | Perl | apache-2.0 | 953 |
#!/usr/local/bin/perl
use strict;
use warnings;
use Configuration;
use Cwd 'abs_path';
#-------------------------------------------------------------------
# Daniel Bolanos 2010
# Boulder Language Technologies / University of Colorado at Boulder
#
# description: decoding (multiple processors)
#
# parameters:
#
# $ARGV[0] = acoustic models directory
# $ARGV[1] = experiment id
#
#-------------------------------------------------------------------
# Number of parallel decoding processes handed to run.pl.
my $cpuCores = 4;
# load parameters
my $dirModels = $ARGV[0]; # dir containing acoustic models
my $experimentId = $ARGV[1]; # experiment id
# check parameters
if ((scalar @ARGV) != 2) {
die("Error: wrong number of parameters");
}
# Only acoustic-model iterations in [start, end] are decoded.
my $iterationStart = 14;
my $iterationEnd = 26;
# NOTE(review): assumes the Configuration module (use'd above) populates
# $WSJ::dirData -- confirm, it is not defined in this file.
my $dirExperiment = "$WSJ::dirData/experiments/$experimentId";
my $fileConfiguration = "./config/configuration.txt";
# create the experiment directory
system("mkdir -p $dirExperiment");
# copy the configuration file
my $fileConfigurationExperiment = "$dirExperiment/configuration.txt";
system("cp \"$fileConfiguration\" \"$fileConfigurationExperiment\"");
# run the decoding for acoustic model
# Model directories are purely numeric; each corresponds to one training
# iteration and is processed in ascending numeric order.
opendir(INPUT_DIR, $dirModels) || die("unable to open the directory: $dirModels");
my @subdirs = grep(/^\d+$/,readdir(INPUT_DIR));
foreach my $subdir (sort {$a <=> $b} @subdirs) {
# process subdir?
my $iteration = $subdir;
if (($iteration < $iterationStart) || ($iteration > $iterationEnd)) {
next;
}
my $fileHMM = "$dirModels/$subdir/models.bin";
if (!(-e $fileHMM)) {
die("acoustic model file: \"$fileHMM\" was not found");
}
# create the decoding folder
my $dirDecoding = "$dirExperiment/$subdir";
system("mkdir -p $dirDecoding");
# run the decoding in multiple processors
# NOTE(review): the doubled quotes wrap the whole decode.pl argument list
# as a single run.pl argument -- presumably intentional; verify against
# run.pl's argument parsing.
system("run.pl $cpuCores ./decode.pl \"\"$fileConfigurationExperiment\" \"$dirDecoding\" \"$fileHMM\"\"");
}
closedir(INPUT_DIR);
| nlphacker/bavieca | tasks/wsj/scripts/test/20k/decodeExperiment.pl | Perl | apache-2.0 | 1,891 |
% valid(+Lists): succeeds when every list in Lists contains pairwise
% distinct finite-domain variables (via fd_all_different/1).  Used to
% constrain all rows, columns and 3x3 squares of the puzzle at once.
valid([]).
valid([Head|Tail]) :-
    fd_all_different(Head),
    valid(Tail).
% print9(+Solution): pretty-print a solved 9x9 board (a flat list of 81
% integers) as nine rows of nine digits.  The backslash-newline pairs are
% format-string line continuations; the literal must stay exactly as is.
print9(Solution) :-
    format("\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n\
~d ~d ~d ~d ~d ~d ~d ~d ~d~n",
Solution).
% sudoku(+Puzzle, -Solution): solve a 9x9 sudoku.  Puzzle is a flat list
% of 81 cells (integers 1..9 or unbound variables for blanks).  Solution
% is unified with Puzzle, so the givens constrain the search directly.
sudoku(Puzzle, Solution) :-
    Solution = Puzzle,
    % Name every cell Src (row r, column c) so the row/column/square
    % groupings below can be spelled out explicitly.
    Puzzle = [S11, S12, S13, S14, S15, S16, S17, S18, S19,
    S21, S22, S23, S24, S25, S26, S27, S28, S29,
    S31, S32, S33, S34, S35, S36, S37, S38, S39,
    S41, S42, S43, S44, S45, S46, S47, S48, S49,
    S51, S52, S53, S54, S55, S56, S57, S58, S59,
    S61, S62, S63, S64, S65, S66, S67, S68, S69,
    S71, S72, S73, S74, S75, S76, S77, S78, S79,
    S81, S82, S83, S84, S85, S86, S87, S88, S89,
    S91, S92, S93, S94, S95, S96, S97, S98, S99],
    % Every cell ranges over 1..9 (finite-domain constraint).
    fd_domain(Solution, 1, 9),
    Row1 = [S11, S12, S13, S14, S15, S16, S17, S18, S19],
    Row2 = [S21, S22, S23, S24, S25, S26, S27, S28, S29],
    Row3 = [S31, S32, S33, S34, S35, S36, S37, S38, S39],
    Row4 = [S41, S42, S43, S44, S45, S46, S47, S48, S49],
    Row5 = [S51, S52, S53, S54, S55, S56, S57, S58, S59],
    Row6 = [S61, S62, S63, S64, S65, S66, S67, S68, S69],
    Row7 = [S71, S72, S73, S74, S75, S76, S77, S78, S79],
    Row8 = [S81, S82, S83, S84, S85, S86, S87, S88, S89],
    Row9 = [S91, S92, S93, S94, S95, S96, S97, S98, S99],
    Col1 = [S11, S21, S31, S41, S51, S61, S71, S81, S91],
    Col2 = [S12, S22, S32, S42, S52, S62, S72, S82, S92],
    Col3 = [S13, S23, S33, S43, S53, S63, S73, S83, S93],
    Col4 = [S14, S24, S34, S44, S54, S64, S74, S84, S94],
    Col5 = [S15, S25, S35, S45, S55, S65, S75, S85, S95],
    Col6 = [S16, S26, S36, S46, S56, S66, S76, S86, S96],
    Col7 = [S17, S27, S37, S47, S57, S67, S77, S87, S97],
    Col8 = [S18, S28, S38, S48, S58, S68, S78, S88, S98],
    Col9 = [S19, S29, S39, S49, S59, S69, S79, S89, S99],
    Square1 = [S11, S12, S13, S21, S22, S23, S31, S32, S33],
    Square2 = [S14, S15, S16, S24, S25, S26, S34, S35, S36],
    Square3 = [S17, S18, S19, S27, S28, S29, S37, S38, S39],
    Square4 = [S41, S42, S43, S51, S52, S53, S61, S62, S63],
    Square5 = [S44, S45, S46, S54, S55, S56, S64, S65, S66],
    Square6 = [S47, S48, S49, S57, S58, S59, S67, S68, S69],
    Square7 = [S71, S72, S73, S81, S82, S83, S91, S92, S93],
    Square8 = [S74, S75, S76, S84, S85, S86, S94, S95, S96],
    Square9 = [S77, S78, S79, S87, S88, S89, S97, S98, S99],
    % All 27 groups (9 rows, 9 columns, 9 squares) must hold distinct values.
    valid([Row1, Row2, Row3, Row4, Row5, Row6, Row7, Row8, Row9,
    Col1, Col2, Col3, Col4, Col5, Col6, Col7, Col8, Col9,
    Square1, Square2, Square3, Square4, Square5, Square6, Square7,
    Square8, Square9]).
| Leonidas-from-XIV/7langs7weeks | prolog/sudoku9.pl | Perl | apache-2.0 | 2,838 |
package Google::Ads::AdWords::v201809::PlacesOfInterestOperand::Category;
# SOAP::WSDL typemap class for the AdWords v201809 XML Schema simpleType
# 'PlacesOfInterestOperand.Category' (categories identifying places of
# interest).  The type is a restriction of xsd:string; SOAP::WSDL does not
# validate values, so it behaves exactly like its base string type.
use strict;
use warnings;
# Target namespace of the schema that declares this type.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809'};
# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);
1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
PlacesOfInterestOperand.Category from the namespace https://adwords.google.com/api/adwords/cm/v201809.
Categories to identify places of interest.
This clase is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like it's base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/PlacesOfInterestOperand/Category.pm | Perl | apache-2.0 | 1,146 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::ibm::nos::snmp::mode::cpu;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
sub set_counters {
# Declare the counter templates for the centreon-plugins framework: a
# single 'cpu' group exposing the 1-minute and 5-minute CPU utilization
# averages as thresholds and perfdata.
my ($self, %options) = @_;
$self->{maps_counters_type} = [
# skipped_code -10: tolerate a counter whose value is missing.
{ name => 'cpu', type => 0, cb_prefix_output => 'prefix_message_output', skipped_code => { -10 => 1 } }
];
$self->{maps_counters}->{cpu} = [
# 1-minute CPU utilization average (0-100 %).
{ label => 'average-1m', nlabel => 'cpu.utilization.1m.percentage', set => {
key_values => [ { name => 'average_1m' } ],
output_template => '%.2f %% (1m)',
perfdatas => [
{ value => 'average_1m', template => '%.2f',
min => 0, max => 100, unit => '%' }
]
}
},
# 5-minute CPU utilization average (0-100 %).
{ label => 'average-5m', nlabel => 'cpu.utilization.5m.percentage', set => {
key_values => [ { name => 'average_5m' } ],
output_template => '%.2f %% (5m)',
perfdatas => [
{ value => 'average_5m', template => '%.2f',
min => 0, max => 100, unit => '%' }
]
}
}
];
}
sub prefix_message_output {
    # Text prepended to the CPU counter values in the plugin output
    # (referenced by name from set_counters via cb_prefix_output).
    my ($self, %options) = @_;

    return 'CPU average usage: ';
}
sub new {
# Constructor: delegate to the counter-template base class, forcing the
# new (nlabel-based) perfdata naming scheme.
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
bless $self, $class;
return $self;
}
sub manage_selection {
    # Read the 1-minute and 5-minute average CPU utilization values via
    # SNMP (mpCpuStatsUtil* OIDs) and store them under $self->{cpu} for
    # the counter templates declared in set_counters().
    my ($self, %options) = @_;

    my $oid_mpCpuStatsUtil1Minute  = '.1.3.6.1.4.1.26543.2.5.1.2.2.3.0';
    my $oid_mpCpuStatsUtil5Minutes = '.1.3.6.1.4.1.26543.2.5.1.2.2.6.0';

    # nothing_quit: abort the plugin run if neither OID is available.
    my $snmp_result = $options{snmp}->get_leef(
        oids => [ $oid_mpCpuStatsUtil1Minute, $oid_mpCpuStatsUtil5Minutes ],
        nothing_quit => 1
    );

    $self->{cpu} = {
        average_1m => $snmp_result->{$oid_mpCpuStatsUtil1Minute},
        average_5m => $snmp_result->{$oid_mpCpuStatsUtil5Minutes}
    };
}
1;
__END__
=head1 MODE
Check CPU usage (averaged over the last 1 and 5 minutes).
=over 8
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'average-1m' (%), 'average-5m' (%).
=back
=cut
| centreon/centreon-plugins | centreon/common/ibm/nos/snmp/mode/cpu.pm | Perl | apache-2.0 | 2,879 |
package Configuration;
use JSON;
use Moose;
# configuration file
has 'configuration_file' => (is => 'ro', isa => 'Str', required => 1, trigger => \&_load);
# all available configurations
has '_configurations' => (is => 'rw', isa => 'HashRef', lazy => 0 );
sub _load {
# Trigger invoked when 'configuration_file' is set: slurp the file, parse
# it as JSON, expand wildcard model specs, and store the result in the
# '_configurations' attribute.
my $this = shift;
my $config = $this->configuration_file();
my $config_json = undef;
{
# Slurp the whole file ($/ localized to undef).
local $/ = undef;
open CONFIG_FILE, $config or die "Unable to open config file ($config): $!";
$config_json = <CONFIG_FILE> ;
close CONFIG_FILE;
}
# NOTE(review): the POD block below is dead code left over from an older
# line-based parser; it is never executed.
=pod
my $line = $_;
if ( $line =~ m/^\#/ ) {
print STDERR "Skipping commented out configuration entry: $line\n";
next;
}
=cut
# parse JSON config
my $config_obj = decode_json( $config_json );
if ( ! $config_obj ) {
die "Invalid configuration ...";
}
my $appearance_models = $config_obj->{ "appearance-models" };
my $summarizers_specs = $config_obj->{ "models" };
my %temp_summarizers;
# Expand model specs: a '*' in a spec key is replaced by each appearance
# model name, producing one concrete spec per appearance model (with an
# extra 'appearance-model' entry).  Non-wildcard specs pass through as-is.
foreach my $summarizers_spec (keys( %{ $summarizers_specs } )) {
my $spec = $summarizers_specs->{ $summarizers_spec };
if ( $summarizers_spec =~ m/\*/ ) {
foreach my $appearance_model ( keys( %{ $appearance_models } ) ) {
my $summarizers_spec_copy = $summarizers_spec;
$summarizers_spec_copy =~ s/\*/$appearance_model/sg;
# Shallow copy so each expansion gets its own hash.
my %temp_spec = %{ $spec };
$temp_spec{ "appearance-model" } = $appearance_model;
$temp_summarizers{ $summarizers_spec_copy } = \%temp_spec;
}
}
else {
$temp_summarizers{ $summarizers_spec } = $spec;
}
}
# update models
$config_obj->{ 'models' } = \%temp_summarizers;
# set _configurations
$this->_configurations( $config_obj );
}
# TODO: more intelligent implementation to autogenerate methods ?
sub feature_set {
    # Return the 'feature_set' entry configured for the given model key.
    # NOTE(review): this looks the key up at the top level of the parsed
    # configuration, not under 'models' -- verify against callers.
    my ( $this, $model_key ) = @_;
    return $this->_configurations()->{$model_key}{'feature_set'};
}
no Moose;
1;
| ypetinot/web-summarization | src/perl/Configuration.pm | Perl | apache-2.0 | 1,909 |
# Data-description class for the VMOMI "OvfFileItem" type. It only carries
# the ancestry and member tables consumed by the VMOMI (de)serialization
# machinery; there is no behavior of its own.
package VMOMI::OvfFileItem;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Ancestor class names, nearest ancestor first.
our @class_ancestors = (
    'DynamicData',
);

# Member table rows: [ name, type, ?, ? ].
# NOTE(review): the exact meaning of the columns is defined by the VMOMI
# serializer, not visible in this file. The 2nd column looks like the
# member's VMOMI type (undef presumably meaning a primitive/string) and the
# trailing 1 appears to mark optional members -- confirm against the
# serializer (e.g. VMOMI::ComplexType) before relying on this reading.
our @class_members = (
    ['deviceId', undef, 0, ],
    ['path', undef, 0, ],
    ['compressionMethod', undef, 0, 1],
    ['chunkSize', undef, 0, 1],
    ['size', undef, 0, 1],
    ['cimType', undef, 0, ],
    ['create', 'boolean', 0, ],
);

# Returns this class's ancestor list.
sub get_class_ancestors {
    return @class_ancestors;
}

# Returns the superclass's member table with this class's rows appended.
sub get_class_members {
    my $class = shift;
    my @super_members = $class->SUPER::get_class_members();
    return (@super_members, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/OvfFileItem.pm | Perl | apache-2.0 | 598 |
# Thin RPC agent for the Perun 'tasksManager' remote manager.
package Perun::TasksAgent;

use strict;
use warnings;
use Perun::Common;

# Name of the remote manager this agent talks to.
my $manager = 'tasksManager';

use fields qw(_agent _manager);

# Constructor: wraps the given connection agent and remembers the manager
# name. Uses fields::new so the instance is a restricted hash.
sub new
{
	my $class = shift;
	my $self = fields::new($class);

	$self->{_agent}   = shift;
	$self->{_manager} = $manager;

	return $self;
}

# Fetches the list of TaskResult objects for the given destinations via the
# shared manager-method dispatcher.
sub getTaskResultsForDestinations
{
	return Perun::Common::callManagerMethod('getTaskResultsForDestinations', '[]TaskResult', @_);
}

1;
| balcirakpeter/perun | perun-cli/Perun/TasksAgent.pm | Perl | bsd-2-clause | 394 |
#!/usr/bin/perl
# <copyright>
# Copyright (c) 2013 Intel Corporation. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# </copyright>
use strict;
use warnings;

use File::Glob ":glob";
use Encode qw{ encode };

use FindBin;
use lib "$FindBin::Bin/lib";

use tools;
use Platform ":vars";

our $VERSION = "0.04";

# Regexes for GNU-style message placeholders such as "%1$s": $escape matches
# the introducing "%", $placeholder captures the argument number and type.
my $escape = qr{%};
my $placeholder = qr{(\d)\$(s|l?[du])};

# Known catalog sections with their short (3-letter) aliases used in
# generated identifiers and in the Windows facility names.
my $sections =
    {
        meta     => { short => "prp" }, # "prp" stands for "property".
        strings  => { short => "str" },
        formats  => { short => "fmt" },
        messages => { short => "msg" },
        hints    => { short => "hnt" },
    };
my @sections = qw{ meta strings formats messages hints };

# Assign section properties: long name, set number, base number.
# (A plain loop replaces the original three void-context map() calls, which
# used map purely for side effects.)
for my $i ( 0 .. @sections - 1 ) {
    my $props = $sections->{ $sections[ $i ] };
    $props->{ long } = $sections[ $i ];
    $props->{ set }  = $i + 1;
    $props->{ base } = ( $i + 1 ) << 16;
}; # for $i

# Properties of Meta section.
my @properties = qw{ Language Country LangId Version Revision };
sub _generate_comment($$$) {
    # Build the two-line "generated file" banner placed at the top of every
    # output file. $open/$close are the comment delimiters of the target
    # file format (e.g. "//", "$", ";").
    my ( $data, $open, $close ) = @_;
    my $source = get_file( $data->{ "%meta" }->{ source } );
    my $stamp  = localtime();
    return
        "$open Do not edit this file! $close\n" .
        "$open The file was generated from $source by $tool on $stamp. $close\n";
}; # sub _generate_comment
sub msg2sgn($) {

    # Convert a message string to its signature: the list of placeholders in
    # argument order. For example, the signature of
    # "%1$s value \"%2$s\" is invalid." is "%1$s %2$s". Argument positions
    # that never occur in the message are marked with "-".

    my ( $msg ) = @_;

    my @placeholders;
    pos( $msg ) = 0;
    while ( $msg =~ m{\G.*?$escape$placeholder}g ) {
        $placeholders[ $1 - 1 ] = "%$1\$$2";
    }; # while

    # Fill the gaps left by unused argument numbers.
    foreach my $i ( 1 .. scalar( @placeholders ) ) {
        $placeholders[ $i - 1 ] = "%$i\$-" if not defined( $placeholders[ $i - 1 ] );
    }; # foreach $i

    return join( " ", @placeholders );

}; # sub msg2sgn
sub msg2src($) {
    # Convert a message string to a C string constant. On Windows the
    # GNU-style placeholders ("%1$s") are rewritten to MS style ("%1!s!");
    # on other targets the message is returned unchanged.
    my ( $msg ) = @_;
    $msg =~ s{$escape$placeholder}{\%$1!$2!}g if $target_os eq "win";
    return $msg;
}; # sub msg2src
# Backslash escapes expanded when producing a Windows message-compiler
# (.mc) source: "\n" and "\t" become real characters; any other escaped
# character is passed through unchanged.
my $special =
    {
        "n" => "\n",
        "t" => "\t",
    };

# Convert a message string to message-compiler (.mc) form: MS-style
# placeholders with backslash escapes expanded.
sub msg2mc($) {
    my ( $msg ) = @_;
    $msg = msg2src( $msg ); # Get windows style placeholders.
    $msg =~ s{\\(.)}{ exists( $special->{ $1 } ) ? $special->{ $1 } : $1 }ge;
    return $msg;
}; # sub msg2mc
sub parse_message($) {

    # Validate the %-sequences of a message string. Returns an error string
    # describing the first bad sequence, or undef if the message is clean.

    my ( $msg ) = @_;

    pos( $msg ) = 0;
    # Walk from one "%" to the next; each one must be followed by a valid
    # placeholder (argument number and type).
    while ( $msg =~ m{\G.*?$escape}gc ) {
        if ( $msg !~ m{\G$placeholder}gc ) {
            return "Bad %-sequence near \"%" . substr( $msg, pos( $msg ), 7 ) . "\"";
        }; # if
    }; # while

    return undef;

}; # sub parse_message
sub parse_source($) {

    # Read and parse a message catalog source file. Returns a hash with one
    # array of [ identifier, message ] pairs per section, plus a "%meta"
    # entry holding the catalog properties and the source file name.
    # Dies (via runtime_error) on any syntax or consistency error.

    my ( $name ) = @_;

    my @bulk = read_file( $name, -layer => ":utf8" );
    my $data = {};

    my $line;
    my $n = 0; # Line number.
    my $obsolete = 0; # Counter of obsolete entries.
    my $last_idx;     # Index of the last message seen (continuation target).
    my %idents;       # Message identifiers already defined in the current section.
    my $section;

    # Helper: report a parse error with file name, line number and content.
    my $error =
        sub {
            my ( $n, $line, $msg ) = @_;
            runtime_error( "Error parsing $name line $n: " . "$msg:\n" . " $line" );
        }; # sub

    foreach $line ( @bulk ) {
        ++ $n;
        # Skip empty lines and comments.
        if ( $line =~ m{\A\s*(\n|#)} ) {
            $last_idx = undef;
            next;
        }; # if
        # Parse section header, e.g. "-*- STRINGS -*-".
        if ( $line =~ m{\A-\*-\s*([A-Z_]*)\s*-\*-\s*\n\z}i ) {
            $section = ( lc( $1 ) );
            if ( not grep( $section eq $_, @sections ) ) {
                $error->( $n, $line, "Unknown section \"$section\" specified" );
            }; # if
            if ( exists( $data->{ $section } ) ) {
                $error->( $n, $line, "Multiple sections of the same type specified" );
            }; # if
            %idents = (); # Clean list of known message identifiers.
            next;
        }; # if
        if ( not defined( $section ) ) {
            $error->( $n, $line, "Section heading expected" );
        }; # if
        # Parse section body.
        if ( $section eq "meta" ) {
            if ( $line =~ m{\A([A-Z_][A-Z_0-9]*)\s+"(.*)"\s*?\n?\z}i ) {
                # Parse meta properties (such as Language, Country, and LangId).
                my ( $property, $value ) = ( $1, $2 );
                if ( not grep( $_ eq $property , @properties ) ) {
                    $error->( $n, $line, "Unknown property \"$property\" specified" );
                }; # if
                if ( exists( $data->{ "%meta" }->{ $property } ) ) {
                    $error->( $n, $line, "Property \"$property\" has already been specified" );
                }; # if
                $data->{ "%meta" }->{ $property } = $value;
                $last_idx = undef;
                next;
            }; # if
            $error->( $n, $line, "Property line expected" );
        }; # if
        # Parse message: an identifier followed by a quoted string.
        if ( $line =~ m{\A([A-Z_][A-Z_0-9]*)\s+"(.*)"\s*?\n?\z}i ) {
            my ( $ident, $message ) = ( $1, $2 );
            if ( $ident eq "OBSOLETE" ) {
                # If id is "OBSOLETE", add a unique suffix. It provides convenient way to mark
                # obsolete messages.
                ++ $obsolete;
                $ident .= $obsolete;
            }; # if
            if ( exists( $idents{ $ident } ) ) {
                $error->( $n, $line, "Identifier \"$ident\" is redefined" );
            }; # if
            # Check %-sequences.
            my $err = parse_message( $message );
            if ( $err ) {
                $error->( $n, $line, $err );
            }; # if
            # Save message.
            push( @{ $data->{ $section } }, [ $ident, $message ] );
            $idents{ $ident } = 1;
            $last_idx = @{ $data->{ $section } } - 1;
            next;
        }; # if
        # Parse continuation line (a bare quoted string appended to the
        # immediately preceding message).
        if ( $line =~ m{\A\s*"(.*)"\s*\z} ) {
            my $message = $1;
            if ( not defined( $last_idx ) ) {
                $error->( $n, $line, "Unexpected continuation line" );
            }; # if
            # Check %-sequences.
            my $err = parse_message( $message );
            if ( $err ) {
                $error->( $n, $line, $err );
            }; # if
            # Save continuation.
            $data->{ $section }->[ $last_idx ]->[ 1 ] .= $message;
            next;
        }; # if
        $error->( $n, $line, "Message definition expected" );
    }; # foreach

    $data->{ "%meta" }->{ source } = $name;

    # Make sure every known section exists, even if empty.
    foreach my $section ( @sections ) {
        if ( not exists( $data->{ $section } ) ) {
            $data->{ $section } = [];
        }; # if
    }; # foreach $section

    # All meta properties are mandatory; mirror them into the meta section
    # so they are emitted like ordinary messages.
    foreach my $property ( @properties ) {
        if ( not defined( $data->{ "%meta" }->{ $property } ) ) {
            runtime_error(
                "Error parsing $name: " .
                "Required \"$property\" property is not specified"
            );
        }; # if
        push( @{ $data->{ meta } }, [ $property, $data->{ "%meta" }->{ $property } ] );
    }; # foreach

    return $data;

}; # sub parse_source
sub generate_enum($$$) {

    # Generate a C include file defining the "${prefix}_id" enum: one
    # identifier per message, grouped per section, each section's ids based
    # at $sections->{...}->{ base } (i.e. set number << 16).

    my ( $data, $file, $prefix ) = @_;
    my $bulk = "";

    $bulk =
        _generate_comment( $data, "//", "//" ) .
        "\n" .
        "enum ${prefix}_id {\n\n" .
        " // A special id for absence of message.\n" .
        " ${prefix}_null = 0,\n\n";

    foreach my $section ( @sections ) {
        my $props = $sections->{ $section }; # Section properties.
        my $short = $props->{ short }; # Short section name, frequently used.
        $bulk .=
            " // Set #$props->{ set }, $props->{ long }.\n" .
            " ${prefix}_${short}_first = $props->{ base },\n";
        foreach my $item ( @{ $data->{ $section } } ) {
            my ( $ident, undef ) = @$item;
            $bulk .= " ${prefix}_${short}_${ident},\n";
        }; # foreach
        $bulk .= " ${prefix}_${short}_last,\n\n";
    }; # foreach $type

    $bulk .= " ${prefix}_xxx_lastest\n\n";
    $bulk .=
        "}; // enum ${prefix}_id\n" .
        "\n" .
        "typedef enum ${prefix}_id ${prefix}_id_t;\n" .
        "\n";
    $bulk .=
        "\n" .
        "// end of file //\n";

    write_file( $file, \$bulk );

}; # sub generate_enum
sub generate_signature($$) {

    # Generate a signature file: for each message, its identifier and the
    # sorted list of its placeholders (see msg2sgn). Signatures of two
    # catalogs can be compared to check the catalogs are compatible.

    my ( $data, $file ) = @_;
    my $bulk = "";

    $bulk .= "// message catalog signature file //\n\n";

    foreach my $section ( @sections ) {
        my $props = $sections->{ $section }; # Section properties.
        my $short = $props->{ short }; # Short section name, frequently used.
        $bulk .= "-*- " . uc( $props->{ long } ) . "-*-\n\n";
        foreach my $item ( @{ $data->{ $section } } ) {
            my ( $ident, $msg ) = @$item;
            $bulk .= sprintf( "%-40s %s\n", $ident, msg2sgn( $msg ) );
        }; # foreach
        $bulk .= "\n";
    }; # foreach $type

    $bulk .= "// end of file //\n";

    write_file( $file, \$bulk );

}; # sub generate_signature
sub generate_default($$$) {

    # Generate a C include file containing the default (embedded) messages
    # for all sections, plus section and table descriptor structures that
    # reference them. All generated variable names are now derived from
    # $prefix: the original hard-coded "__kmp_i18n_sections" and
    # "__kmp_i18n_default_table" in the table initializer, which produced
    # uncompilable C whenever --prefix was not "kmp_i18n". For the standard
    # prefix the output is byte-identical to before.

    my ( $data, $file, $prefix ) = @_;
    my $bulk = "";

    $bulk .=
        _generate_comment( $data, "//", "//" ) .
        "\n";

    # One NULL-terminated array of string constants per section; the
    # leading NULL keeps message numbering 1-based, matching the enum ids.
    foreach my $section ( @sections ) {
        $bulk .=
            "static char const *\n" .
            "__${prefix}_default_${section}" . "[] =\n" .
            " {\n" .
            " NULL,\n";
        foreach my $item ( @{ $data->{ $section } } ) {
            my ( undef, $msg ) = @$item;
            $bulk .= " \"" . msg2src( $msg ) . "\",\n";
        }; # foreach
        $bulk .=
            " NULL\n" .
            " };\n" .
            "\n";
    }; # foreach $section

    # Section descriptor array: element count plus pointer to the strings,
    # bracketed by empty sentinel entries.
    $bulk .=
        "struct kmp_i18n_section {\n" .
        " int size;\n" .
        " char const ** str;\n" .
        "}; // struct kmp_i18n_section\n" .
        "typedef struct kmp_i18n_section kmp_i18n_section_t;\n" .
        "\n" .
        "static kmp_i18n_section_t\n" .
        "__${prefix}_sections[] =\n" .
        " {\n" .
        " { 0, NULL },\n";
    foreach my $section ( @sections ) {
        $bulk .=
            " { " . @{ $data->{ $section } } . ", __${prefix}_default_${section} },\n";
    }; # foreach $section
    $bulk .=
        " { 0, NULL }\n" .
        " };\n" .
        "\n";

    # Top-level table descriptor tying all sections together.
    $bulk .=
        "struct kmp_i18n_table {\n" .
        " int size;\n" .
        " kmp_i18n_section_t * sect;\n" .
        "}; // struct kmp_i18n_table\n" .
        "typedef struct kmp_i18n_table kmp_i18n_table_t;\n" .
        "\n" .
        "static kmp_i18n_table_t __${prefix}_default_table =\n" .
        " {\n" .
        " " . @sections . ",\n" .
        " __${prefix}_sections\n" .
        " };\n" .
        "\n" .
        "// end of file //\n";

    write_file( $file, \$bulk );

}; # sub generate_default
sub generate_message_unix($$) {

    # Generate an input file for the gencat message compiler (Linux* OS and
    # OS X*): one numbered $set per section, one numbered entry per message.

    my ( $data, $file ) = @_;
    my $bulk = "";

    $bulk .=
        _generate_comment( $data, "\$", "\$" ) .
        "\n" .
        "\$quote \"\n\n";

    foreach my $section ( @sections ) {
        $bulk .=
            "\$ " . ( "-" x 78 ) . "\n\$ $section\n\$ " . ( "-" x 78 ) . "\n\n" .
            "\$set $sections->{ $section }->{ set }\n" .
            "\n";
        my $n = 0;
        foreach my $item ( @{ $data->{ $section } } ) {
            my ( undef, $msg ) = @$item;
            # Message numbers are 1-based within each set.
            ++ $n;
            $bulk .= "$n \"" . msg2src( $msg ) . "\"\n";
        }; # foreach
        $bulk .= "\n";
    }; # foreach $section

    $bulk .=
        "\n" .
        "\$ end of file \$\n";

    write_file( $file, \$bulk, -layer => ":utf8" );

}; # sub generate_message_unix
sub generate_message_windows($$) {

    # Generate an input file for the Windows* OS message compiler (mc.exe),
    # encoded in UTF-16LE as the compiler expects. Each section becomes a
    # "facility" named after its short alias.

    my ( $data, $file ) = @_;
    my $bulk = "";
    my $language = $data->{ "%meta" }->{ Language };
    my $langid = $data->{ "%meta" }->{ LangId };

    $bulk .=
        _generate_comment( $data, ";", ";" ) .
        "\n" .
        "LanguageNames = ($language=$langid:msg_$langid)\n" .
        "\n";

    $bulk .=
        "FacilityNames=(\n";
    foreach my $section ( @sections ) {
        my $props = $sections->{ $section }; # Section properties.
        $bulk .=
            " $props->{ short }=" . $props->{ set } ."\n";
    }; # foreach $section
    $bulk .=
        ")\n\n";

    foreach my $section ( @sections ) {
        my $short = $sections->{ $section }->{ short };
        my $n = 0;
        foreach my $item ( @{ $data->{ $section } } ) {
            my ( undef, $msg ) = @$item;
            # Message ids are 1-based within each facility; the lone "."
            # line terminates each message body.
            ++ $n;
            $bulk .=
                "MessageId=$n\n" .
                "Facility=$short\n" .
                "Language=$language\n" .
                msg2mc( $msg ) . "\n.\n\n";
        }; # foreach $item
    }; # foreach $section

    $bulk .=
        "\n" .
        "; end of file ;\n";

    $bulk = encode( "UTF-16LE", $bulk ); # Convert text to UTF-16LE used in Windows* OS.
    write_file( $file, \$bulk, -binary => 1 );

}; # sub generate_message_windows
#
# Parse command line.
#

my $input_file;       # Catalog source file (the only positional argument).
my $enum_file;        # Output: C enum include file.
my $signature_file;   # Output: signature file.
my $default_file;     # Output: C default-messages include file.
my $message_file;     # Output: message-compiler input file.
my $id;               # If set, print the catalog's LangId to stdout.
my $prefix = "";      # Prefix for generated C identifiers.
get_options(
    Platform::target_options(),
    "enum-file=s" => \$enum_file,
    "signature-file=s" => \$signature_file,
    "default-file=s" => \$default_file,
    "message-file=s" => \$message_file,
    "id|lang-id" => \$id,
    "prefix=s" => \$prefix,
);
if ( @ARGV == 0 ) {
    cmdline_error( "No source file specified -- nothing to do" );
}; # if
if ( @ARGV > 1 ) {
    cmdline_error( "Too many source files specified" );
}; # if
$input_file = $ARGV[ 0 ];

# Pick the message-file generator matching the target OS.
my $generate_message;
if ( $target_os =~ m{\A(?:lin|lrb|mac)\z} ) {
    $generate_message = \&generate_message_unix;
} elsif ( $target_os eq "win" ) {
    $generate_message = \&generate_message_windows;
} else {
    runtime_error( "OS \"$target_os\" is not supported" );
}; # if

#
# Do the work.
#

# Parse once, then emit whichever outputs were requested.
my $data = parse_source( $input_file );
if ( defined( $id ) ) {
    print( $data->{ "%meta" }->{ LangId }, "\n" );
}; # if
if ( defined( $enum_file ) ) {
    generate_enum( $data, $enum_file, $prefix );
}; # if
if ( defined( $signature_file ) ) {
    generate_signature( $data, $signature_file );
}; # if
if ( defined( $default_file ) ) {
    generate_default( $data, $default_file, $prefix );
}; # if
if ( defined( $message_file ) ) {
    $generate_message->( $data, $message_file );
}; # if

exit( 0 );
__END__
=pod
=head1 NAME
B<message-converter.pl> -- Convert message catalog source file into another text forms.
=head1 SYNOPSIS
B<message-converter.pl> I<option>... <file>
=head1 OPTIONS
=over
=item B<--enum-file=>I<file>
Generate enum file named I<file>.
=item B<--default-file=>I<file>
Generate default messages file named I<file>.
=item B<--lang-id>
Print language identifier of the message catalog source file.
=item B<--message-file=>I<file>
Generate message file.
=item B<--signature-file=>I<file>
Generate signature file.
Signatures are used for checking compatibility. For example, to check a primary
catalog and its translation to another language, signatures of both catalogs should be generated
and compared. If signatures are identical, catalogs are compatible.
=item B<--prefix=>I<prefix>
Prefix to be used for all C identifiers (type and variable names) in enum and default messages
files.
=item B<--os=>I<str>
Specify OS name the message formats to be converted for. If not specified explicitly, value of
LIBOMP_OS environment variable is used. If LIBOMP_OS is not defined, host OS is detected.
Depending on OS, B<message-converter.pl> converts message formats to GNU style or MS style.
=item Standard Options
=over
=item B<--doc>
=item B<--manual>
Print full documentation and exit.
=item B<--help>
Print short help message and exit.
=item B<--version>
Print version string and exit.
=back
=back
=head1 ARGUMENTS
=over
=item I<file>
A name of input file.
=back
=head1 DESCRIPTION
=head2 Message Catalog File Format
It is plain text file in UTF-8 encoding. Empty lines and lines beginning with sharp sign (C<#>) are
ignored. EBNF syntax of content:
catalog = { section };
section = header body;
header = "-*- " section-id " -*-" "\n";
body = { message };
message = message-id string "\n" { string "\n" };
section-id = identifier;
message-id = "OBSOLETE" | identifier;
identifier = letter { letter | digit | "_" };
string = """ { character } """;
Identifier starts with letter, with following letters, digits, and underscores. Identifiers are
case-sensitive. Section identifiers are fixed: C<META>, C<STRINGS>, C<FORMATS>, C<MESSAGES> and
C<HINTS>. Message identifiers must be unique within section. Special C<OBSOLETE> pseudo-identifier
may be used many times.
String is a C string literal which must not cross line boundaries.
Long messages may occupy multiple lines, a string per line.
Message may include printf-like GNU-style placeholders for arguments: C<%I<n>$I<t>>,
where I<n> is argument number (C<1>, C<2>, ...),
I<t> -- argument type, C<s> (string) or C<d> (32-bit integer).
See also comments in F<i18n/en_US.txt>.
=head2 Output Files
This script can generate 3 different text files from single source:
=over
=item Enum file.
Enum file is a C include file, containing definitions of message identifiers, e. g.:
enum kmp_i18n_id {
// Set #1, meta.
kmp_i18n_prp_first = 65536,
kmp_i18n_prp_Language,
kmp_i18n_prp_Country,
kmp_i18n_prp_LangId,
kmp_i18n_prp_Version,
kmp_i18n_prp_Revision,
kmp_i18n_prp_last,
// Set #2, strings.
kmp_i18n_str_first = 131072,
kmp_i18n_str_Error,
kmp_i18n_str_UnknownFile,
kmp_i18n_str_NotANumber,
...
// Set #3, formats.
...
kmp_i18n_xxx_lastest
}; // enum kmp_i18n_id
typedef enum kmp_i18n_id kmp_i18n_id_t;
=item Default messages file.
Default messages file is a C include file containing default messages to be embedded into
application (and used if external message catalog does not exist or could not be open):
static char const *
__kmp_i18n_default_meta[] =
{
NULL,
"English",
"USA",
"1033",
"2",
"20090806",
NULL
};
static char const *
__kmp_i18n_default_strings[] =
{
"Error",
"(unknown file)",
"not a number",
...
NULL
};
...
=item Message file.
Message file is an input for message compiler, F<gencat> on Linux* OS and OS X*, or F<mc.exe> on
Windows* OS.
Here is the example of Linux* OS message file:
$quote "
1 "Japanese"
2 "Japan"
3 "1041"
4 "2"
5 "Based on English message catalog revision 20090806"
...
Example of Windows* OS message file:
LanguageNames = (Japanese=10041:msg_1041)
FacilityNames = (
prp=1
str=2
fmt=3
...
)
MessageId=1
Facility=prp
Language=Japanese
Japanese
.
...
=item Signature.
Signature is a processed source file: comments stripped, strings deleted, but placeholders kept and
sorted.
-*- FORMATS-*-
Info %1$d %2$s
Warning %1$d %2$s
Fatal %1$d %2$s
SysErr %1$d %2$s
Hint %1$- %2$s
Pragma %1$s %2$s %3$s %4$s
The purpose of signatures -- compare two message source files for compatibility. If signatures of
two message sources are the same, binary message catalogs will be compatible.
=back
=head1 EXAMPLES
Generate include file containing message identifiers:
$ message-converter.pl --enum-file=kmp_i18n_id.inc en_US.txt
Generate include file containing default messages:
$ message-converter.pl --default-file=kmp_i18n_default.inc en_US.txt
Generate input file for message compiler, Linux* OS example:
$ message-converter.pl --message-file=ru_RU.UTF-8.msg ru_RU.txt
Generate input file for message compiler, Windows* OS example:
> message-converter.pl --message-file=ru_RU.UTF-8.mc ru_RU.txt
=cut
# end of file #
| hfinkel/libomp_oss-bgq | tools/message-converter.pl | Perl | bsd-3-clause | 22,255 |
% Copyright (c) 2014-2016, Johan Nordlander, Jonas Duregård, Michał Pałka,
% Patrik Jansson and Josef Svenningsson
% All rights reserved.
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are met:
%
% * Redistributions of source code must retain the above copyright notice,
% this list of conditions and the following disclaimer.
% * Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in the
% documentation and/or other materials provided with the distribution.
% * Neither the name of the Chalmers University of Technology nor the names of its
% contributors may be used to endorse or promote products derived from this
% software without specific prior written permission.
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Static info examples %%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Load the semantics these example facts exercise.
:- consult('semantics.pl').

% Port connections: e:p0:i feeds e:p3:i; the catch-all clause makes every
% other pair of ports explicitly unconnected.
e:p0:i ==> e:p3:i.
_ ==> _ :- false.

% Static configuration facts for runnable instances r1:i and r2:i.
% NOTE(review): the precise meaning of these terms (timing period units,
% event/implementation semantics) is defined in semantics.pl, not visible
% here -- presumably AUTOSAR-style runnable/event configuration; confirm
% against semantics.pl before relying on this reading.
event(r1:i, timing(300)).
event(r1:i, data_received(e:p3)).
implementation(r1:i, return(void)).
minimum_start_interval(r1:i, 10).
can_be_invoked_concurrently(r2:i).
server_call_point(r1:i, sync(o:p2)).

%%%%%%%%%%%%%%%%

% example(-L, -R): build an initial configuration -- two runnable instances
% both trying to enter exclusive area x, plus free exclusive areas x:i and
% z:i -- and take one labelled transition step: [...] --- L ---> R.
example(L,R) :-
%	Cont1 = fn(X,rte_Exit(x, Cont2)),
%	Cont2 = fn(X,rte_Exit(x, fn(Y,return(void)))),
	Proc1 = rinst(r1:i,nil,[],rte_Enter(x,_Cont1)),
	Proc2 = rinst(r2:i,nil,[],rte_Enter(x,_Cont2)),
	Block = [excl(x:i,free) | [Proc1 | [Proc2 | []]]],
	[excl(z:i,free) | Block] --- L ---> R.
| josefs/autosar | sem/test.pl | Perl | bsd-3-clause | 2,360 |
#! /usr/bin/perl

# Driver: computes the PCI ('probability of correct identification') for
# DNA barcode samples in a multiple alignment Fasta file. See usage() for
# the full description of inputs, outputs and the PCI definition.

use strict;
use warnings;

BEGIN
{
    unshift @INC, '../modules/';
}

use File::Basename;
use Getopt::Std;

use Jls::Array;
use Jls::File;
use Jls::Fasta;
use Jls::BarcodeValidator;
use Jls::Set;

# Check command line and retrieve the user-specified arguments

my $opts = {};
my $prog = basename($0);

getopts('hci:o:s:', $opts); # The hidden option 'c' turns off compression.

if ($opts->{'h'}) { usage($prog); exit(0); } # help
elsif (! $opts->{'i'} || ! $opts->{'o'}) {
    usage($prog); exit(0); # help
}

# Inputs and checks the Fasta data files.

my $fas = Jls::BarcodeValidator::lc_unambiguous_nucleotides( # Sequences are {a,c,g,t,-}.
    Jls::BarcodeValidator::string2fastas( # Dies on invalid Fasta format.
    Jls::File::file2string($opts->{'i'})));

my $all_sample_uids = Jls::BarcodeValidator::cfas2barcode_uids($fas); # Fasta file order of barcodes

# In the canonical barcode Fasta array, the defline is >barcode_uid
# and now each object's sequence:
#     is in lower case,
#     lacks white space,
#     and is on a single line.

# Checks the multiple alignment lengths and the barcode formats in the data file.

die '"' . $opts->{'i'} . '" must contain at least two multiple alignment sequences.'
    unless (@$fas > 1);

die '"' . $opts->{'i'} . '" must be a multiple sequence alignment file.'
    . "\n" . 'Including their padding with "-", the sequences must have equal length.'
    unless Jls::BarcodeValidator::seqs_are_same_length($fas);

die '"' . $opts->{'i'} . '" must have deflines with a single valid barcode identifier.'
    unless (Jls::BarcodeValidator::barcode_uids_are_valid($fas));

die '"' . $opts->{'i'} . '" must have deflines with a unique valid barcode identifier.'
    unless (Jls::BarcodeValidator::barcode_uids_are_distinct($fas));

# Alphabetizes and compresses the barcode deflines (concatenates deflines with the same sequence).

my $cfas = $opts->{'c'} ? $fas : Jls::BarcodeValidator::compress($fas);

my $all_new_sample_uids; # array reference to barcode uids for the new samples

if ($opts->{'s'}) {
    # Read the optional list of new-sample UIDs (one per line) and check
    # they are unique and form a subset of the Fasta file's UIDs.
    my @ss = split("\n", Jls::File::file2string($opts->{'s'}));
    $all_new_sample_uids = \@ss;

    my $unique_new_sample_uids = Jls::Array::unique($all_new_sample_uids);

    die 'Some new samples have duplicated barcode UIDs.'
        unless (@$unique_new_sample_uids == @$all_new_sample_uids);

    my $intersection = Jls::Set::intersection($all_new_sample_uids, $all_sample_uids);

    die 'Some new samples are not in the multiple alignment Fasta barcode file.'
        unless (@$intersection == @$all_new_sample_uids);
}

# PCI is only computed for samples whose species occurs more than once.
my $new_sample_uids = Jls::BarcodeValidator::samples_with_nonunique_species($all_sample_uids,
    $all_new_sample_uids);

my $sample_query_uid2pci_h = {};

# For every (compressed) record containing a query sample: find its nearest
# neighbors by p-distance, and score the fraction with the same species.
foreach my $cfa (@$cfas) {
    my $uids = Jls::BarcodeValidator::cfa_def2barcode_uids($cfa->{'def'});
    my $sample_query_uids = Jls::Set::intersection($uids, $new_sample_uids);

    if (! @$sample_query_uids) { next; }

    my $nearest_neighbor_uids = Jls::BarcodeValidator::cfa_defs2barcode_uids(
        Jls::BarcodeValidator::nearest_neighbors_in_key2distance_h(
        Jls::BarcodeValidator::key2distance_h(
        Jls::BarcodeValidator::cfa_def2distance_statistics_h($cfa, $cfas)),
        @$uids == 1 ? $cfa->{'def'} : undef));

    foreach my $sample_query_uid (@$sample_query_uids) {
        my $fraction_of_same_species =
            Jls::BarcodeValidator::fraction_of_same_species ($sample_query_uid,
                $nearest_neighbor_uids);

        $sample_query_uid2pci_h->{$sample_query_uid} = $fraction_of_same_species;
    }
}

# Summary statistics over species, then per-sample PCIs in output order.
my $pci_stats = Jls::BarcodeValidator::pci_stats_h($sample_query_uid2pci_h);

my @keys = keys %$sample_query_uid2pci_h;
my $sample_query_uids = Jls::Array::reorder(\@keys, $new_sample_uids);

# First line: average PCI and its standard deviation; then one
# "<barcode_uid>\t<pci>" line per new sample.
my $o_s = $pci_stats->{'average_pci'} . "\t" . $pci_stats->{'st_dev_pci'};

foreach my $sample_uid (@$sample_query_uids) {
    $o_s .= "\n" . $sample_uid . "\t" . $sample_query_uid2pci_h->{$sample_uid};
}

# Write the report. 3-arg open with a lexical filehandle replaces the
# original bareword 2-arg open; close is checked so buffered write errors
# are not silently lost.
open(my $out_fh, '>', $opts->{'o'})
    or die $prog . ' could not open "' . $opts->{'o'} . '"' . "\n";
print {$out_fh} $o_s;
close($out_fh)
    or die $prog . ' could not write "' . $opts->{'o'} . '"' . "\n";

exit (0);
#########################################################################################

# Usage display

# usage($program_name_): prints the program's help text to STDERR. The text
# is one interpolated heredoc; $version is maintained here by hand.
sub usage
# program_name_
{
    my $program_name_ = shift;
    my $version = '1.0';

    # Unused scratch variable kept as-is; candidate for removal.
    my $p = $program_name_ . ' ' . $version;

    print STDERR <<USAGE
$program_name_ (Version $version)
Usage: perl $program_name_ [options]
This program calculates the PCI, the \'probability of correct identification\'.
The program inputs:
(1) a mandatory input filename for multiple alignment Fasta file for all samples,
(2) a mandatory output filename, and
(3) an optional input filename,
its contents being barcode UIDs for \'new samples\', each on a separate line.
The barcode UIDs for the new samples must be distinct.
The barcode UIDs for new samples must be a subset
of the barcode UIDs for all samples in the multiple alignment Fasta file.
The default set of new samples is the set of all samples in the Fasta file.
The program checks the input for fatal errors in Fasta or barcode UID format, as below.
It then outputs a file with results from the PCI calculations, with lines as follows.
The first line has the PCI average over species, a tab, and a standard deviation (described below).
Each line thereafter has
the barcode UID for each new sample, a tab, and the sample PCI (described below).
The lines are output in the order of the optional set of barcode UIDs for new samples,
or in the order they appear within the multiple alignment Fasta file,
if the optional set is omitted.
The Fasta file must contain at least two records.
Each record is separated by at least one \"\\n\" (newline charater) from the record following it.
Each record has three parts, in order:
(1) a single mandatory definition line, beginning with '>',
(2) an optional set of comment lines beginning with ';', and
(3) a multiply aligned sequence.
All multiply aligned sequences must have the same length, after any white space is removed.
If the study uses several barcode loci,
the multiple alignment should contain concatenations of the corresponding sequences,
with sequences for missing loci filled with '-' (gap characters).
The Fasta definition line (defline) contains barcode sample UIDs corresponding to the sequence.
The defline starts with '>'.
Each '>' in the defline corresponds to a UID,
which is the first set of contiguous non-white space characters following the '>'.
Permitting multiple UIDs in a defline is a convenience to permit computational efficiency.
Internally in fact, the program groups UIDs corresponding to a single sequence,
so that you do not need to (although you may, if you wish).
White space and then arbitary characters (excluding '>') may follow each UID in the defline.
The UID consists of 3 parts, separated by '_' (underscore), e.g., Acacia_exuvialis_JLS2:
(1) Genus of the sample
(2) Species of the sample
(3) The non-empty unique identifier of the sample (which may contain '_' characters)
The computer assesses species by exact case-sensitive match of both Genus and Species.
Thus, misspellings in Genus and Species create error (because they create novel species names).
Two samples may not share the same UID.
The program checks that each sample barcode UID is indeed unique.
As its distance between pairs of sequences, the PCI calculation uses p-distance.
Sequences are converted internally to lower case,
so p-distance between pairs of sequences is the fraction of
multiple alignment columns containing two characters from {a,c,g,t}
where the two characters are not the same.
The PCI is calculated only for each new sample
where the Fasta file contains another sample with the same species as the new sample.
The PCI for each new sample is the fraction of its nearest neighbors with the same species.
The species PCI for each species is the sample PCI, averaged over all new samples from the species.
The PCI average is the species PCI, averaged over all species represented in the new samples.
The standard deviation given for the PCI average is the square root of the expectation of
the variance of PCI average, the variance of the PCI for each new sample being estimated as
p * (1 - p), where p is the estimated PCI of the new sample.
Program options are arguments with \'-\' followed by a letter.
An option requiring further input(s) appears with a colon.
The default for an option, if any, is indicated in parentheses.
-c Turns off defline compression (computationally inefficient, recommended only for debugging).
-i : the input multiple alignment barcode file
-o : the output file containing the PCI calculations
-s (all barcode UIDs in the input file) : the barcode UIDs for the new samples, separated by ':'
USAGE
}
#########################################################################################
1; | Biomatters/barcode-validator | validation/resources/com/biomatters/plugins/barcoding/validator/validation/pci/program/validator_pci.pl | Perl | bsd-3-clause | 9,336 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.