code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
package AsposeWordsCloud::Object::Bookmarks;
require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;
use base "AsposeWordsCloud::Object::BaseObject";
#
# Swagger model class for the "Bookmarks" collection resource: a list of
# Bookmark objects plus the hypermedia link of the collection itself.
# Serialization/deserialization is handled by the BaseObject parent using
# the two maps below.
#
#NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
#
# Attribute name => swagger/OpenAPI type, consumed by the base class when
# (de)serializing API payloads.
my $swagger_types = {
    'BookmarkList' => 'ARRAY[Bookmark]',
    'link' => 'Link'
};
# Perl attribute name => JSON field name on the wire.
my $attribute_map = {
    'BookmarkList' => 'BookmarkList',
    'link' => 'link'
};
# new object
# Constructor: accepts the attributes above as named arguments; attributes
# not supplied are stored as undef.
sub new {
    my ($class, %args) = @_;
    my $self = {
        # ARRAY of Bookmark objects
        'BookmarkList' => $args{'BookmarkList'},
        # Link object describing this collection resource
        'link' => $args{'link'}
    };
    return bless $self, $class;
}
# get swagger type of the attribute
sub get_swagger_types {
    return $swagger_types;
}
# get attribute mapping
sub get_attribute_map {
    return $attribute_map;
}
1;
| farooqsheikhpk/Aspose_Words_Cloud | SDKs/Aspose.Words-Cloud-SDK-for-Perl/lib/AsposeWordsCloud/Object/Bookmarks.pm | Perl | mit | 964 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::netapp::snmp::mode::components::communication;

use base qw(centreon::plugins::mode);

use strict;
use warnings;

# enclContactState integer -> human readable state name.
my %map_com_states = (
    1 => 'initializing',
    2 => 'transitioning',
    3 => 'active',
    4 => 'inactive',
    5 => 'reconfiguring',
    6 => 'nonexistent',
);

my $oid_enclChannelShelfAddr = '.1.3.6.1.4.1.789.1.21.1.2.1.3';
my $oid_enclContactState = '.1.3.6.1.4.1.789.1.21.1.2.1.2';

# Register the contact-state OID so the caller fetches it in one SNMP pass.
sub load {
    my ($self) = @_;

    push @{$self->{request}}, { oid => $oid_enclContactState };
}

# Walk every shelf, report its communication state and raise an alert when
# the configured severity mapping says the state is not 'ok'.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking communications");
    $self->{components}->{communication} = {name => 'communications', total => 0, skip => 0};
    return if ($self->check_filter(section => 'communication'));

    foreach my $i (1 .. $self->{number_shelf}) {
        my $addr = $self->{shelf_addr}->{$oid_enclChannelShelfAddr . '.' . $i};
        my $state = $map_com_states{$self->{results}->{$oid_enclContactState}->{$oid_enclContactState . '.' . $i}};

        next if ($self->check_filter(section => 'communication', instance => $addr));
        $self->{components}->{communication}->{total}++;

        $self->{output}->output_add(
            long_msg => sprintf("Shelve '%s' communication state is '%s'", $addr, $state)
        );
        my $severity = $self->get_severity(section => 'communication', value => $state);
        if (!$self->{output}->is_status(value => $severity, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(
                severity => $severity,
                short_msg => sprintf("Shelve '%s' communication state is '%s'", $addr, $state)
            );
        }
    }
}

1;
| maksimatveev/centreon-plugins | storage/netapp/snmp/mode/components/communication.pm | Perl | apache-2.0 | 2,614 |
#!/usr/bin/env perl
########################################################################
# Authors: Christopher Henry, Scott Devoid, Paul Frybarger
# Contact email: chenry@mcs.anl.gov
# Development location: Mathematics and Computer Science Division, Argonne National Lab
########################################################################
use strict;
use warnings;
use Data::Dumper;
use Bio::KBase::workspace::ScriptHelpers qw(printObjectInfo get_ws_client workspace workspaceURL parseObjectMeta parseWorkspaceMeta printObjectMeta);
use Bio::KBase::fbaModelServices::ScriptHelpers qw(fbaws get_fba_client runFBACommand universalFBAScriptCode );
my $manpage =
"
NAME
pa-importprobanno -- import a probabilistic annotation object from a three column file
DESCRIPTION
The following is an example of the data file:
gene role probability
kb|g.0.peg.1 2-iminoacetate synthase (ThiH) (EC 4.1.99.19) 0.1
kb|g.0.peg.2 2-iminoacetate synthase (ThiH) (EC 4.1.99.19) 0.3
kb|g.0.peg.3 2-iminoacetate synthase (ThiH) (EC 4.1.99.19) 0.5
EXAMPLES
Import probabilistic annotation data for E. coli K12 genome:
> pa-importprobanno 'kb|g.0' MyProbAnnoFile.txt
SEE ALSO
pa-calculate
pa-getprobanno
pa-annotate
AUTHORS
Christopher Henry
";
#Defining globals describing behavior
my $primaryArgs = ["Genome ID","Probanno filename"];
# NOTE(review): the server command and client-script name below still point
# at the phenotype importer; this script looks copy/pasted from
# fba-importpheno. Confirm the intended probanno service endpoint.
my $servercommand = "import_phenotypes";
my $script = "fba-importpheno";
# CLI argument/option name => server-call parameter name.
my $translation = {
    "Genome ID" => "genome",
    "outputid" => "outputid",
    workspace => "workspace",
    genomews => "genome_ws",
};
#Defining usage and options
my $specs = [
    [ 'outputid:s', 'ID for probabilistic annotation in workspace' ],
    [ 'genomews:s', 'Workspace with genome object' ],
    [ 'workspace|w:s', 'Workspace to save probabilistic annotation in', { "default" => fbaws() } ],
];
my ($opt,$params) = universalFBAScriptCode($specs,$script,$primaryArgs,$translation,$manpage);
$params->{phenotypes} = [];
# BUGFIX: the input file is captured under the "Probanno filename" primary
# argument (see $primaryArgs above); the previous code looked up the
# non-existent "Phenotype filename" key, so this check always failed.
if (!-e $opt->{"Probanno filename"}) {
    print "Could not find input probanno file!\n";
    exit();
}
# BUGFIX: the old "open(...) || return" was both a precedence bug (|| bound
# to the filename) and an illegal `return` outside a subroutine; die with $!.
open(my $fh, "<", $opt->{"Probanno filename"})
    or die "Could not open ".$opt->{"Probanno filename"}.": $!\n";
$opt->{"Probanno filename"} = "";
# First line holds the column headings; split on \r first to tolerate
# Mac/DOS line endings.
my $headingline = <$fh>;
my $array = [split("\r",$headingline)];
$headingline = shift(@{$array});
chomp($headingline);
my $data = [];
my $headings = [split(/\s+/,$headingline)];
while (my $line = <$fh>) {
    chomp($line);
    push(@{$data},[split(/\s+/,$line)]);
}
close($fh);
# Build heading-name => column-index lookup.
my $headingColums;
for (my $i=0;$i < @{$headings}; $i++) {
    $headingColums->{$headings->[$i]} = $i;
}
# Translate each data row into the server tuple:
# [ geneKOs, media, media_workspace, additionalCpds, growth ]
foreach my $pheno (@{$data}) {
    if (defined($headingColums->{media}) && defined($headingColums->{mediaws})) {
        my $phenoobj = [
            [],
            $pheno->[$headingColums->{media}],
            $pheno->[$headingColums->{mediaws}],
            [],
            $pheno->[$headingColums->{growth}],
        ];
        if (defined($headingColums->{geneko})) {
            if (defined($pheno->[$headingColums->{geneko}])) {
                # "none" means no gene knockouts; otherwise a ;-separated list.
                if ($pheno->[$headingColums->{geneko}] eq "none") {
                    $phenoobj->[0] = [];
                } else {
                    $phenoobj->[0] = [split(/\;/,$pheno->[$headingColums->{geneko}])];
                }
            }
        }
        if (defined($headingColums->{addtlCpd})) {
            # Missing or "none" means no additional compounds.
            if (!defined($pheno->[$headingColums->{addtlCpd}]) || $pheno->[$headingColums->{addtlCpd}] eq "none") {
                $phenoobj->[3] = [];
            } else {
                $phenoobj->[3] = [split(/\;/,$pheno->[$headingColums->{addtlCpd}])];
            }
        }
        push(@{$params->{phenotypes}},$phenoobj);
    }
}
#Calling the server
my $output = runFBACommand($params,$servercommand,$opt);
#Checking output and report results
if (!defined($output)) {
    print "Probanno import failed!\n";
} else {
    print "Probanno import successful:\n";
    printObjectInfo($output);
}
| kbase/KBaseFBAModeling | scripts/pa-importprobanno.pl | Perl | mit | 3,691 |
/* Part of SWI-Prolog
Author: Jan Wielemaker
E-mail: J.Wielemaker@vu.nl
WWW: http://www.swi-prolog.org
Copyright (c) 2007-2020, University of Amsterdam
VU University Amsterdam
SWI-Prolog Solutions b.v.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(apply,
[ include/3, % :Pred, +List, -Ok
exclude/3, % :Pred. +List, -NotOk
partition/4, % :Pred, +List, -Included, -Excluded
partition/5, % :Pred, +List, ?Less, ?Equal, ?Greater
maplist/2, % :Pred, +List
maplist/3, % :Pred, ?List, ?List
maplist/4, % :Pred, ?List, ?List, ?List
maplist/5, % :Pred, ?List, ?List, ?List, ?List
convlist/3, % :Pred, +List, -List
foldl/4, % :Pred, +List, ?V0, ?V
foldl/5, % :Pred, +List1, +List2, ?V0, ?V
foldl/6, % :Pred, +List1, +List2, +List3, ?V0, ?V
foldl/7, % :Pred, +List1, +List2, +List3, +List4,
% ?V0, ?V
scanl/4, % :Pred, +List, ?V0, ?Vs
scanl/5, % :Pred, +List1, +List2, ?V0, ?Vs
scanl/6, % :Pred, +List1, +List2, +List3, ?V0, ?Vs
scanl/7 % :Pred, +List1, +List2, +List3, +List4,
% ?V0, ?Vs
]).
:- autoload(library(error),[must_be/2]).
/** <module> Apply predicates on a list
This module defines meta-predicates that apply a predicate on all
members of a list.
All predicates support partial application in the Goal argument. This
means that these calls are identical:
```
?- maplist(=, [foo, foo], [X, Y]).
?- maplist(=(foo), [X, Y]).
```
@see apply_macros.pl provides compile-time expansion for part of this
library.
@see http://www.cs.otago.ac.nz/staffpriv/ok/pllib.htm
@see Unit test code in src/Tests/library/test_apply.pl
@tbd Add include/4, include/5, exclude/4, exclude/5
*/
% Meta-predicate declarations: the integer in each position is the number
% of extra arguments that will be appended to that goal argument before it
% is called, so the module system can qualify the goal correctly.
:- meta_predicate
    include(1, +, -),
    exclude(1, +, -),
    partition(1, +, -, -),
    partition(2, +, -, -, -),
    maplist(1, ?),
    maplist(2, ?, ?),
    maplist(3, ?, ?, ?),
    maplist(4, ?, ?, ?, ?),
    convlist(2, +, -),
    foldl(3, +, +, -),
    foldl(4, +, +, +, -),
    foldl(5, +, +, +, +, -),
    foldl(6, +, +, +, +, +, -),
    scanl(3, +, +, -),
    scanl(4, +, +, +, -),
    scanl(5, +, +, +, +, -),
    scanl(6, +, +, +, +, +, -).
%!  include(:Goal, +List1, ?List2) is det.
%
%   True when List2 contains exactly those elements Xi of List1 for
%   which call(Goal, Xi) succeeds, in the original order.
%
%   @see exclude/3, partition/4, convlist/3.
%   @compat Older versions of SWI-Prolog had sublist/3 with the same
%           arguments and semantics.

include(Goal, List, Included) :-
    include_(List, Goal, Included).

include_([], _, []).
include_([Head|Rest], Pred, Result) :-
    (   call(Pred, Head)
    ->  Result = [Head|Result0]
    ;   Result0 = Result
    ),
    include_(Rest, Pred, Result0).
%!  exclude(:Goal, +List1, ?List2) is det.
%
%   True when List2 contains exactly those elements Xi of List1 for
%   which call(Goal, Xi) fails, in the original order.
%
%   @see include/3, partition/4

exclude(Goal, List, Included) :-
    exclude_(List, Goal, Included).

exclude_([], _, []).
exclude_([Head|Rest], Pred, Result) :-
    (   call(Pred, Head)
    ->  Result0 = Result
    ;   Result = [Head|Result0]
    ),
    exclude_(Rest, Pred, Result0).
%!  partition(:Pred, +List, ?Included, ?Excluded) is det.
%
%   Split List in two according to Pred: Included holds every element
%   X for which call(Pred, X) succeeds and Excluded all remaining
%   elements, both in the original order.
%
%   @see include/3, exclude/3, partition/5.

partition(Pred, List, Included, Excluded) :-
    partition_(List, Pred, Included, Excluded).

partition_([], _, [], []).
partition_([Head|Tail], Pred, Included, Excluded) :-
    (   call(Pred, Head)
    ->  Included = [Head|MoreIncluded],
        partition_(Tail, Pred, MoreIncluded, Excluded)
    ;   Excluded = [Head|MoreExcluded],
        partition_(Tail, Pred, Included, MoreExcluded)
    ).
%!  partition(:Pred, +List, ?Less, ?Equal, ?Greater) is semidet.
%
%   Filter List according to Pred in three sets. For each element Xi
%   of List, its destination is determined by call(Pred, Xi, Place),
%   where Place must be unified to one of =|<|=, =|=|= or =|>|=.
%   Pred must be deterministic.
%
%   @see partition/4
partition(Pred, List, Less, Equal, Greater) :-
    partition_(List, Pred, Less, Equal, Greater).

partition_([], _, [], [], []).
partition_([H|T], Pred, L, E, G) :-
    call(Pred, H, Diff),
    % Dispatch on the comparison result; clause order plus the cuts below
    % make the dispatch deterministic.
    partition_(Diff, H, Pred, T, L, E, G).

partition_(<, H, Pred, T, L, E, G) :-
    !,
    L = [H|Rest],
    partition_(T, Pred, Rest, E, G).
partition_(=, H, Pred, T, L, E, G) :-
    !,
    E = [H|Rest],
    partition_(T, Pred, L, Rest, G).
partition_(>, H, Pred, T, L, E, G) :-
    !,
    G = [H|Rest],
    partition_(T, Pred, L, E, Rest).
% Any other order result from Pred is a type error.
partition_(Diff, _, _, _, _, _, _) :-
    must_be(oneof([<,=,>]), Diff).
                 /*******************************
                 *            MAPLIST           *
                 *******************************/

%!  maplist(:Goal, ?List1).
%!  maplist(:Goal, ?List1, ?List2).
%!  maplist(:Goal, ?List1, ?List2, ?List3).
%!  maplist(:Goal, ?List1, ?List2, ?List3, ?List4).
%
%   True if Goal is successfully applied on all matching elements of the
%   list. The maplist family of predicates is defined as:
%
%   ```
%   maplist(G, [X_11, ..., X_1n],
%              [X_21, ..., X_2n],
%              ...,
%              [X_m1, ..., X_mn]) :-
%      call(G, X_11, ..., X_m1),
%      call(G, X_12, ..., X_m2),
%      ...
%      call(G, X_1n, ..., X_mn).
%   ```
%
%   This family of predicates is deterministic iff Goal is deterministic
%   and List1 is a proper list, i.e., a list that ends in `[]`.

% NOTE: library(apply_macros) may expand maplist/2..5 calls inline at
% compile time; these run-time definitions must stay semantically in
% sync with that expansion. Each wrapper moves the list argument(s)
% before Goal so first-argument indexing selects the proper clause.

maplist(Goal, List) :-
    maplist_(List, Goal).

maplist_([], _).
maplist_([Elem|Tail], Goal) :-
    call(Goal, Elem),
    maplist_(Tail, Goal).

maplist(Goal, List1, List2) :-
    maplist_(List1, List2, Goal).

maplist_([], [], _).
maplist_([Elem1|Tail1], [Elem2|Tail2], Goal) :-
    call(Goal, Elem1, Elem2),
    maplist_(Tail1, Tail2, Goal).

maplist(Goal, List1, List2, List3) :-
    maplist_(List1, List2, List3, Goal).

maplist_([], [], [], _).
maplist_([Elem1|Tail1], [Elem2|Tail2], [Elem3|Tail3], Goal) :-
    call(Goal, Elem1, Elem2, Elem3),
    maplist_(Tail1, Tail2, Tail3, Goal).

maplist(Goal, List1, List2, List3, List4) :-
    maplist_(List1, List2, List3, List4, Goal).

maplist_([], [], [], [], _).
maplist_([Elem1|Tail1], [Elem2|Tail2], [Elem3|Tail3], [Elem4|Tail4], Goal) :-
    call(Goal, Elem1, Elem2, Elem3, Elem4),
    maplist_(Tail1, Tail2, Tail3, Tail4, Goal).
%!  convlist(:Goal, +ListIn, -ListOut) is det.
%
%   Similar to maplist/3, but elements for which call(Goal, ElemIn, _)
%   fails are omitted from ListOut. For example (using library(yall)):
%
%   ```
%   ?- convlist([X,Y]>>(integer(X), Y is X^2),
%               [3, 5, foo, 2], L).
%   L = [9, 25, 4].
%   ```
%
%   @compat Also appears in YAP =|library(maplist)|= and SICStus
%           =|library(lists)|=.

convlist(Goal, ListIn, ListOut) :-
    convlist_(ListIn, ListOut, Goal).

convlist_([], [], _).
convlist_([In|Ins], Converted, Goal) :-
    (   call(Goal, In, Out)
    ->  Converted = [Out|Rest],
        convlist_(Ins, Rest, Goal)
    ;   convlist_(Ins, Converted, Goal)
    ).
                 /*******************************
                 *             FOLDL            *
                 *******************************/

%!  foldl(:Goal, +List, +V0, -V).
%!  foldl(:Goal, +List1, +List2, +V0, -V).
%!  foldl(:Goal, +List1, +List2, +List3, +V0, -V).
%!  foldl(:Goal, +List1, +List2, +List3, +List4, +V0, -V).
%
%   Fold an ensemble of _m_ (0 <= _m_ <= 4) lists of length _n_
%   head-to-tail ("fold-left"), using columns of _m_ list elements as
%   arguments for Goal. The `foldl` family of predicates is defined as
%   follows, with `V0` an initial value and `V` the final value of the
%   folding operation:
%
%   ```
%   foldl(G, [X_11, ..., X_1n],
%            [X_21, ..., X_2n],
%            ...,
%            [X_m1, ..., X_mn], V0, V) :-
%      call(G, X_11, ..., X_m1, V0, V1),
%      call(G, X_12, ..., X_m2, V1, V2),
%      ...
%      call(G, X_1n, ..., X_mn, V<n-1>, V).
%   ```
%
%   No implementation for a corresponding `foldr` is given. A `foldr`
%   implementation would consist in first calling reverse/2 on each of
%   the _m_ input lists, then applying the appropriate `foldl`. This is
%   actually more efficient than using a properly programmed-out
%   recursive algorithm that cannot be tail-call optimized.

% The accumulator is threaded left-to-right: V0 in, V out; the helpers
% put the lists first for clause indexing.

foldl(Goal, List, V0, V) :-
    foldl_(List, Goal, V0, V).

foldl_([], _, V, V).
foldl_([H|T], Goal, V0, V) :-
    call(Goal, H, V0, V1),
    foldl_(T, Goal, V1, V).

foldl(Goal, List1, List2, V0, V) :-
    foldl_(List1, List2, Goal, V0, V).

foldl_([], [], _, V, V).
foldl_([H1|T1], [H2|T2], Goal, V0, V) :-
    call(Goal, H1, H2, V0, V1),
    foldl_(T1, T2, Goal, V1, V).

foldl(Goal, List1, List2, List3, V0, V) :-
    foldl_(List1, List2, List3, Goal, V0, V).

foldl_([], [], [], _, V, V).
foldl_([H1|T1], [H2|T2], [H3|T3], Goal, V0, V) :-
    call(Goal, H1, H2, H3, V0, V1),
    foldl_(T1, T2, T3, Goal, V1, V).

foldl(Goal, List1, List2, List3, List4, V0, V) :-
    foldl_(List1, List2, List3, List4, Goal, V0, V).

foldl_([], [], [], [], _, V, V).
foldl_([H1|T1], [H2|T2], [H3|T3], [H4|T4], Goal, V0, V) :-
    call(Goal, H1, H2, H3, H4, V0, V1),
    foldl_(T1, T2, T3, T4, Goal, V1, V).
                 /*******************************
                 *             SCANL            *
                 *******************************/

%!  scanl(:Goal, +List, +V0, -Values).
%!  scanl(:Goal, +List1, +List2, +V0, -Values).
%!  scanl(:Goal, +List1, +List2, +List3, +V0, -Values).
%!  scanl(:Goal, +List1, +List2, +List3, +List4, +V0, -Values).
%
%   Scan an ensemble of _m_ (0 <= _m_ <= 4) lists of length _n_
%   head-to-tail ("scan-left"), using columns of _m_ list elements as
%   arguments for Goal. The `scanl` family of predicates is defined as
%   follows, with `V0` an initial value and `V` the final value of the
%   scanning operation:
%
%   ```
%   scanl(G, [X_11, ..., X_1n],
%            [X_21, ..., X_2n],
%            ...,
%            [X_m1, ..., X_mn], V0, [V0, V1, ..., Vn] ) :-
%      call(G, X_11, ..., X_m1, V0, V1),
%      call(G, X_12, ..., X_m2, V1, V2),
%      ...
%      call(G, X_1n, ..., X_mn, V<n-1>, Vn).
%   ```
%
%   `scanl` behaves like a `foldl` that collects the sequence of
%   values taken on by the `Vx` accumulator into a list.

% Note the wrappers prepend V0 to the result list; the helpers collect
% only the intermediate accumulator values.

scanl(Goal, List, V0, [V0|Values]) :-
    scanl_(List, Goal, V0, Values).

scanl_([], _, _, []).
scanl_([H|T], Goal, V, [VH|VT]) :-
    call(Goal, H, V, VH),
    scanl_(T, Goal, VH, VT).

scanl(Goal, List1, List2, V0, [V0|Values]) :-
    scanl_(List1, List2, Goal, V0, Values).

scanl_([], [], _, _, []).
scanl_([H1|T1], [H2|T2], Goal, V, [VH|VT]) :-
    call(Goal, H1, H2, V, VH),
    scanl_(T1, T2, Goal, VH, VT).

scanl(Goal, List1, List2, List3, V0, [V0|Values]) :-
    scanl_(List1, List2, List3, Goal, V0, Values).

scanl_([], [], [], _, _, []).
scanl_([H1|T1], [H2|T2], [H3|T3], Goal, V, [VH|VT]) :-
    call(Goal, H1, H2, H3, V, VH),
    scanl_(T1, T2, T3, Goal, VH, VT).

scanl(Goal, List1, List2, List3, List4, V0, [V0|Values]) :-
    scanl_(List1, List2, List3, List4, Goal, V0, Values).

scanl_([], [], [], [], _, _, []).
scanl_([H1|T1], [H2|T2], [H3|T3], [H4|T4], Goal, V, [VH|VT]) :-
    call(Goal, H1, H2, H3, H4, V, VH),
    scanl_(T1, T2, T3, T4, Goal, VH, VT).
                 /*******************************
                 *            SANDBOX           *
                 *******************************/

:- multifile
    sandbox:safe_meta_predicate/1.

% Map an exported Name/Arity to a clause that declares it safe for
% library(sandbox) (used e.g. by SWISH).
safe_api(Name/Arity, sandbox:safe_meta_predicate(apply:Name/Arity)).

% Load-time trick: the bare clause `safe_api.` below is rewritten by this
% term_expansion/2 rule into one sandbox:safe_meta_predicate/1 fact for
% every predicate exported by this module, so the safe list can never get
% out of sync with the export list.
term_expansion(safe_api, Clauses) :-
    module_property(apply, exports(API)),
    maplist(safe_api, API, Clauses).

safe_api.
| josd/eye | eye-wasm/swipl-wasm/home/library/apply.pl | Perl | mit | 13,796 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
10A0 10FF
END
| efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/perl5/5.16.2/unicore/lib/Blk/Georgian.pl | Perl | apache-2.0 | 433 |
=head1 NAME
Mdoc - perl module to parse Mdoc macros
=head1 SYNOPSIS
use Mdoc qw(ns pp soff son stoggle mapwords);
See mdoc2man and mdoc2texi for code examples.
=head1 FUNCTIONS
=over 4
=item def_macro( NAME, CODE, [ raw => 1, greedy => 1, concat_until => '.Xx' ] )
Define new macro. The CODE reference will be called by call_macro(). You can
have two distinct definitions for and inline macro and for a standalone macro
(i. e. 'Pa' and '.Pa').
The CODE reference is passed a list of arguments and is expected to return list
of strings and control characters (see C<CONSTANTS>).
By default the surrounding "" from arguments to macros are removed, use C<raw>
to disable this.
Normally the CODE reference is passed all arguments up to the next nested macro. Set
C<greedy> to pass everything up to the end of the line.
If concat_until is present, input lines are concatenated until the .Xx macro is
found. For example the following macro definitions

    def_macro('.Oo', gen_encloser(qw([ ])), concat_until => '.Oc');
    def_macro('.Cm', sub { mapwords { "($_)" } @_ });
and the following input
.Oo
.Cm foo |
.Cm bar |
.Oc
results in [(foo) | (bar)]
=item get_macro( NAME )
Returns a hash reference like:
{ run => CODE, raw => [1|0], greedy => [1|0] }
Where C<CODE> is the CODE reference used to define macro called C<NAME>
=item parse_line( INPUT, OUTPUT_CODE, PREPROCESS_CODE )
Parse a line from the C<INPUT> filehandle. If a macro was detected it returns a
list (MACRO_NAME, @MACRO_ARGS), otherwise it calls the C<OUTPUT_CODE>, giving
caller a chance to modify line before printing it. If C<PREPROCESS_CODE> is
defined it calls it prior to passing argument to a macro, giving caller a
chance to alter them. If EOF was reached, undef is returned.
=item call_macro( MACRO, ARGS, ... )
Call macro C<MACRO> with C<ARGS>. The CODE reference for macro C<MACRO> is
called and for all the nested macros. Every called macro returns a list which
is appended to return value and returned when all nested macros are processed.
Use to_string() to produce a printable string from the list.
=item to_string ( LIST )
Processes C<LIST> returned from call_macro() and returns formatted string.
=item mapwords BLOCK ARRAY
This is like perl's map only it calls BLOCK only on elements which are not
punctuation or control characters.
=item space ( ['on'|'off] )
Turn spacing on or off. If called without argument it returns the current state.
=item gen_encloser ( START, END )
Helper function for generating macros that enclose their arguments.
gen_encloser(qw({ }));
returns
sub { '{', ns, @_, ns, pp('}')}
=item set_Bl_callback( CODE , DEFS )
This module implements the Bl/El macros for you. Using set_Bl_callback you can
provide a macro definition that should be executed on a .Bl call.
=item set_El_callback( CODE , DEFS )
This module implements the Bl/El macros for you. Using set_El_callback you can
provide a macro definition that should be executed on a .El call.
=item set_Re_callback( CODE )
The C<CODE> is called after a Rs/Re block is done. With a hash reference as a
parameter, describing the reference.
=back
=head1 CONSTANTS
=over 4
=item ns
Indicate 'no space' between two members of the list.
=item pp ( STRING )
The string is 'punctuation point'. It means that every punctuation
preceding that element is put behind it.
=item soff
Turn spacing off.
=item son
Turn spacing on.
=item stoggle
Toogle spacing.
=item hs
Print space no matter spacing mode.
=back
=head1 TODO
* The concat_until only works with standalone macros. This means that
.Po blah Pc
will hang until .Pc in encountered.
* Provide default macros for Bd/Ed
* The reference implementation is incomplete
=cut
package Mdoc;
use strict;
use warnings;
use List::Util qw(reduce);
use Text::ParseWords qw(quotewords);
use Carp;
use Exporter qw(import);
our @EXPORT_OK = qw(ns pp soff son stoggle hs mapwords gen_encloser nl);
# Control tokens understood by to_string(). Each constant is a distinct
# array reference whose first element is the tag _is_control() checks for.
use constant {
    ns      => ['nospace'],
    soff    => ['spaceoff'],
    son     => ['spaceon'],
    stoggle => ['spacetoggle'],
    hs      => ['hardspace'],
};
# Mark STRING as a 'punctuation point': to_string() will move pending
# punctuation behind this element.
sub pp {
    my ($char) = @_;
    return [ 'pp', $char ];
}
# Build a macro body that wraps its arguments between START and END with
# no space inside the delimiters, e.g. gen_encloser(qw({ })).
sub gen_encloser {
    my ($open, $close) = @_;
    return sub { ($open, ns, @_, ns, pp($close)) };
}
# Apply BLOCK (receiving each element via $_) to every element of LIST,
# passing punctuation characters and control tokens (array refs) through
# unchanged. The (&@) prototype allows the mapwords { ... } @list syntax.
sub mapwords(&@) {
    my ($f, @l) = @_;
    my @res;
    for my $el (@l) {
        local $_ = $el;
        # Punctuation and control tokens are kept verbatim; everything else
        # is transformed by the caller's block.
        push @res, $el =~ /^(?:[,\.\{\}\(\):;\[\]\|])$/ || ref $el eq 'ARRAY' ?
            $el : $f->();
    }
    return @res;
}
# Registry of all macro definitions, keyed by macro name (def_macro/get_macro).
my %macros;
###############################################################################
# Default macro definitions start
###############################################################################
# .Xo/.Xc: extend a macro request over several input lines.
def_macro('Xo', sub { @_ }, concat_until => '.Xc');
# Ns: suppress the space before the following text.
def_macro('.Ns', sub {ns, @_});
def_macro('Ns', sub {ns, @_});
{
    # Accumulates the fields of the current Rs/Re bibliographic block.
    my %reference;
    def_macro('.Rs', sub { () } );
    def_macro('.%A', sub {
        # Multiple authors are joined with "and".
        if ($reference{authors}) {
            $reference{authors} .= " and @_"
        }
        else {
            $reference{authors} = "@_";
        }
        return ();
    });
    def_macro('.%T', sub { $reference{title} = "@_"; () } );
    def_macro('.%O', sub { $reference{optional} = "@_"; () } );
    # Install CODE to run on .Re; it receives the collected reference
    # fields as a hash ref, after which the accumulator is reset.
    sub set_Re_callback {
        my ($sub) = @_;
        croak 'Not a CODE reference' if not ref $sub eq 'CODE';
        def_macro('.Re', sub {
            my @ret = $sub->(\%reference);
            %reference = (); @ret
        });
        return;
    }
}
# List handling: real .Bl/.It/.El bodies are installed via
# set_Bl_callback(); these defaults just diagnose misuse.
def_macro('.Bl', sub { die '.Bl - no list callback set' });
def_macro('.It', sub { die ".It called outside of list context - maybe near line $." });
def_macro('.El', sub { die '.El requires .Bl first' });
{
    my $elcb = sub { () };
    # Set the macro body executed on each .El (end of list).
    sub set_El_callback {
        my ($sub) = @_;
        croak 'Not a CODE reference' if ref $sub ne 'CODE';
        $elcb = $sub;
        return;
    }
    # Set the macro body executed on each .Bl (begin list). The wrapper
    # snapshots the current .Bl/.It/.El definitions and restores them when
    # the matching .El is seen, so lists can nest.
    sub set_Bl_callback {
        my ($blcb, %defs) = @_;
        croak 'Not a CODE reference' if ref $blcb ne 'CODE';
        def_macro('.Bl', sub {
            my $orig_it = get_macro('.It');
            my $orig_el = get_macro('.El');
            my $orig_bl = get_macro('.Bl');
            my $orig_elcb = $elcb;
            # Restore previous .It and .El on each .El
            def_macro('.El', sub {
                def_macro('.El', delete $orig_el->{run}, %$orig_el);
                def_macro('.It', delete $orig_it->{run}, %$orig_it);
                def_macro('.Bl', delete $orig_bl->{run}, %$orig_bl);
                my @ret = $elcb->(@_);
                $elcb = $orig_elcb;
                @ret
            });
            $blcb->(@_)
        }, %defs);
        return;
    }
}
# .Sm (standalone): switch spacing mode immediately while parsing.
def_macro('.Sm', sub {
    my ($arg) = @_;
    if (defined $arg) {
        space($arg);
    } else {
        space() eq 'off' ?
            space('on') :
            space('off');
    }
    ()
} );
# Sm (inline): emit control tokens so the mode change takes effect while
# to_string() renders the surrounding line.
# NOTE(review): $off below is never used — presumably leftover; verify.
def_macro('Sm', do { my $off; sub {
    my ($arg) = @_;
    if (defined $arg && $arg =~ /^(on|off)$/) {
        shift;
        if ($arg eq 'off') { soff, @_; }
        elsif ($arg eq 'on') { son, @_; }
    }
    else {
        stoggle, @_;
    }
}} );
###############################################################################
# Default macro definitions end
###############################################################################
# Register macro NAME with handler CODE. Options:
#   raw          - do not strip surrounding quotes from arguments
#   greedy       - pass everything up to end of line to the handler
#   concat_until - slurp further input lines until the named macro is seen
#                  (implies greedy; the terminating macro is auto-registered
#                  as a pass-through)
sub def_macro {
    croak 'Odd number of elements for hash argument <'.(scalar @_).'>' if @_%2;
    my ($macro, $sub, %def) = @_;
    croak 'Not a CODE reference' if ref $sub ne 'CODE';
    $macros{ $macro } = {
        run => $sub,
        greedy => delete $def{greedy} || 0,
        raw => delete $def{raw} || 0,
        concat_until => delete $def{concat_until},
    };
    if ($macros{ $macro }{concat_until}) {
        # The closing macro simply passes its arguments through.
        $macros{ $macros{ $macro }{concat_until} } = { run => sub { @_ } };
        $macros{ $macro }{greedy} = 1;
    }
    return;
}
# Return a shallow copy of the definition record for MACRO
# ({ run => CODE, raw => 0|1, greedy => 0|1, concat_until => ... });
# croaks when the macro has not been defined.
sub get_macro {
    my ($macro) = @_;
    exists $macros{ $macro }
        or croak "Macro <$macro> not defined";
    my %copy = %{ $macros{ $macro } };
    return \%copy;
}
# Parse a list of Mdoc-style option tokens into a flag => value hash.
# A token starting with '-' (optionally escaped as '\-') introduces a flag
# whose value defaults to 1; the next non-flag token, if any, becomes that
# flag's (unquoted) value.
# NOTE(review): the flag key is stored before unquoting while $last is the
# unquoted name, so a quoted flag's default and its value can land under
# different keys — confirm whether quoted flags ever occur in practice.
sub parse_opts {
    my %args;
    my $last;
    for (@_) {
        if ($_ =~ /^\\?-/) {
            s/^\\?-//;
            $args{$_} = 1;
            $last = _unquote($_);
        }
        else {
            $args{$last} = _unquote($_) if $last;
            undef $last;
        }
    }
    return %args;
}
# True when EL is a control token (an array reference), optionally also
# requiring that its tag (first element) equals EXPECTED.
sub _is_control {
    my ($el, $expected) = @_;
    return ref $el eq 'ARRAY' unless defined $expected;
    return ref $el eq 'ARRAY' && $el->[0] eq $expected;
}
{
    # Current word separator: ' ' while spacing is on, '' while off.
    my $sep = ' ';
    # Render the list produced by call_macro() into a string, resolving
    # punctuation reordering and the spacing control tokens.
    sub to_string {
        if (@_ > 0) {
            # Handle punctuation
            my ($in_brace, @punct) = '';
            my @new = map {
                if (/^([\[\(])$/) {
                    # Opening bracket: remember the matching closer and
                    # suppress the space that would follow it.
                    ($in_brace = $1) =~ tr/([/)]/;
                    $_, ns
                }
                elsif (/^([\)\]])$/ && $in_brace eq $1) {
                    # Matching closing bracket: no space before it.
                    $in_brace = '';
                    ns, $_
                }
                elsif ($_ =~ /^[,\.;:\?\!\)\]]$/) {
                    # Defer loose punctuation; it is re-emitted later.
                    push @punct, ns, $_;
                    ();
                }
                elsif (_is_control($_, 'pp')) {
                    # Punctuation point: emit its payload character.
                    $_->[1]
                }
                elsif (_is_control($_)) {
                    $_
                }
                else {
                    # A regular word: flush any deferred punctuation, then
                    # the word itself.
                    splice (@punct), $_;
                }
            } @_;
            push @new, @punct;
            # Produce string out of an array dealing with the special control
            # tokens. space('off') must be delayed by one word (see below).
            my ($no_space, $space_off) = 1;
            my $res = '';
            while (defined(my $el = shift @new)) {
                if (_is_control($el, 'hardspace')) { $no_space = 1; $res .= ' ' }
                elsif (_is_control($el, 'nospace')) { $no_space = 1; }
                elsif (_is_control($el, 'spaceoff')) { $space_off = 1; }
                elsif (_is_control($el, 'spaceon')) { space('on'); }
                elsif (_is_control($el, 'spacetoggle')) { space() eq 'on' ?
                    $space_off = 1 :
                    space('on') }
                else {
                    if ($no_space) {
                        $no_space = 0;
                        $res .= "$el"
                    }
                    else {
                        $res .= "$sep$el"
                    }
                    # Turning spacing off takes effect after the word that
                    # carried the token, hence the one-word delay.
                    if ($space_off) { space('off'); $space_off = 0; }
                }
            }
            $res
        }
        else {
            '';
        }
    }
    # Get or set the spacing mode. With 'on'/'off' sets the mode and returns
    # nothing; with no (or any other) argument returns the current mode.
    sub space {
        my ($arg) = @_;
        if (defined $arg && $arg =~ /^(on|off)$/) {
            $sep = ' ' if $arg eq 'on';
            $sep = '' if $arg eq 'off';
            return;
        }
        else {
            return $sep eq '' ? 'off' : 'on';
        }
    }
}
# Strip one pair of surrounding double quotes from each argument (copies;
# the caller's values are untouched). Returns the list, or the first
# element in scalar context.
sub _unquote {
    my @copies = @_;
    for my $str (@copies) {
        $str =~ s/^"([^"]+)"$/$1/g;
    }
    return wantarray ? @copies : $copies[0];
}
# Execute MACRO with ARGS. Any argument that looks like a macro name
# (capitalised word) and is defined gets dispatched recursively with the
# remaining arguments; its output is appended after this macro's own.
# Returns a list of strings/control tokens suitable for to_string().
sub call_macro {
    my ($macro, @args) = @_;
    my @ret;
    my @newargs;
    my $i = 0;
    # Unless the macro asked for raw arguments, strip surrounding quotes.
    @args = _unquote(@args) if (!$macros{ $macro }{raw});
    # Call any callable macros in the argument list
    for (@args) {
        if ($_ =~ /^[A-Z][a-z]+$/ && exists $macros{ $_ }) {
            push @ret, call_macro($_, @args[$i+1 .. $#args]);
            last;
        } else {
            # Greedy macros receive everything (via @ret); otherwise only
            # the arguments before the first nested macro (via @newargs).
            if ($macros{ $macro }{greedy}) {
                push @ret, $_;
            }
            else {
                push @newargs, $_;
            }
        }
        $i++;
    }
    if ($macros{ $macro }{concat_until}) {
        # Keep consuming whole input lines until the terminating macro is
        # seen, dispatching any macros found along the way.
        my ($n_macro, @n_args) = ('');
        while (1) {
            die "EOF was reached and no $macros{ $macro }{concat_until} found"
                if not defined $n_macro;
            ($n_macro, @n_args) = parse_line(undef, sub { push @ret, shift });
            if ($n_macro eq $macros{ $macro }{concat_until}) {
                push @ret, call_macro($n_macro, @n_args);
                last;
            }
            else {
                $n_macro =~ s/^\.//;
                push @ret, call_macro($n_macro, @n_args) if exists $macros{ $n_macro };
            }
        }
    }
    if ($macros{ $macro }{greedy}) {
        #print "MACROG $macro (", (join ', ', @ret), ")\n";
        return $macros{ $macro }{run}->(@ret);
    }
    else {
        #print "MACRO $macro (", (join ', ', @newargs), ")".(join ', ', @ret)."\n";
        return $macros{ $macro }{run}->(@newargs), @ret;
    }
}
{
    # Persistent parser state (input handle, line-output callback, optional
    # argument preprocessor), retained between calls; passing undef for a
    # positional argument reuses the previous value.
    my ($in_fh, $out_sub, $preprocess_sub);
    # Read lines from the input until a defined macro is found; returns
    # (MACRO_NAME, @ARGS), or undef at EOF. Non-macro lines (and lines
    # naming unknown macros) are handed to the output callback instead.
    sub parse_line {
        $in_fh = $_[0] if defined $_[0] || !defined $in_fh;
        $out_sub = $_[1] if defined $_[1] || !defined $out_sub;
        $preprocess_sub = $_[2] if defined $_[2] || !defined $preprocess_sub;
        croak 'out_sub not a CODE reference'
            if not ref $out_sub eq 'CODE';
        croak 'preprocess_sub not a CODE reference'
            if defined $preprocess_sub && not ref $preprocess_sub eq 'CODE';
        while (my $line = <$in_fh>) {
            chomp $line;
            # A macro line starts with .Xy..., a reference field .%X, or is
            # a roff comment (.\").
            # NOTE(review): [A-z] also matches the characters [ \ ] ^ _ `
            # between 'Z' and 'a' — likely [A-Za-z] was intended; verify.
            if ($line =~ /^\.[A-z][a-z0-9]+/ || $line =~ /^\.%[A-Z]/ ||
                $line =~ /^\.\\"/)
            {
                # Collapse runs of spaces so quotewords splits cleanly.
                $line =~ s/ +/ /g;
                my ($macro, @args) = quotewords(' ', 1, $line);
                @args = grep { defined $_ } @args;
                $preprocess_sub->(@args) if defined $preprocess_sub;
                if ($macro && exists $macros{ $macro }) {
                    return ($macro, @args);
                } else {
                    $out_sub->($line);
                }
            }
            else {
                $out_sub->($line);
            }
        }
        return;
    }
}
1;
__END__
| execunix/vinos | external/bsd/ntp/dist/sntp/ag-tpl/0-old/Mdoc.pm | Perl | apache-2.0 | 13,941 |
use utf8;
package Schema::Result::ProfileParameter;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
Schema::Result::ProfileParameter
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 TABLE: C<profile_parameter>
=cut
__PACKAGE__->table("profile_parameter");
=head1 ACCESSORS
=head2 profile
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 0
=head2 parameter
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 0
=head2 last_updated
data_type: 'timestamp with time zone'
default_value: current_timestamp
is_nullable: 1
original: {default_value => \"now()"}
=cut
__PACKAGE__->add_columns(
"profile",
{ data_type => "bigint", is_foreign_key => 1, is_nullable => 0 },
"parameter",
{ data_type => "bigint", is_foreign_key => 1, is_nullable => 0 },
"last_updated",
{
data_type => "timestamp with time zone",
default_value => \"current_timestamp",
is_nullable => 1,
original => { default_value => \"now()" },
},
);
=head1 PRIMARY KEY
=over 4
=item * L</profile>
=item * L</parameter>
=back
=cut
__PACKAGE__->set_primary_key("profile", "parameter");
=head1 RELATIONS
=head2 parameter
Type: belongs_to
Related object: L<Schema::Result::Parameter>
=cut
__PACKAGE__->belongs_to(
"parameter",
"Schema::Result::Parameter",
{ id => "parameter" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
=head2 profile
Type: belongs_to
Related object: L<Schema::Result::Profile>
=cut
__PACKAGE__->belongs_to(
"profile",
"Schema::Result::Profile",
{ id => "profile" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
# Created by DBIx::Class::Schema::Loader v0.07045 @ 2016-11-15 09:35:47
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:bg3IAJfz8GX6uS5UXMPblA
# You can replace this text with custom code or comments, and it will be preserved on regeneration
#
# NOTE: everything above the md5sum marker line is generated by
# DBIx::Class::Schema::Loader; put custom code or comments only below the
# marker, otherwise the checksum is invalidated and regeneration breaks.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
1;
| rscrimojr/incubator-trafficcontrol | traffic_ops/app/lib/Schema/Result/ProfileParameter.pm | Perl | apache-2.0 | 2,531 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/Q713JNUf8G/europe. Olson data version 2016a
#
# Do not edit this file directly.
#
# Olson-database time zone class for Europe/Monaco, generated by the
# DateTime::TimeZone code generator (see the header above) — do not edit
# by hand; changes are lost on the next regeneration.
package DateTime::TimeZone::Europe::Monaco;
$DateTime::TimeZone::Europe::Monaco::VERSION = '1.95';
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
# Inherit singleton construction from Class::Singleton and the zone API
# from DateTime::TimeZone. @ISA is assigned fully qualified (rather than
# via 'use parent') — the generator's convention for these data modules.
@DateTime::TimeZone::Europe::Monaco::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY, # utc_start
59649003028, # utc_end 1891-03-14 23:30:28 (Sat)
DateTime::TimeZone::NEG_INFINITY, # local_start
59649004800, # local_end 1891-03-15 00:00:00 (Sun)
1772,
0,
'LMT',
],
[
59649003028, # utc_start 1891-03-14 23:30:28 (Sat)
60279724239, # utc_end 1911-03-10 23:50:39 (Fri)
59649003589, # local_start 1891-03-14 23:39:49 (Sat)
60279724800, # local_end 1911-03-11 00:00:00 (Sat)
561,
0,
'PMT',
],
[
60279724239, # utc_start 1911-03-10 23:50:39 (Fri)
60445868400, # utc_end 1916-06-14 23:00:00 (Wed)
60279724239, # local_start 1911-03-10 23:50:39 (Fri)
60445868400, # local_end 1916-06-14 23:00:00 (Wed)
0,
0,
'WET',
],
[
60445868400, # utc_start 1916-06-14 23:00:00 (Wed)
60455286000, # utc_end 1916-10-01 23:00:00 (Sun)
60445872000, # local_start 1916-06-15 00:00:00 (Thu)
60455289600, # local_end 1916-10-02 00:00:00 (Mon)
3600,
1,
'WEST',
],
[
60455286000, # utc_start 1916-10-01 23:00:00 (Sun)
60470319600, # utc_end 1917-03-24 23:00:00 (Sat)
60455286000, # local_start 1916-10-01 23:00:00 (Sun)
60470319600, # local_end 1917-03-24 23:00:00 (Sat)
0,
0,
'WET',
],
[
60470319600, # utc_start 1917-03-24 23:00:00 (Sat)
60487340400, # utc_end 1917-10-07 23:00:00 (Sun)
60470323200, # local_start 1917-03-25 00:00:00 (Sun)
60487344000, # local_end 1917-10-08 00:00:00 (Mon)
3600,
1,
'WEST',
],
[
60487340400, # utc_start 1917-10-07 23:00:00 (Sun)
60500559600, # utc_end 1918-03-09 23:00:00 (Sat)
60487340400, # local_start 1917-10-07 23:00:00 (Sun)
60500559600, # local_end 1918-03-09 23:00:00 (Sat)
0,
0,
'WET',
],
[
60500559600, # utc_start 1918-03-09 23:00:00 (Sat)
60518790000, # utc_end 1918-10-06 23:00:00 (Sun)
60500563200, # local_start 1918-03-10 00:00:00 (Sun)
60518793600, # local_end 1918-10-07 00:00:00 (Mon)
3600,
1,
'WEST',
],
[
60518790000, # utc_start 1918-10-06 23:00:00 (Sun)
60531404400, # utc_end 1919-03-01 23:00:00 (Sat)
60518790000, # local_start 1918-10-06 23:00:00 (Sun)
60531404400, # local_end 1919-03-01 23:00:00 (Sat)
0,
0,
'WET',
],
[
60531404400, # utc_start 1919-03-01 23:00:00 (Sat)
60550239600, # utc_end 1919-10-05 23:00:00 (Sun)
60531408000, # local_start 1919-03-02 00:00:00 (Sun)
60550243200, # local_end 1919-10-06 00:00:00 (Mon)
3600,
1,
'WEST',
],
[
60550239600, # utc_start 1919-10-05 23:00:00 (Sun)
60561644400, # utc_end 1920-02-14 23:00:00 (Sat)
60550239600, # local_start 1919-10-05 23:00:00 (Sun)
60561644400, # local_end 1920-02-14 23:00:00 (Sat)
0,
0,
'WET',
],
[
60561644400, # utc_start 1920-02-14 23:00:00 (Sat)
60583417200, # utc_end 1920-10-23 23:00:00 (Sat)
60561648000, # local_start 1920-02-15 00:00:00 (Sun)
60583420800, # local_end 1920-10-24 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60583417200, # utc_start 1920-10-23 23:00:00 (Sat)
60595686000, # utc_end 1921-03-14 23:00:00 (Mon)
60583417200, # local_start 1920-10-23 23:00:00 (Sat)
60595686000, # local_end 1921-03-14 23:00:00 (Mon)
0,
0,
'WET',
],
[
60595686000, # utc_start 1921-03-14 23:00:00 (Mon)
60615126000, # utc_end 1921-10-25 23:00:00 (Tue)
60595689600, # local_start 1921-03-15 00:00:00 (Tue)
60615129600, # local_end 1921-10-26 00:00:00 (Wed)
3600,
1,
'WEST',
],
[
60615126000, # utc_start 1921-10-25 23:00:00 (Tue)
60628172400, # utc_end 1922-03-25 23:00:00 (Sat)
60615126000, # local_start 1921-10-25 23:00:00 (Tue)
60628172400, # local_end 1922-03-25 23:00:00 (Sat)
0,
0,
'WET',
],
[
60628172400, # utc_start 1922-03-25 23:00:00 (Sat)
60645106800, # utc_end 1922-10-07 23:00:00 (Sat)
60628176000, # local_start 1922-03-26 00:00:00 (Sun)
60645110400, # local_end 1922-10-08 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60645106800, # utc_start 1922-10-07 23:00:00 (Sat)
60665065200, # utc_end 1923-05-26 23:00:00 (Sat)
60645106800, # local_start 1922-10-07 23:00:00 (Sat)
60665065200, # local_end 1923-05-26 23:00:00 (Sat)
0,
0,
'WET',
],
[
60665065200, # utc_start 1923-05-26 23:00:00 (Sat)
60676556400, # utc_end 1923-10-06 23:00:00 (Sat)
60665068800, # local_start 1923-05-27 00:00:00 (Sun)
60676560000, # local_end 1923-10-07 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60676556400, # utc_start 1923-10-06 23:00:00 (Sat)
60691676400, # utc_end 1924-03-29 23:00:00 (Sat)
60676556400, # local_start 1923-10-06 23:00:00 (Sat)
60691676400, # local_end 1924-03-29 23:00:00 (Sat)
0,
0,
'WET',
],
[
60691676400, # utc_start 1924-03-29 23:00:00 (Sat)
60708006000, # utc_end 1924-10-04 23:00:00 (Sat)
60691680000, # local_start 1924-03-30 00:00:00 (Sun)
60708009600, # local_end 1924-10-05 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60708006000, # utc_start 1924-10-04 23:00:00 (Sat)
60723730800, # utc_end 1925-04-04 23:00:00 (Sat)
60708006000, # local_start 1924-10-04 23:00:00 (Sat)
60723730800, # local_end 1925-04-04 23:00:00 (Sat)
0,
0,
'WET',
],
[
60723730800, # utc_start 1925-04-04 23:00:00 (Sat)
60739455600, # utc_end 1925-10-03 23:00:00 (Sat)
60723734400, # local_start 1925-04-05 00:00:00 (Sun)
60739459200, # local_end 1925-10-04 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60739455600, # utc_start 1925-10-03 23:00:00 (Sat)
60756390000, # utc_end 1926-04-17 23:00:00 (Sat)
60739455600, # local_start 1925-10-03 23:00:00 (Sat)
60756390000, # local_end 1926-04-17 23:00:00 (Sat)
0,
0,
'WET',
],
[
60756390000, # utc_start 1926-04-17 23:00:00 (Sat)
60770905200, # utc_end 1926-10-02 23:00:00 (Sat)
60756393600, # local_start 1926-04-18 00:00:00 (Sun)
60770908800, # local_end 1926-10-03 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60770905200, # utc_start 1926-10-02 23:00:00 (Sat)
60787234800, # utc_end 1927-04-09 23:00:00 (Sat)
60770905200, # local_start 1926-10-02 23:00:00 (Sat)
60787234800, # local_end 1927-04-09 23:00:00 (Sat)
0,
0,
'WET',
],
[
60787234800, # utc_start 1927-04-09 23:00:00 (Sat)
60802354800, # utc_end 1927-10-01 23:00:00 (Sat)
60787238400, # local_start 1927-04-10 00:00:00 (Sun)
60802358400, # local_end 1927-10-02 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60802354800, # utc_start 1927-10-01 23:00:00 (Sat)
60819289200, # utc_end 1928-04-14 23:00:00 (Sat)
60802354800, # local_start 1927-10-01 23:00:00 (Sat)
60819289200, # local_end 1928-04-14 23:00:00 (Sat)
0,
0,
'WET',
],
[
60819289200, # utc_start 1928-04-14 23:00:00 (Sat)
60834409200, # utc_end 1928-10-06 23:00:00 (Sat)
60819292800, # local_start 1928-04-15 00:00:00 (Sun)
60834412800, # local_end 1928-10-07 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60834409200, # utc_start 1928-10-06 23:00:00 (Sat)
60851343600, # utc_end 1929-04-20 23:00:00 (Sat)
60834409200, # local_start 1928-10-06 23:00:00 (Sat)
60851343600, # local_end 1929-04-20 23:00:00 (Sat)
0,
0,
'WET',
],
[
60851343600, # utc_start 1929-04-20 23:00:00 (Sat)
60865858800, # utc_end 1929-10-05 23:00:00 (Sat)
60851347200, # local_start 1929-04-21 00:00:00 (Sun)
60865862400, # local_end 1929-10-06 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60865858800, # utc_start 1929-10-05 23:00:00 (Sat)
60882188400, # utc_end 1930-04-12 23:00:00 (Sat)
60865858800, # local_start 1929-10-05 23:00:00 (Sat)
60882188400, # local_end 1930-04-12 23:00:00 (Sat)
0,
0,
'WET',
],
[
60882188400, # utc_start 1930-04-12 23:00:00 (Sat)
60897308400, # utc_end 1930-10-04 23:00:00 (Sat)
60882192000, # local_start 1930-04-13 00:00:00 (Sun)
60897312000, # local_end 1930-10-05 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60897308400, # utc_start 1930-10-04 23:00:00 (Sat)
60914242800, # utc_end 1931-04-18 23:00:00 (Sat)
60897308400, # local_start 1930-10-04 23:00:00 (Sat)
60914242800, # local_end 1931-04-18 23:00:00 (Sat)
0,
0,
'WET',
],
[
60914242800, # utc_start 1931-04-18 23:00:00 (Sat)
60928758000, # utc_end 1931-10-03 23:00:00 (Sat)
60914246400, # local_start 1931-04-19 00:00:00 (Sun)
60928761600, # local_end 1931-10-04 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60928758000, # utc_start 1931-10-03 23:00:00 (Sat)
60944482800, # utc_end 1932-04-02 23:00:00 (Sat)
60928758000, # local_start 1931-10-03 23:00:00 (Sat)
60944482800, # local_end 1932-04-02 23:00:00 (Sat)
0,
0,
'WET',
],
[
60944482800, # utc_start 1932-04-02 23:00:00 (Sat)
60960207600, # utc_end 1932-10-01 23:00:00 (Sat)
60944486400, # local_start 1932-04-03 00:00:00 (Sun)
60960211200, # local_end 1932-10-02 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60960207600, # utc_start 1932-10-01 23:00:00 (Sat)
60975327600, # utc_end 1933-03-25 23:00:00 (Sat)
60960207600, # local_start 1932-10-01 23:00:00 (Sat)
60975327600, # local_end 1933-03-25 23:00:00 (Sat)
0,
0,
'WET',
],
[
60975327600, # utc_start 1933-03-25 23:00:00 (Sat)
60992262000, # utc_end 1933-10-07 23:00:00 (Sat)
60975331200, # local_start 1933-03-26 00:00:00 (Sun)
60992265600, # local_end 1933-10-08 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
60992262000, # utc_start 1933-10-07 23:00:00 (Sat)
61007986800, # utc_end 1934-04-07 23:00:00 (Sat)
60992262000, # local_start 1933-10-07 23:00:00 (Sat)
61007986800, # local_end 1934-04-07 23:00:00 (Sat)
0,
0,
'WET',
],
[
61007986800, # utc_start 1934-04-07 23:00:00 (Sat)
61023711600, # utc_end 1934-10-06 23:00:00 (Sat)
61007990400, # local_start 1934-04-08 00:00:00 (Sun)
61023715200, # local_end 1934-10-07 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
61023711600, # utc_start 1934-10-06 23:00:00 (Sat)
61038831600, # utc_end 1935-03-30 23:00:00 (Sat)
61023711600, # local_start 1934-10-06 23:00:00 (Sat)
61038831600, # local_end 1935-03-30 23:00:00 (Sat)
0,
0,
'WET',
],
[
61038831600, # utc_start 1935-03-30 23:00:00 (Sat)
61055161200, # utc_end 1935-10-05 23:00:00 (Sat)
61038835200, # local_start 1935-03-31 00:00:00 (Sun)
61055164800, # local_end 1935-10-06 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
61055161200, # utc_start 1935-10-05 23:00:00 (Sat)
61072095600, # utc_end 1936-04-18 23:00:00 (Sat)
61055161200, # local_start 1935-10-05 23:00:00 (Sat)
61072095600, # local_end 1936-04-18 23:00:00 (Sat)
0,
0,
'WET',
],
[
61072095600, # utc_start 1936-04-18 23:00:00 (Sat)
61086610800, # utc_end 1936-10-03 23:00:00 (Sat)
61072099200, # local_start 1936-04-19 00:00:00 (Sun)
61086614400, # local_end 1936-10-04 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
61086610800, # utc_start 1936-10-03 23:00:00 (Sat)
61102335600, # utc_end 1937-04-03 23:00:00 (Sat)
61086610800, # local_start 1936-10-03 23:00:00 (Sat)
61102335600, # local_end 1937-04-03 23:00:00 (Sat)
0,
0,
'WET',
],
[
61102335600, # utc_start 1937-04-03 23:00:00 (Sat)
61118060400, # utc_end 1937-10-02 23:00:00 (Sat)
61102339200, # local_start 1937-04-04 00:00:00 (Sun)
61118064000, # local_end 1937-10-03 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
61118060400, # utc_start 1937-10-02 23:00:00 (Sat)
61133180400, # utc_end 1938-03-26 23:00:00 (Sat)
61118060400, # local_start 1937-10-02 23:00:00 (Sat)
61133180400, # local_end 1938-03-26 23:00:00 (Sat)
0,
0,
'WET',
],
[
61133180400, # utc_start 1938-03-26 23:00:00 (Sat)
61149510000, # utc_end 1938-10-01 23:00:00 (Sat)
61133184000, # local_start 1938-03-27 00:00:00 (Sun)
61149513600, # local_end 1938-10-02 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
61149510000, # utc_start 1938-10-01 23:00:00 (Sat)
61166444400, # utc_end 1939-04-15 23:00:00 (Sat)
61149510000, # local_start 1938-10-01 23:00:00 (Sat)
61166444400, # local_end 1939-04-15 23:00:00 (Sat)
0,
0,
'WET',
],
[
61166444400, # utc_start 1939-04-15 23:00:00 (Sat)
61185193200, # utc_end 1939-11-18 23:00:00 (Sat)
61166448000, # local_start 1939-04-16 00:00:00 (Sun)
61185196800, # local_end 1939-11-19 00:00:00 (Sun)
3600,
1,
'WEST',
],
[
61185193200, # utc_start 1939-11-18 23:00:00 (Sat)
61193671200, # utc_end 1940-02-25 02:00:00 (Sun)
61185193200, # local_start 1939-11-18 23:00:00 (Sat)
61193671200, # local_end 1940-02-25 02:00:00 (Sun)
0,
0,
'WET',
],
[
61193671200, # utc_start 1940-02-25 02:00:00 (Sun)
61231244400, # utc_end 1941-05-04 23:00:00 (Sun)
61193674800, # local_start 1940-02-25 03:00:00 (Sun)
61231248000, # local_end 1941-05-05 00:00:00 (Mon)
3600,
1,
'WEST',
],
[
61231244400, # utc_start 1941-05-04 23:00:00 (Sun)
61244546400, # utc_end 1941-10-05 22:00:00 (Sun)
61231251600, # local_start 1941-05-05 01:00:00 (Mon)
61244553600, # local_end 1941-10-06 00:00:00 (Mon)
7200,
1,
'WEMT',
],
[
61244546400, # utc_start 1941-10-05 22:00:00 (Sun)
61257855600, # utc_end 1942-03-08 23:00:00 (Sun)
61244550000, # local_start 1941-10-05 23:00:00 (Sun)
61257859200, # local_end 1942-03-09 00:00:00 (Mon)
3600,
1,
'WEST',
],
[
61257855600, # utc_start 1942-03-08 23:00:00 (Sun)
61278426000, # utc_end 1942-11-02 01:00:00 (Mon)
61257862800, # local_start 1942-03-09 01:00:00 (Mon)
61278433200, # local_end 1942-11-02 03:00:00 (Mon)
7200,
1,
'WEMT',
],
[
61278426000, # utc_start 1942-11-02 01:00:00 (Mon)
61291126800, # utc_end 1943-03-29 01:00:00 (Mon)
61278429600, # local_start 1942-11-02 02:00:00 (Mon)
61291130400, # local_end 1943-03-29 02:00:00 (Mon)
3600,
1,
'WEST',
],
[
61291126800, # utc_start 1943-03-29 01:00:00 (Mon)
61307456400, # utc_end 1943-10-04 01:00:00 (Mon)
61291134000, # local_start 1943-03-29 03:00:00 (Mon)
61307463600, # local_end 1943-10-04 03:00:00 (Mon)
7200,
1,
'WEMT',
],
[
61307456400, # utc_start 1943-10-04 01:00:00 (Mon)
61323181200, # utc_end 1944-04-03 01:00:00 (Mon)
61307460000, # local_start 1943-10-04 02:00:00 (Mon)
61323184800, # local_end 1944-04-03 02:00:00 (Mon)
3600,
1,
'WEST',
],
[
61323181200, # utc_start 1944-04-03 01:00:00 (Mon)
61339417200, # utc_end 1944-10-07 23:00:00 (Sat)
61323188400, # local_start 1944-04-03 03:00:00 (Mon)
61339424400, # local_end 1944-10-08 01:00:00 (Sun)
7200,
1,
'WEMT',
],
[
61339417200, # utc_start 1944-10-07 23:00:00 (Sat)
61354630800, # utc_end 1945-04-02 01:00:00 (Mon)
61339420800, # local_start 1944-10-08 00:00:00 (Sun)
61354634400, # local_end 1945-04-02 02:00:00 (Mon)
3600,
1,
'WEST',
],
[
61354630800, # utc_start 1945-04-02 01:00:00 (Mon)
61369059600, # utc_end 1945-09-16 01:00:00 (Sun)
61354638000, # local_start 1945-04-02 03:00:00 (Mon)
61369066800, # local_end 1945-09-16 03:00:00 (Sun)
7200,
1,
'WEMT',
],
[
61369059600, # utc_start 1945-09-16 01:00:00 (Sun)
62332502400, # utc_end 1976-03-28 00:00:00 (Sun)
61369063200, # local_start 1945-09-16 02:00:00 (Sun)
62332506000, # local_end 1976-03-28 01:00:00 (Sun)
3600,
0,
'CET',
],
[
62332502400, # utc_start 1976-03-28 00:00:00 (Sun)
62348223600, # utc_end 1976-09-25 23:00:00 (Sat)
62332509600, # local_start 1976-03-28 02:00:00 (Sun)
62348230800, # local_end 1976-09-26 01:00:00 (Sun)
7200,
1,
'CEST',
],
[
62348223600, # utc_start 1976-09-25 23:00:00 (Sat)
62356604400, # utc_end 1976-12-31 23:00:00 (Fri)
62348227200, # local_start 1976-09-26 00:00:00 (Sun)
62356608000, # local_end 1977-01-01 00:00:00 (Sat)
3600,
0,
'CET',
],
[
62356604400, # utc_start 1976-12-31 23:00:00 (Fri)
62364560400, # utc_end 1977-04-03 01:00:00 (Sun)
62356608000, # local_start 1977-01-01 00:00:00 (Sat)
62364564000, # local_end 1977-04-03 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62364560400, # utc_start 1977-04-03 01:00:00 (Sun)
62379680400, # utc_end 1977-09-25 01:00:00 (Sun)
62364567600, # local_start 1977-04-03 03:00:00 (Sun)
62379687600, # local_end 1977-09-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62379680400, # utc_start 1977-09-25 01:00:00 (Sun)
62396010000, # utc_end 1978-04-02 01:00:00 (Sun)
62379684000, # local_start 1977-09-25 02:00:00 (Sun)
62396013600, # local_end 1978-04-02 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62396010000, # utc_start 1978-04-02 01:00:00 (Sun)
62411734800, # utc_end 1978-10-01 01:00:00 (Sun)
62396017200, # local_start 1978-04-02 03:00:00 (Sun)
62411742000, # local_end 1978-10-01 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62411734800, # utc_start 1978-10-01 01:00:00 (Sun)
62427459600, # utc_end 1979-04-01 01:00:00 (Sun)
62411738400, # local_start 1978-10-01 02:00:00 (Sun)
62427463200, # local_end 1979-04-01 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62427459600, # utc_start 1979-04-01 01:00:00 (Sun)
62443184400, # utc_end 1979-09-30 01:00:00 (Sun)
62427466800, # local_start 1979-04-01 03:00:00 (Sun)
62443191600, # local_end 1979-09-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62443184400, # utc_start 1979-09-30 01:00:00 (Sun)
62459514000, # utc_end 1980-04-06 01:00:00 (Sun)
62443188000, # local_start 1979-09-30 02:00:00 (Sun)
62459517600, # local_end 1980-04-06 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62459514000, # utc_start 1980-04-06 01:00:00 (Sun)
62474634000, # utc_end 1980-09-28 01:00:00 (Sun)
62459521200, # local_start 1980-04-06 03:00:00 (Sun)
62474641200, # local_end 1980-09-28 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62474634000, # utc_start 1980-09-28 01:00:00 (Sun)
62490358800, # utc_end 1981-03-29 01:00:00 (Sun)
62474637600, # local_start 1980-09-28 02:00:00 (Sun)
62490362400, # local_end 1981-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62490358800, # utc_start 1981-03-29 01:00:00 (Sun)
62506083600, # utc_end 1981-09-27 01:00:00 (Sun)
62490366000, # local_start 1981-03-29 03:00:00 (Sun)
62506090800, # local_end 1981-09-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62506083600, # utc_start 1981-09-27 01:00:00 (Sun)
62521808400, # utc_end 1982-03-28 01:00:00 (Sun)
62506087200, # local_start 1981-09-27 02:00:00 (Sun)
62521812000, # local_end 1982-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62521808400, # utc_start 1982-03-28 01:00:00 (Sun)
62537533200, # utc_end 1982-09-26 01:00:00 (Sun)
62521815600, # local_start 1982-03-28 03:00:00 (Sun)
62537540400, # local_end 1982-09-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62537533200, # utc_start 1982-09-26 01:00:00 (Sun)
62553258000, # utc_end 1983-03-27 01:00:00 (Sun)
62537536800, # local_start 1982-09-26 02:00:00 (Sun)
62553261600, # local_end 1983-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62553258000, # utc_start 1983-03-27 01:00:00 (Sun)
62568982800, # utc_end 1983-09-25 01:00:00 (Sun)
62553265200, # local_start 1983-03-27 03:00:00 (Sun)
62568990000, # local_end 1983-09-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62568982800, # utc_start 1983-09-25 01:00:00 (Sun)
62584707600, # utc_end 1984-03-25 01:00:00 (Sun)
62568986400, # local_start 1983-09-25 02:00:00 (Sun)
62584711200, # local_end 1984-03-25 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62584707600, # utc_start 1984-03-25 01:00:00 (Sun)
62601037200, # utc_end 1984-09-30 01:00:00 (Sun)
62584714800, # local_start 1984-03-25 03:00:00 (Sun)
62601044400, # local_end 1984-09-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62601037200, # utc_start 1984-09-30 01:00:00 (Sun)
62616762000, # utc_end 1985-03-31 01:00:00 (Sun)
62601040800, # local_start 1984-09-30 02:00:00 (Sun)
62616765600, # local_end 1985-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62616762000, # utc_start 1985-03-31 01:00:00 (Sun)
62632486800, # utc_end 1985-09-29 01:00:00 (Sun)
62616769200, # local_start 1985-03-31 03:00:00 (Sun)
62632494000, # local_end 1985-09-29 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62632486800, # utc_start 1985-09-29 01:00:00 (Sun)
62648211600, # utc_end 1986-03-30 01:00:00 (Sun)
62632490400, # local_start 1985-09-29 02:00:00 (Sun)
62648215200, # local_end 1986-03-30 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62648211600, # utc_start 1986-03-30 01:00:00 (Sun)
62663936400, # utc_end 1986-09-28 01:00:00 (Sun)
62648218800, # local_start 1986-03-30 03:00:00 (Sun)
62663943600, # local_end 1986-09-28 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62663936400, # utc_start 1986-09-28 01:00:00 (Sun)
62679661200, # utc_end 1987-03-29 01:00:00 (Sun)
62663940000, # local_start 1986-09-28 02:00:00 (Sun)
62679664800, # local_end 1987-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62679661200, # utc_start 1987-03-29 01:00:00 (Sun)
62695386000, # utc_end 1987-09-27 01:00:00 (Sun)
62679668400, # local_start 1987-03-29 03:00:00 (Sun)
62695393200, # local_end 1987-09-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62695386000, # utc_start 1987-09-27 01:00:00 (Sun)
62711110800, # utc_end 1988-03-27 01:00:00 (Sun)
62695389600, # local_start 1987-09-27 02:00:00 (Sun)
62711114400, # local_end 1988-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62711110800, # utc_start 1988-03-27 01:00:00 (Sun)
62726835600, # utc_end 1988-09-25 01:00:00 (Sun)
62711118000, # local_start 1988-03-27 03:00:00 (Sun)
62726842800, # local_end 1988-09-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62726835600, # utc_start 1988-09-25 01:00:00 (Sun)
62742560400, # utc_end 1989-03-26 01:00:00 (Sun)
62726839200, # local_start 1988-09-25 02:00:00 (Sun)
62742564000, # local_end 1989-03-26 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62742560400, # utc_start 1989-03-26 01:00:00 (Sun)
62758285200, # utc_end 1989-09-24 01:00:00 (Sun)
62742567600, # local_start 1989-03-26 03:00:00 (Sun)
62758292400, # local_end 1989-09-24 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62758285200, # utc_start 1989-09-24 01:00:00 (Sun)
62774010000, # utc_end 1990-03-25 01:00:00 (Sun)
62758288800, # local_start 1989-09-24 02:00:00 (Sun)
62774013600, # local_end 1990-03-25 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62774010000, # utc_start 1990-03-25 01:00:00 (Sun)
62790339600, # utc_end 1990-09-30 01:00:00 (Sun)
62774017200, # local_start 1990-03-25 03:00:00 (Sun)
62790346800, # local_end 1990-09-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62790339600, # utc_start 1990-09-30 01:00:00 (Sun)
62806064400, # utc_end 1991-03-31 01:00:00 (Sun)
62790343200, # local_start 1990-09-30 02:00:00 (Sun)
62806068000, # local_end 1991-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62806064400, # utc_start 1991-03-31 01:00:00 (Sun)
62821789200, # utc_end 1991-09-29 01:00:00 (Sun)
62806071600, # local_start 1991-03-31 03:00:00 (Sun)
62821796400, # local_end 1991-09-29 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62821789200, # utc_start 1991-09-29 01:00:00 (Sun)
62837514000, # utc_end 1992-03-29 01:00:00 (Sun)
62821792800, # local_start 1991-09-29 02:00:00 (Sun)
62837517600, # local_end 1992-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62837514000, # utc_start 1992-03-29 01:00:00 (Sun)
62853238800, # utc_end 1992-09-27 01:00:00 (Sun)
62837521200, # local_start 1992-03-29 03:00:00 (Sun)
62853246000, # local_end 1992-09-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62853238800, # utc_start 1992-09-27 01:00:00 (Sun)
62868963600, # utc_end 1993-03-28 01:00:00 (Sun)
62853242400, # local_start 1992-09-27 02:00:00 (Sun)
62868967200, # local_end 1993-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62868963600, # utc_start 1993-03-28 01:00:00 (Sun)
62884688400, # utc_end 1993-09-26 01:00:00 (Sun)
62868970800, # local_start 1993-03-28 03:00:00 (Sun)
62884695600, # local_end 1993-09-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62884688400, # utc_start 1993-09-26 01:00:00 (Sun)
62900413200, # utc_end 1994-03-27 01:00:00 (Sun)
62884692000, # local_start 1993-09-26 02:00:00 (Sun)
62900416800, # local_end 1994-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62900413200, # utc_start 1994-03-27 01:00:00 (Sun)
62916138000, # utc_end 1994-09-25 01:00:00 (Sun)
62900420400, # local_start 1994-03-27 03:00:00 (Sun)
62916145200, # local_end 1994-09-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62916138000, # utc_start 1994-09-25 01:00:00 (Sun)
62931862800, # utc_end 1995-03-26 01:00:00 (Sun)
62916141600, # local_start 1994-09-25 02:00:00 (Sun)
62931866400, # local_end 1995-03-26 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62931862800, # utc_start 1995-03-26 01:00:00 (Sun)
62947587600, # utc_end 1995-09-24 01:00:00 (Sun)
62931870000, # local_start 1995-03-26 03:00:00 (Sun)
62947594800, # local_end 1995-09-24 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62947587600, # utc_start 1995-09-24 01:00:00 (Sun)
62963917200, # utc_end 1996-03-31 01:00:00 (Sun)
62947591200, # local_start 1995-09-24 02:00:00 (Sun)
62963920800, # local_end 1996-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62963917200, # utc_start 1996-03-31 01:00:00 (Sun)
62982061200, # utc_end 1996-10-27 01:00:00 (Sun)
62963924400, # local_start 1996-03-31 03:00:00 (Sun)
62982068400, # local_end 1996-10-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
62982061200, # utc_start 1996-10-27 01:00:00 (Sun)
62995366800, # utc_end 1997-03-30 01:00:00 (Sun)
62982064800, # local_start 1996-10-27 02:00:00 (Sun)
62995370400, # local_end 1997-03-30 02:00:00 (Sun)
3600,
0,
'CET',
],
[
62995366800, # utc_start 1997-03-30 01:00:00 (Sun)
63013510800, # utc_end 1997-10-26 01:00:00 (Sun)
62995374000, # local_start 1997-03-30 03:00:00 (Sun)
63013518000, # local_end 1997-10-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63013510800, # utc_start 1997-10-26 01:00:00 (Sun)
63026816400, # utc_end 1998-03-29 01:00:00 (Sun)
63013514400, # local_start 1997-10-26 02:00:00 (Sun)
63026820000, # local_end 1998-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63026816400, # utc_start 1998-03-29 01:00:00 (Sun)
63044960400, # utc_end 1998-10-25 01:00:00 (Sun)
63026823600, # local_start 1998-03-29 03:00:00 (Sun)
63044967600, # local_end 1998-10-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63044960400, # utc_start 1998-10-25 01:00:00 (Sun)
63058266000, # utc_end 1999-03-28 01:00:00 (Sun)
63044964000, # local_start 1998-10-25 02:00:00 (Sun)
63058269600, # local_end 1999-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63058266000, # utc_start 1999-03-28 01:00:00 (Sun)
63077014800, # utc_end 1999-10-31 01:00:00 (Sun)
63058273200, # local_start 1999-03-28 03:00:00 (Sun)
63077022000, # local_end 1999-10-31 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63077014800, # utc_start 1999-10-31 01:00:00 (Sun)
63089715600, # utc_end 2000-03-26 01:00:00 (Sun)
63077018400, # local_start 1999-10-31 02:00:00 (Sun)
63089719200, # local_end 2000-03-26 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63089715600, # utc_start 2000-03-26 01:00:00 (Sun)
63108464400, # utc_end 2000-10-29 01:00:00 (Sun)
63089722800, # local_start 2000-03-26 03:00:00 (Sun)
63108471600, # local_end 2000-10-29 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63108464400, # utc_start 2000-10-29 01:00:00 (Sun)
63121165200, # utc_end 2001-03-25 01:00:00 (Sun)
63108468000, # local_start 2000-10-29 02:00:00 (Sun)
63121168800, # local_end 2001-03-25 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63121165200, # utc_start 2001-03-25 01:00:00 (Sun)
63139914000, # utc_end 2001-10-28 01:00:00 (Sun)
63121172400, # local_start 2001-03-25 03:00:00 (Sun)
63139921200, # local_end 2001-10-28 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63139914000, # utc_start 2001-10-28 01:00:00 (Sun)
63153219600, # utc_end 2002-03-31 01:00:00 (Sun)
63139917600, # local_start 2001-10-28 02:00:00 (Sun)
63153223200, # local_end 2002-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63153219600, # utc_start 2002-03-31 01:00:00 (Sun)
63171363600, # utc_end 2002-10-27 01:00:00 (Sun)
63153226800, # local_start 2002-03-31 03:00:00 (Sun)
63171370800, # local_end 2002-10-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63171363600, # utc_start 2002-10-27 01:00:00 (Sun)
63184669200, # utc_end 2003-03-30 01:00:00 (Sun)
63171367200, # local_start 2002-10-27 02:00:00 (Sun)
63184672800, # local_end 2003-03-30 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63184669200, # utc_start 2003-03-30 01:00:00 (Sun)
63202813200, # utc_end 2003-10-26 01:00:00 (Sun)
63184676400, # local_start 2003-03-30 03:00:00 (Sun)
63202820400, # local_end 2003-10-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63202813200, # utc_start 2003-10-26 01:00:00 (Sun)
63216118800, # utc_end 2004-03-28 01:00:00 (Sun)
63202816800, # local_start 2003-10-26 02:00:00 (Sun)
63216122400, # local_end 2004-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63216118800, # utc_start 2004-03-28 01:00:00 (Sun)
63234867600, # utc_end 2004-10-31 01:00:00 (Sun)
63216126000, # local_start 2004-03-28 03:00:00 (Sun)
63234874800, # local_end 2004-10-31 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63234867600, # utc_start 2004-10-31 01:00:00 (Sun)
63247568400, # utc_end 2005-03-27 01:00:00 (Sun)
63234871200, # local_start 2004-10-31 02:00:00 (Sun)
63247572000, # local_end 2005-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63247568400, # utc_start 2005-03-27 01:00:00 (Sun)
63266317200, # utc_end 2005-10-30 01:00:00 (Sun)
63247575600, # local_start 2005-03-27 03:00:00 (Sun)
63266324400, # local_end 2005-10-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63266317200, # utc_start 2005-10-30 01:00:00 (Sun)
63279018000, # utc_end 2006-03-26 01:00:00 (Sun)
63266320800, # local_start 2005-10-30 02:00:00 (Sun)
63279021600, # local_end 2006-03-26 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63279018000, # utc_start 2006-03-26 01:00:00 (Sun)
63297766800, # utc_end 2006-10-29 01:00:00 (Sun)
63279025200, # local_start 2006-03-26 03:00:00 (Sun)
63297774000, # local_end 2006-10-29 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63297766800, # utc_start 2006-10-29 01:00:00 (Sun)
63310467600, # utc_end 2007-03-25 01:00:00 (Sun)
63297770400, # local_start 2006-10-29 02:00:00 (Sun)
63310471200, # local_end 2007-03-25 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63310467600, # utc_start 2007-03-25 01:00:00 (Sun)
63329216400, # utc_end 2007-10-28 01:00:00 (Sun)
63310474800, # local_start 2007-03-25 03:00:00 (Sun)
63329223600, # local_end 2007-10-28 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63329216400, # utc_start 2007-10-28 01:00:00 (Sun)
63342522000, # utc_end 2008-03-30 01:00:00 (Sun)
63329220000, # local_start 2007-10-28 02:00:00 (Sun)
63342525600, # local_end 2008-03-30 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63342522000, # utc_start 2008-03-30 01:00:00 (Sun)
63360666000, # utc_end 2008-10-26 01:00:00 (Sun)
63342529200, # local_start 2008-03-30 03:00:00 (Sun)
63360673200, # local_end 2008-10-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63360666000, # utc_start 2008-10-26 01:00:00 (Sun)
63373971600, # utc_end 2009-03-29 01:00:00 (Sun)
63360669600, # local_start 2008-10-26 02:00:00 (Sun)
63373975200, # local_end 2009-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63373971600, # utc_start 2009-03-29 01:00:00 (Sun)
63392115600, # utc_end 2009-10-25 01:00:00 (Sun)
63373978800, # local_start 2009-03-29 03:00:00 (Sun)
63392122800, # local_end 2009-10-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63392115600, # utc_start 2009-10-25 01:00:00 (Sun)
63405421200, # utc_end 2010-03-28 01:00:00 (Sun)
63392119200, # local_start 2009-10-25 02:00:00 (Sun)
63405424800, # local_end 2010-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63405421200, # utc_start 2010-03-28 01:00:00 (Sun)
63424170000, # utc_end 2010-10-31 01:00:00 (Sun)
63405428400, # local_start 2010-03-28 03:00:00 (Sun)
63424177200, # local_end 2010-10-31 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63424170000, # utc_start 2010-10-31 01:00:00 (Sun)
63436870800, # utc_end 2011-03-27 01:00:00 (Sun)
63424173600, # local_start 2010-10-31 02:00:00 (Sun)
63436874400, # local_end 2011-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63436870800, # utc_start 2011-03-27 01:00:00 (Sun)
63455619600, # utc_end 2011-10-30 01:00:00 (Sun)
63436878000, # local_start 2011-03-27 03:00:00 (Sun)
63455626800, # local_end 2011-10-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63455619600, # utc_start 2011-10-30 01:00:00 (Sun)
63468320400, # utc_end 2012-03-25 01:00:00 (Sun)
63455623200, # local_start 2011-10-30 02:00:00 (Sun)
63468324000, # local_end 2012-03-25 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63468320400, # utc_start 2012-03-25 01:00:00 (Sun)
63487069200, # utc_end 2012-10-28 01:00:00 (Sun)
63468327600, # local_start 2012-03-25 03:00:00 (Sun)
63487076400, # local_end 2012-10-28 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63487069200, # utc_start 2012-10-28 01:00:00 (Sun)
63500374800, # utc_end 2013-03-31 01:00:00 (Sun)
63487072800, # local_start 2012-10-28 02:00:00 (Sun)
63500378400, # local_end 2013-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63500374800, # utc_start 2013-03-31 01:00:00 (Sun)
63518518800, # utc_end 2013-10-27 01:00:00 (Sun)
63500382000, # local_start 2013-03-31 03:00:00 (Sun)
63518526000, # local_end 2013-10-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63518518800, # utc_start 2013-10-27 01:00:00 (Sun)
63531824400, # utc_end 2014-03-30 01:00:00 (Sun)
63518522400, # local_start 2013-10-27 02:00:00 (Sun)
63531828000, # local_end 2014-03-30 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63531824400, # utc_start 2014-03-30 01:00:00 (Sun)
63549968400, # utc_end 2014-10-26 01:00:00 (Sun)
63531831600, # local_start 2014-03-30 03:00:00 (Sun)
63549975600, # local_end 2014-10-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63549968400, # utc_start 2014-10-26 01:00:00 (Sun)
63563274000, # utc_end 2015-03-29 01:00:00 (Sun)
63549972000, # local_start 2014-10-26 02:00:00 (Sun)
63563277600, # local_end 2015-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63563274000, # utc_start 2015-03-29 01:00:00 (Sun)
63581418000, # utc_end 2015-10-25 01:00:00 (Sun)
63563281200, # local_start 2015-03-29 03:00:00 (Sun)
63581425200, # local_end 2015-10-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63581418000, # utc_start 2015-10-25 01:00:00 (Sun)
63594723600, # utc_end 2016-03-27 01:00:00 (Sun)
63581421600, # local_start 2015-10-25 02:00:00 (Sun)
63594727200, # local_end 2016-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63594723600, # utc_start 2016-03-27 01:00:00 (Sun)
63613472400, # utc_end 2016-10-30 01:00:00 (Sun)
63594730800, # local_start 2016-03-27 03:00:00 (Sun)
63613479600, # local_end 2016-10-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63613472400, # utc_start 2016-10-30 01:00:00 (Sun)
63626173200, # utc_end 2017-03-26 01:00:00 (Sun)
63613476000, # local_start 2016-10-30 02:00:00 (Sun)
63626176800, # local_end 2017-03-26 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63626173200, # utc_start 2017-03-26 01:00:00 (Sun)
63644922000, # utc_end 2017-10-29 01:00:00 (Sun)
63626180400, # local_start 2017-03-26 03:00:00 (Sun)
63644929200, # local_end 2017-10-29 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63644922000, # utc_start 2017-10-29 01:00:00 (Sun)
63657622800, # utc_end 2018-03-25 01:00:00 (Sun)
63644925600, # local_start 2017-10-29 02:00:00 (Sun)
63657626400, # local_end 2018-03-25 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63657622800, # utc_start 2018-03-25 01:00:00 (Sun)
63676371600, # utc_end 2018-10-28 01:00:00 (Sun)
63657630000, # local_start 2018-03-25 03:00:00 (Sun)
63676378800, # local_end 2018-10-28 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63676371600, # utc_start 2018-10-28 01:00:00 (Sun)
63689677200, # utc_end 2019-03-31 01:00:00 (Sun)
63676375200, # local_start 2018-10-28 02:00:00 (Sun)
63689680800, # local_end 2019-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63689677200, # utc_start 2019-03-31 01:00:00 (Sun)
63707821200, # utc_end 2019-10-27 01:00:00 (Sun)
63689684400, # local_start 2019-03-31 03:00:00 (Sun)
63707828400, # local_end 2019-10-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63707821200, # utc_start 2019-10-27 01:00:00 (Sun)
63721126800, # utc_end 2020-03-29 01:00:00 (Sun)
63707824800, # local_start 2019-10-27 02:00:00 (Sun)
63721130400, # local_end 2020-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63721126800, # utc_start 2020-03-29 01:00:00 (Sun)
63739270800, # utc_end 2020-10-25 01:00:00 (Sun)
63721134000, # local_start 2020-03-29 03:00:00 (Sun)
63739278000, # local_end 2020-10-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63739270800, # utc_start 2020-10-25 01:00:00 (Sun)
63752576400, # utc_end 2021-03-28 01:00:00 (Sun)
63739274400, # local_start 2020-10-25 02:00:00 (Sun)
63752580000, # local_end 2021-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63752576400, # utc_start 2021-03-28 01:00:00 (Sun)
63771325200, # utc_end 2021-10-31 01:00:00 (Sun)
63752583600, # local_start 2021-03-28 03:00:00 (Sun)
63771332400, # local_end 2021-10-31 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63771325200, # utc_start 2021-10-31 01:00:00 (Sun)
63784026000, # utc_end 2022-03-27 01:00:00 (Sun)
63771328800, # local_start 2021-10-31 02:00:00 (Sun)
63784029600, # local_end 2022-03-27 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63784026000, # utc_start 2022-03-27 01:00:00 (Sun)
63802774800, # utc_end 2022-10-30 01:00:00 (Sun)
63784033200, # local_start 2022-03-27 03:00:00 (Sun)
63802782000, # local_end 2022-10-30 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63802774800, # utc_start 2022-10-30 01:00:00 (Sun)
63815475600, # utc_end 2023-03-26 01:00:00 (Sun)
63802778400, # local_start 2022-10-30 02:00:00 (Sun)
63815479200, # local_end 2023-03-26 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63815475600, # utc_start 2023-03-26 01:00:00 (Sun)
63834224400, # utc_end 2023-10-29 01:00:00 (Sun)
63815482800, # local_start 2023-03-26 03:00:00 (Sun)
63834231600, # local_end 2023-10-29 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63834224400, # utc_start 2023-10-29 01:00:00 (Sun)
63847530000, # utc_end 2024-03-31 01:00:00 (Sun)
63834228000, # local_start 2023-10-29 02:00:00 (Sun)
63847533600, # local_end 2024-03-31 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63847530000, # utc_start 2024-03-31 01:00:00 (Sun)
63865674000, # utc_end 2024-10-27 01:00:00 (Sun)
63847537200, # local_start 2024-03-31 03:00:00 (Sun)
63865681200, # local_end 2024-10-27 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63865674000, # utc_start 2024-10-27 01:00:00 (Sun)
63878979600, # utc_end 2025-03-30 01:00:00 (Sun)
63865677600, # local_start 2024-10-27 02:00:00 (Sun)
63878983200, # local_end 2025-03-30 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63878979600, # utc_start 2025-03-30 01:00:00 (Sun)
63897123600, # utc_end 2025-10-26 01:00:00 (Sun)
63878986800, # local_start 2025-03-30 03:00:00 (Sun)
63897130800, # local_end 2025-10-26 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63897123600, # utc_start 2025-10-26 01:00:00 (Sun)
63910429200, # utc_end 2026-03-29 01:00:00 (Sun)
63897127200, # local_start 2025-10-26 02:00:00 (Sun)
63910432800, # local_end 2026-03-29 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63910429200, # utc_start 2026-03-29 01:00:00 (Sun)
63928573200, # utc_end 2026-10-25 01:00:00 (Sun)
63910436400, # local_start 2026-03-29 03:00:00 (Sun)
63928580400, # local_end 2026-10-25 03:00:00 (Sun)
7200,
1,
'CEST',
],
[
63928573200, # utc_start 2026-10-25 01:00:00 (Sun)
63941878800, # utc_end 2027-03-28 01:00:00 (Sun)
63928576800, # local_start 2026-10-25 02:00:00 (Sun)
63941882400, # local_end 2027-03-28 02:00:00 (Sun)
3600,
0,
'CET',
],
[
63941878800, # utc_start 2027-03-28 01:00:00 (Sun)
63960627600, # utc_end 2027-10-31 01:00:00 (Sun)
63941886000, # local_start 2027-03-28 03:00:00 (Sun)
63960634800, # local_end 2027-10-31 03:00:00 (Sun)
7200,
1,
'CEST',
],
];
# NOTE: this file is auto-generated by DateTime::TimeZone's Olson-database
# build tooling; manual edits would be overwritten on regeneration.

# IANA/Olson tz database release this data was generated from.
sub olson_version {'2016a'}

# Truthy when the zone has DST transitions (generated value; presumably the
# number of transitions in the span table above -- TODO confirm).
sub has_dst_changes {86}

# Last year covered by the pre-computed span table; later dates fall back to
# the recurring rules returned by _rules().
sub _max_year {2026}

sub _new_instance {
    # Construct the zone object from the pre-computed $spans table above.
    return shift->_init( @_, spans => $spans );
}

# Standard-time offset (seconds east of UTC) in effect after the last span.
sub _last_offset { 3600 }
# Observance in effect beyond the generated spans: CET/CEST ("CE%sT") at
# UTC+1 standard time, open-ended ('until' => []).  The embedded DateTime
# objects mark the observance's start instant in local and UTC terms.
my $last_observance = bless( {
    'format' => 'CE%sT',
    'gmtoff' => '1:00',
    'local_start_datetime' => bless( {
        'formatter' => undef,
        'local_rd_days' => 721720,
        'local_rd_secs' => 0,
        'offset_modifier' => 0,
        'rd_nanosecs' => 0,
        'tz' => bless( {
            'name' => 'floating',
            'offset' => 0
        }, 'DateTime::TimeZone::Floating' ),
        'utc_rd_days' => 721720,
        'utc_rd_secs' => 0,
        'utc_year' => 1978
    }, 'DateTime' ),
    'offset_from_std' => 0,
    'offset_from_utc' => 3600,
    'until' => [],
    'utc_start_datetime' => bless( {
        'formatter' => undef,
        'local_rd_days' => 721719,
        'local_rd_secs' => 82800,
        'offset_modifier' => 0,
        'rd_nanosecs' => 0,
        'tz' => bless( {
            'name' => 'floating',
            'offset' => 0
        }, 'DateTime::TimeZone::Floating' ),
        'utc_rd_days' => 721719,
        'utc_rd_secs' => 82800,
        'utc_year' => 1977
    }, 'DateTime' )
}, 'DateTime::TimeZone::OlsonDB::Observance' )
;

# Accessor used by DateTime::TimeZone when a date is past the span table.
sub _last_observance { $last_observance }
# Recurring EU daylight-saving rules applied to dates beyond the generated
# spans: per the data below, DST ends ('save' => '0') on the last Sunday of
# October (since 1996) and begins ('save' => '1:00') on the last Sunday of
# March (since 1981), both at 01:00 UTC ('1:00u').
my $rules = [
    bless( {
        'at' => '1:00u',
        'from' => '1996',
        'in' => 'Oct',
        'letter' => '',
        'name' => 'EU',
        'offset_from_std' => 0,
        'on' => 'lastSun',
        'save' => '0',
        'to' => 'max',
        'type' => undef
    }, 'DateTime::TimeZone::OlsonDB::Rule' ),
    bless( {
        'at' => '1:00u',
        'from' => '1981',
        'in' => 'Mar',
        'letter' => 'S',
        'name' => 'EU',
        'offset_from_std' => 3600,
        'on' => 'lastSun',
        'save' => '1:00',
        'to' => 'max',
        'type' => undef
    }, 'DateTime::TimeZone::OlsonDB::Rule' )
]
;

# Accessor for the recurring rules above.
sub _rules { $rules }

1;
| jkb78/extrajnm | local/lib/perl5/DateTime/TimeZone/Europe/Monaco.pm | Perl | mit | 43,032 |
package GeneAttributeParser::KeggPathways;

# Parser for tab-delimited KEGG pathway dump files.  Records per-gene KO-term
# assignments plus a shared KO -> definition lookup, and renders annotation
# strings of the form "K00001[definition];K00002[definition]".

use strict;
use warnings;
use Carp;

use base qw (GeneAttributeParser::BaseParser);

# KO number -> textual definition.  Package-level (shared by all instances)
# so conflicting definitions across input files are detected and reported.
my %kegg_pathway_definitions;

# Construct an empty parser with no gene annotations.
sub new {
    my $packagename = shift;

    my $self = {
        # gene id -> { KO number -> 1 }
        gene_to_KeggPathways => {},
    };

    bless ($self, $packagename);
    return($self);
}

# Parse one dump file.  Expected tab-separated columns (0-based): 1 = gene
# id, 5 = KO number, 6 = KO definition.  Dies when the definition column is
# missing, and croaks when a KO number reappears with a different definition.
sub parse_dump_file {
    my $self = shift;
    my ($file) = @_;

    # Three-argument open with an explicit read mode: the original two-arg
    # form would misbehave on filenames beginning with '>', '<' or '|'.
    open (my $fh, '<', $file) or croak ("Error, cannot open file $file: $!");
    while (<$fh>) {
        chomp;
        my @x = split(/\t/);
        my $gene_id = $x[1];
        my $ko_num = $x[5];
        my $definition = $x[6];
        if ( not defined $definition ){
            die "Error: missing KO (column 7) from file $file\n";
        }
        if (exists $kegg_pathway_definitions{$ko_num}) {
            # Same KO seen before: its definition must not change.
            unless ($kegg_pathway_definitions{$ko_num} eq $definition) {
                croak "$file has conflicting definitions for Ko: $ko_num GeneID: $gene_id\nNew:\'"
                    . $definition . "\'\nvs\nPre-stored:\'" . $kegg_pathway_definitions{$ko_num} . "\'";
            }
        }
        else {
            $kegg_pathway_definitions{$ko_num} = $definition;
        }

        ## assign KO term to gene
        $self->{gene_to_KeggPathways}->{$gene_id}->{$ko_num} = 1;
    }
    close $fh;

    return;
}

# Return the annotation string for one gene ("KO[def];KO[def]..."), or the
# empty string when the gene has no recorded KO terms.
sub get_annotation {
    my $self = shift;
    my ($gene_id) = @_;

    if (my $att_href = $self->{gene_to_KeggPathways}->{$gene_id}) {
        my $annot = "";
        foreach my $ko (keys %$att_href) {
            if ($annot) {
                $annot .= ";";
            }
            $annot .= "$ko" . "[" . $kegg_pathway_definitions{$ko} . "]";
        }
        return($annot);
    }
    else {
        return("");
    }
}

# Return the ids of all genes that received at least one annotation.
sub get_genes_with_annotations {
    my $self = shift;

    my @genes = keys %{$self->{gene_to_KeggPathways}};
    return(@genes);
}

# Render every gene's annotation as "gene\tannotation" lines.
sub toString {
    my $self = shift;
    my $text = "";

    my @genes = $self->get_genes_with_annotations();
    foreach my $gene (@genes) {
        my $annot = $self->get_annotation($gene);
        $text .= "$gene\t$annot\n";
    }

    return($text);
}

1; #EOM
| rhysf/Synima | modules/GeneAttributeParser/KeggPathways.pm | Perl | mit | 2,018 |
=head1 NAME
pacsync - update sync databases
=head1 SYNOPSIS
pacsync [options] [<syncdb>]...
pacsync (--help|--version)
=head1 DESCRIPTION
Update sync databases. If no I<syncdb> names are provided all databases will
be updated.
=head1 OPTIONS
=over
=item B<--config>=F<path>
Set an alternate configuration file path.
=item B<--dbext>=I<extension>
Set an alternate sync database extension.
=item B<--dbpath>=F<path>
Set an alternate database path.
=item B<--logfile>=F<path>
Set an alternate log file path.
=item B<--no-timeout>
Disable low-speed timeouts for downloads.
=item B<--root>=F<path>
Set an alternate installation root.
=item B<--sysroot>=F<path>
Set an alternate system root. See L<pacutils-sysroot(7)>.
=item B<--debug>
Display additional debugging information.
=item B<--updated>
Return true only if a database was actually updated.
=item B<--force>
Update databases even if already up-to-date.
=item B<--help>
Display usage information and exit.
=item B<--version>
Display version information and exit.
=back
| andrewgregory/pacutils | doc/pacsync.pod | Perl | mit | 1,058 |
use strict;
use warnings;

package Logger;

# Minimal console logger: prints each message to STDOUT with a timestamp and
# an upper-cased severity tag; kill() logs a final message and exits.

use Method::Signatures;
use feature qw(say);
use POSIX qw(strftime);

# Known severity abbreviations and their long forms (exposed for callers).
use constant LEVELS => {
    err => 'error',
    inf => 'info',
    wrn => 'warn',
    dbg => 'debug',
    ntc => 'notice'
};

# Construct a bare logger instance.
method new {
    return bless {}, $self;
}

# Print one timestamped, severity-tagged message line.
method output($strMsg, $strType) {
    my $timestamp = strftime('%I:%M:%S[%p]', localtime());
    my $tag       = uc $strType;
    say "[$timestamp][$tag] =>> $strMsg";
}

# Log the message, then terminate the process.
method kill($strMsg, $strType) {
    $self->output($strMsg, $strType);
    exit;
}
1; | lake-icp/Luna | Utils/Logger.pm | Perl | mit | 576 |
#!/usr/pkg/bin/perl

# Query the ReachMail REST API for the account's mailing lists and print the
# XML response, pretty-printed.

use strict;
use warnings;

#---- include Perl Modules. note that HTTP::Request::Common qw(POST) is
#---- included as an example but is only necessary if the API service requires
#---- a HTTP POST
use XML::LibXML;
use LWP::UserAgent;
use HTTP::Request::Common qw(POST);

#---- set the account information variables. If you're unsure what these are
#---- contact your ReachMail account admin or email support@reachmail.com.
#---- the account_id variable needs to be retrieved using API
#---- services, please refer to service
#---- AdministrationService\GetCurrentUser
# NOTE(review): replace these placeholders with real credentials; consider
# reading them from the environment instead of hard-coding them.
my $account_key = 'account-id';
my $username    = 'username';
my $password    = 'password';
my $account_id  = 'api-account-id';

#---- set the user agent and content type ----
my $ua = LWP::UserAgent->new;
$ua->agent("$0/0.1 " . $ua->agent);
$ua->default_header('Content-Type' => "application/xml");

#---- build the list query request; the URL and credentials now use the
#---- variables above (the original repeated the placeholder text literally,
#---- so editing the variables had no effect) ----
my $list_url = HTTP::Request->new(
    POST => "https://services.reachmail.net/Rest/Contacts/v1/lists/query/$account_id");
$list_url->content('<ListFilter></ListFilter>');
$list_url->authorization_basic("$account_key\\$username", $password);
my $list_request = $ua->request($list_url);

#---- retrieve the content of the request and intialize a XML parser on it ----
my $list_xml = $list_request->content;
my $parser   = XML::LibXML->new();
my $list_dom = $parser->parse_string($list_xml);

#---- print the response ----
print $list_dom->toString(2);
| ReachmailInc/WebAPISamples | perl/samples/enumerate_lists_160.pl | Perl | mit | 1,434 |
package Genome;

# Moose class wrapping a per-genome MySQL table in the "genomes" database
# (one row per base: columns base/position).  Construction loads the FASTA
# file into a freshly created table unless a table for this genome already
# exists.
#
# NOTE(review): database credentials are hard-coded below; they should be
# moved to configuration or the environment.

use Moose;
use namespace::autoclean;

use DBI;
use Data::Dumper;
use File::Slurp;

# Shared connection parameters, factored out of the individual methods.
my $DSN     = 'dbi:mysql:genomes';
my $DB_USER = 'genomes';
my $DB_PASS = 'ebvHACK958$';

# Number of bases in the genome (largest position stored in the table).
has 'length' => (
    is      => 'ro',
    isa     => 'Int',
    builder => '_build_length',
    lazy    => 1
);

# Full reference sequence, reassembled from the database in position order.
has 'seq' => (
    is      => 'rw',
    isa     => 'Str',
    builder => '_build_seq',
    lazy    => 1
);

# Path to the FASTA file this genome is loaded from.
has 'fasta' => (
    is       => 'ro',
    isa      => 'Str',
    required => 1,
);

# MySQL table name derived from the FASTA file name (see _build_name).
has 'name' => (
    is      => 'rw',
    isa     => 'Str',
    builder => '_build_name',
);

# Derive the table name: strip the ".fa*" suffix and replace '-' with '_'
# so the result is usable as a MySQL identifier.
# NOTE(review): assumes the fasta path carries no problematic directory
# components -- confirm with callers.
sub _build_name
{
    my $self = shift;
    $self->fasta =~ m/(.*)\.fa.*$/;
    my $name = $1;
    $name =~ s/-/_/g;
    return $name;
}

# Genome length = highest position recorded for this genome's table.
sub _build_length
{
    my $self   = shift;
    my $name   = $self->name;
    my $countq = "SELECT position FROM $name ORDER BY position DESC LIMIT 1";

    my $dbh = DBI->connect($DSN, $DB_USER, $DB_PASS);
    my $sth = $dbh->prepare($countq);
    $sth->execute();

    my $row = $sth->fetchrow_arrayref();
    return $$row[0];
}

# Rebuild the sequence string by concatenating bases in ascending position.
sub _build_seq
{
    my $self = shift;
    my $name = $self->name;

    my $dbh = DBI->connect($DSN, $DB_USER, $DB_PASS);
    my $sth = $dbh->prepare("SELECT base FROM $name ORDER BY position ASC");
    $sth->execute;

    my $ref_seq = '';
    foreach my $row (@{ $sth->fetchall_arrayref })
    {
        $ref_seq .= $$row[0];
    }
    return $ref_seq;
}

# On construction: create and populate the genome's table from the FASTA
# file, unless the table already exists (in which case this is a no-op).
sub BUILD
{
    my $self = shift;
    my $name = $self->name;

    my $dbh = DBI->connect($DSN, $DB_USER, $DB_PASS);

    # Skip the (slow) load when this genome was loaded previously.
    my $check_sth = $dbh->prepare("SHOW TABLES LIKE ?");
    $check_sth->execute($name);
    return if $check_sth->fetchrow_arrayref;

    # Read the reference: the first line is the FASTA header; afterwards keep
    # only lines that are not headers ('>'), comments ('#') or blank.
    my @fasta_arr = read_file($self->fasta);
    my $ref_seq   = '';
    my $first     = 1;
    foreach my $line (@fasta_arr)
    {
        chomp($line);
        if ($first)
        {
            $first = 0;
            next;
        }
        elsif ($line =~ m/^[^>\#\s]/)
        {
            $ref_seq .= $line;
        }
    }

    my $create_sth = $dbh->prepare("CREATE TABLE $name (base VARCHAR(1), position BIGINT)");
    $create_sth->execute() or die "Error Creating table for genome $name $DBI::errstr\n";

    # Prepare the insert once and reuse the handle for every base; the
    # original re-prepared the statement on each loop iteration.
    my $insert_sth = $dbh->prepare("INSERT INTO $name (base, position) VALUES(?, ?)");
    my $counter    = 1;
    foreach my $base (split //, $ref_seq)
    {
        $insert_sth->execute($base, $counter);
        $counter++;
    }
}

# Return the base at a 1-based position.
# NOTE(review): no bounds checking; positions < 1 or > length misbehave.
sub base_at
{
    my $self = shift;
    my $pos  = shift;
    $pos--;    # convert 1-based position to string offset
    return substr($self->seq, $pos, 1);
}

__PACKAGE__->meta->make_immutable;
1; | mbiokyle29/bioinformatics | misc/perl/moose/Genome.pm | Perl | mit | 2,466 |
#!/usr/local/bin/perl -w
=pod
=head1 NAME
rename_genes_inGFF3.pl - rename the genes and transcripts names in the gff3 files
according to a name mapping table
=head1 SYNOPSIS
rename_genes_inGFF3.pl rename_file gff_file
Maintained by Sharon Wei <weix@cshl.edu>
=cut
use strict;
use warnings;

use Data::Dumper qw(Dumper);
use File::Basename;
use FindBin qw( $Bin );
use Pod::Usage;
use Getopt::Long;
use IO::File;
use Readonly;
use List::Util qw( first );
use List::MoreUtils;

# (Currently unused constants; kept for reference.)
Readonly my @NAME_FILEDS => qw(NAME ALIAS ID);
Readonly my $UTR_REGEX   => qr{UTR}xms;

my $help = 0;
my $man  = 0;
GetOptions
(
    "help|?" => \$help,
    "man"    => \$man,
)
or pod2usage(2);

pod2usage(-verbose => 2) if $man;
pod2usage(1) if $help;

my $rename_file = $ARGV[0] || pod2usage("\nNeed the path to a name mapping file\n");
my $gff_file    = $ARGV[1] || pod2usage("\nNeed the path to a gff file\n");

# Validate both input files before doing any work.
for my $path ($gff_file, $rename_file) {
    -e $path || pod2usage( "\nFile $path does not exist\n" );
    -r $path || pod2usage( "\nCannot read $path\n" );
    -f $path || pod2usage( "\nFile $path is not plain-text\n" );
    -s $path || pod2usage( "\nFile $path is empty\n" );
}

# Build the old-name => new-name table.  Keys are stored upper-cased because
# matches from the GFF are looked up via "uc" below; the original one-liner
# applied "uc $_" in void context, so mixed-case mapping files silently
# produced failed (undef) lookups.
open my $rfh, '<', $rename_file or die "Cannot open $rename_file: $!";
my %rename_table;
while (my $line = <$rfh>) {
    chomp $line;
    my ($old_name, $new_name) = split ' ', $line;
    next unless defined $new_name;
    $rename_table{ uc $old_name } = $new_name;
}
close $rfh;
die "No name mappings found in $rename_file\n" unless %rename_table;

# Create a GFF stream
my $GFF_HANDLE = IO::File->new("< $gff_file")
    or die( "Could not read $gff_file: $!" );

# Longer names first so e.g. GENE10 is preferred over GENE1; names are
# quotemeta'd so regex metacharacters in identifiers cannot break the match.
my $rename_regex = join '|', map { quotemeta } reverse sort keys %rename_table;

print "# This GFF3 file is extracted from $gff_file and rename the gene names according to mappings in $rename_file\n#\n";

while( my $line = $GFF_HANDLE->getline ){
    # Skip comment and empty lines.
    # NOTE(review): this skips any line *containing* '#', not only lines
    # starting with one -- behaviour preserved from the original.
    next if ( $line =~ /\#/ );
    next if ( $line =~ /^\s+/ );
    next unless ( $line =~ /($rename_regex)\b/i);

    my $matched_name = $1;    # captured group; avoids the $& performance tax
    my $new_name     = $rename_table{ uc $matched_name };
    $line =~ s/\Q$matched_name\E\b/$new_name/ig;
    print $line;
}
| warelab/gramene-ensembl | scripts/load-scripts/rename_genes_inGFF3.pl | Perl | mit | 2,034 |
#!/usr/bin/perl -w
# Scrape the OSU Fisher finance PhD-graduate directory year by year, build a
# Google Scholar search URL for each graduate, and write name, school, URL
# and graduation year into an Excel workbook.
use Spreadsheet::WriteExcel;
use HTML::Parser;
use LWP::Simple;
my $workbook = Spreadsheet::WriteExcel->new("GoogleScholar.xls");
my $linksheet = $workbook->add_worksheet("htmllinks");
# Graduation-year range to scan ($outyear is exclusive).
my $inyear=1980;
my $outyear=2014;
# Parallel arrays, one entry per scholar across all years.
my @scholarfname = my @scholarschool = my @scholarlname = ();
my @scholarstring = my @scholaryear = my @googlescholarurl = ();
for ($j=$inyear; $j<$outyear; $j++) {
    my $getstring = "http://fisher.osu.edu/fin/findir/indexAYG.html?gradYear=" . $j;
    my $html = get $getstring;
    # Collect every text node of the page into @accum (one arrayref each).
    HTML::Parser->new(text_h => [\my @accum, "text"])->parse($html);
    # NOTE(review): only "text" is captured per node, so $_->[1] is presumably
    # always undef here -- TODO confirm intent.
    @arrayoflines= map("$_->[0] $_->[1]\n", @accum);
    ###outputs scholar name and university at @scholarstring
    # NOTE(review): assumes the useful text starts at index 14 and alternates
    # name line / affiliation line -- fragile against site layout changes.
    my $count1=14;
    my @inputstring = ();
    # Loop until a line without any word character is reached.
    # NOTE(review): the /g flag in boolean context carries match-position
    # state between iterations; presumably a plain /\w/ test was intended.
    until ($arrayoflines[$count1] !~ /(\w+)/g) {
        if ($count1 % 2) {
            # Odd index: affiliation line ("... at <School>, ...").
            @inputstring = split(' ', $arrayoflines[$count1]);
            $count2=1;
            if ($arrayoflines[$count1] =~ m/(at .*,)/ ){
                # Strip the leading "at " and the trailing comma.
                $schoolname=$1;
                substr($schoolname,0,3)="";
                substr ($schoolname, -1) = "";
            }
            push(@scholarschool,$schoolname);
            # Skip a leading "University" token to reach the school's name.
            if ($inputstring[$count2] =~ m/University/) {
                $count2=$count2+2;
            }
            # Two-word school names such as "St. Louis" / "San Diego".
            # NOTE(review): the unescaped dot in (St.) also matches e.g.
            # "Str" -- presumably /St\./ was intended.
            if ($inputstring[$count2] =~ m/(St.)|(San)/) {
                $schoolstring = $inputstring[$count2] . "+" . $inputstring[$count2+1];
            }
            else{
                $schoolstring = $inputstring[$count2];
            }
            # Normalize into a URL query fragment.
            $schoolstring =~ s/-/+/g;
            $schoolstring =~ s/[,]//g;
            $addstring = $namestring . "+" . $schoolstring;
            push (@scholarstring, $addstring);
            $urlcall = "http://scholar.google.com/scholar?hl=en&q=" . $addstring;
            push(@googlescholarurl,$urlcall);
            push(@scholaryear, $j);
        } else {
            # Even index: scholar-name line; join first/last name with '+'.
            $namestring = $arrayoflines[$count1];
            $namestring =~ s/ /+/;
            chomp $namestring;
            $namestring =~ s/ //;
            @nameinput = split('\+',$namestring);
            push (@scholarfname, $nameinput[0]);
            push (@scholarlname, $nameinput[1]);
        }
        $count1++;
    }
}
#output google search link (name of scholar and year) in excel
$csj=0;
foreach $scholar (@googlescholarurl){
    $linksheet->write($csj,0,$scholarfname[$csj]);
    $linksheet->write($csj,1,$scholarlname[$csj]);
    $linksheet->write($csj,2,$scholarschool[$csj]);
    $linksheet->write($csj,3,$googlescholarurl[$csj]);
    $linksheet->write($csj,4,$scholaryear[$csj]);
    $csj++;
} | kkamb/googlescholar | ScholarOutput.pl | Perl | mit | 2,590 |
package AbstractProductB;

# Concrete "product B" base class in the abstract-factory example: a simple
# object carrying only a mutable string name.

use utf8;
use Data::Printer;
use Moose;

has name => (
    is  => 'rw',
    isa => "Str",
);

__PACKAGE__->meta->make_immutable;

1;
| saftacatalinmihai/PerlGoFDesignPatterns | AbstractFactory/AbstractProductB.pm | Perl | mit | 149 |
#!/usr/bin/perl

# Advent of Code 2015, day 13: read pairwise "would gain/lose N happiness"
# rules from the input and find the circular seating arrangement with the
# highest total happiness.

use strict;
use warnings;

use List::Permutor;

# happiness->{A}{B} = change in A's happiness when seated next to B.
my $happiness = {};
while (<>) {
    chomp;
    if ($_ =~ /^(.+) would (gain|lose) (\d+) happiness units by sitting next to (.+)\.$/) {
        $happiness->{$1}->{$4} = ($2 eq 'lose') ? -1 * $3 : int $3;
    }
}

# Start undefined (not 0) so inputs where every arrangement has a negative
# total are still handled instead of printing an empty arrangement.
my $best_score;
my @best_arr;

my $perm = List::Permutor->new(keys %{$happiness});
while (my @set = $perm->next) {
    my $scores = 0;
    for my $i (0..scalar(@set) - 1) {
        # The table is circular: neighbours wrap around at both ends.
        my $h = ($i == 0) ? scalar(@set) - 1 : $i - 1;
        my $j = ($i == scalar(@set) - 1) ? 0 : $i + 1;
        $scores += $happiness->{$set[$i]}->{$set[$h]} + $happiness->{$set[$i]}->{$set[$j]};
    }
    if (!defined $best_score || $scores > $best_score) {
        $best_score = $scores;
        @best_arr = @set;
    }
}

$best_score = 0 unless defined $best_score;    # empty input: keep old output shape
print join(', ', @best_arr), ' : ', $best_score, "\n";
| KenMGJ/advent-of-code | 2015/13/day13-1.pl | Perl | mit | 819 |
#!/usr/bin/perl
=head1 NAME
DBIx - Main interface to the object-relational mapper; maps shopify objects to database objects, and back again.
=cut
=head1 DESCRIPTION
DBIx represents a way to grab and upload data to the database.
=cut
=head1 EXAMPLES
To give an idea of how you're supposed to use this object, look at the following example, which builds off the example in L<WWW::Shopify>: here we get all the products, and then insert them into the database.
my $SA = new WWW::Shopify::Public($ShopURL, $APIKey, $AccessToken);
my $DBIX = new WWW::Shopify::Common::DBIx();
my @products = $SA->get_all('Product');
for (@products) {
my $product = $DBIX->from_shopify($_);
$product->insert;
}
This doesn't check for duplicates or anything else, but it's easy enough to check for that; see the DBIx documentation.
=cut
use strict;
use warnings;
use WWW::Shopify;
package WWW::Shopify::Common::DBIx;
use Exporter 'import';
our @EXPORT_OK = qw(transform_package strip_head);
# Construct a mapper.  Any arguments become the namespace: the list of model
# classes the mapper is restricted to (undef means "no restriction").
sub new {
    my ($class, @namespace) = @_;

    my $self = {
        namespace      => @namespace ? [@namespace] : undef,
        classes        => {},
        package_prefix => 'WWW::Shopify::Model::DBIx::Schema::Result',
        table_prefix   => 'shopify_',
    };
    return bless $self, $class;
};
# Read/write accessor for the generated result-class name prefix.
sub package_prefix {
    my ($self, $value) = @_;
    $self->{package_prefix} = $value if defined $value;
    return $self->{package_prefix};
}

# Read/write accessor for the generated table-name prefix.
sub table_prefix {
    my ($self, $value) = @_;
    $self->{table_prefix} = $value if defined $value;
    return $self->{table_prefix};
}
use List::Util qw(first);

# True when $package falls inside the mapper's configured namespace, or when
# no namespace restriction was given at construction time.
sub in_namespace {
    my ($self, $package) = @_;
    return 1 unless defined $self->{namespace};
    return defined first { $_ eq $package } @{ $self->{namespace} };
}
use Module::Find;
# Hand-written DBIx::Class source snippets that cannot be derived
# mechanically from the model classes, keyed by model package name.  Each
# value is a callback that receives the mapper and returns generated code.
my %arbitrary_sql = (
    "WWW::Shopify::Model::Product" => sub {
        my ($self) = @_;
        # Give products a has_many to collects so collection membership can
        # be traversed from the product side.
        return "__PACKAGE__->has_many('collects', '" . $self->package_prefix . "::Model::CustomCollection::Collect', 'product_id');\n";
    }
);

# Return the extra generated code for $package, or "" when none registered.
sub arbitrary_sql {
    my ($self, $package) = @_;
    return "" unless exists $arbitrary_sql{$package};
    $arbitrary_sql{$package}->($self);
}
# Every WWW::Shopify::Model class found on disk, excluding the generated
# DBIx tree itself.
sub all_classes {
    my @found = findallmod WWW::Shopify::Model;
    return grep { $_ !~ m/DBIx/ } @found;
}
# Return the generated source text stored for one result-class name.
sub class {
    my ($self, $class) = @_;
    return $self->{classes}->{$class};
}

# Return the generated source text of every result class.
sub classes {
    my ($self) = @_;
    return values %{ $self->{classes} };
}

# Return the names of every generated result class.
sub class_names {
    my ($self) = @_;
    return keys %{ $self->{classes} };
}
# Generate the DBIx::Class source for every package in the namespace.
sub generate_dbix_all {
    my ($self) = @_;
    foreach my $package (@{ $self->{namespace} }) {
        $self->generate_dbix($package);
    }
}
# Remove the leading "WWW::Shopify::" from a package name; dies when the
# name is not under that namespace.
sub strip_head {
    my ($name) = @_;
    $name =~ m/^WWW::Shopify::(.*)/s or die;
    return $1;
}

# Map a model package name to its generated result-class name.  Callable as
# a method (uses the mapper's configured prefix) or a plain function (uses
# the default prefix).
# NOTE(review): the ref check compares against __PACKAGE__ exactly, so a
# subclassed mapper would take the function branch -- confirm intent.
sub transform_package {
    if (ref($_[0]) && ref($_[0]) eq __PACKAGE__) {
        my ($self, $package) = @_;
        return $self->package_prefix . "::" . strip_head($package);
    }
    return "WWW::Shopify::Model::DBIx::Schema::Result::" . strip_head($_[0]);
}
# Join-table name for two model classes: their plural names, sorted by the
# package-name strings and concatenated.
sub joining_table_name {
    my ($self, @packages) = @_;
    return join '', map { $_->plural } sort @packages;
}

# Join-class name for two model classes: the configured prefix plus the
# final components of both package names, sorted and concatenated.
sub joining_class_name {
    my ($self, @packages) = @_;
    return $self->package_prefix . "::Model::" . join '', map { $_ =~ m/\:\:(\w+)$/; $1; } sort @packages;
}
# Generate and store the DBIx::Class result class for the join table linking
# two model classes (used by many-to-many relations).  The generated class
# has a surrogate "id" primary key plus one belongs_to per side.
sub generate_dbix_join {
    my ($self, $join1, $join2) = @_;
    my $name = $self->joining_class_name($join1, $join2);
    # The generated source is cached under the join-class name; note the
    # string below is emitted verbatim as Perl source.
    $self->{classes}->{$name} = "
#!/usr/bin/perl
use strict;
use warnings;
# This class is generated from DBIx.pm. Do not modify.
package $name;
use base qw/DBIx::Class::Core/;
__PACKAGE__->table('" . $self->table_prefix . $self->joining_table_name($join1, $join2) . "');
__PACKAGE__->add_columns(
'id', { data_type => 'INT', is_nullable => '0', is_auto_increment => 1 },
'" . $join1->singular . "_id', { data_type => '" . WWW::Shopify::Field::Identifier->sql_type . "', is_nullable => 0 },
'" . $join2->singular . "_id', { data_type => '" . WWW::Shopify::Field::Identifier->sql_type . "', is_nullable => 0 }
);
__PACKAGE__->set_primary_key('id');
__PACKAGE__->belongs_to(" . $join1->singular . " => '" . $self->transform_package($join1) . "', '" . $join1->singular . "_id');
__PACKAGE__->belongs_to(" . $join2->singular . " => '" . $self->transform_package($join2) . "', '" . $join2->singular . "_id');
1;";
}
use Module::Find;
BEGIN { foreach my $package (findallmod WWW::Shopify::Model) { $package =~ s/::/\//g; eval { require $package . '.pm' }; print STDERR $@ if $@; } }
# Essentially an internal method.
# Generates a DBIx schema from the specified package.
use List::Util qw(first);
# Work out which column links a nested model's table to its parent's table.
# Returns the column name; in the fallback case returns a two-element list
# ($column_name, $needs_adding) where the flag is true when the model does
# not itself declare the derived column.
sub get_parent_column_name {
    my ($self, $package) = @_;
    my $fields = $package->fields;

    # 1) an explicitly flagged parent relation wins;
    my $parent_field = first { $_->is_relation && $_->is_parent } values %$fields;
    return $parent_field->name if $parent_field;

    # 2) then a literal "parent_id" column;
    return 'parent_id' if $fields->{parent_id};

    # 3) otherwise derive "<parent singular>_id" and report whether the
    #    model already declares it.
    my $derived = $package->parent->singular . '_id';
    return ($derived, !exists $fields->{$derived});
}
# True when a model's table should carry a shop_id column: anything that is
# not the Shop itself and not nested inside another model, except addresses,
# which always get one.
sub has_shop_field {
    my ($thing) = @_;
    my $package = ref($thing) ? ref($thing) : $thing;
    # Generated DBIx classes delegate to the model class they represent.
    $package = $package->represents if $package =~ m/DBIx/;
    return !$package->is_shop && (!$package->is_nested || $package =~ m/Address/);
}
use List::Util qw(first);
# Generate the DBIx::Class result-class source for one model package and
# cache it in $self->{classes}, keyed by the transformed class name.  Covers
# plain columns, the parent-linkage column for nested models, belongs_to /
# has_many / has_one / many-to-many relations, the implicit shop relation,
# unique constraints, and hand-written extras from %arbitrary_sql.
sub generate_dbix {
    my ($self, $package) = @_;
    my $fields = $package->fields;
    my @ids = $package->identifier;
    # Whether any column is a DATETIME (drives the InflateColumn component).
    my $has_date = (defined first { $fields->{$_}->sql_type eq "DATETIME" } keys(%$fields));
    my $parent_variable = undef;
    # Nested models get tables named "<parent plural>_<plural>".
    my $table_name = $package->plural;
    $table_name = $package->parent->plural . "_" . $table_name if $package->parent;
    my @columns = ();
    # If we're a nested item, and we don't have something called either parent_id or <parent->singular>_id, or something marked a relation parent, create one, 'cause we're expecting it.
    if ($package->parent) {
        my $needs_adding;
        ($parent_variable, $needs_adding) = $self->get_parent_column_name($package);
        push(@columns, "\"$parent_variable\", { data_type => '" . WWW::Shopify::Field::Identifier->sql_type . "' }") if $needs_adding;
        push(@ids, $parent_variable) if $needs_adding && !$fields->{id};
    }
    # All simple (non-relation) columns.
    foreach my $field_name (grep { !$fields->{$_}->is_relation } keys(%$fields)) {
        my $field = $fields->{$field_name};
        my %attributes = ();
        $attributes{'data_type'} = $field->sql_type;
        # Nullable unless the column is an identifier or the parent link.
        $attributes{'is_nullable'} = ((!first { $field_name eq $_ } $package->identifier) && (!$package->is_nested || !$package->parent || $parent_variable ne $field_name)) ? 1 : 0;
        push(@columns, "\"$field_name\", { " . join(", ", map { "$_ => '" . uc($attributes{$_}) . "'" } keys(%attributes)) . " }");
    }
    # If we don't have an ID give us one, so that all DB stuff can have primary keys.
    if (!$fields->{'id'}) {
        push(@columns, "\"id\", { data_type => 'BIGINT', is_nullable => 0, is_auto_increment => 1 }");
    }
    # All relationship columns that are belong to.
    # ReferenceOne / Non-Nested / Interior : Belongs To
    # ReferenceOne / Non-Nested / Exterior : Belongs To
    # Parent : Belongs To
    # OwnOne / Non-Nested / Interior : Belongs To
    # OwnOne / Nested / Exterior : Belong To
    my @field_relations = grep { $fields->{$_}->is_relation && $self->in_namespace($fields->{$_}->relation) } keys(%$fields);
    my @relationships = ();
    foreach my $field_name (grep { $fields->{$_}->is_db_belongs_to } @field_relations) {
        my $field = $fields->{$field_name};
        die $field_name unless $field->relation;
        my %attributes = ();
        my $accessor_name;
        # The column keeps/gains an "_id" suffix; the accessor drops it.
        my $mod_field_name = $field_name;
        if ($field_name =~ m/_id$/) {
            $accessor_name = $`;
        }
        else {
            $accessor_name = $field_name;
            $mod_field_name = $field_name . "_id";
        }
        $attributes{'data_type'} = $field->sql_type;
        # Generally make non-parent fields nullable.
        $attributes{'is_nullable'} = 1 unless $field->is_parent;
        push(@columns, "\"$mod_field_name\", { " . join(", ", map { "$_ => '" . uc($attributes{$_}) . "'" } keys(%attributes)) . " }");
        push(@relationships, "__PACKAGE__->belongs_to($accessor_name => '" . $self->transform_package($field->relation) . "', '$mod_field_name'" . ($attributes{'is_nullable'} ? ", { join_type => 'left' }" : "") . ");");
    }
    # Many / Nested / Interior : Has Many
    foreach my $field_name (grep { $fields->{$_}->is_db_has_many } @field_relations) {
        my $field = $fields->{$field_name};
        my ($parent_var) = $self->get_parent_column_name($field->relation);
        push(@relationships, "__PACKAGE__->has_many($field_name => '" . $self->transform_package($field->relation) . "', '" . $parent_var . "');");
    }
    # OwnOne / Nested / Interior : Has One
    foreach my $field_name (grep { $fields->{$_}->is_db_has_one } @field_relations) {
        my $field = $fields->{$field_name};
        my $parent_variable = $self->transform_package($field->relation)->parent_variable;
        push(@relationships, "__PACKAGE__->has_one($field_name => '" . $self->transform_package($field->relation) . "', '$parent_variable');");
    }
    # OwnOne / Non-Nested / Exterior : Many-Many
    # Many / Nested : Many-Many
    # Many / Non-Nested : Many-Many
    foreach my $field_name (grep { $fields->{$_}->is_db_many_many } @field_relations) {
        my $field = $fields->{$field_name};
        # Many-to-many goes through a generated join class (see
        # generate_dbix_join) plus a "<field>_hasmany" bridge accessor.
        my $joining_name = $self->joining_class_name($package, $field->relation);
        my $accessor_name = $field_name . "_hasmany";
        $self->generate_dbix_join($package, $field->relation);
        push(@relationships, "__PACKAGE__->has_many($accessor_name => '" . $joining_name . "', '" . $package->singular . "_id');");
        push(@relationships, "__PACKAGE__->many_to_many($field_name => '$accessor_name', '" . $field->relation->singular . "');");
    }
    my @shop_relations = ();
    if ($package->is_shop) {
        # Get a list somewhere of all the top-level stuff.
        @shop_relations = map { "__PACKAGE__->has_many(" . $_->plural . " => '" . $self->transform_package($_) . "', 'shop_id');" }
            grep { $_ =~ m/Address/i || (!$_->is_nested && !$_->is_shop && $_ !~ m/metafield/i) } @{$self->{namespace}};
    }
    elsif (has_shop_field($package)) {
        push(@columns, "\"shop_id\", { data_type => \"BIGINT\" }");
        push(@shop_relations, "__PACKAGE__->belongs_to(shop => '" . $self->package_prefix . "::Model::Shop', 'shop_id');");
    }
    my @unique_keys = $package->unique_fields;
    # Emit the class source.  NOTE(review): InflateColumn::DateTime is loaded
    # unconditionally and then again when $has_date -- presumably the first
    # load was meant to be the conditional one.  Also note the "(0 ? @ids :
    # \"id\")" ternary permanently selects the surrogate "id" primary key.
    $self->{classes}->{$self->transform_package($package)} = "
#!/usr/bin/perl
use strict;
use warnings;
# This class is generated from DBIx.pm. Do not modify.
package " . $self->transform_package($package) . ";
use base qw/DBIx::Class::Core/;
__PACKAGE__->load_components(qw/InflateColumn::DateTime/);
" . ($has_date ? "__PACKAGE__->load_components(qw/InflateColumn::DateTime/);" : "") . "
__PACKAGE__->table('" . $self->table_prefix . $table_name . "');
__PACKAGE__->add_columns(
" . join(",\n\t", @columns) . "
);
__PACKAGE__->set_primary_key(" . join(", ", map { "'$_'" } (0 ? @ids : "id")) . ");
" . ((int(@unique_keys) > 0) ? "__PACKAGE__->add_unique_constraint(constraint_name => [ " . join(" ", map { "\"$_\"" } @unique_keys) . " ]);" : "") . "
" . join("\n", @shop_relations) . "
" . join("\n", @relationships) . "
sub represents { return '" . $package . "'; }
sub parent_variable { return " . ($parent_variable ? "'$parent_variable'" : "undef") . "; }
" . $self->arbitrary_sql($package) . "
1;";
}
use JSON qw(encode_json from_json);
use WWW::Shopify::Common::DBIxGroup;
# Takes in a schema and a shopify object and maps it to a DBIx existence.
# Map a live WWW::Shopify::Model::* object onto its DBIx::Class row.
#
# Arguments:
#   $schema        - DBIx::Class schema used to find/create row objects
#   $shopifyObject - the shopify-side object to persist
#   $shop_id       - id of the owning shop row, propagated to children
#
# Returns a WWW::Shopify::Common::DBIxGroup wrapping the row plus any
# has-many children that must be inserted alongside it, or undef when no
# object was supplied.
sub from_shopify {
    # Convert a single field value according to its field-type descriptor.
    my $internal_from = sub {
        my ($self, $schema, $type, $data, $shop_id) = @_;
        # If we have a class relationship.
        if ($type->is_relation) {
            return undef if $type->relation && !$self->in_namespace($type->relation);
            if ($type->is_many()) {
                return [] unless $data;
                my $array = [map { $self->from_shopify($schema, $_, $shop_id); } @$data];
                return $array;
            }
            elsif ($type->is_own()) {
                return {} unless $data;
                # BUGFIX: recurse with the full argument list; previously only
                # $data was passed, so it landed in the $schema slot (compare
                # the is_many branch above).
                return $self->from_shopify($schema, $data, $shop_id);
            }
            elsif ($type->is_reference() && $type->is_one()) {
                return undef unless $data;
                return $type->from_shopify($data);
            }
        }
        # BUGFIX: freeform fields serialise their *data* to JSON (mirror of
        # the from_json call in to_shopify). The old code compared the field
        # object itself with string eq (always false for a blessed ref) and
        # encoded the type descriptor instead of the value.
        return encode_json($data) if ($data && ref($type) eq 'WWW::Shopify::Field::Freeform');
        return $type->from_shopify($data);
    };
    my ($self, $schema, $shopifyObject, $shop_id) = @_;
    return undef unless $shopifyObject;
    # BUGFIX: error text said "to_shopify" (copy/paste from the sibling sub);
    # also use the unambiguous arrow constructor form.
    die WWW::Shopify::Exception->new('Invalid object passed into from_shopify: ' . ref($shopifyObject) . '.') unless ref($shopifyObject) =~ m/Model::/;
    my $dbPackage = $self->transform_package(ref($shopifyObject));
    my $dbObject = undef;
    my %identifiers = map { $_ => $shopifyObject->$_ } $shopifyObject->identifier;
    if ($shopifyObject->is_nested && $dbPackage->parent_variable) {
        die WWW::Shopify::Exception->new("Invalid nested object passed into from_shopify.") unless $shopifyObject->associated_parent;
        $identifiers{$dbPackage->parent_variable} = $shopifyObject->associated_parent->id if $shopifyObject->associated_parent->can('id');
    }
    # Reuse an existing row when the identifiers match, otherwise start blank.
    $dbObject = $schema->resultset($dbPackage)->find(\%identifiers) if $shopifyObject;
    $dbObject = $schema->resultset($dbPackage)->new({}) unless $dbObject;
    my $fields = $shopifyObject->fields();
    my $group = WWW::Shopify::Common::DBIxGroup->new(contents => $dbObject);
    # Anything that's many-many like metafields shouldn't set parent variables on themselves. Or not.
    if ($shopifyObject->associated_parent && ref($shopifyObject) !~ m/Metafield$/ && $dbObject->parent_variable) {
        my $parent_variable = $dbObject->parent_variable;
        $dbObject->$parent_variable($shopifyObject->associated_parent->id) if $shopifyObject->associated_parent->can('id');
    }
    if (has_shop_field($shopifyObject) && $shop_id) {
        $dbObject->shop_id($shop_id);
    }
    # A shop row is its own shop scope for any children converted below.
    if ($shopifyObject->is_shop) {
        $shop_id = $dbObject->id;
    }
    foreach my $key (keys(%$fields)) {
        next if $key =~ m/metafields/;
        my $data = $shopifyObject->$key();
        if ($fields->{$key}->is_relation && $fields->{$key}->is_many()) {
            $_->associated_parent($shopifyObject) for (@$data);
        }
        my $db_value = &$internal_from($self, $schema, $fields->{$key}, $data, $shop_id);
        if ($fields->{$key}->is_relation && $fields->{$key}->is_many()) {
            # has-many children are inserted via the group, not set on the row.
            $group->add_children(grep { defined $_ } @$db_value);
        }
        elsif (!$fields->{$key}->is_relation || ($fields->{$key}->is_reference && !$fields->{$key}->is_parent)) {
            $dbObject->$key($db_value);
        }
    }
    return $group;
}
# Convert a DBIx::Class row object back into a live WWW::Shopify::Model::*
# object (the inverse of from_shopify).
#
# Arguments:
#   $dbObject - a row object whose class provides represents()/parent_variable()
#   $test     - API/test handle forwarded to associate() and nested conversions
#
# Returns the reconstructed shopify-side object, or undef when $dbObject is
# not supplied. Dies with a WWW::Shopify::Exception for non-Model classes.
sub to_shopify {
	# Convert a single column/relationship value according to its field type.
	my $internal_to = sub {
		my ($self, $type, $data, $shopifyObject, $test) = @_;
		# If we have a class relationship.
		if ($type->is_relation()) {
			if ($type->is_db_has_many || $type->is_db_many_many) {
				# Metafields are deliberately not expanded here.
				return undef if $type->relation =~ m/Metafield/;
				return [] unless $data;
				# $data is a DBIx resultset; convert each row and link it to its parent.
				my $array = [map { my $object = $self->to_shopify($_, $test); $object->associated_parent($shopifyObject); $object } $data->all()];
				return $array;
			}
			elsif ($type->is_own()) {
				return {} unless $data;
				my $object = $self->to_shopify($data, $test);
				$object->associated_parent($shopifyObject);
				return $object;
			}
			elsif ($type->is_reference()) {
				return undef unless $data;
				return $type->to_shopify($data, $test);
			}
		}
		# This seems confusing, but due to us storing our stuff in the database as Shopify stuff
		# We're transferring TYPE from shopify, but SELF to shopify.
		return $type->from_shopify($data) if ref($type) =~ m/timezone/i;
		# Freeform fields were stored as JSON text; decode them back to a structure.
		return from_json($data) if ($data && ref($type) eq 'WWW::Shopify::Field::Freeform');
		return $data;
	};
	my ($self, $dbObject, $test) = @_;
	return undef unless $dbObject;
	die new WWW::Shopify::Exception('Invalid object passed into to_shopify: ' . ref($dbObject) . '.') unless ref($dbObject) =~ m/Model::/;
	# represents() yields the shopify-side class name this row maps to.
	my $shopifyObject = $dbObject->represents()->new;
	my $fields = $shopifyObject->fields();
	foreach my $key (keys(%$fields)) {
		# Easiest way, AFAICT to work around this: http://lists.scsys.co.uk/pipermail/dbix-class/2009-December/008687.html
		# DBIx strangeness.
		my $data;
		if ($dbObject->can($key . "_id")) {
			# Only follow the relationship accessor when its FK column is set,
			# otherwise DBIx would issue a lookup for a NULL key.
			my $method = $key . "_id";
			$data = $dbObject->$key if ($dbObject->$method);
		} else {
			$data = $dbObject->$key;
		}
		# NOTE(review): writes straight into the object hash, bypassing any
		# accessor validation -- presumably intentional for speed; confirm.
		$shopifyObject->{$key} = &$internal_to($self, $fields->{$key}, $data, $shopifyObject, $test) if defined $data;
	}
	$shopifyObject->associate($test);
	return $shopifyObject;
}
=head1 SEE ALSO
L<WWW::Shopify>
=head1 AUTHOR
Adam Harrison
=head1 LICENSE
See LICENSE in the main directory.
=cut
1
| gitpan/WWW-Shopify | lib/WWW/Shopify/Common/DBIx.pm | Perl | mit | 15,865 |
use strict;
use Data::Dumper;
use Test::More;
use Config::Simple;
use Time::HiRes qw(time);
use Bio::KBase::AuthToken;
use installed_clients::WorkspaceClient;
use ReferenceDataManager::ReferenceDataManagerImpl;
use Config::IniFiles;
# --- Test harness setup -------------------------------------------------
# Autoflush STDOUT so interleaved client output appears in order.
local $| = 1;
# Credentials and deployment configuration are injected via the environment.
my $token = $ENV{'KB_AUTH_TOKEN'};
my $config_file = $ENV{'KB_DEPLOYMENT_CONFIG'};
# IDIOM FIX: indirect object syntax ("new Class(...)") replaced with the
# unambiguous arrow form throughout this block; behavior is unchanged.
my $config = Config::Simple->new($config_file)->get_block('ReferenceDataManager');
my $ws_url = $config->{"workspace-url"};
my $ws_name = undef;    # lazily assigned by get_ws_name()
my $ws_client = installed_clients::WorkspaceClient->new($ws_url, token => $token);
my $auth_token = Bio::KBase::AuthToken->new(token => $token, ignore_authrc => 1, auth_svc => $config->{'auth-service-url'});
print("ws url:".$config->{'workspace-url'} . "\n");
print("auth url:".$config->{'auth-service-url'} . "\n");
# Fake call context so the service implementation can be exercised locally.
my $ctx = LocalCallContext->new($token, $auth_token->user_id);
$ReferenceDataManager::ReferenceDataManagerServer::CallContext = $ctx;
my $impl = ReferenceDataManager::ReferenceDataManagerImpl->new();
# Lazily create (once) and return the name of the scratch workspace used by
# these tests. Creating the workspace on first use means tests that never
# touch a workspace do not leave one behind.
sub get_ws_name {
    unless (defined $ws_name) {
        # Millisecond timestamp keeps concurrent test runs from colliding.
        $ws_name = sprintf('test_RAST_SDK_%d', int(time * 1000));
        $ws_client->create_workspace({ workspace => $ws_name });
    }
    return $ws_name;
}
# Smoke test for batch retrieval: fetch a fixed set of five workspace
# objects in a single get_objects2 call and return the service's response.
sub test_getObj2 {
    my @refs = qw(
        20904/54220/1
        20904/54221/1
        20904/54222/1
        20904/54223/1
        20904/54252/1
    );
    my $obj_refs = [ map { { 'ref' => $_ } } @refs ];
    $ws_client->get_objects2({
        objects => $obj_refs
    });
}
# Assert the minimal structural expectations for an annotated genome object:
# the features/cdss/mrnas arrays exist and each holds exactly one element.
# Emits six Test::More checks; nothing meaningful is returned.
sub check_genome_obj {
    my ($genome_obj) = @_;
    ok(defined($genome_obj->{features}), "Features array is present");
    # IDIOM FIX: counts are numbers, so compare with == rather than the
    # string operator eq (which only worked by accidental stringification).
    ok(scalar @{ $genome_obj->{features} } == 1, "Number of features");
    ok(defined($genome_obj->{cdss}), "CDSs array is present");
    ok(scalar @{ $genome_obj->{cdss} } == 1, "Number of CDSs");
    ok(defined($genome_obj->{mrnas}), "mRNAs array is present");
    ok(scalar @{ $genome_obj->{mrnas} } == 1, "Number of mRNAs");
}
# Fetch the genome list prepared for RAST annotation from the service impl.
#
# NOTE(review): $params is assembled here (and get_ws_name() creates the
# test workspace as a side effect) but is never passed on --
# get_genomes4RAST() is invoked with no arguments, as at the other call
# sites in this file. Confirm whether $params was meant to be forwarded.
sub test_rast_genomes {
    my($genomes) = @_;
    my $params={
        genomes=>$genomes,
        workspace_name=>get_ws_name()
    };
    return $impl->get_genomes4RAST();
}
=begin
#Testing _updateGenomesCore function
my $updret;
eval {
#$updret = $impl->_updateGenomesCore("GenomeFeatures_ci", "Genomes_ci","KBaseGenomes.Genome-12.3");
$updret = $impl->_updateGenomesCore("GenomeFeatures_prod", "Genomes_prod","KBaseGenomes.Genome-8.2");
};
ok(!$@, "_updateGenomesCore command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Result status: " .$updret."\n";
}
ok(defined($updret), "_updateGenomesCore command returneds a value:" . $updret);
=cut
=begin
#Testing list_loaded_genomes
my $wsret;
eval {
$wsret = $impl->list_loaded_genomes({
genome_ver => 1,
data_source => "refseq",#"others",
create_report => 1,
save_date => "2017-06-1",
workspace_name => get_ws_name()
#other_ws => "qzhang:narrative_1493170238855"
});
};
ok(!$@,"list_loaded_genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$wsret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$wsret->[@{$wsret} -1]])."\n";
#print Data::Dumper->Dump([$wsret->[0]])."\n";
}
ok(defined($wsret->[0]),"list_loaded_genomes command returned at least one genome");
=cut
=begin
#Testing get_genomes4RAST function
my $rgret;
eval {
$rgret = $impl->get_genomes4RAST();
};
ok(!$@,"get_genomes4RAST command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:". $rgret->{genome_text}."\n";
}
ok(defined($rgret->{genome_text}),"get_genomes4RAST command returned successfully.");
=cut
=begin
#Testing _getWorkspaceGenomes function
my $rgret;
eval {
$rgret = $impl->_getWorkspaceGenomes("ReferenceDataManager", "KBaseGenomes.Genome-14.", undef, '2018-05-19');
#$rgret = $impl->_getWorkspaceGenomes("qzhang:narrative_1493170238855","KBaseGenomes.Genome-8.2",0,'2018-05-19');
};
ok(!$@,"_getWorkspaceGenomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:". @{$rgret->{genome_names}}."\n";
}
ok(defined($rgret->[0]),"_getWorkspaceGenomes command returned successfully.");
=cut
=begin
#Testing update_loaded_genomes function
my $wsgnmret;
eval {
$wsgnmret = $impl->update_loaded_genomes({
refseq => 1,
start_offset => 87470,
kb_env => 'prod'
});
};
ok(!$@,"update_loaded_genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$wsgnmret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$wsgnmret->[0]])."\n";
}
ok(defined($wsgnmret->[0]),"update_loaded_genomes command returned at least one record");
=cut
=begin
#Testing index_genomes_in_solr
my $slrcore = "GenomeFeatures_prod";
my $ret;
my $gnms = [
{
'ref' => '15792/114157/1',
'source' => 'refseq',
'id' => 'GCF_002140775',
'accession' => 'GCF_002140775.1',
'version' => '1',
'workspace_name' => 'ReferenceDataManager',
'domain' => 'bacteria',
'source_id' => 'GCF_002140775',
'name' => 'GCF_002140775'
},
{
'version' => '1',
'accession' => 'GCF_002162135.1',
'id' => 'GCF_002162135',
'source' => 'refseq',
'ref' => '15792/114154/2',
'name' => 'GCF_002162135',
'source_id' => 'GCF_002162135',
'domain' => 'bacteria',
'workspace_name' => 'ReferenceDataManager'
}
];
eval {
$ret = $impl->index_genomes_in_solr({
genomes => [],#$gnms,#[@{$wsret}[(@{$wsret} - 2)..(@{$wsret} - 1)]],#$wsret, #[@{$wsret}[0..1]],
solr_core => $slrcore,
genome_ver => 1,
genome_source => 'refseq',#'others',
genome_ws => 'ReferenceDataManager', #'ReferenceGenomeWS',
genome_count => 50000,
save_date => "2017-06-13",
start_offset => 0,
index_features => 1
});
};
ok(!$@,"index_genomes_in_solr command successful");
if ($@) {
print "ERROR:".$@;
#my $err = $@;
#print "Error type: " . ref($err) . "\n";
#print "Error message: " . $err->{message} . "\n";
#print "Error error: " . $err->{error} . "\n";
#print "Error data: " .$err->{data} . "\n";
} else {
print "Number of records:".@{$ret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$ret->[0]])."\n";
}
ok(defined($ret->[0]),"\nindex_genomes_in_solr command returned at least one genome");
=cut
#=begin
#Testing the list_reference_genomes function
# Exercise list_reference_genomes against RefSeq across four domains and
# report the first returned record. $refret is reused by later test blocks.
my $refret;
eval {
    $refret = $impl->list_reference_genomes({
        refseq => 1,
        domain => "bacteria,archaea,plant,fungi",
        update_only => 0,
        create_report => 0,
        workspace_name => get_ws_name()
    });
};
# $@ is empty on success; the same flag drives the diagnostic branch below.
ok(!$@,"list_reference_Genomes command successful");
if ($@) {
    print "ERROR:".$@;
} else {
    print "Number of records:".@{$refret}."\n";
    print "First record:\n";
    print Data::Dumper->Dump([$refret->[0]])."\n";
    #print Data::Dumper->Dump([$refret->[@{$refret} - 1]])."\n";
}
ok(defined($refret->[0]),"list_reference_Genomes command returned at least one genome");
#=cut
=begin
#Testing list_solr_genomes function
my $sgret;
eval {
$sgret = $impl->list_solr_genomes({
solr_core => "Genomes_prod",
domain => "Bacteria",
create_report => 1,
workspace_name => get_ws_name(),
complete => 1
});
};
ok(!$@,"list_solr_genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$sgret}."\n";
print "First record:\n";
print Dumper($sgret->[0])."\n";
}
ok(defined($sgret->[0]),"list_solr_genomes command returned at least one genome");
=cut
eval {
=begin
#Testing list_solr_genomes function
my $sgret;
eval {
$sgret = $impl->list_solr_genomes({
solr_core => "Genomes_prod",
domain => "Bacteria",
complete => 1
});
};
ok(!$@,"list_solr_genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$sgret}."\n";
print "First record:\n";
print Dumper($sgret->[0])."\n";
}
ok(defined($sgret->[0]),"list_solr_genomes command returned at least one genome");
=cut
=begin
my $rast_ret;
my $sgret = undef;
eval {
$rast_ret = test_rast_genomes($sgret);
};
ok(!$@, "test_rast_genomes ran successfully.");
if( $@) {
print "ERROR:".$@;
} else {
print Dumper($rast_ret)."\n";
}
=cut
#=begin
#Testing load_genomes function
# Load the last ten genomes from the reference listing ($refret, produced by
# the list_reference_genomes block above) with Solr indexing disabled, and
# dump the final loaded record.
my $ret; my $ref_genomes;
@{$ref_genomes} = @{$refret}[@{$refret}-10..@{$refret}-1];
eval {
    $ret = $impl->load_genomes({
        genomes => $ref_genomes,
        index_in_solr => 0
    });
};
ok(!$@,"load_genomes command successful");
if ($@) {
    print "ERROR:".$@;
    my $err = $@;
    print "Error type: " . ref($err) . "\n";
    print "Error message: " . $err->{message} . "\n";
    print "Error error: " . $err->{error} . "\n";
    print "Error data: " .$err->{data} . "\n";
} else {
    print "Loaded " . scalar @{$ret} . " genomes:\n";
    print Data::Dumper->Dump([$ret->[@{$ret}-1]])."\n";
}
ok(defined($ret->[0]),"load_genomes command returned at least one genome");
# BUGFIX: this delimiter was a bare "=cut" with no open POD block, which
# triggers a "=cut found outside a pod block" warning at compile time.
# Comment it out so it matches the "#=begin" marker above.
#=cut
=begin
#Testing load_refgenomes function
my $rret;
eval {
$rret = $impl->load_refgenomes({
refseq=>1,
index_in_solr=>0,
kb_env => 'ci',
cut_off_date => '2018-05-19',
start_offset => 0,
genome_type => "KBaseGenomes.Genome-15.1" # "KBaseGenomes.Genome-10."
});
};
ok(!$@,"load_refgenomes command successful");
if ($@) {
print "ERROR:".$@;
my $err = $@;
print "Error type: " . ref($err) . "\n";
print "Error message: " . $err->{message} . "\n";
print "Error error: " . $err->{error} . "\n";
print "Error data: " .$err->{data} . "\n";
} else {
print "Loaded " . scalar @{$rret} . " genomes:\n";
print Data::Dumper->Dump([$rret->[@{$rret}-1]])."\n";
}
ok(defined($rret->[0]),"load_refgenomes command returned at least one genome");
=cut
done_testing(3);
};
=begin old testings
eval {
#Altering workspace map
$impl->{_workspace_map}->{refseq} = "ReferenceDataManager";
#$impl->{_workspace_map}->{refseq} = "Phytozome_Genomes";
#$impl->{_workspace_map}->{refseq} = "RefSeq_Genomes";
#$impl->{_workspace_map}->{refseq} = "KBasePublicRichGenomesV5";
#Testing update_loaded_genomes function
my $wsgnmret;
eval {
$wsgnmret = $impl->update_loaded_genomes({
refseq => 1,
kb_env => 'ci'
});
};
ok(!$@,"update_loaded_genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$wsgnmret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$wsgnmret->[0]])."\n";
}
ok(defined($wsgnmret->[0]),"update_loaded_genomes command returned at least one record");
#Testing _listGenomesInSolr
my $solrret;
eval {i
#$solrret = $impl->_listGenomesInSolr("Genomes_ci", "*",0,0,"KBaseGenomes.Genome-12.3");
$solrret = $impl->_listGenomesInSolr("Genomes_prod", "*",0,0,"KBaseGenomes.Genome-8.2");
};
ok(!$@, "_listGenomesInSolr command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "List Genomes in Solr results:";
print $solrret->{response}->{response}->{numFound}."\n";
}
ok(defined($solrret),"_listGenomesInSolr command returned at least one genome");
#Testing list_solr_taxa function
my $stret;
eval {
$stret = $impl->list_solr_taxa({
solr_core => "taxonomy_ci",
group_option => "taxonomy_id"
});
};
ok(!$@,"list_solr_taxa command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$stret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$stret->[0]])."\n";
}
ok(defined($stret->[0]),"list_solr_taxa command returned at least one genome");
#Testing the list_reference_genomes function
my $refret;
eval {
$refret = $impl->list_reference_genomes({
refseq => 1,
domain => "bacteria,archaea,plant,fungi",
update_only => 0
});
};
ok(!$@,"list_reference_Genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$refret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$refret->[0]])."\n";
#print Data::Dumper->Dump([$refret->[@{$refret} - 1]])."\n";
}
ok(defined($refret->[0]),"list_reference_Genomes command returned at least one genome");
#Testing _checkGenomeStatus function
my $gnstatusret;
eval {
$gnstatusret = $impl->_checkGenomeStatus($refret->[0], "GenomeFeatures_prod");
#$gnstatusret = $impl->_checkGenomeStatus($refret->[@{$refret} - 1], "GenomeFeatures_prod");
};
ok(!$@, "_checkGenomeStatus command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Result status: " .$gnstatusret."\n";
}
ok(defined($gnstatusret), "_checkGenomeStatus command returneds a value");
#Testing _checkTaxonStatus function
my $txstatusret;
eval {
$txstatusret = $impl->_checkTaxonStatus($refret->[0], "taxonomy_ci");
#$txstatusret = $impl->_checkTaxonStatus($refret->[@{$refret} - 1], "taxonomy_ci");
};
ok(!$@, "_checkTaxonStatus command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Result status: " .$txstatusret."\n";
}
ok(defined($txstatusret), "_checkTaxonStatus command returneds a value");
#Testing _updateGenomesCore function
my $updret;
eval {
$updret = $impl->_updateGenomesCore("GenomeFeatures_ci", "Genomes_ci","KBaseGenomes.Genome-12.3");
};
ok(!$@, "_updateGenomesCore command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Result status: " .$updret."\n";
}
ok(defined($updret), "_updateGenomesCore command returneds a value:" . $updret);
#Testing load_genomes function
my $ret;
eval {
$ret = $impl->load_genomes({
genomes => $refret,
index_in_solr => 0
});
};
ok(!$@,"load_genomes command successful");
if ($@) {
print "ERROR:".$@;
my $err = $@;
print "Error type: " . ref($err) . "\n";
print "Error message: " . $err->{message} . "\n";
print "Error error: " . $err->{error} . "\n";
print "Error data: " .$err->{data} . "\n";
} else {
print "Loaded " . scalar @{$ret} . " genomes:\n";
print Data::Dumper->Dump([$ret->[@{$ret}-1]])."\n";
}
ok(defined($ret->[0]),"load_genomes command returned at least one genome");
#Testing load_refgenomes function
my $rret;
eval {
$rret = $impl->load_refgenomes({
refseq=>1,
index_in_solr=>0,
start=>80000
});
};
ok(!$@,"load_refgenomes command successful");
if ($@) {
print "ERROR:".$@;
my $err = $@;
print "Error type: " . ref($err) . "\n";
print "Error message: " . $err->{message} . "\n";
print "Error error: " . $err->{error} . "\n";
print "Error data: " .$err->{data} . "\n";
} else {
print "Loaded " . scalar @{$rret} . " genomes:\n";
print Data::Dumper->Dump([$rret->[@{$rret}-1]])."\n";
}
ok(defined($rret->[0]),"load_refgenomes command returned at least one genome");
#Delete docs or wipe out the whole $delcore's content----USE CAUTION!
my $delcore = "QZtest";
my $ds = {
#'workspace_name' => "QZtest",
#'domain' => "Eukaryota"
#'genome_id' => 'kb|g.0'
};
#$impl->_deleteRecords($delcore, $ds);
#Testing list_loaded_genomes
my $wsret;
eval {
$wsret = $impl->list_loaded_genomes({
genome_ver => 1,
data_source => "others",
create_report => 1,
other_ws => "RefSeq_plant" #"qzhang:narrative_1493170238855"
});
};
ok(!$@,"list_loaded_genomes command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Number of records:".@{$wsret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$wsret->[@{$wsret} -1]])."\n";
#print Data::Dumper->Dump([$wsret->[0]])."\n";
}
ok(defined($wsret->[0]),"list_loaded_genomes command returned at least one genome");
#Testing index_genomes_in_solr
my $slrcore = "RefSeq_RAST";
my $ret;
eval {
$ret = $impl->index_genomes_in_solr({
#genomes => $wsret,#[@{$wsret}[(@{$wsret} - 2)..(@{$wsret} - 1)]],#$wsret, #[@{$wsret}[0..1]],
solr_core => $slrcore,
genome_ver => 1,
start_offset => 0,
genome_count => 6000,
other_ws =>"ReferenceDataManager2"
});
};
ok(!$@,"index_genomes_in_solr command successful");
if ($@) {
print "ERROR:".$@;
#my $err = $@;
#print "Error type: " . ref($err) . "\n";
#print "Error message: " . $err->{message} . "\n";
#print "Error error: " . $err->{error} . "\n";
#print "Error data: " .$err->{data} . "\n";
} else {
print "Number of records:".@{$ret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$ret->[0]])."\n";
}
ok(defined($ret->[0]),"\nindex_genomes_in_solr command returned at least one genome");
#Testing index_genomes_in_solr
my $slrcore = "GenomeFeatures_ci";
my $ret;
eval {
$ret = $impl->index_genomes_in_solr({
#genomes => $wsret,#[@{$wsret}[(@{$wsret} - 2)..(@{$wsret} - 1)]],#$wsret, #[@{$wsret}[0..1]],
solr_core => $slrcore,
genome_ver => 1,
start_offset => 0
});
};
ok(!$@,"index_genomes_in_solr command successful");
if ($@) {
print "ERROR:".$@;
#my $err = $@;
#print "Error type: " . ref($err) . "\n";
#print "Error message: " . $err->{message} . "\n";
#print "Error error: " . $err->{error} . "\n";
#print "Error data: " .$err->{data} . "\n";
} else {
print "Number of records:".@{$ret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$ret->[0]])."\n";
}
ok(defined($ret->[0]),"\nindex_genomes_in_solr command returned at least one genome");
#Testing list_loaded_taxa
my $taxon_ret;
eval {
$taxon_ret = $impl->list_loaded_taxa({
workspace_name => "ReferenceTaxons",
create_report => 0
});
};
ok(!$@,"list_loaded_taxa command successful");
if ($@) {
my $err = $@;
print "Error occurred with error type: " . ref($err) . "\n";
#print "Error message: " . $err->{message} . "\n";
#print "Error error: " . $err->{error} . "\n";
#print "Error data: " .$err->{data} . "\n";
} else {
print "Number of records:".@{$taxon_ret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$taxon_ret->[0]])."\n";
}
ok(defined($taxon_ret->[0]),"list_loaded_taxa command returned at least one taxon");
#Testing index_taxa_in_solr
my $solr_ret;
eval {
$solr_ret = $impl->index_taxa_in_solr({
taxa => $taxon_ret,
solr_core => "taxonomy_ci",
create_report => 0
});
};
ok(!$@,"index_taxa_in_solr command successful");
if ($@) {
my $err = $@;
#print "Error type: " . ref($err) . "\n";
#print "Error message: " . $err->{message} . "\n";
#print "Error error: " . $err->{error} . "\n";
#print "Error data: " .$err->{data} . "\n";
} else {
print "Number of records:".@{$solr_ret}."\n";
print "First record:\n";
print Data::Dumper->Dump([$solr_ret->[0]])."\n";
}
ok(defined($solr_ret->[0]),"index_taxa_in_solr command returned at least one taxon");
#Test _exists() function
my $exist_ret;
#my $crit = 'parent_taxon_ref:"1779/116411/1",rank:"species",scientific_lineage:"cellular organisms; Bacteria; Proteobacteria; Alphaproteobacteria; Rhizobiales; Bradyrhizobiaceae; Bradyrhizobium",scientific_name:"Bradyrhizobium sp. rp3", domain:"Bacteria"';
my $searchCriteria = {
parent_taxon_ref => '1779/116411/1',
rank => 'species',
scientific_lineage => 'cellular organisms; Bacteria; Proteobacteria; Alphaproteobacteria; Rhizobiales; Bradyrhizobiaceae; Bradyrhizobium',
scientific_name => 'Bradyrhizobium sp. rp3',
domain => 'Bacteria'
};
eval {
$exist_ret = $impl->_exists("GenomeFeatures_ci", $searchCriteria);
};
ok(!$@, "_exists() command successful");
if ($@) {
print "ERROR:".$@;
} else {
print "Return result=" . $exist_ret;
}
ok(defined($exist_ret),"_exists command returned a value");
done_testing(2);
};
=cut old testings.
# --- Teardown -----------------------------------------------------------
# Stash any error raised by the test eval above so cleanup can run first
# and the failure can still be re-thrown afterwards.
my $err = undef;
if ($@) {
    $err = $@;
}
eval {
    if (defined($ws_name)) {
        #$ws_client->delete_workspace({workspace => $ws_name});
        #print("Test workspace was deleted\n");
        print("Test workspace was named ". $ws_name . "\n");
        my $wsinfo = $ws_client->get_workspace_info({
            workspace => $ws_name
        });
        print Dumper($wsinfo);
        # Element 4 of the workspace-info tuple is the max object id.
        my $maxid = $wsinfo->[4];
        print "\nMax genome object id=$maxid\n";
        # Best-effort listing; failures here must not block the delete below.
        eval {
            my $wsoutput = $ws_client->list_objects({
                workspaces => [$ws_name],
                minObjectID => 0,
                maxObjectID => $maxid,
                includeMetadata => 1
            });
            print "Genome object count=" . @{$wsoutput}. "\n";
        };
        $ws_client->delete_workspace({workspace => $ws_name});
        print("Test workspace was deleted\n");
    }
};
# Re-raise the original test failure, with a stack trace for KBase exceptions.
if (defined($err)) {
    if(ref($err) eq "Bio::KBase::Exceptions::KBaseException") {
        die("Error while running tests: " . $err->trace->as_string);
    } else {
        die $err;
    }
}
{
    # Minimal stand-in for the KBase server call context, providing just the
    # accessors the service implementation expects during local testing.
    package LocalCallContext;
    use strict;

    # Construct a context carrying an auth token and the user it belongs to.
    sub new {
        my ($class, $token, $user) = @_;
        return bless { token => $token, user_id => $user }, $class;
    }

    # Read-only accessors for the stored credentials.
    sub user_id { return $_[0]->{user_id}; }
    sub token   { return $_[0]->{token}; }

    # Static provenance record; flags itself as not for production use.
    sub provenance {
        return [ {
            'service'       => 'ReferenceDataManager',
            'method'        => 'please_never_use_it_in_production',
            'method_params' => []
        } ];
    }

    # The local context is always considered authenticated.
    sub authenticated { return 1; }

    # Both log helpers simply echo the message to STDERR.
    sub log_debug {
        my ($self, $msg) = @_;
        print STDERR $msg . "\n";
    }
    sub log_info {
        my ($self, $msg) = @_;
        print STDERR $msg . "\n";
    }

    sub method { return "TEST_METHOD"; }
}
| kbaseapps/ReferenceDataManager | test/ReferenceDataManager_server_test.pl | Perl | mit | 24,399 |
# -*- perl -*-
# !!! DO NOT EDIT !!!
# This file was automatically generated.
# Validator for the Brand search index option of the German (de) Amazon
# ItemSearch operation. Holds a fixed option list and a default value;
# regenerate rather than hand-edit (see the generator notice above).
package Net::Amazon::Validate::ItemSearch::de::Brand;
use 5.006;
use strict;
use warnings;
# Constructor. %options may override '_default'; the valid option list is
# appended to any '_options' passed in. Returns the blessed instance
# (bless is the last evaluated expression).
sub new {
    my ($class , %options) = @_;
    my $self = {
        '_default' => 'Apparel',
        %options,
    };
    push @{$self->{_options}}, 'Apparel';
    push @{$self->{_options}}, 'Baby';
    push @{$self->{_options}}, 'Kitchen';
    bless $self, $class;
}
# Return the validated match for $user when one is given (dies on an
# invalid value), otherwise the default option.
sub user_or_default {
    my ($self, $user) = @_;
    if (defined $user && length($user) > 0) {
        return $self->find_match($user);
    }
    return $self->default();
}
# Accessor for the default option value.
sub default {
    my ($self) = @_;
    return $self->{_default};
}
# Case-insensitive lookup of $value in the option list; returns the
# canonical spelling or dies when the value is not a valid Brand option.
sub find_match {
    my ($self, $value) = @_;
    for (@{$self->{_options}}) {
        return $_ if lc($_) eq lc($value);
    }
    die "$value is not a valid value for de::Brand!\n";
}
1;
__END__
=head1 NAME
Net::Amazon::Validate::ItemSearch::de::Brand;
=head1 DESCRIPTION
The default value is Apparel, unless mode is specified.
The list of available values are:
Apparel
Baby
Kitchen
=cut
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Net/Amazon/Validate/ItemSearch/de/Brand.pm | Perl | mit | 1,101 |
/* Part of ClioPatria SeRQL and SPARQL server
Author: Jan Wielemaker
E-mail: J.Wielemaker@cs.vu.nl
WWW: http://www.swi-prolog.org
Copyright (C): 2010-2016, University of Amsterdam,
VU University Amsterdam
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
As a special exception, if you link this library with other files,
compiled with a Free Software compiler, to produce an executable, this
library does not by itself cause the resulting executable to be covered
by the GNU General Public License. This exception does not however
invalidate any other reasons why the executable file might be covered by
the GNU General Public License.
*/
:- module(rdf_describe,
[ rdf_bounded_description/4, % :Expand, +Type, +URI, -Graph
rdf_bounded_description/5, % :Expand, +Type, +Pattern, +URI, -Graph
resource_CBD/3, % :Expand, +URI, -Graph
graph_CBD/3, % :Expand, +Graph0, -Graph
rdf_include_reifications/3, % :Expand, +Graph0, -Graph
rdf_include_labels/3, % :Expand, +Graph0, -Graph
lcbd_label/3 % +Subject, -Pred, -Label
]).
:- use_module(library(semweb/rdf_db)).
:- use_module(library(assoc)).
:- use_module(library(lists)).
/** <module> RDF Bounded descriptions
The predicates in this module deal with `RDF bounded descriptions'. A
bounded description is a subgraph that describes a single resource
(URI). Unfortunately, such an isolated description is not possible
without the possibility of losing semantics. We provide some meaningful
approximations described in the literature.
Scanning the definitions given in the link below, we distinguish two
ortogonal expansions: one expanding the graph and another adding either
reifications or labels. Expansion is implemented by
rdf_bounded_description/4, while the returned graph can be further
expanded using rdf_include_reifications/3 and/or rdf_include_labels/3.
@tbd Also implement the variations on CBD
@see http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF
*/
:- meta_predicate
	rdf_bounded_description(3, +, +, -),
	resource_CBD(3, +, -),
	graph_CBD(3, +, -),
	rdf_include_reifications(3, +, -).
		 /*******************************
		 *	RESOURCE OPERATIONS	*
		 *******************************/
%%	rdf_bounded_description(:Expand, +Type, +URI, -Graph) is det.
%%	rdf_bounded_description(:Expand, +Type, +Filter, +URI, -Graph) is det.
%
%	Graph is a Bounded Description of URI.  The literature defines
%	various types of bounding descriptions.  Currently supported
%	types are:
%
%	    * cbd
%	    Concise Bounded Description of URI.  This notion is also
%	    known as "the bnode-closure of a resource"
%	    * scbd
%	    Symmetric Concise Bounded Description is similar to
%	    =cbd=, but includes triples with both URI as subject and
%	    object.
% rdf_bounded_description/4 is rdf_bounded_description/5 with an empty
% triple-pattern filter (i.e. no restriction on the emitted triples).
rdf_bounded_description(Expand, Type, S, Graph) :-
	rdf_bounded_description(Expand, Type, [], S, Graph).
rdf_bounded_description(Expand, Type, Filter, S, Graph) :-
	empty_assoc(Map0),			% assoc of bnodes already expanded
	compile_pattern(Filter, Triple, Expand, Filter1),
	expansion(Type, Expand, S, Triple, Filter1, Graph, BNG),
	phrase(new_bnodes(Graph, Map0), BN),	% bnodes reachable from S
	phrase(r_bnodes(BN, Type, Expand, Map0, _Map), BNG).
% compile_pattern(+FilterList, ?Triple, :Expand, -Goal)
% Compile the list of rdf(S,P,O) filter patterns into a single disjunctive
% Goal over Expand.  The patterns share their variables with Triple, the
% template used by expansion/7.  An empty filter compiles to =true=
% (no restriction on the description).
compile_pattern([], _, _, true).
compile_pattern([rdf(S,P,O)], rdf(S,P,O), Expand,
		call(Expand, S,P,O)) :- !.
compile_pattern([rdf(S,P,O)|T], rdf(S,P,O), Expand,
		( call(Expand, S,P,O) ; More )) :-
	compile_pattern(T, rdf(S,P,O), Expand, More).
:- meta_predicate
	expansion(+, 3, +, +, +, -, ?),
	r_bnodes(+, +, 3, +, -, ?, ?).
% expansion(+Type, :Expand, +S, ?Triple, +FilterGoal, -RDF, ?Tail)
% Emit, as the difference list RDF-Tail, the immediate description of S.
% For =cbd= only triples with S as subject are collected; =scbd= also
% collects triples with S as object.  findall/4 appends the solutions in
% front of Tail, keeping the list open for further expansion.
expansion(cbd, Expand, S, rdf(S,P,O), Filter, RDF, Tail) :-
	findall(rdf(S,P,O), (call(Expand, S,P,O),Filter), RDF, Tail).
expansion(scbd, Expand, S, rdf(S,P,O), Filter, RDF, Tail) :-
	findall(rdf(S,P,O), (call(Expand, S,P,O),Filter), RDF, T0),
	findall(rdf(O,P,S), (call(Expand, O,P,S),Filter), T0, Tail).
% r_bnodes(+BNodes, +Type, :Expand, +Map0, -Map)//
% Recursively add the descriptions of the blank nodes in BNodes to the
% DCG output, using the assoc Map0 to avoid expanding a bnode twice.
% Newly discovered bnodes are queued in front of the remaining ones.
r_bnodes([], _, _, Map, Map) -->
	[].
r_bnodes([H|T], Type, Expand, Map0, Map, Graph, Tail) :-
	rdf_is_bnode(H), !,			% unseen bnode: expand it
	put_assoc(H, Map0, true, Map1),
	expansion(Type, Expand, H, _, true, Graph, Tail0),
	phrase(new_bnodes(Graph, Map1), BN, T),	% queue newly found bnodes
	r_bnodes(BN, Type, Expand, Map1, Map, Tail0, Tail).
r_bnodes([_|T], Type, Expand, Map0, Map) -->	% not a bnode (or already seen)
	r_bnodes(T, Type, Expand, Map0, Map).
% new_bnodes(+Graph, +Map)//
% Emit the blank nodes appearing in Graph (as subject or object) that are
% not yet registered in Map.  The first clause stops the traversal at the
% open tail of the difference list.
new_bnodes(Var, _) -->
	{ var(Var) }, !.
new_bnodes([rdf(S,_,O)|RDF], Map) -->
	new_bnode(S, Map),
	new_bnode(O, Map),
	new_bnodes(RDF, Map).
new_bnode(S, Map) --> { rdf_is_bnode(S), \+ get_assoc(S, Map, _) }, !, [S].
new_bnode(_, _) --> [].
%% resource_CBD(:Expand, +URI, -Graph) is det.
%
% Graph is the Concise Bounded Description of URI. This notion is
% also known as "the bnode-closure of a resource". Note that,
% according to the definition on the Talis wiki, the CBD includes
% reified statements. This predicate does not do this. Use
% rdf_include_reifications/3 to add reifications to the graph.
%
% @param Expand is called to enumerate the PO pairs for a subject.
% This will often be =rdf= to use rdf/3.
% @see http://n2.talis.com/wiki/Bounded_Descriptions_in_RDF
% A Concise Bounded Description is simply a bounded description of
% type =cbd= with no extra filter.
resource_CBD(Expand, S, Graph) :-
	rdf_bounded_description(Expand, cbd, S, Graph).
/*******************************
* GRAPH OPERATIONS *
*******************************/
%% graph_CBD(:Expand, +Graph0, -Graph) is det.
%
% Add concise bounded descriptions for bnodes in a graph, creating
% an expanded graph.
% graph_CBD(:Expand, +Graph0, -Graph)
% Copy Graph0 to Graph, adding the CBD of every blank node that occurs
% as subject or object of a triple.  An assoc tracks bnodes whose
% description has already been emitted.
graph_CBD(Expand, Graph0, Graph) :-
	empty_assoc(Map0),
	must_be(list, Graph0),
	phrase(gr_cbd(Graph0, Expand, Map0, _Map), Graph).
:- meta_predicate
	gr_cbd(+, 3, +, -, ?, ?).
% gr_cbd(+Triples, :Expand, +Map0, -Map)//
gr_cbd([], _, Map, Map) -->
	[].
gr_cbd([rdf(S,P,O)|T], Expand, Map0, Map) -->
	{   rdf_is_bnode(S)		% either end of the triple is a bnode
	;   rdf_is_bnode(O)
	}, !,
	[ rdf(S,P,O) ],
	r_bnodes([S,O], cbd, Expand, Map0, Map1),
	gr_cbd(T, Expand, Map1, Map).
gr_cbd([Triple|T], Expand, Map0, Map) -->	% plain triple: copy unchanged
	[Triple],
	gr_cbd(T, Expand, Map0, Map).
%% rdf_include_reifications(:Expand, +Graph0, -Graph) is det.
%
% Include the reification of any reified statements in Graph0.
%%	rdf_include_reifications(:Expand, +Graph0, -Graph) is det.
%
%	Include the reification of any reified statements in Graph0.
%	Reifying triples may themselves be reified, so we iterate until
%	no new statements are found, each round also completing the
%	bnode-closure of the newly added triples.
rdf_include_reifications(Expand, Graph0, Graph) :-
	phrase(reified_triples(Graph0, Expand), Statements),
	(   Statements == []
	->  Graph = Graph0
	;   graph_CBD(Expand, Statements, Statements1),
	    rdf_include_reifications(Expand, Statements1, Graph1),
	    append(Graph0, Graph1, Graph)
	).
:- meta_predicate
	reified_triples(+, 3, ?, ?),
	reification(?,?,?,3,-).
% reified_triples(+Triples, :Expand)//
% Collect the reification triples of every statement in Triples.
% NOTE: the findall//2 template must be a *fresh* variable (Triple).
% The previous code reused T, the already-bound tail of the triple
% list, so the goal could never succeed and reifications were
% silently never collected.
reified_triples([], _) --> [].
reified_triples([rdf(S,P,O)|T], Expand) -->
	findall(Triple, reification(S,P,O,Expand,Triple)),
	reified_triples(T, Expand).
% reification(?S, ?P, ?O, :Expand, -Triple) is nondet.
% True when Triple is one of the three triples (subject, predicate,
% object) of a reification Stmt of the statement rdf(S,P,O).
reification(S,P,O, Expand, Triple) :-
	rdf_equal(SP, rdf:subject),
	rdf_equal(PP, rdf:predicate),
	rdf_equal(OP, rdf:object),
	call(Expand, Stmt, SP, S),
	call(Expand, Stmt, OP, O),
	call(Expand, Stmt, PP, P),
	(   Triple = rdf(Stmt, SP, S)
	;   Triple = rdf(Stmt, PP, P)
	;   Triple = rdf(Stmt, OP, O)
	).
%% rdf_include_labels(:Expand, +Graph0, -Graph) is det.
%
% Include missing `label' statements in Graph0. Expand must
% provide label triples on
%
% call(Expand, S, P, O)
%
% The predicate lcbd_label/3 does this for the standard
% definition, considering the properties rdfs:label, rdfs:comment
% and rdfs:seeAlso.
% rdf_include_labels(:Expand, +Graph0, -Graph)
% Append the label triples (as produced by Expand, e.g. lcbd_label/3)
% for every object in Graph0.  Unlike reifications, labels need no
% recursive closure: a single pass suffices.
rdf_include_labels(Expand, Graph0, Graph) :-
	phrase(label_triples(Graph0, Expand), LabelRDF),
	(   LabelRDF == []
	->  Graph = Graph0
	;   append(Graph0, LabelRDF, Graph)
	).
:- meta_predicate
	label_triples(+, 3, ?, ?),
	label_triple(+, 3, -).
% label_triples(+Triples, :Expand)//
% NOTE: the findall//2 template must be a *fresh* variable (Triple).
% The previous code reused T, the already-bound tail of the triple
% list, so no label triple was ever collected.
label_triples([], _) --> [].
label_triples([rdf(_,_,O)|T], Expand) -->
	findall(Triple, label_triple(O,Expand,Triple)),
	label_triples(T, Expand).
label_triple(O, Expand, Triple) :-
	call(Expand, O, LP, Label),
	Triple = rdf(O, LP, Label).
:- rdf_meta
	lcbd_property(r).
%%	lcbd_label(+S, -P, -Label) is nondet.
%
%	Standard conforming `Expand' for rdf_include_labels/3.
%	Enumerates, via rdf_has/3, the annotation values of S for the
%	three properties considered by the LCBD definition.
lcbd_label(S, P, Label) :-
	lcbd_property(P),
	rdf_has(S, P, Label).
% Properties whose values are considered labels for LCBD purposes.
lcbd_property(rdfs:label).
lcbd_property(rdfs:comment).
lcbd_property(rdfs:seeAlso).
| TeamSPoon/logicmoo_workspace | packs_web/ClioPatria/lib/semweb/rdf_describe.pl | Perl | mit | 8,625 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Author: Jose Santos <jcas81@gmail.com>
% Date: 2009-04-24
%
% This file has predicates to generate a set of hypotheses with their respective
% coverage, given a set of positive and negative examples and a predicate to generate
% hypotheses from a positive example
%
% The output of compute_coverage/4 is a list of hypotheses information. Each element
% is a tuple of the form: (Hypothesis, HypothesisSignature, NumLiterals, ListExGen, ListExPos, ListExNeg)
%
% where:
% Hypothesis: is a unique hypothesis (with variables as a list of literals)
% HypothesisSignature: Hypothesis signature
% NumLiterals: number of literals in Hypothesis
% ListExGen: list of example ids that generated HypID
% ListExPos: list of example ids of the positive examples covered
% ListExNeg: list of example ids of the negative examples covered
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
:- module(hypotheses,
[
compute_hypotheses/5,
compute_coverage/4
]
).
% GILPS modules
:- use_module('coverage', [hypothesis_coverage/6]).
:- use_module('score', [verifies_full_metrics/2, hypothesis_info/5]).
:- use_module('../settings/settings', [setting/2]).
:- use_module('../examples/examples', [id2example/2, example/5]). %to access the examples
:- use_module('../messages/messages', [message/2]).
% YAP modules
:- use_module(library(rbtrees), [rb_new/1, rb_lookup/3, rb_insert/4, rb_update/4, rb_keys/2, rb_visit/2, rb_map/3, rb_size/2]).
:- use_module(library(lists), [reverse/2]).
:- use_module(library(apply_macros), [maplist/3]).
:- use_module(library(varnumbers), [varnumbers/2]). % this is an undocumented YAP library. varnumbers/2 does the inverse of numbervars/3
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% addExampleHypothesesAux(+Hypotheses, +ExampleID, +CurHypGenInfo, -NextHypGenInfo)
%
% Given:
% Hypotheses: a list of hypothesis, each as a list of literals
% ExampleID: the exampleID that generated all the hypotheses
% CurGenInfo: the current rb tree
%
% Returns:
% NextHypGenInfo: updated rb tree by adding the head of each hypothesis in Hypotheses to it
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Base case: no more hypotheses for this example; return the tree as is.
addExampleHypothesesAux([], _ExampleID, HypGenInfo, HypGenInfo):-!.
addExampleHypothesesAux([(Hyp,HypSig)|Hyps], ExampleID, CurHypGenInfo, FinalHypGenInfo):-
  hypothesis2Key(Hyp, Key), % get the rb_tree key from hyp
  (rb_lookup(Key, (HypSig, NumLiterals, GenExamples), CurHypGenInfo) -> % check if Hyp was already in HypGenInfo
     (
       GenExamples=[ExampleID|_] ->  % we do not want repetitions in list of examples that generated a given hypothesis
         NCurHypGenInfo = CurHypGenInfo
       ;
       % NOTE: the generating-example list is built most-recent first, so
       % checking only the head suffices because all hypotheses of one
       % example are added consecutively.
         rb_update(CurHypGenInfo, Key, (HypSig, NumLiterals, [ExampleID|GenExamples]), NCurHypGenInfo) % if it was, add example id to the list of generating examples
     )
   ;
    % should we keep both Key and Hyp in the hypothesis? we can convert between one and the other
    length(Hyp, NumLiterals),
    rb_insert(CurHypGenInfo, Key, (HypSig, NumLiterals, [ExampleID]), NCurHypGenInfo) % if it was not, create an entry and add example id as the unique element (so far)
  ),
  addExampleHypothesesAux(Hyps, ExampleID, NCurHypGenInfo, FinalHypGenInfo).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% hypothesis2Key(+Hypothesis, -RBTreeKey)
%
% Given:
% Hypothesis: an hypothesis as a list of literals
%
% Returns:
% RBTreeKey: a unique key created from the hypothesis
%
% Notes:
% The key to the rb_tree is Hypothesis reversed (to check efficiently for parents) and finally passed through
% numbervars/3 to get rename the variables
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Note: reversing *before* numbervars/3 is essential — the variable
% numbering depends on term order, and the reversed form makes a child
% hypothesis key equal to [NewLiteral|ParentKey] (cheap parent lookup).
hypothesis2Key(Hypothesis, RBTreeKey):-
  copy_term(Hypothesis, TempHyp), % copy Hypothesis to a new variable
  reverse(TempHyp, RBTreeKey),
  numbervars(RBTreeKey, 0, _). % the hypothesis Hyp after converted, after renaming through numbervars, is the key to the RB tree
%key2hypothesis(+RBTreeKey, -Hypothesis):-
% Inverse of hypothesis2Key/2: varnumbers/2 undoes numbervars/3, then the
% literal order is restored.
key2hypothesis(RBTreeKey, Hypothesis):-
  varnumbers(RBTreeKey, TempHyp),
  reverse(TempHyp, Hypothesis).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% addExampleHypotheses(+ExampleID, +APosEIDs, +ANegEIDs, +(CurExample, NumExamples), +HypGenPred, +CurHypGenInfo, -NextHypGenInfo)
%
% Given:
% ExampleID: The unique identifier of an example.
% TPosEIDs: ordered list of positive example ids remaining (i.e. the subset from APosEIDs not yet considered)
% APosEIDs: ordered list of all positive example ids (to possibly test for coverage)
% ANegEIDs: ordered list of all negative example ids (to possibly test for coverage)
% CurExample: Number of the current example (i.e. this is the CurExample-th processed example)
% NumExamples: Total number of examples
% HypGenPred: a predicate to generate hypotheses from examples
% CurHypGenInfo: a red-black tree as described in compute_hypotheses/5
%
% Returns:
% NextHypGenInfo: has CurHypGenInfo updated with the hypotheses generated for the current example.
% Existing hypotheses, merely receive ExampleID as their generators
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
:- meta_predicate addExampleHypotheses(?, ?, ?, ?, ?, :, ?, ?). %the third argument, HypGenPred, is to be executed in its own module
addExampleHypotheses(ExampleID, TPosEIDs, APosEIDs, ANegEIDs, (CurExample, NumExamples), HypGenPred, CurHypGenInfo, NextHypGenInfo):-
  setting(nodes, MaxHypothesesPerExample),  % bound on hypotheses generated per example
  %example(ExampleID, Example, _Weight, _Fold, _RandomN), % retrieve the example, given the ExampleID
  %call_with_args(HypGenPred, Example, TPosEIDs, APosEIDs, ANegEIDs, MaxHypothesesPerExample, Hypotheses), % get the first MaxHypothesesPerExample Hypotheses that Example generates
  %example(ExampleID, Example, _Weight, _Fold, _RandomN), % retrieve the example, given the ExampleID
  call_with_args(HypGenPred, ExampleID, TPosEIDs, APosEIDs, ANegEIDs, MaxHypothesesPerExample, Hypotheses), % get the first MaxHypothesesPerExample Hypotheses that Example generates
  id2example(ExampleID, Example),  % only needed for the progress message below
  message(hypotheses_for_example, [CurExample, NumExamples, Example, Hypotheses]),
  addExampleHypothesesAux(Hypotheses, ExampleID, CurHypGenInfo, NextHypGenInfo).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% compute_hypotheses(+PosEIDs, +APosEIDs, +ANegEIDs, +HypothesesGenPred, -HypGenInfo)
%
% Given:
% PosEIDs: list of positive examples from which we want to generate hypotheses from
% APosEIDs: ordered list of positive example ids to possibly test coverage
% ANegEIDs: ordered list of negative example ids to possibly test coverage
% HypothesesGenPred: a predicate that takes generates Hypotheses from Examples
% (the first argument is an example id, the second is a list of hypotheses)
%
% Returns:
% HypGenInfo: a red black tree, with a grounded version (with numbervars/3) of the hypothesis (represented
% by a reversed list of literals) as key and as value a tuple
% (hypothesis signature, num literals, list of examples id that generated hypothesis)
%
% Notes:
% The hypothesis must pass through numbervars/3 before entering the rb_tree as key, otherwise two equal
% hypothesis would be considered different. It is represented as a reversed list of literals to enable to
% efficiently determine which hypotheses are parents of which.
% The parent of an item with key [H|T] has key T. The examples a child hypothesis covers are a subset
% of the ones the parent hypothesis covers.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
:- meta_predicate hypothesesAux(?, ?, ?, ?, :, ?, ?).
% hypothesesAux(+RemainPosEIDs, +APosEIDs, +ANegEIDs, +(CurExample, NumExamples), +HypGenPred, +CurHypGenInfo, -FinalHypGenInfo)
% Fold addExampleHypotheses/8 over the remaining positive examples,
% threading the rb-tree accumulator and a (current, total) progress pair.
hypothesesAux([], _, _, _, _, HGI, HGI):-!.
hypothesesAux([PosEID|PosEIDs], APosEIDs, ANegEIDs, (CurExample, NumExamples), HypGenPred, CurHGI, FinalHGI):-
  CurExample1 is CurExample+1,
  addExampleHypotheses(PosEID, PosEIDs, APosEIDs, ANegEIDs, (CurExample1, NumExamples), HypGenPred, CurHGI, NHGI),
  hypothesesAux(PosEIDs, APosEIDs, ANegEIDs, (CurExample1, NumExamples), HypGenPred, NHGI, FinalHGI).
:- meta_predicate compute_hypotheses(?, ?, :, ?).
% Entry point: build the hypothesis-generation rb-tree from all positive
% examples and report how many unique hypotheses were produced.
compute_hypotheses(PosEIDs, APosEIDs, ANegEIDs, HypGenPred, HypGenInfo):-
  rb_new(CurHypGenInfo),
  length(PosEIDs, NumExamples),
  hypothesesAux(PosEIDs, APosEIDs, ANegEIDs, (0, NumExamples), HypGenPred, CurHypGenInfo, HypGenInfo),
  rb_size(HypGenInfo, NumUniqueHypothesis),
  message(hypotheses_computed, [PosEIDs, NumUniqueHypothesis]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% compute_coverage(+HypGenInfo, +PosExampleIDs, +NegExampleIDs, -HypothesesInfo)
%
% Given:
% HypGenInfo: a red black tree, with a grounded version (with numbervars/3) of the hypothesis (represented
% by a reversed list of literals) as key and as value a tuple
% (hypothesis signature,
% hypothesis as a list of literals, num literals, list of examples id that generated hypothesis)
% PosExampleIDs: list of ids of the positive examples to consider (used to generate hypgeninfo)
% NegExampleIDs: list of ids of the negative examples to consider
%
% Returns:
% HypothesesInfo: The output is a list of hypotheses information. Each element is a tuple of the form:
% (Hypothesis, HypSig, NumLiterals, ListExGen, ListExPos, ListExNeg)
% where:
% Hypothesis: is a unique hypothesis (with variables as a list of literals)
% HypSignature: Hypothesis signature
%       NumLiterals: number of literals in Hypothesis
% ListExGen: list of example ids that generated HypID
% ListExPos: list of example ids of the positive examples covered
% ListExNeg: list of example ids of the negative examples covered
%
% Notes:
% HypTotInfo is a temporary variable that is a red black tree where each element is a tuple:
% (HypSig, NumLiterals, ListExGen, ListExPos, ListExNeg)
%
% where:
% HypSig: hypothesis signature (i.e. signature for the rb tree key)
%      NumLiterals: number of literals in Hypothesis
% ExIDsGen: list of example ids that generated HypID
% PosIDsCov: list of example ids of the positive examples covered
% NegIDsCov: list of example ids of the negative examples covered
% the key is hypothesis, with numbervars/3, as a reversed list of literals
%
% Notes:
% There are two implementations with and without smart_coverage, Smart_coverage only applies to TopLog
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
compute_coverage(HypGenInfo, PosExampleIDs, NegExampleIDs, Hypotheses):-
  rb_new(InitialHypTotInfo),
  rb_visit(HypGenInfo, HypGenInfoList),
  rb_size(HypGenInfo, NumHypotheses),
  setting(smart_coverage, SC),
  setting(engine, Engine),
  (Engine=toplog, SC=true ->  % only do smart coverage if it's on and for the TopLog engine
     maplist(processHypothesisList, HypGenInfoList, HypGenInfoList1),
     keysort(HypGenInfoList1, SHypGenInfoList), % we want to sort by ascending number of literals in order to process the parents before the children
     compute_coverage_smart(SHypGenInfoList, (0, NumHypotheses), HypGenInfo,
                            PosExampleIDs, NegExampleIDs, InitialHypTotInfo, HypTotInfo)
   ;
     compute_coverage_normal(HypGenInfoList, (0, NumHypotheses),
                             PosExampleIDs, NegExampleIDs, InitialHypTotInfo, HypTotInfo)
  ),
  message(total_coverage_computed, [NumHypotheses]),
  rb_visit(HypTotInfo, List_HypTotInfo),
  maplist(convertKey2Hyp, List_HypTotInfo, Hypotheses).
%convertKey2Hyp(+(Key-(HSig, NumLits, ExIDsGen, PosIDsCov, NegIDsCov)), -(Hypothesis, HSig, NumLits, ExIDsGen, PosIDsCov, NegIDsCov))
% Restore the variabilized hypothesis from its ground rb-tree key.
convertKey2Hyp(Key-(HSig, NumLits, ExIDsGen, PosIDsCov, NegIDsCov), (Hypothesis, HSig, NumLits, ExIDsGen, PosIDsCov, NegIDsCov)):-
  key2hypothesis(Key, Hypothesis).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% compute_coverage_normal(+HypGenInfoList, +(CurHypNumber, NumHyps), +PosExampleIDs, +NegExampleIDs, +CurHypTotInfo, -FinalHypTotInfo)
%
% Implement compute_coverage/4 using always all examples (PosExampleIDs, NegExampleIDs), to compute the
% coverage of a given hypothesis
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
compute_coverage_normal([], _, _, _, HypTotInfo, HypTotInfo):-!.
compute_coverage_normal([(Key-(HSig, NumLiterals, GenExIDs))|Elems], (CurHypNumber, NumHyps),
                        PosExIDs, NegExIDs, CHTI, FHTI):-
  CurHypNumber1 is CurHypNumber+1,
  key2hypothesis(Key, Hypothesis),
  % Every hypothesis is tested against the full example sets; hypotheses
  % failing the user-defined metrics are dropped from the result tree.
  (good_hypothesis(Hypothesis, HSig, PosExIDs, NegExIDs, PosExIDs, NegExIDs, PosExIDsCov, NegExIDsCov) ->
     message(hypothesis_coverage_computed, [CurHypNumber1, NumHyps, Hypothesis, PosExIDsCov, NegExIDsCov]),
     rb_insert(CHTI, Key, (HSig, NumLiterals, GenExIDs, PosExIDsCov, NegExIDsCov), NCHTI),% add hypothesis to current hypothesis tree info
     compute_coverage_normal(Elems, (CurHypNumber1, NumHyps), PosExIDs, NegExIDs, NCHTI, FHTI)
   ;
     message(hypothesis_discarded, [CurHypNumber1, NumHyps, Hypothesis]),
     compute_coverage_normal(Elems, (CurHypNumber1, NumHyps), PosExIDs, NegExIDs, CHTI, FHTI) % ignore hypothesis
  ).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% good_hypothesis(+Hypothesis, +HypSig, +TestHypPosEIDs, +TestHypNegEIDs, +AllPosEIDs, +AllNegEIDs, -PosEIDsCov, -NegEIDsCov)
%
% Given:
% Hypothesis: an hypothesis as a list of literals
% HypothesisSignature: hypothesis signature
% TestHypPosEIDs: ordered list of positive examples to test for coverage
%    TestHypNegEIDs: ordered list of negative examples to test for coverage
% AllPosEIDs: ordered list of all positive examples to consider
% AllNegEIDs: ordered list of all negative examples to consider
%
% Returns:
% PosEIDsCov: ordered list of all positive examples covered (from TestHypPosEIDs)
% NegEIDsCov: ordered list of all negative examples covered (from TestHypNegEIDs)
%
% Notes:
% The reason for TestHyp and All is because of smart coverage. If smart coverage is false we could use always
% All_. We don't just use TestHyp_ because it may be misleading for verifies_metrics for some metrics.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Succeeds iff Hypothesis' coverage (computed on the Test* subsets, but
% scored against the All* sets) passes the user-configured metrics.
good_hypothesis(Hypothesis, HypSig, TestHypPosEIDs, TestHypNegEIds, AllPosEIDs, AllNegEIDs, PosEIDsCov, NegEIDsCov):-
  hypothesis_coverage(Hypothesis, HypSig, TestHypPosEIDs, TestHypNegEIds, PosEIDsCov, NegEIDsCov), % signature is not known...
  hypothesis_info(PosEIDsCov, NegEIDsCov, AllPosEIDs, AllNegEIDs, HypInfo),
  length(Hypothesis, NumLiterals),
  verifies_full_metrics(NumLiterals, HypInfo).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% processHypothesisList(+(Key-(KeySig, NumLiterals, ListExGen)), -(NumLiterals-(Key, KeySig, ListExGen)))
%
% Just puts NumLiterals at the beginning of the term, in order for key_sort to work correctly
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
processHypothesisList((Key-(KeySig, NumLiterals, ListExGen)), (NumLiterals-(Key, KeySig, ListExGen))).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% compute_coverage_smart(+HypGenInfoList, +HypGenInfo, +(CurHypNumber, NumHyps), +PosExampleIDs, +NegExampleIDs, +CurHypTotInfo, -HypTotInfo)
%
% Implements compute_coverage/4 using an hypothesis parent coverage as the basis for computing its
% own coverage. This is logically sound and returns the same results being usually faster although there is
% a small overhead for computing an hypothesis parent.
% When an hypothesis has no parent all examples have to be used as before
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
compute_coverage_smart([], _, _, _, _, HypTotInfo, HypTotInfo):-!.
compute_coverage_smart([NumLiterals-(Key, HSig, GenExIDs)|Elems], (CurHypNumber, NumHyps), HGI, PosExIDs, NegExIDs, CHTI, FHTI):-
  CurHypNumber1 is CurHypNumber+1,
  % A child hypothesis can only cover examples its parent covers, so we
  % restrict the coverage test to the parent's coverage when available.
  parentCoverage(Key, CHTI, PosExIDs, NegExIDs, ParentPosExIDs, ParentNegExIDs),
  key2hypothesis(Key, Hypothesis),
  % there is a parent hypothesis, so we will need to test the examples the parent covers
  (good_hypothesis(Hypothesis, HSig, ParentPosExIDs, ParentNegExIDs, PosExIDs, NegExIDs, PosExIDsCov, NegExIDsCov) ->
     message(hypothesis_coverage_computed, [CurHypNumber1, NumHyps, Hypothesis, PosExIDsCov, NegExIDsCov]),
     rb_insert(CHTI, Key, (HSig, NumLiterals, GenExIDs, PosExIDsCov, NegExIDsCov), NCHTI),% add hypothesis to current hypothesis tree info
     compute_coverage_smart(Elems, (CurHypNumber1, NumHyps), HGI, PosExIDs, NegExIDs, NCHTI, FHTI)
   ;
     message(hypothesis_discarded, [CurHypNumber1, NumHyps, Hypothesis]),
     compute_coverage_smart(Elems, (CurHypNumber1, NumHyps), HGI, PosExIDs, NegExIDs, CHTI, FHTI) % ignore hypothesis
  ).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% parentKey(+ChildKey, -ParentKey)
%
% Given:
% ChildKey: the rbtree key for an hypothesis (a reversed list of ground literals)
%
% Returns:
% ParentKey: the rbtree key for the parent hypothesis
%
% Notes:
% We just need to delete the first literal as the list is already reversed
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
parentKey([_|T], T).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% parentCoverage(+ChildKey, +CurHypTotInfo, +AllPosExampleIDs, +AllNegExampleIDs, -ParentPosExampleIDs, -ParentNegExampleIDs)
%
% Given:
% ChildKey: The key (a list of ground literals with numbervars/3) for the child hypothesis
% Other input arguments: see above
% Returns:
% ParentPosExampleIDs: the coverage of the childkey hypothesis or AllPosExampleIDs if no parent exists
% ParentNegExampleIDs: the coverage of the childkey hypothesis or AllNegExampleIDs if no parent exists
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
parentCoverage(Key, CHI, _AllPosExIDs, _AllNegExIDs, ParentPosExIDs, ParentNegExIDs):-
  parentKey(Key, PKey), % PKey is the key to the parent
  rb_lookup(PKey, (_KeySig, _NumLiterals, _ParentGenExIDs, ParentPosExIDs, ParentNegExIDs), CHI),
  !. % parent coverage exists, return it
% Fallback: the parent was discarded or never generated, so the full
% example sets must be tested.
parentCoverage(_Key, _CHI, AllPosExIDs, AllNegExIDs, AllPosExIDs, AllNegExIDs).
% no parent exists, return all examples as coverage
| JoseCSantos/GILPS | source/hypotheses/hypotheses.pl | Perl | mit | 20,283 |
#
# Copyright 2015 Electric Cloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################
# createAndAttachCredential.pl
#
# Creates the credentials entered when an EC-Azure plugin configuration
# is saved, records their real names on the configuration property
# sheet, grants the launching user full rights on them and attaches
# them to every procedure step that talks to Azure.  On any error the
# partially created configuration and credentials are rolled back and
# the job exits with ERROR.
##########################
use strict;
use warnings;

use ElectricCommander;

use constant {
    SUCCESS => 0,
    ERROR   => 1,
};

my $ec = ElectricCommander->new();
$ec->abortOnError(0);

my $projName          = '@PLUGIN_KEY@-@PLUGIN_VERSION@';
my $azureConfig       = '$[/myJob/config]';
my $azureCredential   = '$[/myJob/config]';
my $azureVMCredential = $azureCredential . "_vm_credential";

# Credential name in the project => name of the credential parameter on
# the calling job, from which user/password are read.
my %credentials = (
    $azureCredential   => "credential",
    $azureVMCredential => "vm_credential"
);

# [ procedureName, stepName ] pairs that need the credentials attached.
# Order is preserved from the original hand-written call sequence.
# 'Create or Update Database Server' / 'Delete Database Server' were
# deliberately disabled in the original script and remain detached.
my @attach_steps = (
    [ 'Create VM',                             'Create VM' ],
    [ 'Delete VM',                             'Delete VM' ],
    [ 'Start VM',                              'Start VM' ],
    [ 'Stop VM',                               'Stop VM' ],
    [ 'Restart VM',                            'Restart VM' ],
    [ 'TearDown',                              'tearDown' ],
    [ 'Create Or Update Database',             'createUpdateDatabase' ],
    [ 'DeleteDatabase',                        'deleteDatabase' ],
    [ 'Create or Update Vnet',                 'Create Vnet' ],
    [ 'Delete Vnet',                           'Delete Vnet' ],
    [ 'Create or Update Subnet',               'createUpdateSubnet' ],
    [ 'Delete Subnet',                         'deleteSubnet' ],
    [ 'Create or Update NetworkSecurityGroup', 'createUpdateNetworkSecurityGroup' ],
    [ 'Delete NetworkSecurityGroup',           'deleteNetworkSecurityGroup' ],
    [ 'Create or Update NetworkSecurityRule',  'createUpdateNetworkSecurityRule' ],
    [ 'Delete NetworkSecurityRule',            'deleteNetworkSecurityRule' ],
    [ 'NoSQL Operations',                      'nosqlOperations' ],
    [ 'SQL Operations',                        'sqlOperations' ],
);

foreach my $credName ( keys %credentials ) {

    # Pull the user/password pair supplied for this credential parameter.
    my $xpath    = $ec->getFullCredential( $credentials{$credName} );
    my $userName = $xpath->findvalue("//userName");
    my $password = $xpath->findvalue("//password");

    # (Re)create the credential under its real name.
    $ec->deleteCredential( $projName, $credName );
    $xpath =
      $ec->createCredential( $projName, $credName, $userName, $password );
    my $errors = $ec->checkAllErrors($xpath) // '';

    # Record the credential's real name on the configuration sheet.
    my $configPath = "/projects/$projName/azure_cfgs/$azureConfig";
    print "Setting property $configPath / + $credentials{$credName}";
    print " .. with value $credName";
    $xpath =
      $ec->setProperty( $configPath . "/" . $credentials{$credName},
        $credName );
    $errors .= $ec->checkAllErrors($xpath);

    # Give the job launcher full permissions on the credential.
    # (Values were unquoted barewords before 'use strict' was added.)
    my $user = '$[/myJob/launchedByUser]';
    $xpath = $ec->createAclEntry(
        "user", $user,
        {
            projectName                => $projName,
            credentialName             => $credName,
            readPrivilege              => 'allow',
            modifyPrivilege            => 'allow',
            executePrivilege           => 'allow',
            changePermissionsPrivilege => 'allow',
        }
    );
    $errors .= $ec->checkAllErrors($xpath);

    # Attach the credential to every step that needs it, in order.
    for my $step (@attach_steps) {
        my ( $procedureName, $stepName ) = @{$step};
        $xpath = $ec->attachCredential(
            $projName,
            $credName,
            {
                procedureName => $procedureName,
                stepName      => $stepName,
            }
        );
        $errors .= $ec->checkAllErrors($xpath);
    }

    if ( "$errors" ne "" ) {

        # Cleanup the partially created configuration we just created.
        $ec->deleteProperty($configPath);
        $ec->deleteCredential( $projName, $azureCredential );
        $ec->deleteCredential( $projName, $azureVMCredential );
        my $errMsg = "Error creating configuration credential: " . $errors;
        $ec->setProperty( "/myJob/configError", $errMsg );
        print $errMsg;
        exit ERROR;
    }
}
| electric-cloud/EC-Azure | src/main/resources/project/configs/createAndAttachCredential.pl | Perl | apache-2.0 | 7,935 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2018] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Bio::EnsEMBL::Analysis::RunnableDB::Finished::Exonerate;
use warnings ;
use strict;
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning);
use Bio::EnsEMBL::Transcript;
use Bio::EnsEMBL::Translation;
use Bio::EnsEMBL::Exon;
use Bio::EnsEMBL::DnaDnaAlignFeature;
use Bio::EnsEMBL::DnaPepAlignFeature;
use Bio::EnsEMBL::FeaturePair;
use Bio::EnsEMBL::Analysis::Runnable::Finished::Exonerate;
use Bio::EnsEMBL::Pipeline::SeqFetcher::Finished_Pfetch;
use Bio::EnsEMBL::Analysis::Config::General;
use base 'Bio::EnsEMBL::Analysis::RunnableDB::Finished';
############################################################
# Prepare the Exonerate runnable for this job.
# Fetches the genomic slice named by the input id, checks that the
# repeat-masked sequence still contains enough unmasked bases to be worth
# searching, adjusts the EMBL blast db file list for incremental updating,
# and queues a Bio::EnsEMBL::Analysis::Runnable::Finished::Exonerate.
# Returns 1.
sub fetch_input {
	my ($self) = @_;
	my $slice =
	  $self->fetch_sequence( $self->input_id, $self->db,
		$ANALYSIS_REPEAT_MASKING, 1 );
	$self->query($slice);
	my $maskedslice =
	  $slice->get_repeatmasked_seq( $ANALYSIS_REPEAT_MASKING, $SOFT_MASKING )
	  or throw("Unable to fetch contig");
	my $maskedseq = $maskedslice->seq();
	# The s///g in scalar context counts the upper-case (unmasked) C/A/T/G
	# bases without changing the string (each is replaced by itself).
	# NOTE(review): the condition requires MORE than 3 (i.e. >= 4) unmasked
	# bases, while the warning below says "at least 3" -- confirm intent.
	if ( scalar( $maskedseq =~ s/([CATG])/$1/g ) > 3 ) {
		$self->input_is_void(0);
	}
	else {
		$self->input_is_void(1);
		warning("Need at least 3 nucleotides");
	}
	# Incremental updating of the embl blast db analysis
	# The embl blast dbs are made up of release files embl_*
	# and update files emnew_*. This block of code makes
	# sure that the analysis is only run against new version of either
	# of these files.
	my @files = split(",", $self->analysis->db_file);
	my @patches;
	if($files[-1] =~ /^embl_/){
		my $search_only_patch = 0;
		my $sic = $self->db->get_StateInfoContainer;
		my $db_version_saved = $sic->fetch_db_version($self->input_id, $self->analysis);
		my $db_version_current = $self->analysis->db_version;
		if($db_version_saved) {
			# split the embl blast db version "12-Mar-06 (85)" to
			# patch version "12-Mar-06" and release version "85"
			my ($patch_sv,$release_sv) = $db_version_saved =~ /^(\S+)\s+\((\d+)\)$/;
			my ($patch_cv,$release_cv) = $db_version_current =~ /^(\S+)\s+\((\d+)\)$/;
			# Same release already searched: only the emnew_* patch files
			# need to be scanned this time around.
			if($release_sv eq $release_cv){
				$search_only_patch = 1;
				print STDOUT "blast db files [ @files ] version $release_sv already searched\n";
				# Just to make sure that nothing is going wrong with the incremental updating...
				throw("Problem with the embl blast db incremental updating, saved and current version identical !\n
				saved [$db_version_saved] = current [$db_version_current]\n") unless($patch_sv ne $patch_cv)
			}
		}
		# Either swap every embl_* file for its emnew_* patch (same release),
		# or keep the release files and append the patch files (new release).
		foreach my $file (@files) {
			my $patch_file = $file;
			$patch_file =~ s/^embl_/emnew_/g;
			$search_only_patch ? $file = $patch_file : push @patches,$patch_file;
		}
	}
	$self->analysis->db_file(join(",",@files,@patches));
	my %parameters = %{ $self->parameters_hash };
	my $runnable = Bio::EnsEMBL::Analysis::Runnable::Finished::Exonerate->new(
		-analysis => $self->analysis,
		-program => $self->analysis->program,
		-query_db => $self->analysis->db_file,
		-target => $slice,
		%parameters
	);
	$self->runnable($runnable);
	return 1;
}
# Accessor/mutator for the blast-db version string recorded for this search.
# A true argument stores a new value; the current value is always returned.
sub db_version_searched {
    my $self = shift;
    if (@_ && $_[0]) {
        $self->{'_db_version_searched'} = $_[0];
    }
    return $self->{'_db_version_searched'};
}
1;
=head1 NAME - Bio::EnsEMBL::Analysis::RunnableDB::Finished::Exonerate
=head1 AUTHOR
Mustapha Larbaoui B<email> ml6@sanger.ac.uk
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/RunnableDB/Finished/Exonerate.pm | Perl | apache-2.0 | 4,001 |
# Auto-generated by SOAP::WSDL from the AdWords v201809 WSDL (see POD below);
# represents the FeedItem.Status XML Schema simpleType, a restricted string.
# Do not hand-edit generated classes beyond comments.
package Google::Ads::AdWords::v201809::FeedItem::Status;
use strict;
use warnings;

# XML namespace this type belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809'};

# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);

1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
FeedItem.Status from the namespace https://adwords.google.com/api/adwords/cm/v201809.
This class is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like its base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/FeedItem/Status.pm | Perl | apache-2.0 | 1,069 |
# VMOMI::VmDeployedEvent -- VmEvent subclass describing a VM deployed from a
# template. The class metadata below drives the VMOMI (de)serialisation code.
package VMOMI::VmDeployedEvent;
use parent 'VMOMI::VmEvent';

use strict;
use warnings;

# Ancestor class names, nearest ancestor first.
our @class_ancestors = (
    'VmEvent',
    'Event',
    'DynamicData',
);

# Field descriptors for this class: [name, type, is_array_flag].
our @class_members = (
    ['srcTemplate', 'VmEventArgument', 0, ],
);

# Return this class's ancestor list.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return all member descriptors, inherited ones first.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/VmDeployedEvent.pm | Perl | apache-2.0 | 455 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::securactive::mode::bcn;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
my $oid_spvBCNName = '.1.3.6.1.4.1.36773.3.2.2.1.1.1';
my $oid_spvBCNGlobalStatus = '.1.3.6.1.4.1.36773.3.2.2.1.1.4';
my %bcn_status = (
1 => ['ok', 'OK'],
2 => ['warning', 'WARNING'],
3 => ['alert', 'WARNING'],
4 => ['not available', 'UNKNOWN'],
5 => ['not data at all', 'UNKNOWN'],
6 => ['not enough samples for computation', 'UNKNOWN'],
);
# Constructor: build the mode object via the plugin base class, declare the
# command-line options this mode understands and initialise the list of
# selected BCN instance ids.
sub new {
    my ($class, %options) = @_;
    my $self = bless $class->SUPER::new(package => __PACKAGE__, %options), $class;

    my %arguments = (
        'bcn:s'                   => { name => 'bcn' },
        'name'                    => { name => 'use_name' },
        'regexp'                  => { name => 'use_regexp' },
        'display-transform-src:s' => { name => 'display_transform_src' },
        'display-transform-dst:s' => { name => 'display_transform_dst' },
    );
    $options{options}->add_options(arguments => \%arguments);

    $self->{bcn_id_selected} = [];
    return $self;
}
# Option validation: nothing mode-specific here, delegate to the base class.
sub check_options {
    my $self = shift;
    $self->SUPER::init(@_);
}
# Build the list of BCN instance ids to report on from the spvBCNName SNMP
# table. Selection mode depends on the options:
#   - no --bcn              : select every instance
#   - --bcn without --name  : --bcn is a numeric instance id
#   - --bcn with --name     : --bcn is a name (exact, or a regexp with --regexp)
# Exits the plugin with an error when nothing matches.
sub manage_selection {
    my ($self, %options) = @_;

    # nothing_quit => 1 makes the plugin abort when the table is empty.
    $self->{result_names} = $self->{snmp}->get_table(oid => $oid_spvBCNName, nothing_quit => 1);
    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{result_names}})) {
        # The instance id is the last numeric component of the OID.
        next if ($oid !~ /\.([0-9]+)$/);
        my $instance = $1;

        # Get all without a name
        if (!defined($self->{option_results}->{bcn})) {
            push @{$self->{bcn_id_selected}}, $instance;
            next;
        }

        # By ID
        if (!defined($self->{option_results}->{use_name}) && defined($self->{option_results}->{bcn})) {
            if ($instance == $self->{option_results}->{bcn}) {
                push @{$self->{bcn_id_selected}}, $instance;
            }
            next;
        }

        # By name: exact match by default, regexp match when --regexp is set.
        $self->{result_names}->{$oid} = $self->{output}->to_utf8($self->{result_names}->{$oid});
        if (!defined($self->{option_results}->{use_regexp}) && $self->{result_names}->{$oid} eq $self->{option_results}->{bcn}) {
            push @{$self->{bcn_id_selected}}, $instance;
        }
        if (defined($self->{option_results}->{use_regexp}) && $self->{result_names}->{$oid} =~ /$self->{option_results}->{bcn}/) {
            push @{$self->{bcn_id_selected}}, $instance;
        }
    }

    if (scalar(@{$self->{bcn_id_selected}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No bcn found for name '" . $self->{option_results}->{bcn} . "'.");
        $self->{output}->option_exit();
    }
}
# Mode entry point: resolve the selected BCN instances, fetch their
# spvBCNGlobalStatus values and map them to plugin severities via %bcn_status.
# One long-output line is emitted per BCN; the short output is either a
# global "all ok" banner or the individual problem lines.
# Perfdata emission is currently disabled (commented out below).
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    $self->manage_selection();
    # Only load the status column for the instances selected above.
    $self->{snmp}->load(oids => [$oid_spvBCNGlobalStatus],
                        instances => $self->{bcn_id_selected});
    my $result = $self->{snmp}->get_leef();

    # Multi-instance run: start from an OK banner, overridden on problems.
    if (!defined($self->{option_results}->{bcn}) || defined($self->{option_results}->{use_regexp})) {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => 'All BCN are ok.');
    }

    foreach my $instance (sort @{$self->{bcn_id_selected}}) {
        my $name = $self->{result_names}->{$oid_spvBCNName . '.' . $instance};
        $name = $self->get_display_value(value => $name);
        my $status = $result->{$oid_spvBCNGlobalStatus . '.' . $instance};
        # %bcn_status maps the SNMP value to [label, plugin severity].
        my $exit_from_snmp = ${$bcn_status{$status}}[1];

        $self->{output}->output_add(long_msg => sprintf("BCN '%s' global status is '%s'",
                                                        $name, ${$bcn_status{$status}}[0]));
        # Promote to short output on any non-OK status, or always when a
        # single BCN was explicitly requested by id or exact name.
        if (!$self->{output}->is_status(value => $exit_from_snmp, compare => 'ok', litteral => 1) || (defined($self->{option_results}->{bcn}) && !defined($self->{option_results}->{use_regexp}))) {
            $self->{output}->output_add(severity => $exit_from_snmp,
                                        short_msg => sprintf("BCN '%s' global status is '%s'", $name, ${$bcn_status{$status}}[0]));
        }

        my $extra_label = '';
        $extra_label = '_' . $name if (!defined($self->{option_results}->{bcn}) || defined($self->{option_results}->{use_regexp}));
        #$self->{output}->perfdata_add(label => 'eurt' . $extra_label,
        #                              value => $eurt,
        #                              warning => $warnth,
        #                              critical => $critth,
        #                              min => 0);
    }

    $self->{output}->display();
    $self->{output}->exit();
}
# Apply the optional --display-transform-src / --display-transform-dst
# substitution to a BCN name before it is displayed; returns the (possibly
# rewritten) value.
# NOTE(review): the substitution runs through a string eval so the
# replacement text can use capture variables; as the POD already warns, this
# executes option-supplied text and is a code-injection risk if the options
# come from an untrusted source. Errors from the eval are silently ignored.
sub get_display_value {
    my ($self, %options) = @_;
    my $value = $options{value};

    if (defined($self->{option_results}->{display_transform_src})) {
        $self->{option_results}->{display_transform_dst} = '' if (!defined($self->{option_results}->{display_transform_dst}));
        eval "\$value =~ s{$self->{option_results}->{display_transform_src}}{$self->{option_results}->{display_transform_dst}}";
    }
    return $value;
}
1;
__END__
=head1 MODE
Check BCN status.
=over 8
=item B<--bcn>
Set the bcn (number expected) ex: 1, 2,... (empty means 'check all bcn').
=item B<--name>
Allows to use bcn name with option --bcn instead of bcn oid index.
=item B<--regexp>
Allows to use regexp to filter bcn (with option --name).
=item B<--display-transform-src>
Regexp src to transform display value. (security risk!!!)
=item B<--display-transform-dst>
Regexp dst to transform display value. (security risk!!!)
=back
=cut
| Sims24/centreon-plugins | network/securactive/mode/bcn.pm | Perl | apache-2.0 | 6,562 |
=head1 Name
OpenXPKI::Crypto::Profile::CRL - cryptographic profile for CRLs.
=cut
use strict;
use warnings;
package OpenXPKI::Crypto::Profile::CRL;
use base qw(OpenXPKI::Crypto::Profile::Base);
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Debug;
use OpenXPKI::Exception;
use English;
use DateTime;
use Data::Dumper;
#use Smart::Comments;
=head2 new ( { CA, [ID, VALIDITY, CA_VALIDITY, CACERTIFICATE] } )
Create a new profile instance. Profile definitions are loaded from
the config layer. The profile name can be given explicit via <ID>,
in this case the node I<crl.<profile>> must exist.
If no profile is given, the config layer is checked for a profile
matching the name of the ca alias name. If no such profile is found,
all values are loaded from I<crl.default>.
A profile must define validity and digest, extension values are inherited
from the default profile in case they are not set in the special profile.
=over
=item CA
The alias of the ca token to be used (from the alias table)
=item ID
The name of the profile (as given in realm.crl)
=item VALIDITY
optional, override validity from profile definition.
Must be a hashref useable with OpenXPKI::DateTime::get_validity.
Only relative dates are supported.
=item CA_VALIDITY
optional, if given the computed nextupdate is checked if it exceeds the
ca validity and uses the validity set in I<crl.<profile>.lastcrl>.
Absolute dates are supported but the actual timestamp in the crl might
differ as it is converted to "hours from now".
=item CACERTIFICATE
PEM encoded ca certificate to use. This is mainly for testing, in regular
operation the certificate is determined using the API.
=back
=cut
# Constructor. CA (the ca token alias) is mandatory; ID, VALIDITY and
# CA_VALIDITY are optional and only stored when they hold a true value.
# The profile configuration is loaded from the config layer before returning.
sub new {
    my $proto = shift;
    my %args  = @_;
    my $self  = bless {}, ref($proto) || $proto;

    OpenXPKI::Exception->throw(
        message => "I18N_OPENXPKI_CRYPTO_PROFILE_CRL_NEW_MISSING_CA"
    ) unless $args{CA};

    $self->{CA} = $args{CA};
    for my $key (qw( ID VALIDITY CA_VALIDITY )) {
        $self->{$key} = $args{$key} if $args{$key};
    }
    ##! 2: "parameters ok"

    $self->__load_profile();
    ##! 2: "config loaded"

    return $self;
}
=head2 __load_profile
Load the profile, called from constructor
=cut
# Load the CRL profile from the config layer (called from the constructor).
# Resolves the config path (crl.<ID>, crl.<CA alias> or crl.default), reads
# the digest, converts the configured/overridden validity into whole DAYS
# plus leftover HOURS in $self->{PROFILE}, handles the CA end-of-life case
# and finally loads the CRL extensions. Returns 1; throws on bad input.
sub __load_profile
{
    my $self = shift;

    my $config = CTX('config');

    my $pki_realm = CTX('session')->data->pki_realm;

    my @basepath = ("crl");
    my $validity;

    # Config path resolution: explicit profile ID wins (and must exist),
    # then a profile named after the CA alias, then the default profile.
    if ($self->{ID}) {
        OpenXPKI::Exception->throw (
            message => "Given CRL Profile not defined",
        ) if (not $config->exists(['crl', $self->{ID} ]));
        push @basepath, $self->{ID};
    } elsif ($config->exists(['crl', $self->{CA} ])) {
        push @basepath, $self->{CA};
    } else {
        push @basepath, 'default';
    }
    ##! 16: 'Using config at ' . $basepath[1];

    $self->{PROFILE}->{DIGEST} = $config->get([ @basepath, 'digest' ]);

    # use local setting for validity
    if ($self->{VALIDITY}) {
        ##! 16: "Override validity: " . $self->{VALIDITY}
        $validity = $self->{VALIDITY};
    } else {
        my $nextupdate = $config->get([ @basepath, 'validity', 'nextupdate' ]);
        ##! 16: 'Validity from profile ' . $nextupdate
        $validity = {
            VALIDITYFORMAT => 'relativedate',
            VALIDITY => $nextupdate,
        };
    }

    if (!$validity || !$validity->{VALIDITY}) {
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_CRYPTO_PROFILE_CRL_LOAD_PROFILE_VALIDITY_NOTAFTER_NOT_DEFINED",
        );
    }

    # for error handling: absence of DAYS below means the format was invalid
    delete $self->{PROFILE}->{DAYS};
    delete $self->{PROFILE}->{HOURS};
    my $notafter;
    # plain days
    if ($validity->{VALIDITYFORMAT} eq "days") {
        $self->{PROFILE}->{DAYS} = $validity->{VALIDITY};
        $self->{PROFILE}->{HOURS} = 0;
        $notafter = DateTime->now( time_zone => 'UTC' )->add( days => $validity->{VALIDITY} );
    }
    # handle relative date formats ("+0002" for two months)
    if ($validity->{VALIDITYFORMAT} eq "relativedate") {
        $notafter = OpenXPKI::DateTime::get_validity($validity);
        # Convert the absolute target date back into "hours from now",
        # then split into whole days plus remaining hours.
        my $hours = sprintf("%d", ($notafter->epoch() - time) / 3600);
        my $days = sprintf("%d", $hours / 24);
        $hours = $hours % 24;
        $self->{PROFILE}->{DAYS} = $days;
        $self->{PROFILE}->{HOURS} = $hours;
    }

    # only relative dates are allowed for CRLs
    if (! exists $self->{PROFILE}->{DAYS}) {
        OpenXPKI::Exception->throw (
            message => "I18N_OPENXPKI_CRYPTO_PROFILE_CRL_LOAD_PROFILE_INVALID_VALIDITY_FORMAT",
            params => $validity,
        );
    }

    # Check if the CA would be valid at the next update or if its time for the "End of Life" CRL
    my $ca_validity;
    $ca_validity = OpenXPKI::DateTime::get_validity($self->{CA_VALIDITY}) if ($self->{CA_VALIDITY});
    if ($ca_validity && $notafter > $ca_validity) {
        my $last_crl_validity = $config->get([ @basepath, 'validity', 'lastcrl' ]);
        if (!$last_crl_validity) {
            CTX('log')->application()->warn('CRL for CA ' . $self->{CA}. ' in realm ' . $pki_realm . ' will be end of life before next update is scheduled!');
        } else {
            # Issue the final ("last") CRL with the configured lastcrl
            # validity instead of the regular interval.
            $notafter = OpenXPKI::DateTime::get_validity({
                VALIDITYFORMAT => 'detect',
                VALIDITY => $last_crl_validity,
            });
            my $hours = sprintf("%d", ($notafter->epoch() - time) / 3600);
            my $days = sprintf("%d", $hours / 24);
            $hours = $hours % 24;
            $self->{PROFILE}->{DAYS} = $days;
            $self->{PROFILE}->{HOURS} = $hours;
            CTX('log')->application()->info('CRL for CA ' . $self->{CA} . ' in realm ' . $pki_realm . ' nearly EOL - will issue with last crl interval!');
        }
    }

    # TODO - implement crl_number (but not here ...)
    # possibly:
    # RFC 3280, 5.2.5 - issuing_distributing_point (if someone really
    # needs it ...)

    my $path = join(".", @basepath);
    foreach my $ext (qw( authority_info_access authority_key_identifier issuer_alt_name oid)) {
        ##! 16: 'load extension ' . $ext
        $self->load_extension({
            PATH => $path,
            EXT => $ext,
        });
    }

    ##! 2: Dumper($self->{PROFILE})
    ##! 1: "end"
    return 1;
}
# Whole days until the CRL's nextUpdate, as computed by __load_profile.
sub get_nextupdate_in_days {
    my ($self) = @_;
    return $self->{PROFILE}{DAYS};
}
# Leftover hours (beyond whole days) until the CRL's nextUpdate.
sub get_nextupdate_in_hours {
    my ($self) = @_;
    return $self->{PROFILE}{HOURS};
}
# Digest algorithm name configured for this CRL profile.
sub get_digest {
    my ($self) = @_;
    return $self->{PROFILE}{DIGEST};
}
1;
__END__
| openxpki/openxpki | core/server/OpenXPKI/Crypto/Profile/CRL.pm | Perl | apache-2.0 | 6,720 |
############################################################
#
# $Id$
# Apache2::AutoIndex::XSLT - XSLT Based Directory Listings
#
# Copyright 2006, 2007 Nicola Worthington
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################
package Apache2::AutoIndex::XSLT;
# vim:ts=4:sw=4:tw=78
use 5.6.1;
use strict;
use warnings;
#use warnings FATAL => 'all';
use File::Spec qw();
use Fcntl qw();
use XML::Quote qw();
use URI::Escape qw(); # Try to replace with Apache2::Util or Apache2::URI
# This is libapreq2 - we're parsing the query string manually
# to avoid loading another non-standard module
# use Apache2::Request qw();
# These two are required in general
use Apache2::ServerRec qw(); # $r->server
use Apache2::RequestRec qw();
use Apache2::RequestUtil qw(); # $r->document_root
# Used to return various Apache constant response codes
use Apache2::Const -compile => qw(:common :options :config :cmd_how :override :types);
# Used for writing to Apache logs
use Apache2::Log qw();
# Used for parsing Apache configuration directives
use Apache2::Module qw();
use Apache2::CmdParms qw(); # Needed for use with Apache2::Module callbacks
# Used to get the main server Apache2::ServerRec (not the virtual ServerRec)
use Apache2::ServerUtil qw();
# Used for Apache2::Util::ht_time time formatting
use Apache2::Util qw();
use Apache2::URI qw(); # $r->construct_url
use Apache2::Access qw(); # $r->allow_options
#use Apache2::Directive qw(); # Possibly not needed
use Apache2::SubRequest qw(); # Needed for subrequests :)
use Apache2::RequestIO qw(); # Needed for $r->print
# Start here ...
# http://perl.apache.org/docs/2.0/user/config/custom.html
# http://perl.apache.org/docs/2.0/api/Apache2/Module.html
# http://perl.apache.org/docs/2.0/api/Apache2/Const.html
# http://perl.apache.org/docs/2.0/user/porting/compat.html
# http://httpd.apache.org/docs/2.2/mod/mod_autoindex.html
# http://httpd.apache.org/docs/2.2/mod/mod_dir.html
# http://www.modperl.com/book/chapters/ch8.html
use vars qw($VERSION %DIRECTIVES %COUNTERS %FILETYPES);
$VERSION = '0.04' || sprintf('%d.%02d', q$Revision: 531 $ =~ /(\d+)/g);
%COUNTERS = (Listings => 0, Files => 0, Directories => 0, Errors => 0);
#
# Apache response handler
#
# PerlResponseHandler: produce an XML (optionally server-side XSLT-rendered)
# directory listing for requests that resolve to a directory.
# Fixes in this revision:
#  - "!defined %FILETYPES" replaced by "!%FILETYPES": defined() on a hash was
#    deprecated and is a fatal error from Perl 5.22 onward.
#  - lexical filehandle instead of the bareword FH when reading the
#    FileTypesFilename.
#  - error path now logs and prints separately instead of fusing both into a
#    single warn() (which appended print()'s return value to the log line).
sub handler {
	my $r = shift;

	# Only handle directories
	return Apache2::Const::DECLINED unless $r->content_type &&
		$r->content_type eq Apache2::Const::DIR_MAGIC_TYPE;

	# Parse query string and get config
	my ($qstring,$dir_cfg) = init_handler($r);

	# Read in the filetypes information once per process; the file is looked
	# for verbatim, under the document root, then under the server root.
	if (!%FILETYPES && defined $dir_cfg->{FileTypesFilename}) {
		FileTypesFilename: for my $FileTypesFilename (
				$dir_cfg->{FileTypesFilename},
				File::Spec->catfile($r->document_root,$dir_cfg->{FileTypesFilename}),
				File::Spec->catfile(Apache2::ServerUtil->server_root,'conf',$dir_cfg->{FileTypesFilename}),
				File::Spec->catfile(Apache2::ServerUtil->server_root,$dir_cfg->{FileTypesFilename})
			) {
			my $ext = '';
			if (open(my $fh,'<',$FileTypesFilename)) {
				# Lines look like "Key: value"; an "ext(ension)" key starts a
				# new extension record, other keys attach to the current one.
				while (local $_ = <$fh>) {
					if (my ($k,$v) = $_ =~ /^\s*(\S+)\s*:\s*(\S.*?)\s*$/) {
						if ($k =~ /ext(ension)?/i) {
							$v =~ s/^\.//;
							$ext = $v || '';
						} elsif ($v) {
							$FILETYPES{lc($ext)}->{$k} = $v;
						}
					}
				}
				close($fh);
				last FileTypesFilename;
			}
		}
	}

	# Dump the configuration out to screen when ?CONFIG is in the query.
	if (defined $qstring->{CONFIG}) {
		$r->content_type('text/plain');
		$r->print(dump_apache_configuration($r));
		return Apache2::Const::OK;
	}

	# Make sure we're at a URL with a trailing slash
	if ($dir_cfg->{DirectorySlash} && $r->uri !~ m,/$,) {# || $r->path_info){
		$r->headers_out->add(Location => sprintf('%s/%s',
				$r->uri,
				($r->args ? '?'.$r->args : '')
			));
		return Apache2::Const::REDIRECT;
	}

	# Return a directory listing if we're allowed to
	if ($r->allow_options & Apache2::Const::OPT_INDEXES) {
		# Should we render the XSLT server-side? Controlled by RenderXSLT,
		# or (when unset) by the environment variable named in
		# RenderXSLTEnvVar; requires XML::LibXML + XML::LibXSLT.
		my $render = 0;
		if ($dir_cfg->{RenderXSLT} ||
			(!exists $dir_cfg->{RenderXSLT} &&
			defined $dir_cfg->{RenderXSLTEnvVar} &&
			defined $ENV{$dir_cfg->{RenderXSLTEnvVar}} &&
			$ENV{$dir_cfg->{RenderXSLTEnvVar}} =~ /^\s*(On|1|Yes|True)\s*$/i)
		) {
			eval {
				require XML::LibXSLT;
				require XML::LibXML;
				$render = 1;
			};
			$r->log_error('Failed to load XML::LibXML or XML::LibXSLT modules: ', $@) if $@;
		}

		# Send the appropriate content type
		my $content_type = $render
			? 'text/html'
			: 'text/xml; charset="utf-8"';
		$r->content_type($content_type);
		return Apache2::Const::OK if $r->header_only;

		# dir_xml() builds (and we print) the complete XML document; when
		# rendering, it is transformed through the IndexStyleSheet first.
		my $rtn = Apache2::Const::SERVER_ERROR;
		my $xml;
		eval {
			$xml = dir_xml($r,$dir_cfg,$qstring);
			unless ($render) {
				$r->print($xml);
			} else {
				my $parser = XML::LibXML->new();
				my $source = $parser->parse_string($xml);
				my $subr = $r->lookup_uri($dir_cfg->{IndexStyleSheet});
				my $xslt = XML::LibXSLT->new();
				my $style_doc = $parser->parse_file($subr->filename);
				my $stylesheet = $xslt->parse_stylesheet($style_doc);
				my $results = $stylesheet->transform($source);
				$r->print($stylesheet->output_string($results));
			}
			$rtn = Apache2::Const::OK;
		};
		if (!defined $xml || $@) {
			$COUNTERS{Errors}++;
			warn $@;
			$r->print($@);
		}
		return $rtn;

	# Otherwise he's not the messiah, he's a very naughty boy
	} else {
		$r->log_reason(
			sprintf('%s Directory index forbidden by rule', __PACKAGE__),
			sprintf('%s (%s)', $r->uri, $r->filename),
		);
		return Apache2::Const::FORBIDDEN;
	}
}
# PerlTransHandler: DirectoryIndex emulation. For a directory URI, probe each
# configured DirectoryIndex candidate via a subrequest and rewrite the URI to
# the first one that exists on disk; always declines so the normal request
# cycle (and the response handler above) continues.
sub transhandler {
	my $r = shift;

	# Only handle directories
	return Apache2::Const::DECLINED unless $r->uri =~ /\/$/;
	return Apache2::Const::DECLINED unless $r->content_type &&
		$r->content_type eq Apache2::Const::DIR_MAGIC_TYPE;

	# Parse query string and get config
	my ($qstring,$dir_cfg) = init_handler($r);

	foreach (@{$dir_cfg->{DirectoryIndex}}){
		my $subr = $r->lookup_uri($r->uri . $_);
		# A subrequest with leftover path_info means we have left the
		# filesystem mapping - stop probing.
		last if $subr->path_info;
		if (stat $subr->finfo){
			$r->uri($subr->uri);
			last;
		}
	}
	return Apache2::Const::DECLINED;
}
#
# Apache2::Status status page handler
#
# Let Apache2::Status know we're here if it's hanging around
# Register a menu entry with Apache2::Status (when that module is loaded) so
# the status() callback below appears on the server-status pages. Wrapped in
# eval and skipped under AUTOMATED_TESTING because it needs a live mod_perl
# environment.
unless (exists $ENV{AUTOMATED_TESTING}) {
	eval { Apache2::Status->menu_item('AutoIndex' => sprintf('%s status',__PACKAGE__),
		\&status) if Apache2::Module::loaded('Apache2::Status'); };
}
# Apache2::Status callback: return a listref of HTML fragments showing the
# module version, the known configuration directives, the per-process hit
# counters and a dump of the active configuration.
sub status {
	my $r = shift;

	my @html = (
		sprintf('<b>%s %s</b><br />', __PACKAGE__, $VERSION),
		sprintf('<p><b>Configuration Directives:</b> %s</p>',
			join(', ', keys %DIRECTIVES)),
		"<table>\n",
	);
	for my $counter (keys %COUNTERS) {
		push @html, sprintf("<tr><th align=\"left\">%s:</th><td>%s</td></tr>\n",
			$counter, $COUNTERS{$counter});
	}
	push @html, "</table>\n";
	push @html, "<p><b>Configuration:</b><br />\n";
	push @html, dump_apache_configuration($r)."</p>\n";

	return \@html;
}
#
# Private helper subroutines
#
# Shared setup for handler() and transhandler(): parse the query string into
# a hashref (done by hand to avoid a dependency on libapreq2 /
# Apache2::Request) and fetch the merged per-directory configuration.
# Returns ($qstring_hashref, $dir_cfg_hashref).
sub init_handler {
	my $r = shift;

	# Get query string values - use this manual code instead of
	# Apache2::Request because it uses less memory, and Apache2::Request
	# does not come as standard with mod_perl2 (it's libapreq2 on CPAN)
	my $qstring = {};
	for (split(/[&;]/,($r->args||''))) {
		my ($k,$v) = split('=',$_,2);
		next unless defined $k;
		$v = '' unless defined $v;
		$qstring->{URI::Escape::uri_unescape($k)} =
			URI::Escape::uri_unescape($v);
	}

	# Get the configuration directives
	my $dir_cfg = get_config($r->server, $r->per_dir_config);

	return ($qstring,$dir_cfg);
}
# Build the complete XML document (header, DTD, <index> tree with one
# <file>/<dir> element per directory entry) for the directory mapped by the
# current request, and return it as a string.
# NOTE(review): on opendir failure this returns Apache2::Const::FORBIDDEN,
# but handler() treats the return value as XML text and prints it - the
# client would receive the bare constant. Confirm the intended error path.
sub dir_xml {
	my ($r,$dir_cfg,$qstring) = @_;
	my $xml = '';

	# Increment listings counter
	$COUNTERS{Listings}++;

	# Get directory to work on
	my $directory = $r->filename;
	$r->filename("$directory/") unless $directory =~ m/\/$/;

	# Open the physical directory on disk to get a list of all items inside.
	# This won't pick up virtual directories aliased in Apache's configs.
	my $dh;
	unless (opendir($dh,$directory)) {
		$r->log_reason(
			sprintf("%s Unable to open directory handle for '%s': %s",
				__PACKAGE__, $directory, $!),
			sprintf('%s (%s)', $r->uri, $directory),
		);
		return Apache2::Const::FORBIDDEN;
	}

	# Send the XML header and top of the index tree
	$xml .= xml_header($r,$dir_cfg);
	$xml .= sprintf("<index path=\"%s\" href=\"%s\" >\n",
		$r->uri, $r->construct_url);
	$xml .= xml_options($r,$qstring,$dir_cfg);
	# Parent-directory link for everything but the site root.
	$xml .= "\t<updir icon=\"/icons/__back.png\" />\n"
		unless $r->uri =~ m,^/?$,;

	# Build a list of attributes for each item in the directory and then
	# print it as an element in the index tree.
	while (my $id = readdir($dh)) {
		next if $id eq '..' || $id eq '.';
		# Skip entries matching any IndexIgnore pattern.
		next if grep($id =~ /^$_$/, @{$dir_cfg->{IndexIgnoreRegex}});
		#my $subr = $r->lookup_file($id); # Not used yet
		my $filename = File::Spec->catfile($directory,$id);
		my $type = file_type($r,$id,$filename);
		my $attr = build_attributes($r,$dir_cfg,$id,$filename,$type);
		$xml .= sprintf("\t<%s %s />\n", $type, join(' ',
				map { sprintf("\n\t\t%s=\"%s\"",$_,$attr->{$_})
					if defined $_ && defined $attr->{$_} }
					keys(%{$attr})
			));
		$COUNTERS{Files}++ if $type eq 'file';
		$COUNTERS{Directories}++ if $type eq 'dir';
	}

	# Close the index tree, directory handle and return
	$xml .= "</index>\n";
	closedir($dh);
	return $xml;
}
# Emit the <options> element: one <option> per recognised query-string
# switch (C O F V P), followed by one per scalar configuration directive
# value (complex/ref values are skipped as they have no useful string form).
sub xml_options {
	my ($r,$qstring,$dir_cfg) = @_;
	my $xml = '';
	my $format = "\t\t<option name=\"%s\" value=\"%s\" />\n";
	$xml .= "\t<options>\n";

	# Query string options
	for my $option (qw(C O F V P)) {
		$xml .= sprintf($format,$option,$qstring->{$option})
			if defined $qstring->{$option} &&
				$qstring->{$option} =~ /\S+/;
	}

	# Apache configuration directives (multi-valued directives are stored as
	# array refs and flattened to one <option> per value).
	for my $d (keys %DIRECTIVES) {
		for my $value ((
				!exists($dir_cfg->{$d}) ? ()
					: ref($dir_cfg->{$d}) eq 'ARRAY'
						? @{$dir_cfg->{$d}}
						: ($dir_cfg->{$d})
			)) {
			# Don't bother printing stuff that we only have
			# some confusing internal complex data structure for
			$xml .= sprintf($format,$d,$value) unless ref($value);
		}
	}

	$xml .= "\t</options>\n";
	return $xml;
}
# Pick an (alt, icon) pair for a directory entry. Default icon is
# /icons/<ext>.png when such a file exists under the document root, else the
# DefaultIcon directive; any AddIconRegex pattern matching the end of the
# entry name then overrides both.
# NOTE(review): when several AddIconRegex patterns match, the winner is
# whichever the hash iterates last, i.e. effectively arbitrary - confirm.
sub icon_by_extension {
	my ($r,$id,$ext,$dir_cfg) = @_;
	my $alt = '';
	my $icon =
		$ext && -f File::Spec->catfile($r->document_root,'icons',lc("$ext.png"))
		? '/icons/'.lc("$ext.png")
		: $dir_cfg->{DefaultIcon} || '';
	while (my ($re,$v) = each %{$dir_cfg->{AddIconRegex}}) {
		if ($id =~ /$re$/) {
			($alt,$icon) = @{$v};
		}
	}
	return ($alt,$icon);
}
# Assemble the attribute hash for one directory entry's XML element:
# stat-derived metadata, icon/alt, escaped href/title and a human-readable
# description (AddDescription directive, then the filetypes DisplayName,
# then a generic "<EXT> File"/"File Folder"/"File" fallback).
# Fix in this revision: extension-less files left $attr->{ext} undef, so
# lc($attr->{ext}) raised an uninitialized-value warning and autovivified a
# bogus '' key in %FILETYPES; the lookup is now guarded.
sub build_attributes {
	my ($r,$dir_cfg,$id,$filename,$type) = @_;
	# The parent-directory element carries no attributes.
	return {} if $type eq 'updir';

	my $attr = stat_file($r,$filename);
	if ($type eq 'file') {
		# Trailing ".ext" (if any) drives icon and description selection.
		($attr->{ext}) = $id =~ /\.([a-z0-9_]+)$/i;
		($attr->{alt},$attr->{icon}) = icon_by_extension($r,$id,$attr->{ext},$dir_cfg);
	} elsif ($type eq 'dir') {
		$attr->{alt} = 'DIR';
		$attr->{icon} = '/icons/__dir.png';
		# The magic '^^DIRECTORY^^' AddIcon key overrides the directory icon.
		if ($dir_cfg->{AddIconRegex}->{'^^DIRECTORY^^'}) {
			($attr->{alt},$attr->{icon}) =
				@{$dir_cfg->{AddIconRegex}->{'^^DIRECTORY^^'}};
		}
	} elsif ($type eq 'updir') {
		$attr->{icon} = '/icons/__back.png';
	}

	unless ($type eq 'updir') {
		#$attr->{id} = $id; # This serves no real purpose anymor
		$attr->{href} = URI::Escape::uri_escape($id);
		$attr->{href} .= '/' if $type eq 'dir';
		$attr->{title} = XML::Quote::xml_quote($id);
		$attr->{desc} = $type eq 'dir'
			? 'File Folder'
			: defined $attr->{ext}
				? sprintf('%s File',uc($attr->{ext}))
				: 'File';
		if (exists $dir_cfg->{AddDescription}->{$r->uri.URI::Escape::uri_escape($id)}) {
			$attr->{desc} = $dir_cfg->{AddDescription}->{$r->uri.URI::Escape::uri_escape($id)};
		} elsif (defined $attr->{ext}
			&& defined $FILETYPES{lc($attr->{ext})}
			&& defined $FILETYPES{lc($attr->{ext})}->{DisplayName}) {
			$attr->{desc} = $FILETYPES{lc($attr->{ext})}->{DisplayName};
		}
		$attr->{desc} = XML::Quote::xml_quote($attr->{desc});
	}

	return $attr;
}
# Classify a directory entry: 'updir' for the parent link ('..'), 'dir' for
# any other directory, 'file' for everything else. $r is accepted for
# interface symmetry but not used.
sub file_type {
	my ($r, $id, $file) = @_;
	if (-d $file) {
		return $id eq '..' ? 'updir' : 'dir';
	}
	return 'file';
}
# Return the XML declaration, the optional xml-stylesheet processing
# instruction (text/css for *.css stylesheets, text/xsl for anything else)
# and the inline DTD that describes the <index> document structure. The DTD
# text is the documented output contract for consumers of the listing.
sub xml_header {
	my ($r,$dir_cfg) = @_;
	my $xml = '';

	my $xslt = $dir_cfg->{IndexStyleSheet} || '';
	my $type = $xslt =~ /\.css/ ? 'text/css' : 'text/xsl';
	$xml .= qq{<?xml version="1.0"?>\n};
	# Only reference a stylesheet when one is configured.
	$xml .= qq{<?xml-stylesheet type="$type" href="$xslt"?>\n} if $xslt;

	$xml .= qq{$_\n} for (
			'<!DOCTYPE index [',
			'  <!ELEMENT index (options?, updir?, (file | dir)*)>',
			'  <!ATTLIST index href      CDATA #REQUIRED',
			'                  path      CDATA #REQUIRED>',
			'  <!ELEMENT options (option*)>',
			'  <!ELEMENT option EMPTY>',
			'  <!ATTLIST option name     CDATA #REQUIRED',
			'                   value    CDATA #IMPLIED>',
			'  <!ELEMENT updir EMPTY>',
			'  <!ATTLIST updir icon      CDATA #IMPLIED>',
			'  <!ELEMENT file EMPTY>',
			'  <!ATTLIST file href       CDATA #REQUIRED',
			'                 title      CDATA #REQUIRED',
			'                 desc       CDATA #IMPLIED',
			'                 owner      CDATA #IMPLIED',
			'                 group      CDATA #IMPLIED',
			'                 uid        CDATA #REQUIRED',
			'                 gid        CDATA #REQUIRED',
			'                 ctime      CDATA #REQUIRED',
			'                 nicectime  CDATA #IMPLIED',
			'                 mtime      CDATA #REQUIRED',
			'                 nicemtime  CDATA #IMPLIED',
			'                 perms      CDATA #REQUIRED',
			'                 size       CDATA #REQUIRED',
			'                 nicesize   CDATA #IMPLIED',
			'                 icon       CDATA #IMPLIED',
			'                 alt        CDATA #IMPLIED',
			'                 ext        CDATA #IMPLIED>',
			'  <!ELEMENT dir EMPTY>',
			'  <!ATTLIST dir href        CDATA #REQUIRED',
			'                title       CDATA #REQUIRED',
			'                desc        CDATA #IMPLIED',
			'                owner       CDATA #IMPLIED',
			'                group       CDATA #IMPLIED',
			'                uid         CDATA #REQUIRED',
			'                gid         CDATA #REQUIRED',
			'                ctime       CDATA #REQUIRED',
			'                nicectime   CDATA #IMPLIED',
			'                mtime       CDATA #REQUIRED',
			'                nicemtime   CDATA #IMPLIED',
			'                perms       CDATA #REQUIRED',
			'                size        CDATA #REQUIRED',
			'                nicesize    CDATA #IMPLIED',
			'                alt         CDATA #IMPLIED',
			'                icon        CDATA #IMPLIED>',
			']>',
		);

	return $xml;
}
# Translate a shell-style glob into an equivalent regex string: '.' becomes
# literal, '?' matches any single character, '*' matches any run, and any
# remaining unescaped ( ) [ ] + metacharacters are escaped.
# (A false argument - undef, '' or '0' - yields the empty pattern, matching
# the original "shift || ''" behaviour.)
sub glob2regex {
	my $pattern = shift || '';
	for ($pattern) {
		s/\./\\./g;                          # literal dot
		s/\?/./g;                            # ? -> any single character
		s/\*/.*/g;                           # * -> any run of characters
		s/(?<!\\)([\(\)\[\]\+])/\\$1/g;      # escape leftover metacharacters
	}
	return $pattern;                         # Now a regex
}
# Insert comma thousands-separators into the leading (optionally signed)
# integer of a string after trimming surrounding whitespace,
# e.g. '1234567 KB' -> '1,234,567 KB'.
sub comify {
	my $text = shift;
	$text =~ s/^\s+//;
	$text =~ s/\s+$//;
	# Repeatedly peel three digits off the end of the leading number.
	while ($text =~ s/^([-+]?\d+)(\d{3})/$1,$2/) { }
	return $text;
}
# Collect lstat() metadata for one directory entry and return a hashref with
# the attributes used in the XML: uid, gid, size, mtime/ctime (formatted as
# yyyy-mm-ddThh:mm with a fixed -00:00 offset), perms (ls-style string from
# file_mode), owner/group names, plus human-friendly nicesize / nicemtime /
# nicectime variants.
sub stat_file {
	my ($r,$filename) = @_;
	my %stat;
	# lstat (not stat) so symlinks report their own metadata.
	@stat{qw(dev ino mode nlink uid gid rdev size
		atime mtime ctime blksize blocks)} = lstat($filename);
	my %rtn;
	$rtn{$_} = $stat{$_} for qw(uid gid mtime ctime size);
	$rtn{perms} = file_mode($stat{mode});
	$rtn{owner} = scalar getpwuid($rtn{uid});
	$rtn{group} = scalar getgrgid($rtn{gid});
	# Size in KB, comma-grouped; non-empty files get a 1 KB offset added
	# before the integer division so they never display as "0 KB".
	$rtn{nicesize} = comify(sprintf('%d KB',
			($rtn{size} + ($rtn{size} ? 1024 : 0))/1024
		));
	# Reformat times to this format: yyyy-mm-ddThh:mm-tz:tz
	for (qw(mtime ctime)) {
		my $time = $rtn{$_};
		$rtn{$_} = Apache2::Util::ht_time(
			$r->pool, $time,
			'%Y-%m-%dT%H:%M-00:00',
			0,
		);
		$rtn{"nice$_"} = Apache2::Util::ht_time(
			$r->pool, $time,
			'%d/%m/%Y %H:%M',
			0,
		);
	}
	return \%rtn;
}
# Render a raw st_mode word as an ls(1)-style permission string,
# e.g. 0100644 -> '-rw-r--r--'. Algorithm originally adapted from
# http://zarb.org/~gc/resource/find_recent by Guillaume Cottenceau.
sub file_mode {
    my $bits = shift;
    # Leading file-type character.
    my $type = Fcntl::S_ISREG($bits)  ? '-'
             : Fcntl::S_ISDIR($bits)  ? 'd'
             : Fcntl::S_ISLNK($bits)  ? 'l'
             : Fcntl::S_ISBLK($bits)  ? 'b'
             : Fcntl::S_ISCHR($bits)  ? 'c'
             : Fcntl::S_ISFIFO($bits) ? 'p'
             : Fcntl::S_ISSOCK($bits) ? 's'
             :                          '?';
    # Build one rwx triplet. A set special bit (setuid/setgid/sticky)
    # replaces the execute slot with its letter: lowercase when the
    # execute bit is also set, uppercase otherwise.
    my $triplet = sub {
        my ($read, $write, $exec, $special, $letter) = @_;
        my $str = $bits & $read  ? 'r' : '-';
        $str   .= $bits & $write ? 'w' : '-';
        if ($bits & $special) {
            $str .= $bits & $exec ? lc($letter) : uc($letter);
        } else {
            $str .= $bits & $exec ? 'x' : '-';
        }
        return $str;
    };
    return $type
        . $triplet->(Fcntl::S_IRUSR(), Fcntl::S_IWUSR(), Fcntl::S_IXUSR(), Fcntl::S_ISUID(), 's')
        . $triplet->(Fcntl::S_IRGRP(), Fcntl::S_IWGRP(), Fcntl::S_IXGRP(), Fcntl::S_ISGID(), 's')
        . $triplet->(Fcntl::S_IROTH(), Fcntl::S_IWOTH(), Fcntl::S_IXOTH(), Fcntl::S_ISVTX(), 't');
}
#
# Handle all Apache configuration directives.
# http://perl.apache.org/docs/2.0/user/config/custom.html
#
# Each entry is a directive specification suitable for
# Apache2::Module::add(): the handler sub of the same name (defined
# below) is invoked by mod_perl when the directive appears in the
# server configuration. Directives present in %DIRECTIVES but without
# a full spec fall back to a default ITERATE spec at registration time.
#
%DIRECTIVES = (
    # Directives specific to this module:
    # http://search.cpan.org/~nicolaw/Apache2-AutoIndex-XSLT/lib/Apache2/AutoIndex/XSLT.pm
    FileTypesFilename => {
            name => 'FileTypesFilename',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE1,
            errmsg => 'FileTypesFilename file',
        },
    RenderXSLT => {
            name => 'RenderXSLT',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::FLAG,
            errmsg => 'RenderXSLT On|Off',
        },
    RenderXSLTEnvVar => {
            name => 'RenderXSLTEnvVar',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE1,
            errmsg => 'RenderXSLTEnvVar variable name',
        },
    # Directives emulating mod_autoindex:
    # http://httpd.apache.org/docs/2.2/mod/mod_autoindex.html
    AddAlt => {
            name => 'AddAlt',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddAlt string file [file] ...',
        },
    AddAltByEncoding => {
            name => 'AddAltByEncoding',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddAltByEncoding string MIME-encoding [MIME-encoding] ...',
        },
    AddAltByType => {
            name => 'AddAltByType',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddAltByType string MIME-type [MIME-type] ...',
        },
    AddDescription => {
            name => 'AddDescription',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddDescription string file [file] ...',
        },
    AddIcon => {
            name => 'AddIcon',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddIcon icon name [name] ...',
        },
    AddIconByEncoding => {
            name => 'AddIconByEncoding',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddIconByEncoding icon MIME-encoding [MIME-encoding] ...',
        },
    AddIconByType => {
            name => 'AddIconByType',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE2,
            errmsg => 'AddIconByType icon MIME-type [MIME-type] ...',
        },
    DefaultIcon => {
            name => 'DefaultIcon',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE1,
            errmsg => 'DefaultIcon url-path',
        },
    HeaderName => {
            name => 'HeaderName',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE1,
            errmsg => 'HeaderName filename',
        },
    IndexIgnore => {
            name => 'IndexIgnore',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE,
            errmsg => 'IndexIgnore file [file] ...',
        },
    IndexOptions => {
            name => 'IndexOptions',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE,
            errmsg => 'IndexOptions [+|-]option [[+|-]option] ...',
        },
    IndexOrderDefault => {
            name => 'IndexOrderDefault',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE2,
            errmsg => 'IndexOrderDefault Ascending|Descending Name|Date|Size|Description',
        },
    IndexStyleSheet => {
            name => 'IndexStyleSheet',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE1,
            errmsg => 'IndexStyleSheet url-path',
        },
    ReadmeName => {
            name => 'ReadmeName',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::TAKE1,
            errmsg => 'ReadmeName filename',
        },
    # Directives emulating mod_dir:
    # http://httpd.apache.org/docs/2.2/mod/mod_dir.html
    DirectoryIndex => {
            name => 'DirectoryIndex',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::ITERATE,
            errmsg => 'DirectoryIndex local-url [local-url] ...',
        },
    DirectorySlash => {
            name => 'DirectorySlash',
            req_override => Apache2::Const::OR_ALL,
            args_how => Apache2::Const::FLAG,
            errmsg => 'DirectorySlash On|Off',
        },
);
# Register our interest in a bunch of Apache configuration directives.
# Skipped under automated (CPAN smoker) testing, where no running Apache
# and no mod_perl directive machinery are available.
unless (exists $ENV{AUTOMATED_TESTING}) {
    eval {
        Apache2::Module::add(__PACKAGE__, [
            map {
                # Entries with a full specification are passed through
                # unchanged; anything else gets a default ITERATE spec.
                if (ref($DIRECTIVES{$_}) eq 'HASH') {
                    $DIRECTIVES{$_}
                } else {{
                    name => $_,
                    req_override => Apache2::Const::OR_ALL,
                    args_how => Apache2::Const::ITERATE,
                }}
            } keys %DIRECTIVES
        ]);
    };
    # Registration failure is non-fatal; report it and carry on.
    warn $@ if $@;
}
# Return a human-readable dump of the configuration layers that apply to
# the current request: main server, virtual host (when applicable) and
# per-directory/location. Intended purely for debugging.
sub dump_apache_configuration {
    my $r = shift;
    my $rtn = '';
    my %secs = ();
    my $s = $r->server;
    my $dir_cfg = get_config($s, $r->per_dir_config);
    my $srv_cfg = get_config($s);
    # Number the section labels so the dump sorts in merge order.
    if ($s->is_virtual) {
        $secs{"1: Main Server"}  = get_config(Apache2::ServerUtil->server);
        $secs{"2: Virtual Host"} = $srv_cfg;
        $secs{"3: Location"}     = $dir_cfg;
    } else {
        $secs{"1: Main Server"}  = $srv_cfg;
        $secs{"2: Location"}     = $dir_cfg;
    }
    $rtn .= sprintf("Processing by %s.\n",
            $s->is_virtual ? "virtual host" : "main server");
    require Data::Dumper;
    no warnings 'once';
    local $Data::Dumper::Terse = 1;
    local $Data::Dumper::Deepcopy = 1;
    local $Data::Dumper::Sortkeys = 1;
    # Append (do not assign) so the "Processing by ..." header built
    # above is not discarded from the returned string.
    $rtn .= Data::Dumper::Dumper(\%secs);
    return $rtn;
}
# Thin wrapper around Apache2::Module::get_config() that supplies this
# package name, returning the configuration object for the given server
# (and optional per-directory config) arguments.
sub get_config {
    Apache2::Module::get_config(__PACKAGE__, @_);
}
# Directive handlers invoked by mod_perl for each occurrence of the
# corresponding configuration directive. Each is called with
# ($self, $parms, @directive_args); the handlers delegate to the generic
# set_val/push_val/add_to_key/push_val_on_key storage helpers below.
# Glob-style directive arguments are additionally stored pre-compiled to
# regex form under a parallel "...Regex" key for use at request time.
sub AddAlt {
    # NOTE(review): the joined "string file" value is passed in the key2
    # slot of push_val_on_key with no values, so it becomes a hash key
    # holding an empty list — confirm consumers only iterate the keys.
    push_val_on_key('AddAlt', $_[0], $_[1], join(' ',$_[2],$_[3]));
    push_val_on_key('AddAltRegex', $_[0], $_[1], [( $_[2],glob2regex($_[3]) )]);
}
sub AddAltByEncoding {
    push_val_on_key('AddAltByEncoding', @_);
    push_val_on_key('AddAltByEncodingRegex', $_[0], $_[1], [( $_[2],$_[3] )]);
}
sub AddAltByType {
    push_val_on_key('AddAltByType', @_);
    push_val_on_key('AddAltByTypeRegex', $_[0], $_[1], [( $_[2],$_[3] )]);
}
sub AddDescription {
    # Stored keyed by filename/glob ($_[3]) with the description ($_[2]).
    add_to_key('AddDescription', $_[0], $_[1], $_[3], $_[2]);
}
sub AddIcon {
    push_val('AddIcon', $_[0], $_[1], join(' ',$_[2],$_[3]));
    my $icon = $_[2];
    my $alt = '';
    # The icon argument may be of the form "(ALTTEXT,/path/to/icon)".
    if ($icon =~ /^\s*\(?(\S+?),(\S+?)\)\s*$/) {
        $alt = $1;
        $icon = $2;
    }
    push_val_on_key('AddIconRegex', $_[0], $_[1],
        glob2regex($_[3]), $alt,$icon,
    );
}
sub AddIconByEncoding {
    push_val_on_key('AddIconByEncoding', @_);
    push_val_on_key('AddIconByEncodingRegex', $_[0], $_[1], [( $_[2],$_[3] )]);
}
sub AddIconByType {
    push_val_on_key('AddIconByType', @_);
    push_val_on_key('AddIconByTypeRegex', $_[0], $_[1], [( $_[2],$_[3] )]);
}
sub IndexIgnore {
    push_val('IndexIgnore', @_);
    push_val('IndexIgnoreRegex', $_[0], $_[1], glob2regex($_[2]));
}
# Simple list-valued and single-valued directive handlers.
sub IndexOptions { push_val('IndexOptions', @_) }
sub DirectoryIndex { push_val('DirectoryIndex', @_) }
sub DefaultIcon { set_val('DefaultIcon', @_) }
sub HeaderName { set_val('HeaderName', @_) }
sub IndexOrderDefault { set_val('IndexOrderDefault', @_) }
sub IndexStyleSheet { set_val('IndexStyleSheet', @_) }
sub ReadmeName { set_val('ReadmeName', @_) }
sub DirectorySlash { set_val('DirectorySlash', @_) }
sub FileTypesFilename { set_val('FileTypesFilename', @_) }
sub RenderXSLT { set_val('RenderXSLT', @_) }
sub RenderXSLTEnvVar { set_val('RenderXSLTEnvVar', @_) }
# mod_perl configuration lifecycle hooks: create fresh default config
# objects and merge overlapping scopes.
sub DIR_CREATE { defaults(@_) }
sub SERVER_CREATE { defaults(@_) }
sub SERVER_MERGE { merge(@_); }
sub DIR_MERGE { merge(@_); }
# Store a single-valued directive in the current (per-directory)
# configuration. When the directive appears outside any container
# (i.e. $parms->path is false), mirror it into the server-level
# configuration as well.
sub set_val {
    my ($key, $self, $parms, $value) = @_;
    $self->{$key} = $value;
    return if $parms->path;
    my $server_config = Apache2::Module::get_config($self, $parms->server);
    $server_config->{$key} = $value;
}
# Append one or more values to a list-valued directive in the current
# (per-directory) configuration, mirroring into the server-level
# configuration when set outside any container ($parms->path false).
sub push_val {
    my ($key, $self, $parms, @values) = @_;
    push @{ $self->{$key} }, @values;
    return if $parms->path;
    my $server_config = Apache2::Module::get_config($self, $parms->server);
    push @{ $server_config->{$key} }, @values;
}
# Store a value under a two-level key ($key -> $subkey). The first value
# for a subkey is kept as a plain scalar (or an arrayref when several
# values arrive at once); subsequent additions promote the slot to an
# arrayref and append. Mirrors into the server-level configuration when
# set outside any container ($parms->path false).
sub add_to_key {
    my ($key, $self, $parms, $subkey, @values) = @_;
    my $merge_into = sub {
        my $cfg = shift;
        if (exists $cfg->{$key}->{$subkey}) {
            # Promote an existing scalar to a one-element arrayref first.
            $cfg->{$key}->{$subkey} = [ ( $cfg->{$key}->{$subkey} ) ]
                unless ref $cfg->{$key}->{$subkey};
            push @{ $cfg->{$key}->{$subkey} }, @values;
        }
        else {
            $cfg->{$key}->{$subkey} = @values > 1 ? \@values : $values[0];
        }
    };
    $merge_into->($self);
    unless ($parms->path) {
        $merge_into->( Apache2::Module::get_config($self, $parms->server) );
    }
}
# Append values to a list stored under a two-level key ($key -> $subkey),
# autovivifying the arrayref as needed. Mirrors into the server-level
# configuration when set outside any container ($parms->path false).
sub push_val_on_key {
    my ($key, $self, $parms, $subkey, @values) = @_;
    push @{ $self->{$key}->{$subkey} }, @values;
    return if $parms->path;
    my $server_config = Apache2::Module::get_config($self, $parms->server);
    push @{ $server_config->{$key}->{$subkey} }, @values;
}
# DIR_CREATE/SERVER_CREATE hook: build a fresh configuration object,
# blessed into $class, pre-populated with this module's defaults.
sub defaults {
    my ($class, $parms) = @_;
    my %config = (
        HeaderName        => 'HEADER',
        ReadmeName        => 'FOOTER',
        DirectoryIndex    => [qw(index.html index.shtml)],
        IndexStyleSheet   => '/index.xslt',
        DefaultIcon       => '/icons/__unknown.png',
        IndexIgnore       => [],
        FileTypesFilename => 'filetypes.dat',
    );
    return bless \%config, $class;
}
# DIR_MERGE/SERVER_MERGE hook: combine a base (outer) configuration with
# an add (more specific) configuration into a new object of the same
# class. Modelled on the mod_perl custom-configuration examples:
# http://perl.apache.org/docs/2.0/user/config/custom.html#Examples
sub merge {
    my ($base, $add) = @_;
    my %merged;
    KEY: for my $key (keys %$base, keys %$add) {
        next KEY if exists $merged{$key};
        if ($key eq 'MyPlus') {
            # Numeric accumulation.
            $merged{$key} = ($base->{$key} || 0) + ($add->{$key} || 0);
        }
        elsif ($key eq 'MyList') {
            # List concatenation.
            $merged{$key} = [ @{ $base->{$key} || [] }, @{ $add->{$key} || [] } ];
        }
        elsif ($key eq 'MyAppend') {
            # String append with a space separator.
            $merged{$key} = join ' ', grep { defined } $base->{$key}, $add->{$key};
        }
        else {
            # Override mode: the more specific ($add) layer wins.
            $merged{$key} = $base->{$key} if exists $base->{$key};
            $merged{$key} = $add->{$key}  if exists $add->{$key};
        }
    }
    return bless \%merged, ref $base;
}
1;
=pod
=head1 NAME
Apache2::AutoIndex::XSLT - XSLT Based Directory Listings
=head1 SYNOPSIS
PerlLoadModule Apache2::AutoIndex::XSLT
<Location />
SetHandler perl-script
PerlResponseHandler Apache2::AutoIndex::XSLT
Options +Indexes
IndexStyleSheet /index.xslt
DefaultIcon /icons/__unknown.png
IndexIgnore .*
IndexIgnore index.xslt
IndexIgnore robots.txt
IndexIgnore sitemap.gz
</Location>
=head1 DESCRIPTION
This module is designed as a drop in mod_perl2 replacement for the mod_dir and
mod_index modules. It uses user configurable XSLT stylesheets to generate the
directory listings.
THIS CODE IS INCOMPLETE -- THIS IS A DEVELOPMENT RELEASE!
=head1 CONFIGURATION
This module attempts to emulate as much as the functionality from the Apache
mod_dir and mod_index modules as possible. Some of this is performed directly
by the Apache2::AutoIndex::XSLT module itself, and some through a combination
of the I<options> elements presented in the output XML and the XSLT stylesheet.
As a result, some of these configuration directives will do little or nothing
at all if the XSLT stylesheet used does not use them.
=head2 FileTypesFilename
FileTypesFilename
=head2 RenderXSLT
RenderXSLT On
=head2 RenderXSLTEnvVar
SetEnvIf Remote_Addr . RenderXSLT=On
BrowserMatch "Firefox/(2.0|1.5|1.0.[234567])" !RenderXSLT
BrowserMatch "MSIE [67].0" !RenderXSLT
BrowserMatch "Netscape/8" !RenderXSLT
BrowserMatch "Opera/9" !RenderXSLT
RenderXSLTEnvVar RenderXSLT
=head2 AddAlt
AddAlt "PDF file" *.pdf
AddAlt Compressed *.gz *.zip *.Z
I<AddAlt> provides the alternate text to display for a file, instead of an
icon. File is a file extension, partial filename,
wild-card expression or full filename for files to describe. If String
contains any whitespace, you have to enclose it in quotes (" or '). This
alternate text is displayed if the client is image-incapable, has image
loading disabled, or fails to retrieve the icon.
=head2 AddAltByEncoding
AddAltByEncoding gzip x-gzip
I<AddAltByEncoding> provides the alternate text to display for a file, instead
of an icon. MIME-encoding is a valid content-encoding,
such as x-compress. If String contains any whitespace, you have to enclose it
in quotes (" or '). This alternate text is displayed if the client is
image-incapable, has image loading disabled, or fails to retrieve the icon.
=head2 AddAltByType
AddAltByType 'plain text' text/plain
I<AddAltByType> sets the alternate text to display for a file, instead of an
icon. MIME-type is a valid content-type, such as
text/html. If String contains any whitespace, you have to enclose it in quotes
(" or '). This alternate text is displayed if the client is image-incapable,
has image loading disabled, or fails to retrieve the icon.
=head2 AddDescription
AddDescription "The planet Mars" /web/pics/mars.png
This sets the description to display for a file. File is
a file extension, partial filename, wild-card expression or full filename for
files to describe. String is enclosed in double quotes (").
=head2 AddIcon
AddIcon (IMG,/icons/image.xbm) .gif .jpg .xbm
AddIcon /icons/dir.xbm ^^DIRECTORY^^
AddIcon /icons/backup.xbm *~
This sets the icon to display next to a file ending in name. Icon is either a
(%-escaped) relative URL to the icon, or of
the format (alttext,url) where alttext is the text tag given for an icon for
non-graphical browsers.
Name is either ^^DIRECTORY^^ for directories, ^^BLANKICON^^ for blank lines
(to format the list correctly), a file extension, a wildcard expression, a
partial filename or a complete filename.
I<AddIconByType> should be used in preference to I<AddIcon>, when possible.
=head2 AddIconByEncoding
AddIconByEncoding /icons/compress.xbm x-compress
This sets the icon to display next to files. Icon is
either a (%-escaped) relative URL to the icon, or of the format (alttext,url)
where alttext is the text tag given for an icon for non-graphical browsers.
MIME-encoding is a wildcard expression matching the required content-encoding.
=head2 AddIconByType
AddIconByType (IMG,/icons/image.xbm) image/*
This sets the icon to display next to files of type MIME-type.
Icon is either a (%-escaped) relative URL to the icon, or of
the format (alttext,url) where alttext is the text tag given for an icon for
non-graphical browsers.
MIME-type is a wildcard expression matching the required MIME types.
=head2 DefaultIcon
DefaultIcon /icons/__unknown.png
The I<DefaultIcon> directive sets the icon to display for files when no
specific icon is known. Url-path is a (%-escaped)
relative URL to the icon.
=head2 HeaderName
=head2 IndexIgnore
IndexIgnore README .htindex *.bak *~
The I<IndexIgnore> directive adds to the list of files to hide when listing a
directory. File is a shell-style wildcard expression or full filename. Multiple
I<IndexIgnore> directives add to the list, rather than replacing the list
of ignored files. By default, the list contains . (the current directory).
=head2 IndexOptions
IndexOptions +DescriptionWidth=* +FancyIndexing +FoldersFirst +HTMLTable
IndexOptions +IconsAreLinks +IconHeight=16 +IconWidth=16 +IgnoreCase
IndexOptions +IgnoreClient +NameWidth=* +ScanHTMLTitles +ShowForbidden
IndexOptions +SuppressColumnSorting +SuppressDescription
IndexOptions +SuppressHTMLPreamble +SuppressIcon +SuppressLastModified
IndexOptions +SuppressRules +SuppressSize +TrackModified +VersionSort
IndexOptions +XHTML
The I<IndexOptions> directive specifies the behavior of the directory indexing.
See L<http://httpd.apache.org/docs/2.2/mod/mod_autoindex.html#indexoptions>.
=head2 IndexOrderDefault
IndexOrderDefault Ascending Name
The I<IndexOrderDefault> directive is used in combination with the
I<FancyIndexing> index option. By default, fancyindexed directory listings are
displayed in ascending order by filename; the I<IndexOrderDefault> allows you
to change this initial display order.
I<IndexOrderDefault> takes two arguments. The first must be either Ascending or
Descending, indicating the direction of the sort. The second argument must be
one of the keywords Name, Date, Size, or Description, and identifies the
primary key. The secondary key is always the ascending filename.
You can force a directory listing to only be displayed in a particular order by
combining this directive with the I<SuppressColumnSorting> index option; this
will prevent the client from requesting the directory listing in a different
order.
=head2 IndexStyleSheet
IndexStyleSheet "/css/style.css"
The I<IndexStyleSheet> directive sets the name of the file that will be used as
the CSS for the index listing.
=head2 ReadmeName
ReadmeName FOOTER.html
The I<ReadmeName> directive sets the name of the file that will be appended to
the end of the index listing. Filename is the name of the file to include, and
is taken to be relative to the location being indexed. If Filename begins with
a slash, it will be taken to be relative to the I<DocumentRoot>.
=head2 DirectoryIndex
DirectoryIndex index.html index.shtml
The I<DirectoryIndex> directive sets the list of resources to look for, when
the client requests an index of the directory by specifying a / at the end of
the directory name. Local-url is the (%-encoded) URL of a document on the
server relative to the requested directory; it is usually the name of a file
in the directory. Several URLs may be given, in which case the server will
return the first one that it finds. If none of the resources exist and the
I<Indexes> option is set, the server will generate its own listing of the
directory.
=head2 DirectorySlash
DirectorySlash On
The I<DirectorySlash> directive determines, whether or not to fixup URLs
pointing to a directory or not. With this enabled (which is the default), if a
user requests a resource without a trailing slash, which points to a directory,
the user will be redirected to the same resource, but with trailing slash.
=head1 XSLT STYLESHEET
The XSLT stylesheet will default to I<index.xslt> in the DocumentRoot of the
website. This can be changed using the I<IndexStyleSheet> directive.
An example I<index.xslt> file is bundled with this module in the I<examples/>
directory.
=head1 SEE ALSO
L<Apache::AutoIndex>,
L<http://httpd.apache.org/docs/2.2/mod/mod_autoindex.html>,
L<http://httpd.apache.org/docs/2.2/mod/mod_dir.html>,
examples/*, L<http://bb-207-42-158-85.fallbr.tfb.net/>
=head1 VERSION
$Id$
=head1 AUTHOR
Nicola Worthington <nicolaw@cpan.org>, TFB Technology Ltd.
L<http://perlgirl.org.uk>, L<http://www.tfbtechnology.ltd.uk>, L<http://www.tfb.net>
If you like this software, why not show your appreciation by sending the
author something nice from her
L<Amazon wishlist|http://www.amazon.co.uk/gp/registry/1VZXC59ESWYK0?sort=priority>?
( http://www.amazon.co.uk/gp/registry/1VZXC59ESWYK0?sort=priority )
With special thanks to Jennifer Beattie for helping develop the example XSLT
stylesheets, and writing the I<examples/RegFileTypes.cs> "registered file type"
data and icons extraction program for Windows.
With special thanks to the authors of
L<http://httpd.apache.org/docs/2.2/mod/mod_autoindex.html> from which some
documentation was taken.
=head1 COPYRIGHT
Copyright 2006, 2007 Nicola Worthington.
This software is licensed under The Apache Software License, Version 2.0.
L<http://www.apache.org/licenses/LICENSE-2.0>
=cut
__END__
| neechbear/Apache2-AutoIndex-XSLT | lib/Apache2/AutoIndex/XSLT.pm | Perl | apache-2.0 | 36,831 |
#!/usr/bin/perl
#-------------------------------------------------------------------------------
# BDS generic cluster example
#
# This is a trivial example of the 'cluster generic' interface implementation.
# The commands implemented in this example do NOT really submit
# tasks to a cluster, the tasks are run locally.
# This is intended as a toy example and also used for test-cases.
#
# The script is called when a task is killed
#
# Script's output:
# None
#
# Command line arguments:
# jobId: This is the jobId returned as the first line in 'clusterGenericRun'
# script (i.e. the jobID provided by the cluster management system)
#
# Pablo Cingolani
#-------------------------------------------------------------------------------
#---
# Parse command line arguments
#
# Note: The script 'run.pl' prepends 'CLUSTERGENERIC_LOCALHOST_' to the
# pid, so we have to remove that
#---
# The job id is the first (and only) command line argument.
my $jobId = shift @ARGV;
# Fail loudly when invoked without a job id rather than running
# "kill -9" with an empty target.
unless (defined $jobId && $jobId ne '') {
    print STDERR "Usage: kill.pl <jobId>\n";
    exit(1);
}
# Strip the 'CLUSTERGENERIC_LOCALHOST_' prefix that run.pl prepends;
# what remains is the local process id.
if( $jobId =~ /CLUSTERGENERIC_LOCALHOST_(.*)/ ) { $jobId = $1; }
#---
# Execute cluster command to kill task.
# Note: In this case the 'cluster' is just the localhost
#---
# List-form system() so the externally supplied job id is never passed
# through a shell.
system('kill', '-9', $jobId);
# OK
exit(0);
| leepc12/BigDataScript | config/clusterGeneric_localhost/kill.pl | Perl | apache-2.0 | 1,215 |
=pod
=head1 NAME
BIO_set_callback_ex, BIO_get_callback_ex, BIO_set_callback, BIO_get_callback,
BIO_set_callback_arg, BIO_get_callback_arg, BIO_debug_callback,
BIO_callback_fn_ex, BIO_callback_fn
- BIO callback functions
=head1 SYNOPSIS
#include <openssl/bio.h>
typedef long (*BIO_callback_fn_ex)(BIO *b, int oper, const char *argp,
size_t len, int argi,
long argl, int ret, size_t *processed);
typedef long (*BIO_callback_fn)(BIO *b, int oper, const char *argp, int argi,
long argl, long ret);
void BIO_set_callback_ex(BIO *b, BIO_callback_fn_ex callback);
BIO_callback_fn_ex BIO_get_callback_ex(const BIO *b);
 void BIO_set_callback(BIO *b, BIO_callback_fn cb);
 BIO_callback_fn BIO_get_callback(BIO *b);
void BIO_set_callback_arg(BIO *b, char *arg);
char *BIO_get_callback_arg(const BIO *b);
long BIO_debug_callback(BIO *bio, int cmd, const char *argp, int argi,
long argl, long ret);
=head1 DESCRIPTION
BIO_set_callback_ex() and BIO_get_callback_ex() set and retrieve the BIO
callback. The callback is called during most high level BIO operations. It can
be used for debugging purposes to trace operations on a BIO or to modify its
operation.
BIO_set_callback() and BIO_get_callback() set and retrieve the old format BIO
callback. New code should not use these functions, but they are retained for
backwards compatibility. Any callback set via BIO_set_callback_ex() will get
called in preference to any set by BIO_set_callback().
BIO_set_callback_arg() and BIO_get_callback_arg() are macros which can be
used to set and retrieve an argument for use in the callback.
BIO_debug_callback() is a standard debugging callback which prints
out information relating to each BIO operation. If the callback
argument is set it is interpreted as a BIO to send the information
to, otherwise stderr is used.
BIO_callback_fn_ex() is the type of the callback function and BIO_callback_fn()
is the type of the old format callback function. The meaning of each argument
is described below:
=over
=item B<b>
The BIO the callback is attached to is passed in B<b>.
=item B<oper>
B<oper> is set to the operation being performed. For some operations
the callback is called twice, once before and once after the actual
operation, the latter case has B<oper> or'ed with BIO_CB_RETURN.
=item B<len>
The length of the data requested to be read or written. This is only useful if
B<oper> is BIO_CB_READ, BIO_CB_WRITE or BIO_CB_GETS.
=item B<argp> B<argi> B<argl>
The meaning of the arguments B<argp>, B<argi> and B<argl> depends on
the value of B<oper>, that is the operation being performed.
=item B<processed>
B<processed> is a pointer to a location which will be updated with the amount of
data that was actually read or written. Only used for BIO_CB_READ, BIO_CB_WRITE,
BIO_CB_GETS and BIO_CB_PUTS.
=item B<ret>
B<ret> is the return value that would be returned to the
application if no callback were present. The actual value returned
is the return value of the callback itself. In the case of callbacks
called before the actual BIO operation 1 is placed in B<ret>, if
the return value is not positive it will be immediately returned to
the application and the BIO operation will not be performed.
=back
The callback should normally simply return B<ret> when it has
finished processing, unless it specifically wishes to modify the
value returned to the application.
=head1 CALLBACK OPERATIONS
In the notes below, B<callback> refers to the actual callback
function that is called.
=over 4
=item B<BIO_free(b)>
callback_ex(b, BIO_CB_FREE, NULL, 0, 0, 0L, 1L, NULL)
or
callback(b, BIO_CB_FREE, NULL, 0L, 0L, 1L)
is called before the free operation.
=item B<BIO_read_ex(b, data, dlen, readbytes)>
callback_ex(b, BIO_CB_READ, data, dlen, 0, 0L, 1L, readbytes)
or
callback(b, BIO_CB_READ, data, dlen, 0L, 1L)
is called before the read and
callback_ex(b, BIO_CB_READ | BIO_CB_RETURN, data, dlen, 0, 0L, retvalue, readbytes)
or
callback(b, BIO_CB_READ|BIO_CB_RETURN, data, dlen, 0L, retvalue)
after.
=item B<BIO_write(b, data, dlen, written)>
callback_ex(b, BIO_CB_WRITE, data, dlen, 0, 0L, 1L, written)
or
 callback(b, BIO_CB_WRITE, data, dlen, 0L, 1L)
is called before the write and
callback_ex(b, BIO_CB_WRITE | BIO_CB_RETURN, data, dlen, 0, 0L, retvalue, written)
or
callback(b, BIO_CB_WRITE|BIO_CB_RETURN, data, dlen, 0L, retvalue)
after.
=item B<BIO_gets(b, buf, size)>
callback_ex(b, BIO_CB_GETS, buf, size, 0, 0L, 1, NULL, NULL)
or
callback(b, BIO_CB_GETS, buf, size, 0L, 1L)
is called before the operation and
callback_ex(b, BIO_CB_GETS | BIO_CB_RETURN, buf, size, 0, 0L, retvalue, readbytes)
or
callback(b, BIO_CB_GETS|BIO_CB_RETURN, buf, size, 0L, retvalue)
after.
=item B<BIO_puts(b, buf)>
callback_ex(b, BIO_CB_PUTS, buf, 0, 0, 0L, 1L, NULL);
or
callback(b, BIO_CB_PUTS, buf, 0, 0L, 1L)
is called before the operation and
callback_ex(b, BIO_CB_PUTS | BIO_CB_RETURN, buf, 0, 0, 0L, retvalue, written)
or
 callback(b, BIO_CB_PUTS|BIO_CB_RETURN, buf, 0, 0L, retvalue)
after.
=item B<BIO_ctrl(BIO *b, int cmd, long larg, void *parg)>
callback_ex(b, BIO_CB_CTRL, parg, 0, cmd, larg, 1L, NULL)
or
callback(b, BIO_CB_CTRL, parg, cmd, larg, 1L)
is called before the call and
callback_ex(b, BIO_CB_CTRL | BIO_CB_RETURN, parg, 0, cmd, larg, ret, NULL)
or
callback(b, BIO_CB_CTRL|BIO_CB_RETURN, parg, cmd, larg, ret)
after.
=back
=head1 EXAMPLE
The BIO_debug_callback() function is a good example, its source is
in crypto/bio/bio_cb.c
=head1 COPYRIGHT
Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| openweave/openweave-core | third_party/openssl/openssl/doc/crypto/BIO_set_callback.pod | Perl | apache-2.0 | 5,989 |
use JSON qw( encode_json );
# Verify that an inline JSON filter passed directly in the /sync query
# string is honoured: with the timeline limited to 0 events and state
# restricted to m.room.member, a freshly created room must sync back with
# an empty timeline and exactly one member state event.
test "Can pass a JSON filter as a query parameter",
   requires => [ local_user_fixture() ],

   check => sub {
      my ( $user ) = @_;

      my ( $room_id );

      matrix_create_room_synced( $user )->then( sub {
         ( $room_id ) = @_;

         # Filter supplied inline (URL-encoded JSON) rather than by
         # a previously registered filter id.
         matrix_sync( $user, filter => encode_json( {
            room => {
               state => { types => [ "m.room.member" ] },
               timeline => { limit => 0 },
            }
         }));
      })->then( sub {
         my ( $body ) = @_;

         my $room = $body->{rooms}{join}{$room_id};

         assert_json_empty_list( $room->{timeline}{events} );

         @{ $room->{state}{events} } == 1
            or die "Expected a single state event because of the filter";

         $room->{state}{events}[0]{type} eq "m.room.member"
            or die "Expected a single member event because of the filter";

         Future->done(1);
      });
   };
# Verify that a stored filter with event_format 'federation' makes /sync
# return events in federation (PDU) format: the timeline event must carry
# the federation-only keys (prev_events, auth_events, depth, hashes,
# signatures, origin, ...) in addition to the usual client fields.
test "Can request federation format via the filter",
   requires => [ local_user_fixture( with_events => 0 ),
                 qw( can_sync ) ],

   check => sub {
      my ( $user ) = @_;

      my ( $filter_id, $room_id, $event_id_1 );

      # Limit the timeline to one event so only the message sent below
      # is returned.
      my $filter = {
         event_format => 'federation',
         room => { timeline => { limit => 1 } },
      };

      matrix_create_filter( $user, $filter )->then( sub {
         ( $filter_id ) = @_;

         matrix_create_room_synced( $user )
      })->then( sub {
         ( $room_id ) = @_;

         matrix_send_room_text_message_synced( $user, $room_id,
            body => "Test message",
         );
      })->then( sub {
         ( $event_id_1 ) = @_;

         matrix_sync( $user, filter => $filter_id );
      })->then( sub {
         my ( $body ) = @_;

         my $room = $body->{rooms}{join}{$room_id};

         log_if_fail "sync room result", $room;

         assert_json_keys( $room, qw( timeline state ephemeral ));
         assert_json_keys( $room->{timeline}, qw( events limited prev_batch ));
         assert_eq( scalar @{ $room->{timeline}{events} }, 1, "timeline event count" );

         # Federation-format events expose the raw PDU fields.
         assert_json_keys(
            $room->{timeline}{events}[0], qw(
               event_id content room_id sender origin origin_server_ts type
               prev_events auth_events depth hashes signatures
            )
         );
         assert_eq( $room->{timeline}{events}[0]{content}{body}, "Test message", "timeline message" );
         assert_eq( $room->{timeline}{events}[0]{event_id}, $event_id_1, "timeline event id" );

         Future->done(1);
      });
   };
| matrix-org/sytest | tests/31sync/13filtered_sync.pl | Perl | apache-2.0 | 2,574 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::azure::network::frontdoor::mode::requests;
use base qw(cloud::azure::custom::mode);
use strict;
use warnings;
# Declare the Azure Monitor metrics handled by this mode and how each one
# maps to plugin output/labels. All three Front Door request metrics are
# plain counters: no unit, a lower bound of 0 and no upper bound.
sub get_metrics_mapping {
    my ($self, %options) = @_;

    my %common = ('unit' => '', 'min' => '0', 'max' => '');
    my $metrics_mapping = {
        'backendrequestcount' => {
            'output' => 'Backend Requests Count',
            'label'  => 'backend-requests-count',
            'nlabel' => 'frontdoor.requests.backend.count',
            %common
        },
        'requestcount' => {
            'output' => 'Total Requests Count',
            'label'  => 'total-requests-count',
            'nlabel' => 'frontdoor.requests.total.count',
            %common
        },
        'webapplicationfirewallrequestcount' => {
            'output' => 'WAF Requests Count',
            'label'  => 'waf-requests-count',
            'nlabel' => 'frontdoor.requests.waf.count',
            %common
        }
    };

    return $metrics_mapping;
}
# Constructor: build on the generic Azure custom mode and register the
# command line options specific to this mode (metric filter and the
# resource / resource-group selectors).
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'filter-metric:s' => { name => 'filter_metric' },
        'resource:s' => { name => 'resource' },
        'resource-group:s' => { name => 'resource_group' }
    });

    return $self;
}
# Validate options and derive the Azure API query parameters
# (az_resource*, az_timeframe, az_interval, az_aggregations, az_metrics).
# Exits with a usage error when no resource is supplied.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    if (!defined($self->{option_results}->{resource}) || $self->{option_results}->{resource} eq '') {
        $self->{output}->add_option_msg(short_msg => 'Need to specify either --resource <name> with --resource-group option or --resource <id>.');
        $self->{output}->option_exit();
    }

    my $resource = $self->{option_results}->{resource};
    my $resource_group = defined($self->{option_results}->{resource_group}) ? $self->{option_results}->{resource_group} : '';
    # A full Azure resource id embeds both the resource group and the
    # Front Door name; extract them when one is supplied.
    if ($resource =~ /^\/subscriptions\/.*\/resourceGroups\/(.*)\/providers\/Microsoft\.Network\/frontdoors\/(.*)$/) {
        $resource_group = $1;
        $resource = $2;
    }

    $self->{az_resource} = $resource;
    $self->{az_resource_group} = $resource_group;
    $self->{az_resource_type} = 'frontdoors';
    $self->{az_resource_namespace} = 'Microsoft.Network';
    # Defaults: 15 minute lookback, 5 minute sampling interval.
    $self->{az_timeframe} = defined($self->{option_results}->{timeframe}) ? $self->{option_results}->{timeframe} : 900;
    $self->{az_interval} = defined($self->{option_results}->{interval}) ? $self->{option_results}->{interval} : 'PT5M';
    # Aggregations default to 'Total'; user-supplied values are
    # normalized to the capitalization the Azure API expects.
    $self->{az_aggregations} = ['Total'];
    if (defined($self->{option_results}->{aggregation})) {
        $self->{az_aggregations} = [];
        foreach my $stat (@{$self->{option_results}->{aggregation}}) {
            if ($stat ne '') {
                push @{$self->{az_aggregations}}, ucfirst(lc($stat));
            }
        }
    }

    # Only query the metrics that pass the optional --filter-metric regex.
    foreach my $metric (keys %{$self->{metrics_mapping}}) {
        next if (defined($self->{option_results}->{filter_metric}) && $self->{option_results}->{filter_metric} ne ''
            && $metric !~ /$self->{option_results}->{filter_metric}/);

        push @{$self->{az_metrics}}, $metric;
    }
}
1;
__END__
=head1 MODE
Check Azure Front Door requests count.
Example:
Using resource name :
perl centreon_plugins.pl --plugin=cloud::azure::network::frontdoor::plugin --mode=requests --custommode=api
--resource=<frontdoor_id> --resource-group=<resourcegroup_id> --aggregation='total'
--warning-total-requests-count='3000' --critical-total-requests-count='4000'
Using resource id :
perl centreon_plugins.pl --plugin=cloud::azure::network::frontdoor::plugin --mode=requests --custommode=api
--resource='/subscriptions/<subscription_id>/resourceGroups/<resourcegroup_id>/providers/Microsoft.Network/frontdoors/<frontdoor_id>'
--aggregation='total' --warning-total-requests-count='3000' --critical-total-requests-count='4000'
Default aggregation: 'total' / 'minimum', 'maximum' and 'average' are valid.
=over 8
=item B<--resource>
Set resource name or id (Required).
=item B<--resource-group>
Set resource group (Required if resource's name is used).
=item B<--warning-*>
Warning threshold where '*' can be:
'waf-requests-count', 'backend-request-count', 'total-requests-count'.
=item B<--critical-*>
Critical threshold where '*' can be:
'waf-requests-count', 'backend-request-count', 'total-requests-count'.
=back
=cut
| centreon/centreon-plugins | cloud/azure/network/frontdoor/mode/requests.pm | Perl | apache-2.0 | 5,348 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Common::BasicUserListInfo;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructor for a BasicUserListInfo value object. Copies the single
# recognized field ('actions') out of the argument hashref, drops any
# field that was not supplied, and blesses into this class.
sub new {
  my ($class, $args) = @_;
  my $instance = {
    actions => $args->{actions},
  };
  # Strip undefined fields so the serialized JSON payload stays minimal.
  remove_unassigned_fields($instance, $args);
  return bless $instance, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Common/BasicUserListInfo.pm | Perl | apache-2.0 | 1,020 |
=encoding utf-8
=head1 Name
lua-resty-lrucache - in-Lua LRU Cache based on LuaJIT FFI
=head1 Status
This library is still under active development and is considered production ready.
=head1 Synopsis
-- file myapp.lua: example "myapp" module
local _M = {}
-- alternatively: local lrucache = require "resty.lrucache.pureffi"
local lrucache = require "resty.lrucache"
-- we need to initialize the cache on the lua module level so that
-- it can be shared by all the requests served by each nginx worker process:
local c, err = lrucache.new(200) -- allow up to 200 items in the cache
if not c then
return error("failed to create the cache: " .. (err or "unknown"))
end
function _M.go()
c:set("dog", 32)
c:set("cat", 56)
ngx.say("dog: ", c:get("dog"))
ngx.say("cat: ", c:get("cat"))
c:set("dog", { age = 10 }, 0.1) -- expire in 0.1 sec
c:delete("dog")
end
return _M
# nginx.conf
http {
lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";
server {
listen 8080;
location = /t {
content_by_lua '
require("myapp").go()
';
}
}
}
=head1 Description
This library implements a simple LRU cache for L<OpenResty|http://openresty.org> and the L<ngx_lua|https://github.com/chaoslawful/lua-nginx-module> module.
This cache also supports expiration time.
The LRU cache resides completely in the Lua VM and is subject to Lua GC. So do not expect
it to get shared across the OS process boundary. The upside is that you can cache
arbitrary complex Lua values (like deep nested Lua tables) without the overhead of
serialization (as with C<ngx_lua>'s
L<shared dictionary API|https://github.com/chaoslawful/lua-nginx-module#lua_shared_dict>).
The downside is that your cache is always limited to the current OS process
(like the current nginx worker process). It does not really make much sense to use this
library in the context of L<init_by_lua|https://github.com/chaoslawful/lua-nginx-module#lua_shared_dict>
because the cache will not get shared by any of the worker processes
(unless you just want to "warm up" the cache with predefined items which will get
inherited by the workers via C<fork>).
There are two different implementations included in this library, in the form of
two classes: C<resty.lrucache> and C<resty.lrucache.pureffi>. They share exactly the same API. The only difference is that the latter
is a pure FFI implementation that also implements an FFI-based hash table
for the cache lookup while the former uses native Lua tables for it.
If the cache hit rate is relatively high, you should use the C<resty.lrucache> class which is faster than C<resty.lrucache.pureffi>.
But if the cache hit rate is relatively low and there can be a I<lot> of
variations of keys inserted into and removed from the cache, then you should use the C<resty.lrucache.pureffi> instead, because
Lua tables are not good at removing keys frequently by design and you
would see the C<resizetab> function call in the LuaJIT runtime being very hot in
L<on-CPU flame graphs|https://github.com/openresty/stapxx#lj-lua-stacks> if
you use the C<resty.lrucache> class instead of C<resty.lrucache.pureffi> in this use case.
=head1 Methods
To load this library,
=over
=item 1.
you need to specify this library's path in ngx_lua's L<lua_package_path|https://github.com/chaoslawful/lua-nginx-module#lua_package_path> directive. For example, C<lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";>.
=item 2.
you use C<require> to load the library into a local Lua variable:
=back
local lrucache = require "resty.lrucache"
or
local lrucache = require "resty.lrucache.pureffi"
=head2 new
C<syntax: cache, err = lrucache.new(max_items [, load_factor])>
Creates a new cache instance. If failed, returns C<nil> and a string describing the error.
The C<max_items> argument specifies the maximal number of items held in the cache.
The C<load-factor> argument designates the "load factor" of the FFI-based hash-table used internally by C<resty.lrucache.pureffi>;
the default value is 0.5 (i.e. 50%); if the load factor is specified, it will be clamped
to the range of C<[0.1, 1]> (i.e. if load factor is greater than 1, it will be saturated to
1; likewise, if load-factor is smaller than C<0.1>, it will be clamped to C<0.1>). This argument is only meaningful for C<resty.lrucache.pureffi>.
=head2 set
C<syntax: cache:set(key, value, ttl)>
Sets a key with a value and an expiration time.
The C<ttl> argument specifies the expiration time period. The time value is in seconds, but you can also specify the fraction number part, like C<0.25>. A nil C<ttl> argument value means never expired (which is the default).
When the cache is full, the cache will automatically evict the least recently used item.
=head2 get
C<syntax: data, stale_data = cache:get(key)>
Fetches a value with the key. If the key does not exist in the cache or has already expired, a C<nil> value will be returned.
Starting from C<v0.03>, the stale data is also returned as the second return value if available.
=head2 delete
C<syntax: cache:delete(key)>
Removes an item specified by the key from the cache.
=head1 Prerequisites
=over
=item *
L<LuaJIT|http://luajit.org> 2.0+
=item *
L<ngx_lua|https://github.com/chaoslawful/lua-nginx-module> 0.8.10+
=back
=head1 Installation
It is recommended to use the latest L<ngx_openresty bundle|http://openresty.org> directly. At least ngx_openresty 1.4.2.9 is required. And you need to enable LuaJIT when building your ngx_openresty
bundle by passing the C<--with-luajit> option to its C<./configure> script. No extra Nginx configuration is required.
If you want to use this library with your own Nginx build (with ngx_lua), then you need to
ensure you are using at least ngx_lua 0.8.10.
Also, you need to configure
the L<lua_package_path|https://github.com/chaoslawful/lua-nginx-module#lua_package_path> directive to
add the path of your lua-resty-lrucache source tree to ngx_lua's Lua module search path, as in
# nginx.conf
http {
lua_package_path "/path/to/lua-resty-lrucache/lib/?.lua;;";
...
}
and then load the library in Lua:
local lrucache = require "resty.lrucache"
=head1 TODO
=over
=item *
add new method C<get_stale> for fetching already expired items.
=item *
add new method C<flush_all> for flushing out everything in the cache.
=back
=head1 Community
=head2 English Mailing List
The L<openresty-en|https://groups.google.com/group/openresty-en> mailing list is for English speakers.
=head2 Chinese Mailing List
The L<openresty|https://groups.google.com/group/openresty> mailing list is for Chinese speakers.
=head1 Bugs and Patches
Please report bugs or submit patches by
=over
=item 1.
creating a ticket on the L<GitHub Issue Tracker|http://github.com/agentzh/lua-resty-lrucache/issues>,
=item 2.
or posting to the L<OpenResty community>.
=back
=head1 Author
Yichun "agentzh" Zhang (章亦春) E<lt>agentzh@gmail.comE<gt>, CloudFlare Inc.
Shuxin Yang, CloudFlare Inc.
=head1 Copyright and License
This module is licensed under the BSD license.
Copyright (C) 2014-2015, by Yichun "agentzh" Zhang, CloudFlare Inc.
Copyright (C) 2014-2015, by Shuxin Yang, CloudFlare Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
=over
=item *
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
=back
=over
=item *
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
=back
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=head1 See Also
=over
=item *
the ngx_lua module: https://github.com/chaoslawful/lua-nginx-module
=item *
OpenResty: http://openresty.org
=back
| LomoX-Offical/nginx-openresty-windows | src/pod/lua-resty-lrucache-0.04/lua-resty-lrucache-0.04.pod | Perl | bsd-2-clause | 9,015 |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright (C) 2013 Digium, Inc.
# All Rights Reserved.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# @Package: AsteriskPl
# @Authors: Erin Spiceland <espiceland@digium.com>
#
# See http://www.asterisk.org for more information about
# the Asterisk project. Please do not directly contact
# any of the maintainers of this project for assistance;
# the project provides a web site, mailing lists and IRC
# channels for your use.
#
# This program is free software, distributed under the terms
# detailed in the the LICENSE file at the top of the source tree.
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
package AsteriskPl::Endpoint;
# Construct a new Endpoint wrapper.
# %args must contain 'api': an AsteriskPl::AsteriskRESTAPI instance used
# to issue REST calls for this endpoint. Dies if it is missing or of the
# wrong class. Returns a blessed hashref with object_id preset to 1.
sub new {
    # Definition of Endpoint object
    my ($class, %args) = @_;
    $args{'object_id'} = 1;
    if (ref $args{'api'} ne 'AsteriskPl::AsteriskRESTAPI') {
        # Fixed error text: this is the Endpoint constructor and the missing
        # dependency is the AsteriskRESTAPI client (the original message
        # referred to a non-existent "AsteriskPl::Asterisk" instance).
        die("Can't make new AsteriskPl::Endpoint instance with no "
            . "AsteriskPl::AsteriskRESTAPI instance.");
    }
    return bless \%args, $class;
}
# Accessor: return this object's numeric identifier.
sub get_id {
    my ($self) = @_;
    return $self->{'object_id'};
}
sub add_event_handler {
# Add an event handler for Stasis events on this object.
# For general events, use Asterisk.add_event_handler instead.
# NOTE(review): stub — the arguments are unpacked but never stored or
# registered, so calling this currently has no effect. TODO: implement.
my $self = shift;
my $event_name = shift;
my $handler = shift;
}
sub remove_event_handler {
# Remove an event handler for Stasis events on this object.
# For general events, use Asterisk.remove_event_handler instead.
# NOTE(review): stub — the arguments are unpacked but nothing is ever
# unregistered, so calling this currently has no effect. TODO: implement.
my $self = shift;
my $event_name = shift;
my $handler = shift;
}
# Asterisk endpoints; list available endpoints.
# Issues GET /endpoints through the attached REST API client and returns
# the client's result. Fixes in this revision: the undeclared package
# globals $params/$is_success are now lexicals, the never-used $params
# hash is gone, and the hard-coded "$is_success = 1;" that silently
# discarded the API result (masking failures) has been removed.
sub get_endpoints {
    my $self = shift;
    my $is_success = $self->{'api'}->call({
        'path' => '/endpoints',
        'http_method' => 'GET'
    });
    return $is_success;
}
# Single endpoint; details for this endpoint.
# Issues GET /endpoints/%s (with this object's id) through the attached
# REST API client and returns the client's result. Same fixes as
# get_endpoints: lexical variable, dead $params removed, and the
# hard-coded success value that masked API failures is gone.
sub get_endpoint {
    my $self = shift;
    my $is_success = $self->{'api'}->call({
        'path' => '/endpoints/%s',
        'http_method' => 'GET',
        'object_id' => $self->{'object_id'}
    });
    return $is_success;
}
1;
| asterisk/asterisk_rest_libraries | perl/lib/AsteriskPl/Endpoint.pm | Perl | bsd-3-clause | 2,147 |
#!/usr/bin/perl
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
use strict;
use warnings;
use Getopt::Long;
use Brocade::BSC;
use Brocade::BSC::Const qw(/ETH_TYPE/ /IP_/);
use Brocade::BSC::Node::OF::Switch;
use Brocade::BSC::Node::OF::FlowEntry;
use Brocade::BSC::Node::OF::Match;
use Brocade::BSC::Node::OF::Action::Output;
# ----------------------------------------------------------------------
# Demo parameters: OpenFlow match-field values and flow identifiers used
# throughout this demo. Constants ($ETH_TYPE_IPv4 etc.) are exported by
# Brocade::BSC::Const above.
# ----------------------------------------------------------------------
my $configfile = "";
my $status = undef;
my $flowinfo = undef;
my $ethtype = $ETH_TYPE_IPv4;
my $eth_src = "00:00:00:11:23:ae";
my $eth_dst = "00:ff:20:01:1a:3d";
my $ipv4_src = "17.1.2.3/8";
my $ipv4_dst = "172.168.5.6/18";
my $ip_proto = $IP_PROTO_ICMP;
my $ip_dscp = $IP_DSCP_CS2; # Class Selector 2 'Immediate'
my $ip_ecn = $IP_ECN_CE; # Congestion Encountered
my $icmpv4_type = 6; # Alternate Host Address (deprecated)
my $icmpv4_code = 3; # huh? type 6 ever had sub-types??
my $input_port = 10;
my $table_id = 0;
my $flow_id = 18;
my $flow_priority = 1009;
# Only command-line option is the path to the controller config file.
GetOptions("config=s" => \$configfile) or die ("Command line args");
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
print ("<<< Demo Start\n");
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n");
# Connect to the controller and to the OpenFlow switch it manages.
# NOTE(review): indirect-object syntax ("new Class(...)") is used here and
# below — Brocade::BSC->new(...) would be the safer modern form.
my $bvc = new Brocade::BSC(cfgfile => $configfile);
my $ofswitch = new Brocade::BSC::Node::OF::Switch(cfgfile => $configfile,
ctrl => $bvc);
print "<<< 'Controller': $bvc->{ipAddr}, 'OpenFlow' switch: $ofswitch->{name}\n\n";
# Describe the flow we are about to push, for the demo transcript.
print "<<< Set OpenFlow flow on the Controller\n";
printf " Match: Ethernet Type (0x%04x)\n", $ethtype;
print " Ethernet Source Address ($eth_src)\n";
print " Ethernet Destination Address ($eth_dst)\n";
print " IPv4 Source Address ($ipv4_src)\n";
print " IPv4 Destination Address ($ipv4_dst)\n";
print " IP Protocol Number ($ip_proto)\n";
print " IP DSCP ($ip_dscp)\n";
print " IP ECN ($ip_ecn)\n";
print " ICMPv4 Type ($icmpv4_type)\n";
print " ICMPv4 Code ($icmpv4_code)\n";
print " Input Port ($input_port)\n";
print " Action: Output (NORMAL)\n\n";
# Build the flow entry: table/id/priority, one apply-actions instruction,
# and the match fields listed above.
my $flowentry = new Brocade::BSC::Node::OF::FlowEntry;
$flowentry->table_id($table_id);
$flowentry->id($flow_id);
$flowentry->priority($flow_priority);
# # --- Instruction: 'Apply-action'
# # Action: 'Output' NORMAL
my $instruction = $flowentry->add_instruction(0);
my $action = new Brocade::BSC::Node::OF::Action::Output(order => 0,
port => 'NORMAL');
$instruction->apply_actions($action);
# # --- Match Fields
my $match = new Brocade::BSC::Node::OF::Match();
$match->eth_type($ethtype);
$match->eth_src($eth_src);
$match->eth_dst($eth_dst);
$match->ipv4_src($ipv4_src);
$match->ipv4_dst($ipv4_dst);
$match->ip_proto($ip_proto);
$match->ip_dscp($ip_dscp);
$match->ip_ecn($ip_ecn);
$match->icmpv4_type($icmpv4_type);
$match->icmpv4_code($icmpv4_code);
$match->in_port($input_port);
$flowentry->add_match($match);
print "<<< Flow to send:\n";
print $flowentry->get_payload() . "\n\n";
# Push the flow to the controller; abort the demo on any REST failure.
$status = $ofswitch->add_modify_flow($flowentry);
$status->ok or die "!!! Demo terminated, reason: ${\$status->msg}\n";
print "<<< Flow successfully added to the Controller\n\n";
# Read the flow back from the controller's configuration store.
($status, $flowinfo) = $ofswitch->get_configured_flow($table_id, $flow_id);
$status->ok or die "!!! Demo terminated, reason: ${\$status->msg}\n";
print "<<< Flow successfully read from the Controller\n";
print "Flow info:\n";
# NOTE(review): JSON is used here without an explicit "use JSON;" in this
# file — presumably loaded by the Brocade::BSC modules. TODO confirm.
print JSON->new->pretty->encode(JSON::decode_json($flowinfo)) . "\n";
# Clean up: remove the demo flow again.
print "<<< Delete flow with id of '$flow_id' from the Controller's cache\n";
print " and from table '$table_id' on the '$ofswitch->{name}' node\n\n";
$status = $ofswitch->delete_flow($flowentry->table_id,
$flowentry->id);
$status->ok or die "!!! Demo terminated, reason: ${\$status->msg}\n";
print "<<< Flow successfully removed from the Controller\n";
print ("\n");
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
print (">>> Demo End\n");
| BRCDcomm/perlbscsamples | 1.3.0/sampleopenflow/demos/demo11.pl | Perl | bsd-3-clause | 5,826 |
#!/usr/bin/env perl
# Copyright (c) 2013-, Simon Lundström <simlu@su.se>, IT Services, Stockholm University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of Stockholm University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
use strict;
use warnings;
use JSON;
# Try each candidate module name in turn and return the first one that
# loads successfully. Dies with the last load error if none can be loaded.
sub load_module {
    my @candidates = @_;
    for my $candidate (@candidates) {
        eval "use $candidate";
        return $candidate unless $@;
    }
    die $@;
}
# Resolve the plugin framework at compile time: try the legacy
# Nagios::Plugin first, then its drop-in successor Monitoring::Plugin.
# The winning class name is kept and instantiated below.
my $monitoring_plugin;
BEGIN {
$monitoring_plugin = load_module('Nagios::Plugin', 'Monitoring::Plugin');
}
use Data::Dumper;
use LWP::UserAgent;
# Build the plugin object: short name, usage banner, version, default
# timeout, and the long help text (the qq(...) block is user-visible
# output and must not be altered).
my $np = $monitoring_plugin->new(
shortname => "#",
usage => "Usage: %s [-v|--verbose] [-t <timeout>] [--critical=<value to emit critical>] [--warning=<value to emit warning>] --one-of-the-checks-below",
version => "1.3.1",
timeout => 10,
extra => qq(
See <https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT> for
information on how to use thresholds.
The STATUS label can have three values:
* green - All primary and replica shards are allocated. Your cluster is 100%
operational.
* yellow - All primary shards are allocated, but at least one replica is
missing. No data is missing, so search results will still be complete. However,
your high availability is compromised to some degree. If more shards disappear,
you might lose data. Think of yellow as a warning that should prompt
investigation.
* red - At least one primary shard (and all of its replicas) are missing. This
means that you are missing data: searches will return partial results, and
indexing into that shard will return an exception.
The defaults has been been taken from
<https://www.elastic.co/guide/en/elasticsearch/guide/current/_cluster_health.html>
),
);
# --- Check-mode switches (exactly one is expected on the command line) ---
$np->add_arg(
spec => 'cluster-status',
help => "--cluster-status\n Check the status of the cluster.",
);
$np->add_arg(
spec => 'index-status',
help => "--index-status\n Check the status of the indexes.",
);
$np->add_arg(
spec => 'nodes-online',
help => "--nodes-online\n Check the number of nodes online.",
);
$np->add_arg(
spec => 'split-brain',
help => "--split-brain\n Check if the cluster has a split-brain.",
);
# --- Thresholds: integers for nodes-online, status words for the rest ---
$np->add_arg(
spec => 'warning|w=s',
help => [
'Set the warning threshold in INTEGER (applies to nodes-online)',
'Set the warning threshold in STATUS (applies to cluster-status and index-status)',
],
label => [ 'INTEGER', 'STATUS' ],
);
$np->add_arg(
spec => 'critical|c=s',
help => [
'Set the critical threshold in INTEGER (applies to nodes-online)',
'Set the critical threshold in STATUS (applies to cluster-status and index-status)',
],
label => [ 'INTEGER', 'STATUS' ],
);
# --- Connection settings ---
$np->add_arg(
spec => 'url=s',
help => "--url\n URL to your Elasticsearch instance. (default: %s)",
default => 'http://localhost:9200',
);
$np->add_arg(spec => 'username|user|u=s',
help => "Username for authentication",
default => "",
);
$np->add_arg(spec => 'password|p=s',
help => "Password for authentication",
default => ""
);
# Parse @ARGV; exits with usage on error.
$np->getopts;
# Map Elasticsearch health colors to ordered numbers (worst -> best) so
# they can be compared with the numeric threshold machinery below.
my %ES_STATUS = (
"red" => 1,
"yellow" => 2,
"green" => 3,
);
# File-wide state: user-supplied thresholds, last check code, parsed body.
my ($warning, $critical) = ($np->opts->warning, $np->opts->critical);
my $code;
my $json;
# Turn an arrayref into human-friendly text: "a", "a & b", "a, b & c".
# Returns '' for an empty list.
# Fix: the parameter was previously a lexical named $a, which masks
# sort's package variable $a — renamed to a neutral identifier.
sub pretty_join {
    my ($items) = @_;
    my @list = @{$items};
    return '' if @list == 0;
    return $list[0] if @list == 1;
    my $last = pop @list;
    return join(', ', @list) . ' & ' . $last;
}
# Check the status of "something" — either a health hashref (uses its
# {status} key) or a bare status string — against the configured
# warning/critical status words, and queue $message at that severity.
# Side effects: sets the file-global $code; adds a message to $np.
# Fix: removed the ($$) prototype (prototypes alter parsing, they do not
# validate arguments) and unpacked @_ instead of poking at $_[0]/$_[1].
sub check_status {
    my ($status, $message) = @_;
    my $level = ref $status eq 'HASH'
        ? $ES_STATUS{ $status->{status} }
        : $ES_STATUS{$status};
    $code = $np->check_threshold(
        check => $level,
        warning => "\@$ES_STATUS{$warning}",
        critical => "\@$ES_STATUS{$critical}",
    );
    $np->add_message($code, $message);
}
# Resolve a threshold spec: a coderef is invoked with ($value, $key) and
# its result used; any other spec is returned unchanged.
sub get_threshold_value {
    my ($spec, $value, $key) = @_;
    return ref $spec eq 'CODE' ? $spec->($value, $key) : $spec;
}
# Apply a threshold check to every entry of a hashref and queue one
# aggregated message per resulting severity level.
#
# Arguments:
#   $what     - hashref of name => data (e.g. index name => health hash)
#   $where    - coderef extracting the value to check from each datum
#   $warning  - threshold spec or coderef (see get_threshold_value)
#   $critical - threshold spec or coderef
#   $message  - prefix for the aggregated per-severity message
# Side effect: adds a message to the global $np for every non-OK level.
# Fixes: removed the ($$$$$) prototype; keys are now sorted so the
# reported name list is deterministic across runs.
# TODO Make sure it works recursively
sub check_each {
    my ($what, $where, $warning, $critical, $message) = @_;
    my %statuses;
    for my $k (sort keys %$what) {
        my $current_key = $where->($what->{$k});
        my $warn = get_threshold_value($warning, $what->{$k}, $k);
        my $crit = get_threshold_value($critical, $what->{$k}, $k);
        my $code = $np->check_threshold(
            check => $current_key,
            warning => $warn,
            critical => $crit,
        );
        # Bucket each key under the severity it produced.
        push @{$statuses{$code}}, $k;
    }
    for my $code (keys %statuses) {
        # OK (0) results are not reported; everything else becomes a message.
        if ($code ne 0 && $statuses{$code}) {
            $np->add_message($code, $message . pretty_join($statuses{$code}));
        }
    }
}
# Strip every non-word character (e.g. '@', ':', '%') from a threshold
# spec, leaving just the status word or number: '@yellow' -> 'yellow'.
# Fix: removed the ($) prototype (prototypes alter call parsing and do
# not validate arguments).
sub clean_extra_chars {
    my ($spec) = @_;
    $spec =~ s/[^\d\w]//g;
    return $spec;
}
# Substitute the word/number part of a threshold template with a new
# value, preserving surrounding range syntax: ('@yellow', 2) -> '@2',
# ('3:', 2) -> '2:'. Fix: removed the ($$) prototype.
sub to_threshold {
    my ($template, $value) = @_;
    $template =~ s/[\d\w]+%?/$value/;
    return $template;
}
# Decode $opt{json}; on a parse failure abort the plugin run with
# CRITICAL via $opt{np}->nagios_exit. Returns the decoded structure.
sub decode_and_check_json {
    my %opt = @_;
    my $decoded = eval { decode_json($opt{json}) };
    if ($@) {
        $opt{np}->nagios_exit(CRITICAL, "JSON was invalid: $@");
    }
    return $decoded;
}
# ----------------------------------------------------------------------
# Main: build the HTTP request for the selected check mode, query the
# cluster, then dispatch on the mode switch. The statement order matters:
# $url depends on the mode, auth is attached before the first request.
# ----------------------------------------------------------------------
my $ua = LWP::UserAgent->new;
# NRPE timeout is 10 seconds, give us 1 second to run
$ua->timeout($np->opts->timeout-1);
my $url;
if ($np->opts->get('split-brain')) {
$url = $np->opts->url."/_cluster/state/master_node,nodes?pretty";
}
else {
# Time out 1 second before LWP times out.
$url = $np->opts->url."/_cluster/health?level=shards&timeout=".($np->opts->timeout-2)."s&pretty";
}
my $req = HTTP::Request->new(GET => $url);
# Username and Password are defined for basic auth
if ($np->opts->username and $np->opts->password) {
$req->authorization_basic($np->opts->username, $np->opts->password);
}
# First query: any transport/HTTP error is an immediate CRITICAL.
my $resp = $ua->request($req);
if (!$resp->is_success) {
$np->nagios_exit(CRITICAL, $resp->status_line);
}
$json = $resp->decoded_content;
$json = decode_and_check_json(json => $json, np => $np);
# Check that the cluster query didn't time out
if (defined $json->{timed_out} && $json->{timed_out}) {
$np->nagios_exit(CRITICAL, "Connection to cluster timed out!");
}
# Check the status of the cluster.
if ($np->opts->get('cluster-status')) {
# Set defaults
$warning = $warning || "yellow";
$critical = $critical || "red";
check_status($json, "Cluster $json->{cluster_name} is $json->{status}");
}
# Check the status of the indexes.
elsif ($np->opts->get('index-status')) {
# Set defaults
$warning = $warning || '@yellow';
$critical = $critical || '@red';
# Per-index check: extract each index's color and compare against the
# status thresholds translated to their numeric form.
check_each($json->{indices},
sub {
my ($f) = @_;
return $ES_STATUS{$f->{status}};
},
to_threshold($warning, $ES_STATUS{clean_extra_chars($warning)}),
to_threshold($critical, $ES_STATUS{clean_extra_chars($critical)}),
"Indexes with issues: "
);
}
# Check that we have the number of nodes we prefer online.
elsif ($np->opts->get('nodes-online')) {
# Set defaults
$warning = $warning || '3:';
$critical = $critical || "2:";
$code = $np->check_threshold(
check => $json->{number_of_nodes},
warning => $warning,
critical => $critical,
);
$np->add_message($code, "Nodes online: $json->{number_of_nodes}");
}
# Check for split-brain of the cluster
elsif ($np->opts->get('split-brain')) {
my $master_node = $json->{master_node};
$req->uri->query($req->uri->query."&local=true");
my $header_host = $req->uri->host;
# Ask every node directly (by transport IP) who it thinks the master is.
# NOTE(review): split on ':' assumes an IPv4 "ip:port" transport_address;
# an IPv6 address would be truncated — TODO confirm against deployments.
for my $node (keys %{$json->{nodes}}) {
my ($ip) = split(/:/, $json->{nodes}->{$node}->{transport_address});
my $uri = $req->uri;
$uri->host($ip);
$req->uri($uri);
# Let's use the original host as Host header to get "vhost support" since
# we connect directly to the IP and as the CN to verify in SSL
$req->header(Host => $header_host);
$ua->ssl_opts(SSL_verifycn_name => $header_host);
my $resp = $ua->request($req);
# NOTE(review): header("client-warning") may be undef here, which would
# warn on the eq comparison — TODO confirm and guard with defined().
if (!$resp->is_success) {
$np->nagios_exit(CRITICAL, $resp->status_line.($resp->header("client-warning") eq "Internal response" ? " ".join(" ", split(/\n+/, $resp->decoded_content)) : ""));
}
my $node_json = $resp->decoded_content;
$node_json = decode_and_check_json(json => $node_json, np => $np);
if ($master_node ne $node_json->{master_node}) {
$np->nagios_exit(CRITICAL, "node=$node ip=$ip has splitbrain! It thinks $node_json->{master_node} is master but it's $master_node");
}
}
}
else {
# No check mode given: re-exec ourselves with --help to print usage.
exec ($0, "--help");
}
# Aggregate all queued messages into the final exit code and output.
($code, my $message) = $np->check_messages();
$np->nagios_exit($code, $message);
| stockholmuniversity/nagios-plugin-elasticsearch | check_elasticsearch_cluster.pl | Perl | bsd-3-clause | 9,942 |
/* Part of SWI-Prolog
Author: Jan Wielemaker
E-mail: J.Wielemaker@vu.nl
WWW: http://www.swi-prolog.org
Copyright (c) 2012-2020, VU University Amsterdam
CWI, Amsterdam
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(prolog_codewalk,
[ prolog_walk_code/1, % +Options
prolog_program_clause/2 % -ClauseRef, +Options
]).
:- use_module(library(record),[(record)/1, op(_,_,record)]).
:- autoload(library(apply),[maplist/2]).
:- autoload(library(debug),[debug/3,debugging/1,assertion/1]).
:- autoload(library(error),[must_be/2]).
:- autoload(library(listing),[portray_clause/1]).
:- autoload(library(lists),[member/2,nth1/3,append/3]).
:- autoload(library(option),[meta_options/3]).
:- autoload(library(prolog_clause),
[clause_info/4,initialization_layout/4,clause_name/2]).
:- autoload(library(prolog_metainference),
[inferred_meta_predicate/2,infer_meta_predicate/2]).
/** <module> Prolog code walker
This module walks over the loaded program, searching for callable
predicates. It started as part of library(prolog_autoload) and has been
turned into a separate module to facilitate operations that require the
same reachability analysis, such as finding references to a predicate,
finding unreachable code, etc.
For example, the following determins the call graph of the loaded
program. By using source(true), The exact location of the call in the
source file is passed into _Where.
==
:- dynamic
calls/2.
assert_call_graph :-
retractall(calls(_, _)),
prolog_walk_code([ trace_reference(_),
on_trace(assert_edge),
source(false)
]),
predicate_property(calls(_,_), number_of_clauses(N)),
format('Got ~D edges~n', [N]).
assert_edge(Callee, Caller, _Where) :-
calls(Caller, Callee), !.
assert_edge(Callee, Caller, _Where) :-
assertz(calls(Caller, Callee)).
==
*/
:- meta_predicate
prolog_walk_code(:).
:- multifile
prolog:called_by/4,
prolog:called_by/2.
:- predicate_options(prolog_walk_code/1, 1,
[ undefined(oneof([ignore,error,trace])),
autoload(boolean),
clauses(list),
module(atom),
module_class(list(oneof([user,system,library,
test,development]))),
source(boolean),
trace_reference(any),
trace_condition(callable),
on_trace(callable),
infer_meta_predicates(oneof([false,true,all])),
evaluate(boolean),
verbose(boolean)
]).
:- record
walk_option(undefined:oneof([ignore,error,trace])=ignore,
autoload:boolean=true,
source:boolean=true,
module:atom, % Only analyse given module
module_class:list(oneof([user,system,library,
test,development]))=[user,library],
infer_meta_predicates:oneof([false,true,all])=true,
clauses:list, % Walk only these clauses
trace_reference:any=(-),
trace_condition:callable, % Call-back condition
on_trace:callable, % Call-back on trace hits
% private stuff
clause, % Processed clause
caller, % Head of the caller
initialization, % Initialization source
undecided, % Error to throw error
evaluate:boolean, % Do partial evaluation
verbose:boolean=false). % Report progress
:- thread_local
multifile_predicate/3. % Name, Arity, Module
%! prolog_walk_code(+Options) is det.
%
% Walk over all loaded (user) Prolog code. The following code is
% processed:
%
% 1. The bodies of all clauses in all user and library modules.
% This steps collects, but does not scan multifile predicates
% to avoid duplicate work.
% 2. All multi-file predicates collected.
% 3. All goals registered with initialization/1
%
% Options processed:
%
% * undefined(+Action)
% Action defines what happens if the analysis finds a
% definitely undefined predicate. One of `ignore` or
% `error` (default is `ignore`).
%
% * autoload(+Boolean)
% Try to autoload code while walking. This is enabled by default
% to obtain as much as possible information about goals and find
% references from autoloaded libraries.
%
% * clauses(+ListOfClauseReferences)
% Only process the given clauses. Can be used to find clauses
% quickly using source(false) and then process only interesting
% clauses with source information.
%
% * module(+Module)
% Only process the given module
%
% * module_class(+ModuleClassList)
% Limit processing to modules of the given classes. See
% module_property/2 for details on module classes. Default
% is to scan the classes =user= and =library=.
%
% * infer_meta_predicates(+BooleanOrAll)
% Use infer_meta_predicate/2 on predicates with clauses that
% call known meta-predicates. The analysis is restarted until
% a fixed point is reached. If =true= (default), analysis is
% only restarted if the inferred meta-predicate contains a
% callable argument. If =all=, it will be restarted until no
% more new meta-predicates can be found.
%
% * trace_reference(Callable)
% Print all calls to goals that subsume Callable. Goals are
% represented as Module:Callable (i.e., they are always
% qualified). See also subsumes_term/2.
%
% * trace_condition(:Cond)
% Additional filter condition applied after `trace_reference`.
% Called as call(Cond, Callee, Context), where `Context` is a
% dict containing the following keys:
%
% - Context:caller
% Qualified term representing the caller or the atom
% '<initialization>'.
% - Context:module
% Module being processed
% - Context:clause
% If we are processing a normal clause, the clause reference
% to this clause.
% - Context:initialization
% If we are processing an initialization/1 directive, a term
% `File:Line` representing the location of the declaration.
%
% * on_trace(:OnTrace)
% If a reference to =trace_reference= is found, call
% call(OnTrace, Callee, Caller, Location), where Location is one
% of these:
%
% - clause_term_position(+ClauseRef, +TermPos)
% - clause(+ClauseRef)
% - file_term_position(+Path, +TermPos)
% - file(+File, +Line, -1, _)
% - a variable (unknown)
%
% Caller is the qualified head of the calling clause or the
% atom '<initialization>'.
%
% * source(+Boolean)
% If =false= (default =true=), to not try to obtain detailed
% source information for printed messages.
%
% * verbose(+Boolean)
% If `true` (default `false`), report derived meta-predicates
% and iterations.
%
% @compat OnTrace was called using Caller-Location in older
% versions.
% Public entry point: qualify the meta-options (on_trace, trace_condition)
% and start the first analysis iteration.
prolog_walk_code(Options) :-
meta_options(is_meta, Options, QOptions),
prolog_walk_code(1, QOptions).
% One analysis pass. If new meta-predicates were inferred during the
% pass, the whole analysis is restarted (fixed-point iteration).
prolog_walk_code(Iteration, Options) :-
statistics(cputime, CPU0),
make_walk_option(Options, OTerm, _),
( walk_option_clauses(OTerm, Clauses),
nonvar(Clauses)
-> walk_clauses(Clauses, OTerm)
; forall(( walk_option_module(OTerm, M0),
copy_term(M0, M),
current_module(M),
scan_module(M, OTerm)
),
find_walk_from_module(M, OTerm)),
walk_from_multifile(OTerm),
walk_from_initialization(OTerm)
),
infer_new_meta_predicates(New, OTerm),
statistics(cputime, CPU1),
( New \== []
-> CPU is CPU1-CPU0,
( walk_option_verbose(OTerm, true)
-> Level = informational
; Level = silent
),
print_message(Level,
codewalk(reiterate(New, Iteration, CPU))),
succ(Iteration, Iteration2),
prolog_walk_code(Iteration2, Options)
; true
).
% Options whose values are module-sensitive and must be qualified by
% meta_options/3.
is_meta(on_trace).
is_meta(trace_condition).
%! walk_clauses(+Clauses, +OTerm) is det.
%
% Walk the given clauses. Unqualified clause heads are assumed to
% live in module `user`.
walk_clauses(Clauses, OTerm) :-
must_be(list, Clauses),
forall(member(ClauseRef, Clauses),
( user:clause(CHead, Body, ClauseRef),
( CHead = Module:Head
-> true
; Module = user,
Head = CHead
),
walk_option_clause(OTerm, ClauseRef),
walk_option_caller(OTerm, Module:Head),
walk_called_by_body(Body, Module, OTerm)
)).
%! scan_module(+Module, +OTerm) is semidet.
%
% True if we must scan Module according to OTerm.
scan_module(M, OTerm) :-
walk_option_module(OTerm, M1),
nonvar(M1),
!,
% Succeed iff M and M1 are unifiable, without binding either
% (double negation undoes any bindings).
\+ M \= M1.
scan_module(M, OTerm) :-
walk_option_module_class(OTerm, Classes),
module_property(M, class(Class)),
memberchk(Class, Classes),
!.
%! walk_from_initialization(+OTerm)
%
% Find initialization/1,2 directives and process what they are
% calling. Skip goals whose source module falls outside the
% modules/classes selected by OTerm (see init_goal_in_scope/3).
%
% @bug Relies on private '$init_goal'/3 database.
walk_from_initialization(OTerm) :-
walk_option_caller(OTerm, '<initialization>'),
forall(init_goal_in_scope(Goal, SourceLocation, OTerm),
( walk_option_initialization(OTerm, SourceLocation),
walk_from_initialization(Goal, OTerm))).
% Enumerate initialization goals, restricted to the module (or
% module classes) requested in OTerm.
init_goal_in_scope(Goal, SourceLocation, OTerm) :-
'$init_goal'(_When, Goal, SourceLocation),
SourceLocation = File:_Line,
( walk_option_module(OTerm, M),
nonvar(M)
-> module_property(M, file(File))
; walk_option_module_class(OTerm, Classes),
source_file_property(File, module(MF))
-> module_property(MF, class(Class)),
memberchk(Class, Classes),
walk_option_module(OTerm, MF)
; true
).
% Walk a single initialization goal if its module is in scope.
walk_from_initialization(M:Goal, OTerm) :-
scan_module(M, OTerm),
!,
walk_called_by_body(Goal, M, OTerm).
walk_from_initialization(_, _).
%! find_walk_from_module(+Module, +OTerm) is det.
%
% Find undefined calls from the bodies of all clauses that belong
% to Module.
find_walk_from_module(M, OTerm) :-
debug(autoload, 'Analysing module ~q', [M]),
walk_option_module(OTerm, M),
forall(predicate_in_module(M, PI),
walk_called_by_pred(M:PI, OTerm)).
% Multifile predicates are only recorded here; they are walked later
% by walk_from_multifile/1 to avoid scanning them once per module.
walk_called_by_pred(Module:Name/Arity, _) :-
multifile_predicate(Name, Arity, Module),
!.
walk_called_by_pred(Module:Name/Arity, _) :-
functor(Head, Name, Arity),
predicate_property(Module:Head, multifile),
!,
assertz(multifile_predicate(Name, Arity, Module)).
walk_called_by_pred(Module:Name/Arity, OTerm) :-
functor(Head, Name, Arity),
( no_walk_property(Property),
predicate_property(Module:Head, Property)
-> true
; walk_option_caller(OTerm, Module:Head),
walk_option_clause(OTerm, ClauseRef),
% catch/3: clause/3 may raise (e.g. on system predicates);
% treat that as "no clauses".
forall(catch(clause(Module:Head, Body, ClauseRef), _, fail),
walk_called_by_body(Body, Module, OTerm))
).
no_walk_property(number_of_rules(0)). % no point walking only facts
no_walk_property(foreign). % cannot walk foreign code
%! walk_from_multifile(+OTerm)
%
% Process registered multifile predicates. Each retract/1 consumes
% the registration, so every predicate is walked exactly once.
walk_from_multifile(OTerm) :-
forall(retract(multifile_predicate(Name, Arity, Module)),
walk_called_by_multifile(Module:Name/Arity, OTerm)).
walk_called_by_multifile(Module:Name/Arity, OTerm) :-
functor(Head, Name, Arity),
forall(catch(clause_not_from_development(
Module:Head, Body, ClauseRef, OTerm),
_, fail),
( walk_option_clause(OTerm, ClauseRef),
walk_option_caller(OTerm, Module:Head),
walk_called_by_body(Body, Module, OTerm)
)).
%! clause_not_from_development(:Head, -Body, ?Ref, +Options) is nondet.
%
% Enumerate clauses for a multifile predicate, but omit those from
% a module that is specifically meant to support development.
clause_not_from_development(Module:Head, Body, Ref, OTerm) :-
clause(Module:Head, Body, Ref),
\+ ( clause_property(Ref, file(File)),
module_property(LoadModule, file(File)),
\+ scan_module(LoadModule, OTerm)
).
%! walk_called_by_body(+Body, +Module, +OTerm) is det.
%
% Check the Body term when executed in the context of Module.
% Options:
%
% - undefined(+Action)
% One of =ignore=, =error=
walk_called_by_body(True, _, _) :-
True == true,
!. % quickly deal with facts
% First try a cheap pass: no partial evaluation, and abort with
% missing/1 as soon as more information is needed. The catch/3
% handler then restarts in a more expensive mode.
walk_called_by_body(Body, Module, OTerm) :-
set_undecided_of_walk_option(error, OTerm, OTerm1),
set_evaluate_of_walk_option(false, OTerm1, OTerm2),
catch(walk_called(Body, Module, _TermPos, OTerm2),
missing(Missing),
walk_called_by_body(Missing, Body, Module, OTerm)),
!.
walk_called_by_body(Body, Module, OTerm) :-
format(user_error, 'Failed to analyse:~n', []),
portray_clause(('<head>' :- Body)),
debug_walk(Body, Module, OTerm).
% recompile this library after `debug(codewalk(trace))` and re-try
% for debugging failures.
:- if(debugging(codewalk(trace))).
debug_walk(Body, Module, OTerm) :-
gtrace,
walk_called_by_body(Body, Module, OTerm).
:- else.
debug_walk(_,_,_).
:- endif.
%! walk_called_by_body(+Missing, +Body, +Module, +OTerm)
%
% Restart the analysis because the previous analysis provided
% insufficient information.
walk_called_by_body(Missing, Body, _, OTerm) :-
debugging(codewalk),
format(user_error, 'Retrying due to ~w (~p)~n', [Missing, OTerm]),
portray_clause(('<head>' :- Body)), fail.
% Retry with full enumeration of solutions (undecided calls allowed).
walk_called_by_body(undecided_call, Body, Module, OTerm) :-
catch(forall(walk_called(Body, Module, _TermPos, OTerm),
true),
missing(Missing),
walk_called_by_body(Missing, Body, Module, OTerm)).
% Retry with accurate subterm positions from the clause or the
% initialization directive; fall back to no-source mode if neither
% yields a layout.
walk_called_by_body(subterm_positions, Body, Module, OTerm) :-
( ( walk_option_clause(OTerm, ClauseRef), nonvar(ClauseRef),
clause_info(ClauseRef, _, TermPos, _NameOffset),
TermPos = term_position(_,_,_,_,[_,BodyPos])
-> WBody = Body
; walk_option_initialization(OTerm, SrcLoc),
ground(SrcLoc), SrcLoc = _File:_Line,
initialization_layout(SrcLoc, Module:Body, WBody, BodyPos)
)
-> catch(forall(walk_called(WBody, Module, BodyPos, OTerm),
true),
missing(subterm_positions),
walk_called_by_body(no_positions, Body, Module, OTerm))
; set_source_of_walk_option(false, OTerm, OTerm2),
forall(walk_called(Body, Module, _BodyPos, OTerm2),
true)
).
% Last resort: walk without any source information.
walk_called_by_body(no_positions, Body, Module, OTerm) :-
set_source_of_walk_option(false, OTerm, OTerm2),
forall(walk_called(Body, Module, _NoPos, OTerm2),
true).
%! walk_called(+Goal, +Module, +TermPos, +OTerm) is multi.
%
% Perform abstract interpretation of Goal, touching all sub-goals
% that are directly called or immediately reachable through
% meta-calls. The actual auto-loading is performed by the
% predicate_property/2 call for meta-predicates.
%
% If Goal is disjunctive, walk_called succeeds with a
% choice-point. Backtracking analyses the alternative control
% path(s).
%
% Options:
%
% * undecided(+Action)
% How to deal with insufficiently instantiated terms in the
% call-tree. Values are:
%
% - ignore
% Silently ignore such goals
% - error
% Throw =undecided_call=
%
% * evaluate(+Boolean)
% If =true= (default), evaluate some goals. Notably =/2.
%
% @tbd Analyse e.g. assert((Head:-Body))?
% Strip explicit parentheses from the layout term.
walk_called(Term, Module, parentheses_term_position(_,_,Pos), OTerm) :-
nonvar(Pos),
!,
walk_called(Term, Module, Pos, OTerm).
walk_called(Var, _, TermPos, OTerm) :-
var(Var), % Incomplete analysis
!,
undecided(Var, TermPos, OTerm).
% Module-qualified goal: descend into the qualified module.
walk_called(M:G, _, term_position(_,_,_,_,[MPos,Pos]), OTerm) :-
!,
( nonvar(M)
-> walk_called(G, M, Pos, OTerm)
; undecided(M, MPos, OTerm)
).
% Control constructs: conjunction, if-then-else, soft-cut, negation.
walk_called((A,B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
walk_called(A, M, PA, OTerm),
walk_called(B, M, PB, OTerm).
walk_called((A->B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
walk_called(A, M, PA, OTerm),
walk_called(B, M, PB, OTerm).
walk_called((A*->B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
walk_called(A, M, PA, OTerm),
walk_called(B, M, PB, OTerm).
walk_called(\+(A), M, term_position(_,_,_,_,[PA]), OTerm) :-
!,
\+ \+ walk_called(A, M, PA, OTerm).
% Disjunction: with evaluation, collect distinct alternatives;
% otherwise walk both branches without propagating bindings.
walk_called((A;B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
( walk_option_evaluate(OTerm, Eval), Eval == true
-> Goal = (A;B),
setof(Goal,
( walk_called(A, M, PA, OTerm)
; walk_called(B, M, PB, OTerm)
),
Alts0),
variants(Alts0, Alts),
member(Goal, Alts)
; \+ \+ walk_called(A, M, PA, OTerm), % do not propagate bindings
\+ \+ walk_called(B, M, PB, OTerm)
).
% Report goals matching the trace_reference option, then fail into
% the remaining clauses so normal processing continues.
walk_called(Goal, Module, TermPos, OTerm) :-
walk_option_trace_reference(OTerm, To), To \== (-),
( subsumes_term(To, Module:Goal)
-> M2 = Module
; predicate_property(Module:Goal, imported_from(M2)),
subsumes_term(To, M2:Goal)
),
trace_condition(M2:Goal, TermPos, OTerm),
print_reference(M2:Goal, TermPos, trace, OTerm),
fail. % Continue search
walk_called(Goal, Module, _, OTerm) :-
evaluate(Goal, Module, OTerm),
!.
% Goals with a prolog:called_by/4,5 hook expansion.
walk_called(Goal, M, TermPos, OTerm) :-
( ( predicate_property(M:Goal, imported_from(IM))
-> true
; IM = M
),
prolog:called_by(Goal, IM, M, Called)
; prolog:called_by(Goal, Called)
),
Called \== [],
!,
walk_called_by(Called, M, Goal, TermPos, OTerm).
% Declared or inferred meta-predicates: walk their meta-arguments.
walk_called(Meta, M, term_position(_,E,_,_,ArgPosList), OTerm) :-
( walk_option_autoload(OTerm, false)
-> nonvar(M),
'$get_predicate_attribute'(M:Meta, defined, 1)
; true
),
( predicate_property(M:Meta, meta_predicate(Head))
; inferred_meta_predicate(M:Meta, Head)
),
!,
walk_option_clause(OTerm, ClauseRef),
register_possible_meta_clause(ClauseRef),
walk_meta_call(1, Head, Meta, M, ArgPosList, E-E, OTerm).
% Compiled closure blobs (internal representation of call/N targets).
walk_called(Closure, _, _, _) :-
blob(Closure, closure),
!,
'$closure_predicate'(Closure, Module:Name/Arity),
functor(Head, Name, Arity),
'$get_predicate_attribute'(Module:Head, defined, 1).
walk_called(ClosureCall, _, _, _) :-
compound(ClosureCall),
compound_name_arity(ClosureCall, Closure, _),
blob(Closure, closure),
!,
'$closure_predicate'(Closure, Module:Name/Arity),
functor(Head, Name, Arity),
'$get_predicate_attribute'(Module:Head, defined, 1).
% Plain goal that is defined: nothing further to do.
walk_called(Goal, Module, _, _) :-
nonvar(Module),
'$get_predicate_attribute'(Module:Goal, defined, 1),
!.
% Callable but not defined anywhere: report as undefined.
walk_called(Goal, Module, TermPos, OTerm) :-
callable(Goal),
!,
undefined(Module:Goal, TermPos, OTerm).
walk_called(Goal, _Module, TermPos, OTerm) :-
not_callable(Goal, TermPos, OTerm).
%! trace_condition(:Callee, +TermPos, +OTerm) is semidet.
%
% Call call(Condition, Callee, Dict). Succeeds trivially if no
% trace_condition option was given.
trace_condition(Callee, TermPos, OTerm) :-
walk_option_trace_condition(OTerm, Cond), nonvar(Cond),
!,
cond_location_context(OTerm, TermPos, Context0),
walk_option_caller(OTerm, Caller),
walk_option_module(OTerm, Module),
put_dict(#{caller:Caller, module:Module}, Context0, Context),
call(Cond, Callee, Context).
trace_condition(_, _, _).
% Build the location part of the condition context dict: either the
% clause being processed or the initialization directive location.
cond_location_context(OTerm, _TermPos, Context) :-
walk_option_clause(OTerm, Clause), nonvar(Clause),
!,
Context = #{clause:Clause}.
cond_location_context(OTerm, _TermPos, Context) :-
walk_option_initialization(OTerm, Init), nonvar(Init),
!,
Context = #{initialization:Init}.
%! undecided(+Variable, +TermPos, +OTerm)
%
% Act on an insufficiently instantiated goal according to the
% undecided(Action) option (default: ignore).
undecided(Var, TermPos, OTerm) :-
walk_option_undecided(OTerm, Undecided),
( var(Undecided)
-> Action = ignore
; Action = Undecided
),
undecided(Action, Var, TermPos, OTerm).
undecided(ignore, _, _, _) :- !.
undecided(error, _, _, _) :-
throw(missing(undecided_call)).
%! evaluate(Goal, Module, OTerm) is nondet.
%
% Partially evaluate Goal if the evaluate option allows it.
evaluate(Goal, Module, OTerm) :-
walk_option_evaluate(OTerm, Evaluate),
Evaluate \== false,
evaluate(Goal, Module).
% Occurs-check unification keeps the abstract interpretation finite.
evaluate(A=B, _) :-
unify_with_occurs_check(A, B).
%! undefined(:Goal, +TermPos, +OTerm)
%
% The analysis trapped a definitely undefined predicate.
undefined(_, _, OTerm) :-
walk_option_undefined(OTerm, ignore),
!.
undefined(Goal, _, _) :-
predicate_property(Goal, autoload(_)),
!.
undefined(Goal, TermPos, OTerm) :-
( walk_option_undefined(OTerm, trace)
-> Why = trace
; Why = undefined
),
print_reference(Goal, TermPos, Why, OTerm).
%! not_callable(+Goal, +TermPos, +OTerm)
%
% We found a reference to a non-callable term
not_callable(Goal, TermPos, OTerm) :-
print_reference(Goal, TermPos, not_callable, OTerm).
%! print_reference(+Goal, +TermPos, +Why, +OTerm)
%
% Print a reference to Goal, found at TermPos. Throws
% missing(subterm_positions) when source info is wanted but the
% layout term carries no valid positions, causing the caller to
% re-run the walk with full source information.
%
% @param Why is one of =trace= or =undefined=
print_reference(Goal, TermPos, Why, OTerm) :-
walk_option_clause(OTerm, Clause), nonvar(Clause),
!,
( compound(TermPos),
arg(1, TermPos, CharCount),
integer(CharCount) % test it is valid
-> From = clause_term_position(Clause, TermPos)
; walk_option_source(OTerm, false)
-> From = clause(Clause)
; From = _,
throw(missing(subterm_positions))
),
print_reference2(Goal, From, Why, OTerm).
print_reference(Goal, TermPos, Why, OTerm) :-
walk_option_initialization(OTerm, Init), nonvar(Init),
Init = File:Line,
!,
( compound(TermPos),
arg(1, TermPos, CharCount),
integer(CharCount) % test it is valid
-> From = file_term_position(File, TermPos)
; walk_option_source(OTerm, false)
-> From = file(File, Line, -1, _)
; From = _,
throw(missing(subterm_positions))
),
print_reference2(Goal, From, Why, OTerm).
print_reference(Goal, _, Why, OTerm) :-
print_reference2(Goal, _, Why, OTerm).
% Dispatch a trace hit to the on_trace callback if one is set;
% otherwise (or for other reasons) print a message.
print_reference2(Goal, From, trace, OTerm) :-
walk_option_on_trace(OTerm, Closure),
walk_option_caller(OTerm, Caller),
nonvar(Closure),
call(Closure, Goal, Caller, From),
!.
print_reference2(Goal, From, Why, _OTerm) :-
make_message(Why, Goal, From, Message, Level),
print_message(Level, Message).
% Map a reason to a printable message term and its message level.
make_message(undefined, Goal, Context,
error(existence_error(procedure, PI), Context), error) :-
goal_pi(Goal, PI).
make_message(not_callable, Goal, Context,
error(type_error(callable, Goal), Context), error).
make_message(trace, Goal, Context,
trace_call_to(PI, Context), informational) :-
goal_pi(Goal, PI).
% Describe a goal as Module:Name/Arity; fall back to the raw term
% for non-callable goals.
goal_pi(Goal, M:Name/Arity) :-
strip_module(Goal, M, Head),
callable(Head),
!,
functor(Head, Name, Arity).
goal_pi(Goal, Goal).
:- dynamic
possible_meta_predicate/2.
%! register_possible_meta_clause(+ClauseRef) is det.
%
% ClausesRef contains as call to a meta-predicate. Remember to
% analyse this predicate. We only analyse the predicate if it is
% loaded from a user module. I.e., system and library modules are
% trusted.
register_possible_meta_clause(ClausesRef) :-
nonvar(ClausesRef),
clause_property(ClausesRef, predicate(PI)),
pi_head(PI, Head, Module),
module_property(Module, class(user)),
\+ predicate_property(Module:Head, meta_predicate(_)),
\+ inferred_meta_predicate(Module:Head, _),
\+ possible_meta_predicate(Head, Module),
!,
assertz(possible_meta_predicate(Head, Module)).
register_possible_meta_clause(_).
% Convert Module:Name/Arity into a most-general head term.
pi_head(Module:Name/Arity, Head, Module) :-
!,
functor(Head, Name, Arity).
pi_head(_, _, _) :-
assertion(fail).
%! infer_new_meta_predicates(-MetaSpecs, +OTerm) is det.
%
% Run infer_meta_predicate/2 on all candidates collected during the
% walk and return the specs that matter for a re-iteration.
infer_new_meta_predicates([], OTerm) :-
walk_option_infer_meta_predicates(OTerm, false),
!.
infer_new_meta_predicates(MetaSpecs, OTerm) :-
findall(Module:MetaSpec,
( retract(possible_meta_predicate(Head, Module)),
infer_meta_predicate(Module:Head, MetaSpec),
( walk_option_infer_meta_predicates(OTerm, all)
-> true
; calling_metaspec(MetaSpec)
)
),
MetaSpecs).
%! calling_metaspec(+Head) is semidet.
%
% True if this is a meta-specification that makes a difference to
% the code walker.
calling_metaspec(Head) :-
arg(_, Head, Arg),
calling_metaarg(Arg),
!.
% Integer N (goal plus N extra args), ^ (quantified) and // (DCG)
% meta-arguments cause goals to be walked.
calling_metaarg(I) :- integer(I), !.
calling_metaarg(^).
calling_metaarg(//).
%! walk_meta_call(+Index, +GoalHead, +MetaHead, +Module,
%! +ArgPosList, +EndPos, +OTerm)
%
% Walk a call to a meta-predicate. This walks all meta-arguments
% labeled with an integer, ^ or //.
%
% @arg EndPos reflects the end of the term. This is used if the
% number of arguments in the compiled form exceeds the
% number of arguments in the term read.
walk_meta_call(I, Head, Meta, M, ArgPosList, EPos, OTerm) :-
arg(I, Head, AS),
!,
( ArgPosList = [ArgPos|ArgPosTail]
-> true
; ArgPos = EPos,
ArgPosTail = []
),
( integer(AS)
-> arg(I, Meta, MA),
extend(MA, AS, Goal, ArgPos, ArgPosEx, OTerm),
walk_called(Goal, M, ArgPosEx, OTerm)
; AS == (^)
-> arg(I, Meta, MA),
remove_quantifier(MA, Goal, ArgPos, ArgPosEx, M, MG, OTerm),
walk_called(Goal, MG, ArgPosEx, OTerm)
; AS == (//)
-> arg(I, Meta, DCG),
walk_dcg_body(DCG, M, ArgPos, OTerm)
; true
),
succ(I, I2),
walk_meta_call(I2, Head, Meta, M, ArgPosTail, EPos, OTerm).
walk_meta_call(_, _, _, _, _, _, _).
% Strip ^/2 quantifiers (and module qualifications) from a goal as
% used by bagof/setof, tracking the module context.
remove_quantifier(Goal, _, TermPos, TermPos, M, M, OTerm) :-
var(Goal),
!,
undecided(Goal, TermPos, OTerm).
remove_quantifier(_^Goal0, Goal,
term_position(_,_,_,_,[_,GPos]),
TermPos, M0, M, OTerm) :-
!,
remove_quantifier(Goal0, Goal, GPos, TermPos, M0, M, OTerm).
remove_quantifier(M1:Goal0, Goal,
term_position(_,_,_,_,[_,GPos]),
TermPos, _, M, OTerm) :-
!,
remove_quantifier(Goal0, Goal, GPos, TermPos, M1, M, OTerm).
remove_quantifier(Goal, Goal, TermPos, TermPos, M, M, _).
%! walk_called_by(+Called:list, +Module, +Goal, +TermPos, +OTerm)
%
% Walk code explicitly mentioned to be called through the hook
% prolog:called_by/2. Elements may be G+N, meaning G is called
% with N additional arguments.
walk_called_by([], _, _, _, _).
walk_called_by([H|T], M, Goal, TermPos, OTerm) :-
( H = G0+N
-> subterm_pos(G0, M, Goal, TermPos, G, GPos),
( extend(G, N, G2, GPos, GPosEx, OTerm)
-> walk_called(G2, M, GPosEx, OTerm)
; true
)
; subterm_pos(H, M, Goal, TermPos, G, GPos),
walk_called(G, M, GPos, OTerm)
),
walk_called_by(T, M, Goal, TermPos, OTerm).
% Locate Sub inside Term, also looking through module qualification.
% Leaves the position unbound if Sub cannot be located.
subterm_pos(Sub, _, Term, TermPos, Sub, SubTermPos) :-
subterm_pos(Sub, Term, TermPos, SubTermPos),
!.
subterm_pos(Sub, M, Term, TermPos, G, SubTermPos) :-
nonvar(Sub),
Sub = M:H,
!,
subterm_pos(H, M, Term, TermPos, G, SubTermPos).
subterm_pos(Sub, _, _, _, Sub, _).
% Try progressively weaker equivalences to find Sub in Term.
subterm_pos(Sub, Term, TermPos, SubTermPos) :-
subterm_pos(Sub, Term, same_term, TermPos, SubTermPos),
!.
subterm_pos(Sub, Term, TermPos, SubTermPos) :-
subterm_pos(Sub, Term, ==, TermPos, SubTermPos),
!.
subterm_pos(Sub, Term, TermPos, SubTermPos) :-
subterm_pos(Sub, Term, =@=, TermPos, SubTermPos),
!.
subterm_pos(Sub, Term, TermPos, SubTermPos) :-
subterm_pos(Sub, Term, subsumes_term, TermPos, SubTermPos),
!.
%! walk_dcg_body(+Body, +Module, +TermPos, +OTerm)
%
% Walk a DCG body that is meta-called. Terminals (lists, strings),
% ! and {}/1 goals are handled specially; any other goal is a
% non-terminal and is extended with the two difference-list
% arguments before being walked as an ordinary goal.
walk_dcg_body(Var, _Module, TermPos, OTerm) :-
var(Var),
!,
undecided(Var, TermPos, OTerm).
walk_dcg_body([], _Module, _, _) :- !.
walk_dcg_body([_|_], _Module, _, _) :- !.
walk_dcg_body(String, _Module, _, _) :-
string(String),
!.
walk_dcg_body(!, _Module, _, _) :- !.
walk_dcg_body(M:G, _, term_position(_,_,_,_,[MPos,Pos]), OTerm) :-
!,
( nonvar(M)
-> walk_dcg_body(G, M, Pos, OTerm)
; undecided(M, MPos, OTerm)
).
walk_dcg_body((A,B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
walk_dcg_body(A, M, PA, OTerm),
walk_dcg_body(B, M, PB, OTerm).
walk_dcg_body((A->B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
walk_dcg_body(A, M, PA, OTerm),
walk_dcg_body(B, M, PB, OTerm).
walk_dcg_body((A*->B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
walk_dcg_body(A, M, PA, OTerm),
walk_dcg_body(B, M, PB, OTerm).
walk_dcg_body((A;B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
( walk_dcg_body(A, M, PA, OTerm)
; walk_dcg_body(B, M, PB, OTerm)
).
walk_dcg_body((A|B), M, term_position(_,_,_,_,[PA,PB]), OTerm) :-
!,
( walk_dcg_body(A, M, PA, OTerm)
; walk_dcg_body(B, M, PB, OTerm)
).
walk_dcg_body({G}, M, brace_term_position(_,_,PG), OTerm) :-
!,
walk_called(G, M, PG, OTerm).
walk_dcg_body(G, M, TermPos, OTerm) :-
extend(G, 2, G2, TermPos, TermPosEx, OTerm),
walk_called(G2, M, TermPosEx, OTerm).
%! subterm_pos(+SubTerm, +Term, :Cmp,
%! +TermPosition, -SubTermPos) is nondet.
%
% True when SubTerm is a sub term of Term, compared using Cmp,
% TermPosition describes the term layout of Term and SubTermPos
% describes the term layout of SubTerm. Cmp is typically one of
% =same_term=, =|==|=, =|=@=|= or =|subsumes_term|=
:- meta_predicate
subterm_pos(+, +, 2, +, -),
sublist_pos(+, +, +, +, 2, -).
:- public
subterm_pos/5. % used in library(check).
subterm_pos(_, _, _, Pos, _) :-
var(Pos), !, fail.
subterm_pos(Sub, Term, Cmp, Pos, Pos) :-
call(Cmp, Sub, Term),
!.
% Recurse into compound arguments, lists and {}/1 terms.
subterm_pos(Sub, Term, Cmp, term_position(_,_,_,_,ArgPosList), Pos) :-
is_list(ArgPosList),
compound(Term),
nth1(I, ArgPosList, ArgPos),
arg(I, Term, Arg),
subterm_pos(Sub, Arg, Cmp, ArgPos, Pos).
subterm_pos(Sub, Term, Cmp, list_position(_,_,ElemPosList,TailPos), Pos) :-
sublist_pos(ElemPosList, TailPos, Sub, Term, Cmp, Pos).
subterm_pos(Sub, {Arg}, Cmp, brace_term_position(_,_,ArgPos), Pos) :-
subterm_pos(Sub, Arg, Cmp, ArgPos, Pos).
% Search the elements of a list and finally its tail (if any).
sublist_pos([EP|TP], TailPos, Sub, [H|T], Cmp, Pos) :-
( subterm_pos(Sub, H, Cmp, EP, Pos)
; sublist_pos(TP, TailPos, Sub, T, Cmp, Pos)
).
sublist_pos([], TailPos, Sub, Tail, Cmp, Pos) :-
TailPos \== none,
subterm_pos(Sub, Tail, Cmp, TailPos, Pos).
%! extend(+Goal, +ExtraArgs, +TermPosIn, -TermPosOut, +OTerm)
%
% Add ExtraArgs fresh arguments to Goal, as call/N does, updating
% the layout term accordingly.
%
% @bug Positions for the added arguments are faked as 0-0 by
% extend_term_pos/3; messages about them point at the start
% of the term rather than a real source location.
extend(Goal, 0, Goal, TermPos, TermPos, _) :- !.
extend(Goal, _, _, TermPos, TermPos, OTerm) :-
var(Goal),
!,
undecided(Goal, TermPos, OTerm).
extend(M:Goal, N, M:GoalEx,
term_position(F,T,FT,TT,[MPos,GPosIn]),
term_position(F,T,FT,TT,[MPos,GPosOut]), OTerm) :-
!,
( var(M)
-> undecided(N, MPos, OTerm)
; true
),
extend(Goal, N, GoalEx, GPosIn, GPosOut, OTerm).
extend(Goal, N, GoalEx, TermPosIn, TermPosOut, _) :-
callable(Goal),
!,
Goal =.. List,
length(Extra, N),
extend_term_pos(TermPosIn, N, TermPosOut),
append(List, Extra, ListEx),
GoalEx =.. ListEx.
extend(Closure, N, M:GoalEx, TermPosIn, TermPosOut, OTerm) :-
blob(Closure, closure), % call(Closure, A1, ...)
!,
'$closure_predicate'(Closure, M:Name/Arity),
length(Extra, N),
extend_term_pos(TermPosIn, N, TermPosOut),
GoalEx =.. [Name|Extra],
( N =:= Arity
-> true
; print_reference(Closure, TermPosIn, closure_arity_mismatch, OTerm)
).
extend(Goal, _, _, TermPos, _, OTerm) :-
print_reference(Goal, TermPos, not_callable, OTerm).
% Append N dummy 0-0 argument positions to the layout term.
extend_term_pos(Var, _, _) :-
var(Var),
!.
extend_term_pos(term_position(F,T,FT,TT,ArgPosIn),
N,
term_position(F,T,FT,TT,ArgPosOut)) :-
!,
length(Extra, N),
maplist(=(0-0), Extra),
append(ArgPosIn, Extra, ArgPosOut).
extend_term_pos(F-T, N, term_position(F,T,F,T,Extra)) :-
length(Extra, N),
maplist(=(0-0), Extra).
%! variants(+SortedList, -Variants) is det.
%
% Remove adjacent variant terms (=@=) from a sorted list.
variants([], []).
variants([H|T], List) :-
variants(T, H, List).
variants([], H, [H]).
variants([H|T], V, List) :-
( H =@= V
-> variants(T, V, List)
; List = [V|List2],
variants(T, H, List2)
).
%! predicate_in_module(+Module, ?PI) is nondet.
%
% True if PI is a predicate locally defined in Module.
predicate_in_module(Module, PI) :-
current_predicate(Module:PI),
PI = Name/Arity,
\+ hidden_predicate(Name, Arity),
functor(Head, Name, Arity),
\+ predicate_property(Module:Head, imported_from(_)).
% Predicates whose name starts with $wrap$ are implementation
% artefacts (wrapped predicates) and are not walked.
hidden_predicate(Name, _) :-
atom(Name), % []/N is not hidden
sub_atom(Name, 0, _, _, '$wrap$').
/*******************************
* ENUMERATE CLAUSES *
*******************************/
%! prolog_program_clause(-ClauseRef, +Options) is nondet.
%
% True when ClauseRef is a reference for clause in the program.
% Options is a subset of the options processed by
% prolog_walk_code/1. The logic for deciding on which clauses to
% enumerate is shared with prolog_walk_code/1.
%
% * module(?Module)
% * module_class(+list(Classes))
prolog_program_clause(ClauseRef, Options) :-
make_walk_option(Options, OTerm, _),
% Cleanup retracts any multifile registrations left behind when
% enumeration is cut or aborted.
setup_call_cleanup(
true,
( current_module(Module),
scan_module(Module, OTerm),
module_clause(Module, ClauseRef, OTerm)
; retract(multifile_predicate(Name, Arity, MM)),
multifile_clause(ClauseRef, MM:Name/Arity, OTerm)
; initialization_clause(ClauseRef, OTerm)
),
retractall(multifile_predicate(_,_,_))).
% Enumerate clauses of predicates local to Module; multifile
% predicates are recorded for the second enumeration phase.
module_clause(Module, ClauseRef, _OTerm) :-
predicate_in_module(Module, Name/Arity),
\+ multifile_predicate(Name, Arity, Module),
functor(Head, Name, Arity),
( predicate_property(Module:Head, multifile)
-> assertz(multifile_predicate(Name, Arity, Module)),
fail
; predicate_property(Module:Head, Property),
no_enum_property(Property)
-> fail
; catch(nth_clause(Module:Head, _, ClauseRef), _, fail)
).
no_enum_property(foreign).
multifile_clause(ClauseRef, M:Name/Arity, OTerm) :-
functor(Head, Name, Arity),
catch(clauseref_not_from_development(M:Head, ClauseRef, OTerm),
_, fail).
% As clause_not_from_development/4, but enumerating references only.
clauseref_not_from_development(Module:Head, Ref, OTerm) :-
nth_clause(Module:Head, _N, Ref),
\+ ( clause_property(Ref, file(File)),
module_property(LoadModule, file(File)),
\+ scan_module(LoadModule, OTerm)
).
% Enumerate the clauses recording initialization/1,2 goals for
% modules that are in scope.
initialization_clause(ClauseRef, OTerm) :-
catch(clause(system:'$init_goal'(_File, M:_Goal, SourceLocation),
true, ClauseRef),
_, fail),
walk_option_initialization(OTerm, SourceLocation),
scan_module(M, OTerm).
/*******************************
* MESSAGES *
*******************************/
:- multifile
prolog:message//1,
prolog:message_location//1.
% Render a trace_call_to/2 message produced by make_message/5.
prolog:message(trace_call_to(PI, Context)) -->
[ 'Call to ~q at '-[PI] ],
'$messages':swi_location(Context).
% Render the location terms built by print_reference/4.
prolog:message_location(clause_term_position(ClauseRef, TermPos)) -->
{ clause_property(ClauseRef, file(File)) },
message_location_file_term_position(File, TermPos).
prolog:message_location(clause(ClauseRef)) -->
{ clause_property(ClauseRef, file(File)),
clause_property(ClauseRef, line_count(Line))
},
!,
[ '~w:~d: '-[File, Line] ].
prolog:message_location(clause(ClauseRef)) -->
{ clause_name(ClauseRef, Name) },
[ '~w: '-[Name] ].
prolog:message_location(file_term_position(Path, TermPos)) -->
message_location_file_term_position(Path, TermPos).
% Report the meta-predicates discovered in an iteration before the
% analysis restarts.
prolog:message(codewalk(reiterate(New, Iteration, CPU))) -->
[ 'Found new meta-predicates in iteration ~w (~3f sec)'-
[Iteration, CPU], nl ],
meta_decls(New),
[ 'Restarting analysis ...'-[], nl ].
meta_decls([]) --> [].
meta_decls([H|T]) -->
[ ':- meta_predicate ~q.'-[H], nl ],
meta_decls(T).
% Translate a character offset in a file into file:line:column.
message_location_file_term_position(File, TermPos) -->
{ arg(1, TermPos, CharCount),
filepos_line(File, CharCount, Line, LinePos)
},
[ '~w:~d:~d: '-[File, Line, LinePos] ].
%! filepos_line(+File, +CharPos, -Line, -Column) is det.
%
% Copy CharPos characters from File to a null stream, then read the
% line/column bookkeeping from the input stream's position.
%
% @param CharPos is 0-based character offset in the file.
% @param Column is the current column, counting tabs as 8 spaces.
filepos_line(File, CharPos, Line, LinePos) :-
setup_call_cleanup(
( open(File, read, In),
open_null_stream(Out)
),
( copy_stream_data(In, Out, CharPos),
stream_property(In, position(Pos)),
stream_position_data(line_count, Pos, Line),
stream_position_data(line_position, Pos, LinePos)
),
( close(Out),
close(In)
)).
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/lib/swipl/library/prolog_codewalk.pl | Perl | mit | 39,470 |
<?php
// Polish (pl) message strings for the MissingField exception.
// %s in $ROW_WHICH_YOU_TRYING_ADD is substituted with the list of
// missing field names.
$TRYING_ADD_UNFILLED_ROW = 'Próbujesz dodać do tabeli źle uzupełniony wiersz.';
$ROW_WHICH_YOU_TRYING_ADD = 'Wiersz który dodajesz nie jest uzupełniony wymaganymi danymi. Brakuje %s. Poniżej drukuje wiersz który chcesz dodać oraz schemat modelu z listą wymaganych pól.';
$HERE_YOU_CALL = 'Tutaj dodajesz nieuzupełniony wiersz.';
#!/usr/bin/perl -w
###################################################
### illumina_realign.pm
### - Realign bam files using gatk indel realigner
###
### Author: S.W.Boymans, R.F.Ernst, H.H.D.Kerstens
####
###################################################
package IAP::realign;
use strict;
use POSIX qw(tmpnam);
use lib "$FindBin::Bin"; #locates pipeline directory
use IAP::sge;
sub runRealignment {
###
# Submit indel realign jobs.
# Two modes: single and multi sample.
###
my $configuration = shift;
my %opt = %{$configuration};
my $realignJobs = {};
my $runName = (split("/", $opt{OUTPUT_DIR}))[-1];
print "Running $opt{REALIGNMENT_MODE} sample indel realignment for the following BAM-files:\n";
### Parsing known indel files
my @knownIndelFiles;
if($opt{REALIGNMENT_KNOWN}) {
@knownIndelFiles = split('\t', $opt{REALIGNMENT_KNOWN});
}
### Multi sample realignment
if($opt{REALIGNMENT_MODE} eq 'multi'){
my $mainJobID = "$opt{OUTPUT_DIR}/jobs/RealignMainJob_".get_job_id().".sh";
open (QSUB,">$mainJobID") or die "ERROR: Couldn't create $mainJobID\n";
print QSUB "\#!/bin/sh\n\n. $opt{CLUSTER_PATH}/settings.sh\n\n";
my $jobId = "RE_".get_job_id();
my $cleanupJobId = "REALIGN_CLEANUP\_".get_job_id();
my $mergeJobs = "";
my @waitFor = ();
my $jobNative = &jobNative(\%opt,"REALIGNMENT");
open REALIGN_SH,">$opt{OUTPUT_DIR}/jobs/$jobId.sh" or die "Couldn't create $opt{OUTPUT_DIR}/jobs/$jobId.sh\n";
print REALIGN_SH "\#!/bin/sh\n\n";
print REALIGN_SH ". $opt{CLUSTER_PATH}/settings.sh\n\n";
print REALIGN_SH "cd $opt{OUTPUT_DIR}/tmp\n";
print REALIGN_SH "uname -n > ../logs/$jobId.host\n";
print REALIGN_SH "echo \"Start indel realignment\t\" `date` >> ../logs/$runName.log\n";
print REALIGN_SH "java -Xmx".$opt{REALIGNMENT_MASTER_MEM}."G -Djava.io.tmpdir=$opt{OUTPUT_DIR}/tmp/ -jar $opt{QUEUE_PATH}/Queue.jar -R $opt{GENOME} -S $opt{IAP_PATH}/$opt{REALIGNMENT_SCALA} -jobQueue $opt{REALIGNMENT_QUEUE} -nt $opt{REALIGNMENT_THREADS} -mem $opt{REALIGNMENT_MEM} -nsc $opt{REALIGNMENT_SCATTER} -mode $opt{REALIGNMENT_MODE} -jobNative \"$jobNative\" -run ";
if($opt{REALIGNMENT_KNOWN}) {
foreach my $knownIndelFile (@knownIndelFiles) {
print REALIGN_SH "-known $knownIndelFile ";
}
}
if($opt{QUEUE_RETRY} eq 'yes'){
print REALIGN_SH "-retry 1 ";
}
open CLEAN_SH, ">$opt{OUTPUT_DIR}/jobs/$cleanupJobId.sh" or die "Couldn't create $opt{OUTPUT_DIR}/jobs/$cleanupJobId.sh\n";
print CLEAN_SH "\#!/bin/sh\n\n";
print CLEAN_SH "uname -n > $opt{OUTPUT_DIR}/logs/$cleanupJobId.host\n";
print CLEAN_SH "PASS=0\n";
foreach my $sample (@{$opt{SAMPLES}}){
my $bam = $opt{BAM_FILES}->{$sample};
(my $flagstat = $bam) =~ s/\.bam/\.flagstat/;
(my $realignedBam = $bam) =~ s/\.bam/\.realigned\.bam/;
(my $realignedBai = $bam) =~ s/\.bam/\.realigned\.bai/;
(my $realignedBamBai = $bam) =~ s/\.bam/\.realigned\.bam\.bai/;
(my $realignedFlagstat = $bam) =~ s/\.bam/\.realigned\.flagstat/;
$opt{BAM_FILES}->{$sample} = $realignedBam;
print "\t$opt{OUTPUT_DIR}/$sample/mapping/$bam\n";
## Check for realigned bam file, skip sample if realigned bam file already exist.
if (-e "$opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done"){
print "\t WARNING: $opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done exists, skipping\n";
next;
}
push(@waitFor, join(",",@{$opt{RUNNING_JOBS}->{$sample}}));
print REALIGN_SH "-I $opt{OUTPUT_DIR}/$sample/mapping/$bam";
my $mergeJobId = "REALIGN_MERGE_$sample\_".get_job_id();
open MERGE_SH, ">$opt{OUTPUT_DIR}/$sample/jobs/$mergeJobId.sh" or die "Couldn't create $opt{OUTPUT_DIR}/$sample/jobs/$mergeJobId.sh\n";
print MERGE_SH "\#!/bin/sh\n\n";
print MERGE_SH "cd $opt{OUTPUT_DIR}/tmp/.queue/\n";
print MERGE_SH "CHUNKS=`find \$PWD -name '*$realignedBam' | sort | xargs`\n";
print MERGE_SH "if [ ! -f $opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done ]\n";
print MERGE_SH "then\n";
print MERGE_SH "\t$opt{SAMBAMBA_PATH}/sambamba merge -t $opt{REALIGNMENT_MERGETHREADS} $opt{OUTPUT_DIR}/$sample/mapping/$realignedBam \`echo \$CHUNKS\` 1>>$opt{OUTPUT_DIR}/$sample/logs/realn_merge.log 2>>$opt{OUTPUT_DIR}/$sample/logs/realn_merge.err\n";
print MERGE_SH "\t$opt{SAMBAMBA_PATH}/sambamba index -t $opt{REALIGNMENT_MERGETHREADS} $opt{OUTPUT_DIR}/$sample/mapping/$realignedBam $opt{OUTPUT_DIR}/$sample/mapping/$realignedBai\n";
print MERGE_SH "\tcp $opt{OUTPUT_DIR}/$sample/mapping/$realignedBai $opt{OUTPUT_DIR}/$sample/mapping/$realignedBamBai\n";
print MERGE_SH "\t$opt{SAMBAMBA_PATH}/sambamba flagstat -t $opt{REALIGNMENT_MERGETHREADS} $opt{OUTPUT_DIR}/$sample/mapping/$realignedBam > $opt{OUTPUT_DIR}/$sample/mapping/$$realignedFlagstat\n\n";
print MERGE_SH "fi\n\n";
print MERGE_SH "if [ -s $opt{OUTPUT_DIR}/$sample/mapping/$flagstat ] && [ -s $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat ]\n";
print MERGE_SH "then\n";
print MERGE_SH "\tFS1=\`grep -m 1 -P \"\\d+ \" $opt{OUTPUT_DIR}/$sample/mapping/$flagstat | awk '{{split(\$0,columns , \"+\")} print columns[1]}'\`\n";
print MERGE_SH "\tFS2=\`grep -m 1 -P \"\\d+ \" $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat | awk '{{split(\$0,columns , \"+\")} print columns[1]}'\`\n";
print MERGE_SH "\tif [ \$FS1 -eq \$FS2 ]\n";
print MERGE_SH "\tthen\n";
print MERGE_SH "\t\ttouch $opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done\n";
print MERGE_SH "\telse\n";
print MERGE_SH "\t\techo \"ERROR: $opt{OUTPUT_DIR}/$sample/mapping/$flagstat and $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat do not have the same read counts\" >>$opt{OUTPUT_DIR}/$sample/logs/realn_merge.err\n";
print MERGE_SH "\tfi\n";
print MERGE_SH "else\n";
print MERGE_SH "\techo \"ERROR: Either $opt{OUTPUT_DIR}/$sample/mapping/$flagstat or $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat is empty.\" >> $opt{OUTPUT_DIR}/$sample/logs/realn_merge.err\n";
print MERGE_SH "fi\n";
close MERGE_SH;
print CLEAN_SH "if [ ! -f $opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done ]\n";
print CLEAN_SH "then\n";
print CLEAN_SH "\tPASS=1\n";
print CLEAN_SH "else\n";
print CLEAN_SH "\techo \"ERROR: $opt{OUTPUT_DIR}/$sample/mapping/$realignedBam didn't finish properly.\" >> $opt{OUTPUT_DIR}/logs/realn_cleanup.err\n";
print CLEAN_SH "fi\n\n";
my $qsub = &qsubJava(\%opt,"REALIGNMENT");
$mergeJobs .= $qsub." -p 100 -o ".$opt{OUTPUT_DIR}."/".$sample."/logs -e ".$opt{OUTPUT_DIR}."/".$sample."/logs -N ".$mergeJobId." -hold_jid ".$jobId," ".$opt{OUTPUT_DIR}."/".$sample."/jobs/".$mergeJobId.".sh\n";
push(@{$opt{RUNNING_JOBS}->{$sample}}, $mergeJobId);
}
print REALIGN_SH "-jobRunner GridEngine 1>>$opt{OUTPUT_DIR}/logs/$jobId.host 2>>$opt{OUTPUT_DIR}/logs/$jobId.host\n";
close REALIGN_SH;
print CLEAN_SH "if [ \$PASS -eq 0 ]\n";
print CLEAN_SH "then\n";
print CLEAN_SH "echo \"Finished indel realignment\t\" `date` >> ../logs/$runName.log\n";
print CLEAN_SH "\tmv $opt{OUTPUT_DIR}/tmp/IndelRealigner.jobreport.txt $opt{OUTPUT_DIR}/logs/IndelRealigner.jobreport.txt\n";
print CLEAN_SH "\tmv $opt{OUTPUT_DIR}/tmp/IndelRealigner.jobreport.pdf $opt{OUTPUT_DIR}/logs/IndelRealigner.jobreport.pdf\n";
print CLEAN_SH "fi\n";
close CLEAN_SH;
my $qsub = &qsubJava(\%opt,"REALIGNMENT_MASTER");
print QSUB $qsub," -o ",$opt{OUTPUT_DIR},"/logs -e ",$opt{OUTPUT_DIR},"/logs -N ",$jobId," -hold_jid ",join(",", @waitFor)," ", $opt{OUTPUT_DIR},"/jobs/",$jobId,".sh\n";
print QSUB $qsub," -o ",$opt{OUTPUT_DIR},"/logs -e ",$opt{OUTPUT_DIR},"/logs -N ",$cleanupJobId," -hold_jid ",$jobId," ",$opt{OUTPUT_DIR},"/jobs/",$cleanupJobId,".sh\n";
print QSUB $mergeJobs."\n";
system("sh $mainJobID");
}
### Single sample indel realignment
elsif($opt{REALIGNMENT_MODE} eq 'single'){
foreach my $sample (@{$opt{SAMPLES}}){
my $bam = $opt{BAM_FILES}->{$sample};
(my $flagstat = $bam) =~ s/\.bam/.flagstat/;
(my $realignedBam = $bam) =~ s/\.bam/\.realigned\.bam/;
(my $realignedBai = $bam) =~ s/\.bam/\.realigned\.bai/;
(my $realignedBamBai = $bam) =~ s/\.bam/\.realigned\.bam\.bai/;
(my $realignedFlagstat = $bam) =~ s/\.bam/\.realigned\.flagstat/;
$opt{BAM_FILES}->{$sample} = $realignedBam;
print "\t$opt{OUTPUT_DIR}/$sample/mapping/$bam\n";
## Check for realigned bam file, skip sample if realigned bam file already exist.
if (-e "$opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done"){
print "\t WARNING: $opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done exists, skipping\n";
next;
}
### Create realign bash script
my $logDir = $opt{OUTPUT_DIR}."/".$sample."/logs";
my $jobID = "Realign_".$sample."_".get_job_id();
my $bashFile = $opt{OUTPUT_DIR}."/".$sample."/jobs/".$jobID.".sh";
my $jobNative = &jobNative(\%opt,"REALIGNMENT");
open REALIGN_SH,">$bashFile" or die "Couldn't create $bashFile\n";
print REALIGN_SH "\#!/bin/bash\n\n";
print REALIGN_SH ". $opt{CLUSTER_PATH}/settings.sh\n\n";
print REALIGN_SH "cd $opt{OUTPUT_DIR}/$sample/tmp \n\n";
print REALIGN_SH "module load $opt{GATK_JAVA_MODULE}\n";
print REALIGN_SH "echo \"Start indel realignment\t\" `date` \"\t$bam\t\" `uname -n` >> $logDir/$sample.log\n\n";
print REALIGN_SH "if [ -f $opt{OUTPUT_DIR}/$sample/mapping/$bam ]\n";
print REALIGN_SH "then\n";
print REALIGN_SH "\tjava -Xmx".$opt{REALIGNMENT_MASTER_MEM}."G -Djava.io.tmpdir=$opt{OUTPUT_DIR}/$sample/tmp -jar $opt{QUEUE_PATH}/Queue.jar -R $opt{GENOME} -S $opt{IAP_PATH}/$opt{REALIGNMENT_SCALA} -jobQueue $opt{REALIGNMENT_QUEUE} -nt $opt{REALIGNMENT_THREADS} -mem $opt{REALIGNMENT_MEM} -nsc $opt{REALIGNMENT_SCATTER} -mode $opt{REALIGNMENT_MODE} -jobNative \"$jobNative\" ";
if($opt{REALIGNMENT_KNOWN}) {
foreach my $knownIndelFile (@knownIndelFiles) {
if(! -e $knownIndelFile){ die"ERROR: $knownIndelFile does not exist\n" }
else { print REALIGN_SH "-known $knownIndelFile " }
}
}
if($opt{QUEUE_RETRY} eq 'yes'){
print REALIGN_SH "-retry 1 ";
}
print REALIGN_SH "-run -I $opt{OUTPUT_DIR}/$sample/mapping/$bam -jobRunner GridEngine\n";
print REALIGN_SH "else\n";
print REALIGN_SH "\techo \"ERROR: $opt{OUTPUT_DIR}/$sample/mapping/$bam does not exist.\" >&2\n";
print REALIGN_SH "fi\n\n";
close REALIGN_SH;
### Submit realign bash script
my $qsub = &qsubJava(\%opt,"REALIGNMENT_MASTER");
if ( @{$opt{RUNNING_JOBS}->{$sample}} ){
system $qsub." -o ".$logDir."/Realignment_".$sample.".out -e ".$logDir."/Realignment_".$sample.".err -N ".$jobID." -hold_jid ".join(",",@{$opt{RUNNING_JOBS}->{$sample}})." ".$bashFile;
} else {
system $qsub." -o ".$logDir."/Realignment_".$sample.".out -e ".$logDir."/Realignment_".$sample.".err -N ".$jobID." ".$bashFile;
}
### Create flagstat bash script
my $jobIDFS = "RealignFS_".$sample."_".get_job_id();
my $bashFileFS = $opt{OUTPUT_DIR}."/".$sample."/jobs/".$jobIDFS.".sh";
open REALIGNFS_SH, ">$bashFileFS" or die "cannot open file $bashFileFS \n";
print REALIGNFS_SH "cd $opt{OUTPUT_DIR}/$sample/tmp\n";
print REALIGNFS_SH "if [ -s $opt{OUTPUT_DIR}/$sample/tmp/$realignedBam ]\n";
print REALIGNFS_SH "then\n";
print REALIGNFS_SH "\t$opt{SAMBAMBA_PATH}/sambamba flagstat -t $opt{REALIGNMENT_THREADS} $opt{OUTPUT_DIR}/$sample/tmp/$realignedBam > $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat\n";
print REALIGNFS_SH "fi\n\n";
print REALIGNFS_SH "if [ -s $opt{OUTPUT_DIR}/$sample/mapping/$flagstat ] && [ -s $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat ]\n";
print REALIGNFS_SH "then\n";
print REALIGNFS_SH "\tFS1=\`grep -m 1 -P \"\\d+ \" $opt{OUTPUT_DIR}/$sample/mapping/$flagstat | awk '{{split(\$0,columns , \"+\")} print columns[1]}'\`\n";
print REALIGNFS_SH "\tFS2=\`grep -m 1 -P \"\\d+ \" $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat | awk '{{split(\$0,columns , \"+\")} print columns[1]}'\`\n";
print REALIGNFS_SH "\tif [ \$FS1 -eq \$FS2 ]\n";
print REALIGNFS_SH "\tthen\n";
print REALIGNFS_SH "\t\tmv $opt{OUTPUT_DIR}/$sample/tmp/$realignedBam $opt{OUTPUT_DIR}/$sample/mapping/$realignedBam\n";
print REALIGNFS_SH "\t\tmv $opt{OUTPUT_DIR}/$sample/tmp/$realignedBai $opt{OUTPUT_DIR}/$sample/mapping/$realignedBai\n";
print REALIGNFS_SH "\t\tcp $opt{OUTPUT_DIR}/$sample/mapping/$realignedBai $opt{OUTPUT_DIR}/$sample/mapping/$realignedBamBai\n";
print REALIGNFS_SH "\t\tmv $opt{OUTPUT_DIR}/$sample/tmp/IndelRealigner.jobreport.txt $opt{OUTPUT_DIR}/$sample/logs/IndelRealigner.jobreport.txt\n";
print REALIGNFS_SH "\t\tmv $opt{OUTPUT_DIR}/$sample/tmp/IndelRealigner.jobreport.pdf $opt{OUTPUT_DIR}/$sample/logs/IndelRealigner.jobreport.pdf\n";
print REALIGNFS_SH "\t\ttouch $opt{OUTPUT_DIR}/$sample/logs/Realignment_$sample.done\n";
print REALIGNFS_SH "\telse\n";
print REALIGNFS_SH "\t\techo \"ERROR: $opt{OUTPUT_DIR}/$sample/mapping/$flagstat and $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat do not have the same read counts\" >>../logs/Realignment_$sample.err\n";
print REALIGNFS_SH "\tfi\n";
print REALIGNFS_SH "else\n";
print REALIGNFS_SH "\techo \"ERROR: Either $opt{OUTPUT_DIR}/$sample/mapping/$flagstat or $opt{OUTPUT_DIR}/$sample/mapping/$realignedFlagstat is empty.\" >> ../logs/Realignment_$sample.err\n";
print REALIGNFS_SH "fi\n\n";
print REALIGNFS_SH "echo \"End indel realignment\t\" `date` \"\t$sample\_dedup.bam\t\" `uname -n` >> $logDir/$sample.log\n";
close REALIGNFS_SH;
### Submit flagstat bash script
$qsub = &qsubTemplate(\%opt,"FLAGSTAT");
system $qsub." -o ".$logDir."/RealignmentFS_".$sample.".out -e ".$logDir."/RealignmentFS_".$sample.".err -N ".$jobIDFS." -hold_jid ".$jobID." ".$bashFileFS;
push(@{$opt{RUNNING_JOBS}->{$sample}}, $jobID);
push(@{$opt{RUNNING_JOBS}->{$sample}}, $jobIDFS);
}
}else{
die "ERROR: Invalid REALIGNMENT_MODE $opt{REALIGNMENT_MODE} , use 'single' or 'multi'\n";
}
return \%opt;
}
############
sub get_job_id {
my $id = tmpnam();
$id=~s/\/tmp\/file//;
return $id;
}
############
1;
| CuppenResearch/IAP | IAP/realign.pm | Perl | mit | 14,390 |
package Paws::RDS::DeleteDBClusterResult;
use Moose;
has DBCluster => (is => 'ro', isa => 'Paws::RDS::DBCluster');
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::RDS::DeleteDBClusterResult
=head1 ATTRIBUTES
=head2 DBCluster => L<Paws::RDS::DBCluster>
=head2 _request_id => Str
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/RDS/DeleteDBClusterResult.pm | Perl | apache-2.0 | 361 |
package Google::Ads::AdWords::v201409::TargetingIdeaService::ResponseHeader;
use strict;
use warnings;
{ # BLOCK to scope variables
sub get_xmlns { 'https://adwords.google.com/api/adwords/o/v201409' }
__PACKAGE__->__set_name('ResponseHeader');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
SOAP::WSDL::XSD::Typelib::Element
Google::Ads::AdWords::v201409::SoapResponseHeader
);
}
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::TargetingIdeaService::ResponseHeader
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
ResponseHeader from the namespace https://adwords.google.com/api/adwords/o/v201409.
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201409::TargetingIdeaService::ResponseHeader->new($data);
Constructor. The following data structure may be passed to new():
$a_reference_to, # see Google::Ads::AdWords::v201409::SoapResponseHeader
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/TargetingIdeaService/ResponseHeader.pm | Perl | apache-2.0 | 1,054 |
#!/usr/bin/perl -w
# DESCRIPTION:
#
# Adds transaction commands: BEGIN...COMMIT every 50 000 lines
#
# AUTHOR: Andrew Krizhanovsky (http://code.google.com/p/wikokit)
# START DATE: 2010
# FINISH DATE:
# SEE:
# Let's speed up SQLite http://componavt.livejournal.com/3393.html
use strict;
no strict 'refs';
use Getopt::Std;
my( $headline, $str_today, $fn_in, $fn_out, $fn_log, $fn_err, $fsize);
my( $n_period, $n_line);
my( $log_text, $err_text);
my( $n_transaction, $add_text_start, $add_text_middle, $add_text_end);
$headline = "add_transactions.pl V0.01 (GNU-GPL) 2010 AKA MBG\n";
# --------------------------------------------------------------
# subroutine help_exit
# --------------------------------------------------------------
#wrap up in, s
sub help_exit
{
if ($#ARGV != 1){
print "\n".$headline."\n".
"Usage:\n add_transactions.exe in_dump.sql out\n".
" in_dump.sql - dump of the SQLite database\n".
"Examples: add_transactions.exe in_dump.sql out.xml";
}
if (1 != $#ARGV){
close(STDOUT); # baffle banners ;)
exit(0);
}
}
help_exit;
print "\n".$headline."\n";
print "Processing files ...\n";
# READ COMMAND LINE
# --------------------------------------------------------------
# must be two arguments
$fn_in = $ARGV[0];
$fn_log = $fn_in."_log";
$fn_err = $fn_in."_error";
$fn_out = $ARGV[1];
# number of lines (in dump file) between each commit
$n_period = 10000;
# open, copy files to buffer, close
# h - FILE HANDLE
# --------------------------------------------------------------
open (h_in,"<".$fn_in) or die ("Cannot open input file ".$fn_in);
$fsize = (stat ($fn_in))[7];
# read h_in, $text, $fsize;
# close (h_in);
# print "Read ${fn_in}.\n";
open (h_out,">".$fn_out) or die ("Cannot open output file ".$fn_out);
#my $b_table_mode = 0; # current lines belong to the CREATE TABLE SQL command
$add_text_start = "\n\nBEGIN;\n\n";
$add_text_middle = "\n\nCOMMIT;\nBEGIN;\n\n";
$add_text_end = "\n\nCOMMIT;\n\n";
print h_out $add_text_start;
$n_line = 0;
$n_transaction = 1;
LINE: while(<h_in>)
{
$n_line ++;
if (0 == $n_line % $n_period) { # $n_period-th line
$n_transaction ++;
print h_out $add_text_middle;
print ".";
}
#my($line) = $_; # file line
# chomp($line);
# remove lines with text "<redirect />"
# next LINE if $line =~ s/^\s*\<redirect\s\/\>//;
# next LINE if -1 ne index $line, "<redirect />";
# Print the line to the result file and add a newline
print h_out $_;
}
print h_out $add_text_end;
print "Number of transactions: ".$n_transaction;
EXIT:
# close output&log file
close (h_out);
print "\n";
close(STDOUT); # baffle banners ;)
| componavt/wikokit | wiwordik/mysql2sqlite/add_transactions.pl | Perl | apache-2.0 | 2,945 |
package Paws::DS::AddIpRoutesResult;
use Moose;
has _request_id => (is => 'ro', isa => 'Str');
### main pod documentation begin ###
=head1 NAME
Paws::DS::AddIpRoutesResult
=head1 ATTRIBUTES
=head2 _request_id => Str
=cut
1; | ioanrogers/aws-sdk-perl | auto-lib/Paws/DS/AddIpRoutesResult.pm | Perl | apache-2.0 | 238 |
package #
Date::Manip::TZ::afsao_00;
# Copyright (c) 2008-2013 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Mon Jun 3 12:52:59 EDT 2013
# Data version: tzdata2013c
# Code version: tzcode2013c
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our (%Dates,%LastRule);
END {
undef %Dates;
undef %LastRule;
}
our ($VERSION);
$VERSION='6.40';
END { undef $VERSION; }
%Dates = (
1 =>
[
[ [1,1,2,0,0,0],[1,1,2,0,26,56],'+00:26:56',[0,26,56],
'LMT',0,[1883,12,31,23,33,3],[1883,12,31,23,59,59],
'0001010200:00:00','0001010200:26:56','1883123123:33:03','1883123123:59:59' ],
],
1883 =>
[
[ [1883,12,31,23,33,4],[1883,12,31,22,56,32],'-00:36:32',[0,-36,-32],
'LMT',0,[1912,1,1,0,36,31],[1911,12,31,23,59,59],
'1883123123:33:04','1883123122:56:32','1912010100:36:31','1911123123:59:59' ],
],
1912 =>
[
[ [1912,1,1,0,36,32],[1912,1,1,0,36,32],'+00:00:00',[0,0,0],
'GMT',0,[9999,12,31,0,0,0],[9999,12,31,0,0,0],
'1912010100:36:32','1912010100:36:32','9999123100:00:00','9999123100:00:00' ],
],
);
%LastRule = (
);
1;
| nriley/Pester | Source/Manip/TZ/afsao_00.pm | Perl | bsd-2-clause | 1,536 |
#!/usr/bin/env perl
# Pragmas.
use strict;
use warnings;
# Modules.
use PYX::GraphViz;
# Example PYX data.
my $pyx = <<'END';
(html
(head
(title
-Title
)title
)head
(body
(div
-data
)div
)body
END
# Object.
my $obj = PYX::GraphViz->new;
# Parse.
$obj->parse($pyx);
# Output
# PNG data | tupinek/PYX-GraphViz | examples/ex1.pl | Perl | bsd-2-clause | 290 |
interface(node).
interface(leaf).
interfaceAttribute(node, render).
interfaceAttribute(leaf, which).
class(root, node).
class(leaf1, leaf).
classChild(root, childs, leaf).
classField(gensymattrib, gensymattrib) :- false.
classField(root, gensymattrib).
classField(leaf1, gensymattrib).
interfaceField(node, display).
interfaceField(node, refname).
interfaceField(leaf, display).
interfaceField(leaf, refname).
assignment(root, self, radius_step, self, childs_which_last).
assignment(root, self, radius_step, self, gensymattrib).
assignment(root, self, radius_last, self, radius_step).
assignment(root, self, radius, self, radius_step).
assignment(root, self, childs_which_step, self, gensymattrib).
assignment(root, self, childs_which_last, self, childs_which_step).
assignment(root, childs, which, self, childs_which_step).
assignment(root, self, render_step, self, childs_which_step).
assignment(root, self, render_step, self, radius).
assignment(root, self, render_step, self, gensymattrib).
assignment(root, self, render_last, self, render_step).
assignment(root, self, render, self, render_step).
assignment(gensymattrib, gensymattrib, gensymattrib, gensymattrib, gensymattrib) :- false.
classAttribute(root, radius).
classAttribute(root, childs_which_step).
classAttribute(root, radius_step).
classAttribute(root, radius_last).
classAttribute(root, childs_which_step).
classAttribute(root, childs_which_last).
classAttribute(root, render_step).
classAttribute(root, render_last).
| modulexcite/superconductor | compiler/attrib-gram-evaluator-swipl/Tutorial/output/children.pl | Perl | bsd-3-clause | 1,486 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::dell::openmanage::snmp::mode::components::fan;
use strict;
use warnings;
my %map_status = (
1 => 'other',
2 => 'unknown',
3 => 'ok',
4 => 'nonCriticalUpper',
5 => 'criticalUpper',
6 => 'nonRecoverableUpper',
7 => 'nonCriticalLower',
8 => 'criticalLower',
9 => 'nonRecoverableLower',
10 => 'failed',
);
# In MIB '10892.mib'
my $mapping = {
coolingDeviceStatus => { oid => '.1.3.6.1.4.1.674.10892.1.700.12.1.5', map => \%map_status },
coolingDeviceReading => { oid => '.1.3.6.1.4.1.674.10892.1.700.12.1.6' },
};
my $mapping2 = {
coolingDeviceLocationName => { oid => '.1.3.6.1.4.1.674.10892.1.700.12.1.8' },
};
my $oid_coolingDeviceTableEntry = '.1.3.6.1.4.1.674.10892.1.700.12.1';
sub load {
my (%options) = @_;
push @{$options{request}}, { oid => $oid_coolingDeviceTableEntry, start => $mapping->{coolingDeviceStatus}->{oid}, end => $mapping->{coolingDeviceReading}->{oid} },
{ oid => $mapping2->{coolingDeviceLocationName}->{oid} };
}
sub check {
my ($self) = @_;
$self->{output}->output_add(long_msg => "Checking fans");
$self->{components}->{fan} = {name => 'fans', total => 0, skip => 0};
return if ($self->check_exclude(section => 'fan'));
foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_coolingDeviceTableEntry}})) {
next if ($oid !~ /^$mapping->{coolingDeviceStatus}->{oid}\.(.*)$/);
my $instance = $1;
my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_coolingDeviceTableEntry}, instance => $instance);
my $result2 = $self->{snmp}->map_instance(mapping => $mapping2, results => $self->{results}->{$mapping2->{coolingDeviceLocationName}->{oid}}, instance => $instance);
next if ($self->check_exclude(section => 'fan', instance => $instance));
$self->{components}->{fan}->{total}++;
$self->{output}->output_add(long_msg => sprintf("Fan '%s' status is '%s' [instance: %s, Location: %s, reading: %s]",
$instance, $result->{coolingDeviceStatus}, $instance,
$result2->{coolingDeviceLocationName}, $result->{coolingDeviceReading}
));
my $exit = $self->get_severity(section => 'fan', value => $result->{coolingDeviceStatus});
if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
$self->{output}->output_add(severity => $exit,
short_msg => sprintf("Fan '%s' status is '%s'",
$instance, $result->{coolingDeviceStatus}));
}
if (defined($result->{coolingDeviceReading}) && $result->{coolingDeviceReading} =~ /[0-9]/) {
my ($exit, $warn, $crit, $checked) = $self->get_severity_numeric(section => 'fan', instance => $instance, value => $result->{coolingDeviceReading});
if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
$self->{output}->output_add(severity => $exit,
short_msg => sprintf("Fan '%s' speed is %s rpm", $instance, $result->{coolingDeviceReading}));
}
$self->{output}->perfdata_add(label => 'fan_' . $instance, unit => 'rpm',
value => $result->{coolingDeviceReading},
warning => $warn,
critical => $crit,
min => 0
);
}
}
}
1;
| nichols-356/centreon-plugins | hardware/server/dell/openmanage/snmp/mode/components/fan.pm | Perl | apache-2.0 | 4,512 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::hp::lefthand::snmp::mode::components::rc;
use strict;
use warnings;
use storage::hp::lefthand::snmp::mode::components::resources qw($map_status);
my $mapping = {
storageRaidDeviceName => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.4.1.2' },
storageRaidDeviceState => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.4.1.90' },
storageRaidDeviceStatus => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.4.1.91', map => $map_status },
};
my $oid_storageRaidEntry = '.1.3.6.1.4.1.9804.3.1.1.2.4.4.1';
sub load {
my ($self) = @_;
push @{$self->{request}}, { oid => $oid_storageRaidEntry };
}
sub check {
my ($self) = @_;
$self->{output}->output_add(long_msg => "Checking raid controllers");
$self->{components}->{rc} = {name => 'raid controllers', total => 0, skip => 0};
return if ($self->check_filter(section => 'rc'));
foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_storageRaidEntry}})) {
next if ($oid !~ /^$mapping->{storageRaidDeviceStatus}->{oid}\.(.*)$/);
my $instance = $1;
my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_storageRaidEntry}, instance => $instance);
next if ($self->check_filter(section => 'rc', instance => $instance));
$self->{components}->{rc}->{total}++;
$self->{output}->output_add(long_msg => sprintf("raid device controller '%s' status is '%s' [instance: %s, state: %s].",
$result->{storageRaidDeviceName}, $result->{storageRaidDeviceStatus},
$instance, $result->{storageRaidDeviceState}
));
my $exit = $self->get_severity(label => 'default', section => 'rc', value => $result->{storageRaidDeviceStatus});
if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
$self->{output}->output_add(severity => $exit,
short_msg => sprintf("raid device controller '%s' state is '%s'",
$result->{storageRaidDeviceName}, $result->{storageRaidDeviceState}));
}
}
}
1; | Shini31/centreon-plugins | storage/hp/lefthand/snmp/mode/components/rc.pm | Perl | apache-2.0 | 3,003 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::nimble::snmp::mode::globalstats;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use Digest::MD5 qw(md5_hex);
use centreon::plugins::values;
use centreon::plugins::statefile;
# Counter definitions for the 'global' group. Each key is '<order>_<label>':
# the numeric prefix fixes the evaluation/display order, the label part is
# used for the --warning-<label>/--critical-<label> options and perfdata.
# 'diff => 1' means the raw SNMP value is a monotonically increasing counter
# and the delta against the cached value is used; 'per_second => 1' divides
# that delta by the elapsed time between two runs.
my $maps_counters = {
    global => {
        '000_read'   => { set => {
                        key_values => [ { name => 'read', diff => 1 } ],
                        per_second => 1,
                        output_template => 'Read I/O : %s %s/s', output_error_template => "Read I/O : %s",
                        output_change_bytes => 1,
                        perfdatas => [
                            # min => 0 added for consistency with the other rate counters:
                            # a per-second rate derived from a counter cannot be negative.
                            { label => 'read', value => 'read_per_second', template => '%d',
                              unit => 'B/s', min => 0 },
                        ],
                    }
               },
        '001_write'   => { set => {
                        key_values => [ { name => 'write', diff => 1 } ],
                        per_second => 1,
                        output_template => 'Write I/O : %s %s/s', output_error_template => "Write I/O : %s",
                        output_change_bytes => 1,
                        perfdatas => [
                            { label => 'write', value => 'write_per_second', template => '%d',
                              unit => 'B/s', min => 0 },
                        ],
                    }
               },
        '002_read-iops'   => { set => {
                        key_values => [ { name => 'read_iops', diff => 1 } ],
                        per_second => 1,
                        output_template => 'Read IOPs : %.2f', output_error_template => "Read IOPs : %s",
                        perfdatas => [
                            { label => 'read_iops', value => 'read_iops_per_second',  template => '%.2f',
                              unit => 'iops', min => 0 },
                        ],
                    }
               },
        '003_write-iops'   => { set => {
                        key_values => [ { name => 'write_iops', diff => 1 } ],
                        per_second => 1,
                        output_template => 'Write IOPs : %.2f', output_error_template => "Write IOPs : %s",
                        perfdatas => [
                            { label => 'write_iops', value => 'write_iops_per_second', template => '%.2f',
                              unit => 'iops', min => 0 },
                        ],
                    }
               },
        '004_read-time'   => { set => {
                        key_values => [ { name => 'read_time', diff => 1 } ],
                        output_template => 'Read Time : %.3f s', output_error_template => "Read Time : %s",
                        perfdatas => [
                            { label => 'read_time', value => 'read_time_absolute', template => '%.3f',
                              unit => 's', min => 0 },
                        ],
                    }
               },
        '005_write-time'   => { set => {
                        key_values => [ { name => 'write_time', diff => 1 } ],
                        output_template => 'Write Time : %.3f s', output_error_template => "Write Time : %s",
                        perfdatas => [
                            { label => 'write_time', value => 'write_time_absolute', template => '%.3f',
                              unit => 's', min => 0 },
                        ],
                    }
               },
    },
};
# Constructor: registers plugin options and builds one
# centreon::plugins::values object per declared counter.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments => {
        "filter-counters:s" => { name => 'filter_counters' },
    });
    $self->{statefile_value} = centreon::plugins::statefile->new(%options);

    for my $group (('global')) {
        for my $counter_key (keys %{$maps_counters->{$group}}) {
            my $entry = $maps_counters->{$group}->{$counter_key};
            # Keys look like '000_read': numeric ordering prefix + option label.
            my ($order, $label) = split /_/, $counter_key;
            # Register warning/critical thresholds unless explicitly disabled.
            if (!defined($entry->{threshold}) || $entry->{threshold} != 0) {
                $options{options}->add_options(arguments => {
                    'warning-' . $label . ':s'  => { name => 'warning-' . $label },
                    'critical-' . $label . ':s' => { name => 'critical-' . $label },
                });
            }
            $entry->{obj} = centreon::plugins::values->new(
                statefile => $self->{statefile_value},
                output    => $self->{output},
                perfdata  => $self->{perfdata},
                label     => $label
            );
            $entry->{obj}->set(%{$entry->{set}});
        }
    }

    return $self;
}
# Validate command-line options and propagate them to every counter
# object and to the statefile backend.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    for my $group (('global')) {
        for my $counter_key (keys %{$maps_counters->{$group}}) {
            $maps_counters->{$group}->{$counter_key}->{obj}->init(option_results => $self->{option_results});
        }
    }

    $self->{statefile_value}->check_options(%options);
}
# Evaluate every counter of the 'global' group against the collected SNMP
# values, emit perfdata, and build the plugin output. Counters are processed
# in the order fixed by their numeric key prefix (sorted keys).
sub run_global {
    my ($self, %options) = @_;
    
    # long_msg accumulates every counter's output; short_msg only those whose
    # threshold check is not OK. The *_append variables hold the ', '
    # separator once a first fragment has been written.
    my ($short_msg, $short_msg_append, $long_msg, $long_msg_append) = ('', '', '', '');
    my @exits;
    foreach (sort keys %{$maps_counters->{global}}) {
        # Honor --filter-counters: skip counters whose key does not match.
        if (defined($self->{option_results}->{filter_counters}) && $self->{option_results}->{filter_counters} ne '' &&
            $_ !~ /$self->{option_results}->{filter_counters}/) {
            $self->{output}->output_add(long_msg => "skipping counter $_", debug => 1);
            next;
        }
        my $obj = $maps_counters->{global}->{$_}->{obj};
        $obj->set(instance => 'global');
        my ($value_check) = $obj->execute(new_datas => $self->{new_datas},
                                          values => $self->{global});

        # Non-zero check means the value could not be computed (e.g. first
        # run with no cached data): record the error text and move on.
        if ($value_check != 0) {
            $long_msg .= $long_msg_append . $obj->output_error();
            $long_msg_append = ', ';
            next;
        }
        my $exit2 = $obj->threshold_check();
        push @exits, $exit2;

        my $output = $obj->output();
        $long_msg .= $long_msg_append . $output;
        $long_msg_append = ', ';
        
        # Only non-OK counters contribute to the short (alerting) message.
        if (!$self->{output}->is_status(litteral => 1, value => $exit2, compare => 'ok')) {
            $short_msg .= $short_msg_append . $output;
            $short_msg_append = ', ';
        }
        
        $obj->perfdata();
    }

    # Overall severity is the most critical of all individual counter checks;
    # on any non-OK status only the offending counters are shown.
    my $exit = $self->{output}->get_most_critical(status => [ @exits ]);
    if (!$self->{output}->is_status(litteral => 1, value => $exit, compare => 'ok')) {
        $self->{output}->output_add(severity => $exit,
                                    short_msg => "$short_msg"
                                    );
    } else {
        $self->{output}->output_add(short_msg => "$long_msg");
    }
}
# Mode entry point: fetch the SNMP data, replay the cached values for rate
# computation, evaluate counters, persist the new snapshot, and emit output.
sub run {
    my ($self, %options) = @_;
    $self->manage_selection(%options);
    $self->{new_datas} = {};
    # Load the previous run's raw values; deltas are computed against them.
    $self->{statefile_value}->read(statefile => $self->{cache_name});
    $self->{new_datas}->{last_timestamp} = time();
    $self->run_global();
    # Save this run's values for the next invocation.
    $self->{statefile_value}->write(data => $self->{new_datas});
    $self->{output}->display();
    $self->{output}->exit();
}
# Fetch the raw I/O statistics from the device's global-stats SNMP subtree
# and populate $self->{global} with the metrics run_global() expects.
# Time counters are reported in microseconds and converted to seconds.
sub manage_selection {
    my ($self, %options) = @_;

    if ($options{snmp}->is_snmpv1()) {
        $self->{output}->add_option_msg(short_msg => "Need to use SNMP v2c or v3.");
        $self->{output}->option_exit();
    }

    # Cache-file name embeds host/port/mode and the counter filter so that
    # different invocations never share delta state.
    $self->{cache_name} = "nimble_" . $options{snmp}->get_hostname() . '_' . $options{snmp}->get_port() . '_' . $self->{mode} . '_' .
        (defined($self->{option_results}->{filter_counters}) ? md5_hex($self->{option_results}->{filter_counters}) : md5_hex('all'));

    my $oid_globalStats = '.1.3.6.1.4.1.37447.1.3';
    my %metric_map = (
        read       => { oid => '.1.3.6.1.4.1.37447.1.3.8.0' },
        read_iops  => { oid => '.1.3.6.1.4.1.37447.1.3.2.0' },
        read_time  => { oid => '.1.3.6.1.4.1.37447.1.3.6.0',  divisor => 1000000 },
        write      => { oid => '.1.3.6.1.4.1.37447.1.3.10.0' },
        write_iops => { oid => '.1.3.6.1.4.1.37447.1.3.4.0' },
        write_time => { oid => '.1.3.6.1.4.1.37447.1.3.7.0',  divisor => 1000000 },
    );

    my $result = $options{snmp}->get_table(oid => $oid_globalStats,
                                           nothing_quit => 1);

    $self->{global} = {};
    for my $metric (keys %metric_map) {
        my $raw = $result->{ $metric_map{$metric}->{oid} };
        if (!defined($raw)) {
            $self->{global}->{$metric} = undef;
            next;
        }
        my $divisor = $metric_map{$metric}->{divisor};
        $self->{global}->{$metric} = defined($divisor) ? $raw / $divisor : $raw;
    }
}
1;
__END__
=head1 MODE
Check global statistics of storage.
=over 8
=item B<--warning-*>
Threshold warning.
Can be: 'read', 'read-iops', 'write', 'write-iops',
'read-time', 'write-time'.
=item B<--critical-*>
Threshold critical.
Can be: 'read', 'read-iops', 'write', 'write-iops',
'read-time', 'write-time'.
=item B<--filter-counters>
Only display some counters (regexp can be used).
Example: --filter-counters='-iops$'
=back
=cut
| nichols-356/centreon-plugins | storage/nimble/snmp/mode/globalstats.pm | Perl | apache-2.0 | 10,063 |
% Fuzzy-logic example for the Ciao 'fuzzy' package.  Each predicate carries
% an extra truth-degree argument; ':~ prod' combines the degrees of the
% subgoals by multiplication (product t-norm).
:- module(example1,_,[fuzzy]).
% p/3 has two alternative fuzzy definitions.
p(X,Y,_Mup1) :~ prod q(X,_Muq1),r(Y,_Mur).
p(X,Y,_Mup2) :~ prod q(X,_Muq2),s(Y,_Mus).
% Facts with explicit truth degrees.
q(m,0.3) :~ .
% r/2 delegates its truth degree to t/2.
r(X,_Mur):~ t(X,_Mut).
s(n,1) :~ .
t(n,0.4) :~ .
#!perl
# Copyright 2015 Jeffrey Kegler
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
use 5.010;
use strict;
use warnings;
use English qw( -no_match_vars );
use Fatal qw(open close);

if ( scalar @ARGV != 0 ) {
    die("usage: $PROGRAM_NAME < marpa.w > private.h");
}

# Slurp the entire Cweb source so the multi-line prototype regex can span
# the declaration up to the opening brace of the definition.
my $file = do { local $RS = undef; <STDIN>; };

# Emit a forward declaration for every function whose definition starts
# with $keyword, prefixing it (e.g. 'static ' or 'static inline ').
# Factored out of two previously duplicated loops.
sub emit_declarations {
    my ( $keyword, $prefix ) = @_;
    for my $prototype ( $file =~ m/^$keyword \s (.*?) \s* ^[{]/gxms ) {
        $prototype =~ s/[@][,]//g;    # Remove Cweb spacing
        say $prefix . $prototype . q{;};
    }
    return;
}

emit_declarations( 'PRIVATE_NOT_INLINE', 'static ' );
emit_declarations( 'PRIVATE',            'static inline ' );
| pczarn/kollos | libmarpa/work/dev/w2private_h.pl | Perl | mit | 1,656 |
=pod
=head1 NAME
SSL_extension_supported,
SSL_CTX_add_client_custom_ext, SSL_CTX_add_server_custom_ext,
custom_ext_add_cb, custom_ext_free_cb, custom_ext_parse_cb
- custom TLS extension handling
=head1 SYNOPSIS
#include <openssl/ssl.h>
int SSL_CTX_add_client_custom_ext(SSL_CTX *ctx, unsigned int ext_type,
custom_ext_add_cb add_cb,
custom_ext_free_cb free_cb, void *add_arg,
custom_ext_parse_cb parse_cb,
void *parse_arg);
int SSL_CTX_add_server_custom_ext(SSL_CTX *ctx, unsigned int ext_type,
custom_ext_add_cb add_cb,
custom_ext_free_cb free_cb, void *add_arg,
custom_ext_parse_cb parse_cb,
void *parse_arg);
int SSL_extension_supported(unsigned int ext_type);
typedef int (*custom_ext_add_cb)(SSL *s, unsigned int ext_type,
const unsigned char **out,
size_t *outlen, int *al,
void *add_arg);
typedef void (*custom_ext_free_cb)(SSL *s, unsigned int ext_type,
const unsigned char *out,
void *add_arg);
typedef int (*custom_ext_parse_cb)(SSL *s, unsigned int ext_type,
const unsigned char *in,
size_t inlen, int *al,
void *parse_arg);
=head1 DESCRIPTION
SSL_CTX_add_client_custom_ext() adds a custom extension for a TLS client
with extension type B<ext_type> and callbacks B<add_cb>, B<free_cb> and
B<parse_cb>.
SSL_CTX_add_server_custom_ext() adds a custom extension for a TLS server
with extension type B<ext_type> and callbacks B<add_cb>, B<free_cb> and
B<parse_cb>.
In both cases the extension type must not be handled by OpenSSL internally
or an error occurs.
SSL_extension_supported() returns 1 if the extension B<ext_type> is handled
internally by OpenSSL and 0 otherwise.
=head1 EXTENSION CALLBACKS
The callback B<add_cb> is called to send custom extension data to be
included in ClientHello for TLS clients or ServerHello for servers. The
B<ext_type> parameter is set to the extension type which will be added and
B<add_arg> to the value set when the extension handler was added.
If the application wishes to include the extension B<ext_type> it should
set B<*out> to the extension data, set B<*outlen> to the length of the
extension data and return 1.
If the B<add_cb> does not wish to include the extension it must return 0.
If B<add_cb> returns -1 a fatal handshake error occurs using the TLS
alert value specified in B<*al>.
For clients (but not servers) if B<add_cb> is set to NULL a zero length
extension is added for B<ext_type>.
For clients every registered B<add_cb> is always called to see if the
application wishes to add an extension to ClientHello.
For servers every registered B<add_cb> is called once if and only if the
corresponding extension was received in ClientHello to see if the application
wishes to add the extension to ServerHello. That is, if no corresponding extension
was received in ClientHello then B<add_cb> will not be called.
If an extension is added (that is B<add_cb> returns 1) B<free_cb> is called
(if it is set) with the value of B<out> set by the add callback. It can be
used to free up any dynamic extension data set by B<add_cb>. Since B<out> is
constant (to permit use of constant data in B<add_cb>) applications may need to
cast away const to free the data.
The callback B<parse_cb> receives data for TLS extensions. For TLS clients
the extension data will come from ServerHello and for TLS servers it will
come from ClientHello.
The extension data consists of B<inlen> bytes in the buffer B<in> for the
extension B<extension_type>.
If the B<parse_cb> considers the extension data acceptable it must return
1. If it returns 0 or a negative value a fatal handshake error occurs
using the TLS alert value specified in B<*al>.
The buffer B<in> is a temporary internal buffer which will not be valid after
the callback returns.
=head1 NOTES
The B<add_arg> and B<parse_arg> parameters can be set to arbitrary values
which will be passed to the corresponding callbacks. They can, for example,
be used to store the extension data received in a convenient structure or
pass the extension data to be added or freed when adding extensions.
The B<ext_type> parameter corresponds to the B<extension_type> field of
RFC5246 et al. It is B<not> a NID.
If the same custom extension type is received multiple times a fatal
B<decode_error> alert is sent and the handshake aborts. If a custom extension
is received in ServerHello which was not sent in ClientHello a fatal
B<unsupported_extension> alert is sent and the handshake is aborted. The
ServerHello B<add_cb> callback is only called if the corresponding extension
was received in ClientHello. This is compliant with the TLS specifications.
This behaviour ensures that each callback is called at most once and that
an application can never send unsolicited extensions.
=head1 RETURN VALUES
SSL_CTX_add_client_custom_ext() and SSL_CTX_add_server_custom_ext() return 1 for
success and 0 for failure. A failure can occur if an attempt is made to
add the same B<ext_type> more than once, if an attempt is made to use an
extension type handled internally by OpenSSL or if an internal error occurs
(for example a memory allocation failure).
SSL_extension_supported() returns 1 if the extension B<ext_type> is handled
internally by OpenSSL and 0 otherwise.
=head1 COPYRIGHT
Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| openweave/openweave-core | third_party/openssl/openssl/doc/ssl/SSL_extension_supported.pod | Perl | apache-2.0 | 6,084 |
package DDG::Goodie::IsAwesome::zekiel;
# ABSTRACT: A simple goodie by zekiel

use DDG::Goodie;
use strict;

zci answer_type => "is_awesome_zekiel";
zci is_cached   => 1;

triggers start => "duckduckhack zekiel", "zekiel duckduckhack";

handle remainder => sub {
    my $remainder = $_;

    # Answer only when the trigger phrase appears on its own,
    # i.e. nothing follows it in the query.
    return if $remainder;

    return "zekiel is awesome and has successfully completed the DuckDuckHack Goodie tutorial!";
};

1;
| aleksandar-todorovic/zeroclickinfo-goodies | lib/DDG/Goodie/IsAwesome/zekiel.pm | Perl | apache-2.0 | 384 |
# Create global symbol declarations, transfer vector, and
# linker options files for PerlShr.
#
# Input:
# $cflags - command line qualifiers passed to cc when preprocesing perl.h
# Note: A rather simple-minded attempt is made to restore quotes to
# a /Define clause - use with care.
# $objsuffix - file type (including '.') used for object files.
# $libperl - Perl object library.
# $extnames - package names for static extensions (used to generate
# linker options file entries for boot functions)
# $rtlopt - name of options file specifying RTLs to which PerlShr.Exe
# must be linked
#
# Output:
# PerlShr_Attr.Opt - linker options file which speficies that global vars
# be placed in NOSHR,WRT psects. Use when linking any object files
# against PerlShr.Exe, since cc places global vars in SHR,WRT psects
# by default.
# PerlShr_Bld.Opt - declares universal symbols for PerlShr.Exe
# Perlshr_Gbl*.Mar, Perlshr_Gbl*.Obj (VAX only) - declares global symbols
# for global vars (done here because gcc can't globaldef) and creates
# transfer vectors for routines on a VAX.
# PerlShr_Gbl.Opt (VAX only) - list of PerlShr_Gbl*.Obj, used for input
# to the linker when building PerlShr.Exe.
#
# To do:
# - figure out a good way to collect global vars in one psect, given that
# we can't use globaldef because of gcc.
# - then, check for existing files and preserve symbol and transfer vector
# order for upward compatibility
# - then, add GSMATCH to options file - but how do we insure that new
# library has everything old one did
# (i.e. /Define=DEBUGGING,EMBED,MULTIPLICITY)?
#
# Author: Charles Bailey bailey@newman.upenn.edu
require 5.000;
# Debug verbosity is controlled by the GEN_SHRFLS_DEBUG environment
# variable; higher values produce progressively noisier trace output.
$debug = $ENV{'GEN_SHRFLS_DEBUG'};
print "gen_shrfls.pl Rev. 18-Dec-2003\n" if $debug;
# '-f file' mode: read the '|'-separated argument list from a file
# instead of the command line.
if ($ARGV[0] eq '-f') {
  open(INP,$ARGV[1]) or die "Can't read input file $ARGV[1]: $!\n";
  print "Input taken from file $ARGV[1]\n" if $debug;
  @ARGV = ();
  while (<INP>) {
    chomp;
    push(@ARGV,split(/\|/,$_));
  }
  close INP;
  print "Read input data | ",join(' | ',@ARGV)," |\n" if $debug > 1;
}
$cc_cmd = shift @ARGV;
# Someday, we'll have $GetSyI built into perl . . .
# Probe the VMS hardware model via DCL to distinguish VAX and IA64.
$isvax = `\$ Write Sys\$Output \(F\$GetSyI(\"HW_MODEL\") .LE. 1024 .AND. F\$GetSyI(\"HW_MODEL\") .GT. 0\)`;
chomp $isvax;
print "\$isvax: \\$isvax\\\n" if $debug;
$isi64 = `\$ Write Sys\$Output \(F\$GetSyI(\"HW_MODEL\") .GE. 4096)`;
chomp $isi64;
print "\$isi64: \\$isi64\\\n" if $debug;
print "Input \$cc_cmd: \\$cc_cmd\\\n" if $debug;
# A cc command starting with '~~' means "don't run cc; use an existing
# preprocessor output file instead" (see the else branch below).
$docc = ($cc_cmd !~ /^~~/);
print "\$docc = $docc\n" if $debug;
if ($docc) {
  if (-f 'perl.h') { $dir = '[]'; }
  elsif (-f '[-]perl.h') { $dir = '[-]'; }
  else { die "$0: Can't find perl.h\n"; }
  $use_threads = $use_mymalloc = $case_about_case = $debugging_enabled = 0;
  $hide_mymalloc = $isgcc = $use_perlio = 0;
  # Go see what is enabled in config.sh
  $config = $dir . "config.sh";
  open CONFIG, "< $config";
  while(<CONFIG>) {
    $use_threads++ if /usethreads='(define|yes|true|t|y|1)'/i;
    $use_mymalloc++ if /usemymalloc='(define|yes|true|t|y|1)'/i;
    $care_about_case++ if /d_vms_case_sensitive_symbols='(define|yes|true|t|y|1)'/i;
    $debugging_enabled++ if /usedebugging_perl='(define|yes|true|t|y|1)'/i;
    $hide_mymalloc++ if /embedmymalloc='(define|yes|true|t|y|1)'/i;
    $isgcc++ if /gccversion='[^']/;
    $use_perlio++ if /useperlio='(define|yes|true|t|y|1)'/i;
  }
  close CONFIG;
  # put quotes back onto defines - they were removed by DCL on the way in
  if (($prefix,$defines,$suffix) =
      ($cc_cmd =~ m#(.*)/Define=(.*?)([/\s].*)#i)) {
    $defines =~ s/^\((.*)\)$/$1/;
    $debugging_enabled ||= $defines =~ /\bDEBUGGING\b/;
    @defines = split(/,/,$defines);
    $cc_cmd = "$prefix/Define=(" . join(',',grep($_ = "\"$_\"",@defines))
              . ')' . $suffix;
  }
  print "Filtered \$cc_cmd: \\$cc_cmd\\\n" if $debug;
  # check for gcc - if present, we'll need to use MACRO hack to
  # define global symbols for shared variables
  print "\$isgcc: $isgcc\n" if $debug;
  print "\$debugging_enabled: $debugging_enabled\n" if $debug;
}
else {
  # '~~' form: ~~<junk>~~<cpp output file>~~<original cc command>
  ($junk,$junk,$cpp_file,$cc_cmd) = split(/~~/,$cc_cmd,4);
  $isgcc = $cc_cmd =~ /case_hack/i
           or 0; # for nice debug output
  $debugging_enabled = $cc_cmd =~ /\bdebugging\b/i;
  print "\$isgcc: \\$isgcc\\\n" if $debug;
  print "\$debugging_enabled: \\$debugging_enabled\\\n" if $debug;
  print "Not running cc, preprocesor output in \\$cpp_file\\\n" if $debug;
}
# Remaining positional arguments (see the header comment of this file).
$objsuffix = shift @ARGV;
print "\$objsuffix: \\$objsuffix\\\n" if $debug;
$dbgprefix = shift @ARGV;
print "\$dbgprefix: \\$dbgprefix\\\n" if $debug;
$olbsuffix = shift @ARGV;
print "\$olbsuffix: \\$olbsuffix\\\n" if $debug;
$libperl = "${dbgprefix}libperl$olbsuffix";
$extnames = shift @ARGV;
print "\$extnames: \\$extnames\\\n" if $debug;
$rtlopt = shift @ARGV;
print "\$rtlopt: \\$rtlopt\\\n" if $debug;
# Extract a global variable name from a (preprocessed) declaration line and
# record it in %vars, or %cvars when declared EXTCONST.  The s/// chain
# strips initializers, array bounds and trailing punctuation so that the
# identifier ends up as the last word on the line; the order of the
# substitutions matters.
sub scan_var {
  my($line) = @_;
  my($const) = $line =~ /^EXTCONST/;
  print "\tchecking for global variable\n" if $debug > 1;
  $line =~ s/\s*EXT/EXT/;
  $line =~ s/INIT\s*\(.*\)//;
  $line =~ s/\[.*//;
  $line =~ s/=.*//;
  $line =~ s/\W*;?\s*$//;
  $line =~ s/\W*\)\s*\(.*$//; # closing paren for args stripped in previous stmt
  print "\tfiltered to \\$line\\\n" if $debug > 1;
  if ($line =~ /(\w+)$/) {
    print "\tvar name is \\$1\\" . ($const ? ' (const)' : '') . "\n" if $debug > 1;
    if ($const) { $cvars{$1}++; }
    else { $vars{$1}++; }
  }
}
# Scan a preprocessed source fragment for function declarations and record
# each routine name in %fcns.  Known non-exportable names (main,
# perl_init_ext, sizeof, ...) are deliberately skipped.
sub scan_func {
  # Fix: use the scalar element $_[0]; the original '@_[0]' was a
  # one-element array slice (same value, but warns under 'use warnings').
  my @lines = split /;/, $_[0];
  for my $line (@lines) {
    print "\tchecking for global routine\n" if $debug > 1;
    # Strip one leading return-type keyword so the routine name becomes the
    # first word followed by an open paren.
    $line =~ s/\b(IV|Off_t|Size_t|SSize_t|void|int)\b//i;
    if ( $line =~ /(\w+)\s*\(/ ) {
      print "\troutine name is \\$1\\\n" if $debug > 1;
      if ($1 eq 'main' || $1 eq 'perl_init_ext' || $1 eq '__attribute__format__'
          || $1 eq 'sizeof' || (($1 eq 'Perl_stashpv_hvname_match') && ! $use_threads)) {
        print "\tskipped\n" if $debug > 1;
      }
      else { $fcns{$1}++ }
    }
  }
}
# Go add some right up front if we need 'em
# (perl's own allocator entry points must always be exported when
# usemymalloc is enabled; they never appear in the scanned prototypes).
if ($use_mymalloc) {
  $fcns{'Perl_malloc'}++;
  $fcns{'Perl_calloc'}++;
  $fcns{'Perl_realloc'}++;
  $fcns{'Perl_mfree'}++;
}
$used_expectation_enum = $used_opcode_enum = 0; # avoid warnings
if ($docc) {
  # Build a scratch wrapper source that includes every header whose
  # declarations we need to scan, then pipe it through the C preprocessor.
  1 while unlink 'perlincludes.tmp';
  END { 1 while unlink 'perlincludes.tmp'; } # and clean up after
  open(PERLINC, '>perlincludes.tmp') or die "Couldn't open 'perlincludes.tmp' $!";
  print PERLINC qq/#include "${dir}perl.h"\n/;
  print PERLINC qq/#include "${dir}perlapi.h"\n/;
  print PERLINC qq/#include "${dir}perliol.h"\n/ if $use_perlio;
  print PERLINC qq/#include "${dir}regcomp.h"\n/;
  close PERLINC;
  $preprocess_list = 'perlincludes.tmp';
  open(CPP,"${cc_cmd}/NoObj/PreProc=Sys\$Output $preprocess_list|")
    or die "$0: Can't preprocess $preprocess_list: $!\n";
}
else {
  # Pre-existing preprocessor output was supplied via the '~~' argument.
  open(CPP,"$cpp_file") or die "$0: Can't read preprocessed file $cpp_file: $!\n";
}
# Headers whose declarations should be scanned for exportable symbols.
%checkh = map { $_,1 } qw( bytecode byterun intrpvar perlapi perlio perliol
                           perlvars proto regcomp thrdvar thread );
$ckfunc = 0;
LINE: while (<CPP>) {
  # The '..' flip-flop ranges track which header's text we are inside,
  # based on the '# <line> "<file>"' markers left by the preprocessor.
  while (/^#.*vmsish\.h/i .. /^#.*perl\.h/i) {
    while (/__VMS_PROTOTYPES__/i .. /__VMS_SEPYTOTORP__/i) {
      print "vms_proto>> $_" if $debug > 2;
      if (/^\s*EXT(CONST|\s+)/) { &scan_var($_); }
      else { &scan_func($_); }
      last LINE unless defined($_ = <CPP>);
    }
    print "vmsish.h>> $_" if $debug > 2;
    if (/^\s*EXT(CONST|\s+)/) { &scan_var($_); }
    last LINE unless defined($_ = <CPP>);
  }
  while (/^#.*opcode\.h/i .. /^#.*perl\.h/i) {
    print "opcode.h>> $_" if $debug > 2;
    if (/^OP \*\s/) { &scan_func($_); }
    if (/^\s*EXT(CONST|\s+)/) { &scan_var($_); }
    last LINE unless defined($_ = <CPP>);
  }
  # Check for transition to new header file
  if (/^# \d+ "(\S+)"/) {
    my $spec = $1;
    # Pull name from library module or header filespec
    $spec =~ /^(\w+)$/ or $spec =~ /(\w+)\.h/i;
    my $name = lc $1;
    $ckfunc = exists $checkh{$name} ? 1 : 0;
    $scanname = $name if $ckfunc;
    print "Header file transition: ckfunc = $ckfunc for $name.h\n" if $debug > 1;
  }
  # Inside an interesting header, classify each line as variable or
  # function declaration; elsewhere, only pick up EXT/EXTCONST variables.
  if ($ckfunc) {
    print "$scanname>> $_" if $debug > 2;
    if (/^\s*EXT(CONST|\s+)/) { &scan_var($_); }
    else { &scan_func($_); }
  }
  else {
    print $_ if $debug > 3 && ($debug > 5 || length($_));
    if (/^\s*EXT(CONST|\s+)/) { &scan_var($_); }
  }
}
close CPP;
# Oddball symbols listed after __END__ ('name=vars' or 'name=funcs') get
# the appropriate PL_/Perl_ prefix and are added to the export lists.
while (<DATA>) {
  next if /^#/;
  s/\s+#.*\n//;
  next if /^\s*$/;
  ($key,$array) = split('=',$_);
  if ($array eq 'vars') { $key = "PL_$key"; }
  else { $key = "Perl_$key"; }
  print "Adding $key to \%$array list\n" if $debug > 1;
  ${$array}{$key}++;
}
if ($debugging_enabled and $isgcc) { $vars{'colors'}++ }
# Each statically-linked extension needs its boot_<pkg> routine exported.
foreach (split /\s+/, $extnames) {
  my($pkgname) = $_;
  $pkgname =~ s/::/__/g;
  $fcns{"boot_$pkgname"}++;
  print "Adding boot_$pkgname to \%fcns (for extension $_)\n" if $debug;
}
# Eventually, we'll check against existing copies here, so we can add new
# symbols to an existing options file in an upwardly-compatible manner.
$marord++;
open(OPTBLD,">${dir}${dbgprefix}perlshr_bld.opt")
  or die "$0: Can't write to ${dir}${dbgprefix}perlshr_bld.opt: $!\n";
if ($isvax) {
  # On VAX, global symbols and the transfer vector are emitted as MACRO
  # source (PerlShr_Gbl*.Mar) rather than SYMBOL_VECTOR options.
  open(MAR,">${dir}perlshr_gbl${marord}.mar")
    or die "$0: Can't write to ${dir}perlshr_gbl${marord}.mar: $!\n";
  print MAR "\t.title perlshr_gbl$marord\n";
}
unless ($isgcc) {
  # DECC: place all globals in dedicated psects (IA64 has no PIC attr).
  if ($isi64) {
    print OPTBLD "PSECT_ATTR=\$GLOBAL_RO_VARS,NOEXE,RD,NOWRT,SHR\n";
    print OPTBLD "PSECT_ATTR=\$GLOBAL_RW_VARS,NOEXE,RD,WRT,NOSHR\n";
  }
  else {
    print OPTBLD "PSECT_ATTR=\$GLOBAL_RO_VARS,PIC,NOEXE,RD,NOWRT,SHR\n";
    print OPTBLD "PSECT_ATTR=\$GLOBAL_RW_VARS,PIC,NOEXE,RD,WRT,NOSHR\n";
  }
}
print OPTBLD "case_sensitive=yes\n" if $care_about_case;
foreach $var (sort (keys %vars,keys %cvars)) {
  if ($isvax) { print OPTBLD "UNIVERSAL=$var\n"; }
  else { print OPTBLD "SYMBOL_VECTOR=($var=DATA)\n"; }
  # This hack brought to you by the lack of a globaldef in gcc.
  if ($isgcc) {
    if ($count++ > 200) { # max 254 psects/file
      print MAR "\t.end\n";
      close MAR;
      $marord++;
      open(MAR,">${dir}perlshr_gbl${marord}.mar")
        or die "$0: Can't write to ${dir}perlshr_gbl${marord}.mar: $!\n";
      print MAR "\t.title perlshr_gbl$marord\n";
      $count = 0;
    }
    print MAR "\t.psect ${var},long,pic,ovr,rd,wrt,noexe,noshr\n";
    print MAR "\t${var}:: .blkl 1\n";
  }
}
# VAX: build the transfer vector, one entry per exported routine.
print MAR "\t.psect \$transfer_vec,pic,rd,nowrt,exe,shr\n" if ($isvax);
foreach $func (sort keys %fcns) {
  if ($isvax) {
    print MAR "\t.transfer $func\n";
    print MAR "\t.mask $func\n";
    print MAR "\tjmp G\^${func}+2\n";
  }
  else { print OPTBLD "SYMBOL_VECTOR=($func=PROCEDURE)\n"; }
}
if ($isvax) {
  print MAR "\t.end\n";
  close MAR;
}
# PerlShr_Attr.Opt: linker directives used when linking *against* PerlShr.
# Only gcc builds need explicit psect attributes for the shared variables.
open(OPTATTR,">${dir}perlshr_attr.opt")
  or die "$0: Can't write to ${dir}perlshr_attr.opt: $!\n";
if ($isgcc) {
  foreach $var (sort keys %cvars) {
    print OPTATTR "PSECT_ATTR=${var},PIC,OVR,RD,NOEXE,NOWRT,SHR\n";
  }
  foreach $var (sort keys %vars) {
    print OPTATTR "PSECT_ATTR=${var},PIC,OVR,RD,NOEXE,WRT,NOSHR\n";
  }
}
else {
  print OPTATTR "! No additional linker directives are needed when using DECC\n";
}
close OPTATTR;
$incstr = 'PERL,GLOBALS';
if ($isvax) {
  # Generate a DCL driver procedure that assembles the PerlShr_Gbl*.Mar
  # files and inserts the objects into the Perl object library.
  $drvrname = "Compile_shrmars.tmp_".time;
  open (DRVR,">$drvrname") or die "$0: Can't write to $drvrname: $!\n";
  print DRVR "\$ Set NoOn\n";
  print DRVR "\$ Delete/NoLog/NoConfirm $drvrname;\n";
  print DRVR "\$ old_proc_vfy = F\$Environment(\"VERIFY_PROCEDURE\")\n";
  print DRVR "\$ old_img_vfy = F\$Environment(\"VERIFY_IMAGE\")\n";
  print DRVR "\$ MCR $^X -e \"\$ENV{'LIBPERL_RDT'} = (stat('$libperl'))[9]\"\n";
  print DRVR "\$ Set Verify\n";
  print DRVR "\$ If F\$Search(\"$libperl\").eqs.\"\" Then Library/Object/Create $libperl\n";
  do {
    push(@symfiles,"perlshr_gbl$marord");
    print DRVR "\$ Macro/NoDebug/Object=PerlShr_Gbl${marord}$objsuffix PerlShr_Gbl$marord.Mar\n";
    print DRVR "\$ Library/Object/Replace/Log $libperl PerlShr_Gbl${marord}$objsuffix\n";
  } while (--$marord);
  # We had to have a working miniperl to run this program; it's probably the
  # one we just built.  It depended on LibPerl, which will be changed when
  # the PerlShr_Gbl* modules get inserted, so miniperl will be out of date,
  # and so, therefore, will all of its dependents . . .
  # We touch LibPerl here so it'll be back 'in date', and we won't rebuild
  # miniperl etc., and therefore LibPerl, the next time we invoke MM[KS].
  print DRVR "\$ old_proc_vfy = F\$Verify(old_proc_vfy,old_img_vfy)\n";
  print DRVR "\$ MCR $^X -e \"utime 0, \$ENV{'LIBPERL_RDT'}, '$libperl'\"\n";
  close DRVR;
}
# Initial hack to permit building of compatible shareable images for a
# given version of Perl.
if ($ENV{PERLSHR_USE_GSMATCH}) {
  if ($ENV{PERLSHR_USE_GSMATCH} eq 'INCLUDE_COMPILE_OPTIONS') {
    # Build up a major ID. Since it can only be 8 bits, we encode the version
    # number in the top four bits and use the bottom four for build options
    # that'll cause incompatibilities
    ($ver, $sub) = $] =~ /\.(\d\d\d)(\d\d)/;
    $ver += 0; $sub += 0;
    $gsmatch = ($sub >= 50) ? "equal" : "lequal"; # Force an equal match for
                                                  # dev, but be more forgiving
                                                  # for releases
    $ver *=16;
    $ver += 8 if $debugging_enabled; # If DEBUGGING is set
    $ver += 4 if $use_threads; # if we're threaded
    $ver += 2 if $use_mymalloc; # if we're using perl's malloc
    print OPTBLD "GSMATCH=$gsmatch,$ver,$sub\n";
  }
  else {
    # Simple scheme: encode the Perl version into major/minor match IDs.
    my $major = int($] * 1000) & 0xFF; # range 0..255
    my $minor = int(($] * 1000 - $major) * 100 + 0.5) & 0xFF; # range 0..255
    print OPTBLD "GSMATCH=LEQUAL,$major,$minor\n";
  }
  print OPTBLD 'CLUSTER=$$TRANSFER_VECTOR,,',
               map(",$_$objsuffix",@symfiles), "\n";
}
elsif (@symfiles) { $incstr .= ',' . join(',',@symfiles); }
# Include object modules and RTLs in options file
# Linker wants /Include and /Library on different lines
print OPTBLD "$libperl/Include=($incstr)\n";
print OPTBLD "$libperl/Library\n";
open(RTLOPT,$rtlopt) or die "$0: Can't read options file $rtlopt: $!\n";
while (<RTLOPT>) { print OPTBLD; }
close RTLOPT;
close OPTBLD;
# On VAX, hand control to the generated DCL driver procedure.
exec "\$ \@$drvrname" if $isvax;
__END__
# Oddball cases, so we can keep the perl.h scan above simple
#Foo=vars # uncommented becomes PL_Foo
#Bar=funcs # uncommented becomes Perl_Bar
| Lh4cKg/sl4a | perl/src/vms/gen_shrfls.pl | Perl | apache-2.0 | 14,500 |
:- module(gcd, [gcd/1]).
:- use_module(library(chr)).
% The CHR constraint store computes the gcd of all posted gcd/1
% constraints via Euclid's algorithm.
:- chr_constraint gcd/1.
% gcd(0) carries no information and is simply removed.
gcd(0) <=> true.
% Simpagation rule: keep the smaller value N, replace M by M mod N.
gcd(N) \ gcd(M) <=> 0<N, N=<M | L is M mod N, gcd(L).
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 12.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly. Use Unicode::UCD to access the Unicode character data
# base.
return <<'END';
V14
2404
2406
73056
73062
73063
73065
73066
73103
73104
73106
73107
73113
73120
73130
END
| operepo/ope | client_tools/svc/rc/usr/share/perl5/core_perl/unicore/lib/Scx/Gong.pl | Perl | mit | 573 |
package Note::App;
use strict;
use warnings;
use vars qw();
use Moose;
use Module::Load;
use Module::Refresh;
use Note::Param;
no warnings qw(uninitialized);
# Application-wide configuration data (empty hashref until populated).
has 'config' => (
	'is' => 'rw',
	'isa' => 'HashRef',
	'lazy' => 1,
	'default' => sub { return {}; },
);
# Scratch storage shared across requests for this application instance.
has 'storage' => (
	'is' => 'rw',
	'isa' => 'HashRef',
	'default' => sub { return {}; },
);
# Short identifier for this application instance.
has 'name' => (
	'is' => 'rw',
	'isa' => 'Str',
);
# Filesystem root of the application; dispatch() looks for pages under
# '<root>/page'.  NOTE(review): dispatch reads root from its $param, not
# from this attribute - confirm both are kept in sync by the caller.
has 'root' => (
	'is' => 'rw',
	'isa' => 'Str',
);
# Default, directory-lookup dispatcher: maps a URL path onto the on-disk
# page tree.
#
# Looks up "<root>/page/<path>" (a directory falls back to its '_index'
# page), reads the page's '.njs' JSON descriptor, and instantiates either
# the class named by the descriptor's 'class' key (hot-reloaded via
# Module::Refresh for development) or a plain Note::Page.  Returns the
# page object, or undef when no .njs file exists for the path.
sub dispatch
{
	my ($obj, $param) = get_param(@_);
	my $page = undef;
	my $root = $param->{'root'}. '/page';
	my $sp = join('/', @{$param->{'path'}});
	my $file = $root. ((length($sp)) ? '/'. $sp : '');
	if (-d $file)
	{
		# Directory requested: serve its index page.
		$file .= '/_index';
	}
	$file .= '.njs';
	# load njs page file
	if (-e $file)
	{
		# Fix: use Class->new(...) rather than indirect object syntax
		# ("new Class(...)"), which Perl can mis-parse.
		my $njs = Note::File::JSON->new(
			'file' => $file,
		);
		$njs->read_file();
		my $data = $njs->data();
		$param->{'file'} = $file;
		$param->{'data'} = $data;
		if (defined $data->{'class'})
		{
			my $pgclass = $data->{'class'};
			# read perl package for class
			load($pgclass);
			# refresh module for development
			my $classpath = $pgclass. '.pm';
			$classpath =~ s/::/\//g;
			Module::Refresh->refresh_module($classpath);
			# create instance
			$page = $pgclass->new($param);
		}
		else
		{
			$page = Note::Page->new($param);
		}
	}
	else
	{
		#die('File not found: '. $sp);
		return undef;
	}
	return $page;
}
1;
| mfrager/note | App.pm | Perl | mit | 1,439 |
% Example for late instantiation with built-in unification
% Terminating
% Author: Thomas Stroeder
%query: p(o,o,o).
% Base case: succeeds when both arguments already unify and the third is
% 'a'; the cut prevents falling through to the looping clause.
p(X,X,a) :- !.
% Late instantiation: bind Z=a and X=Y with built-in unification, then
% recurse (the recursive call now hits the base case and is cut).
p(X,Y,Z) :- Z = a, X = Y, p(X,Y,Z).
#!/usr/bin/perl
use strict;
use warnings;
use Carp;
use Getopt::Std; # for commandline argument processing
use English qw( -no_match_vars ); # for eval error special variable
use Parse::Apache::ServerStatus; # from CPAN, for parsing Apache status page
use Math::Round; # for rounding percentages to 2 decimal places
#
# Process arguments
#
# get the arguments, and support --help and --version
my %flags; # a hashref to store the arguments in
$Getopt::Std::STANDARD_HELP_VERSION = 1;
# Called by Getopt::Std when --version is passed (requires
# $Getopt::Std::STANDARD_HELP_VERSION to be set, as done above).
sub VERSION_MESSAGE {
    print "check_apache_serverstatus.pl version 0.2 (released February 2015)\n";
    return;
}
# Called by Getopt::Std when --help is passed; prints the usage text.
sub HELP_MESSAGE{
    print <<'END_HELP';
A plguin for checking the status of an Apache Web server using the server-status
module.

As well as checking that Apache is up, this plugin also checks if there are
enough free slots, and, if there are an in-ordinate number of slots in the R
state (reading request), which is a symptom of a slowlorios-style (D)DOS attack.

Required Flags:
-H  The hostname for the server to be checked

Optional Flags:
-c  The % of free slots below which a critical state is triggered as an
    integer (defaults to 10).
-w  The % of free slots below which a warning is triggered as an integer,
    must be a greater than -c (defaults to 33).
-r  The % of R slotgs above which a warning is triggered as an integer
    (defaults to 33).
-t  The timeout to wait for a reply from the server in seconds (defaults to
    10).

Exit Codes:
0 SUCCESS  - The server responded to the status request, the percentage
             of free slots is above -w, and the percentage of slots in an
             R state is below -r. I.e., the server looks healthy.
1 WARNING  - The server responded to the status request, but either the
             percentage of free slots is greater than -w but less than -c,
             or the percentage of slots in an R state i greater than -r.
2 CRITICAL - The server did not respond at all, or the percentage of free
             slots is below -c.
END_HELP
    return;
}
# Parse and validate the command-line flags.
#
# Fix: the option specifier previously read 'H:c:w:r:v', omitting the
# documented -t (timeout) flag, so passing -t made getopts() fail and the
# plugin exited with UNKNOWN.  't:' is now included ('v' remains accepted
# though unused).
my $args_legal = getopts('H:c:w:r:t:v', \%flags);
unless($args_legal){
    print "ERROR - invalid arguments received\n";
    exit 3;
}

# process required flags
my $hostname = $flags{'H'};
unless($hostname){
    print "ERROR - invalid arguments received\n";
    exit 3;
}

# process optional flags - each numeric flag keeps its default unless a
# valid non-negative integer was supplied
my $critical = 10;
if($flags{'c'} && $flags{'c'} =~ m/^\d+$/sx){
    $critical = $flags{'c'}
}
my $warning = 33;
if($flags{'w'} && $flags{'w'} =~ m/^\d+$/sx){
    $warning = $flags{'w'};
}
# the warning threshold must never be below the critical one
## no critic (ProhibitNegativeExpressionsInUnlessAndUntilConditions);
unless($warning > $critical){
    $warning = $critical;
}
## use critic
my $r_warning = 33;
if($flags{'r'} && $flags{'r'} =~ m/^\d+$/sx){
    $r_warning = $flags{'r'};
}
my $timeout = 10;
if($flags{'t'} && $flags{'t'} =~ m/^\d+$/sx){
    $timeout = $flags{'t'};
}
#
# try to get the data from the status page
#

# instantiate a parse object
my $prs = Parse::Apache::ServerStatus->new(
    url => "http://$hostname/server-status",
    timeout => $timeout,
);

# try to get the status page from the server
my $stats;
eval{
    $stats = $prs->get or croak($prs->errstr);
    1; # force a true return on successful execution
}or do{
    # return a critical state
    print "CRITICAL - failed to access server status page ($EVAL_ERROR)\n";
    exit 2;
};

#
# Parse the result
#

# calculate needed totals and percentages - each key of %$stats counts
# Apache scoreboard slots in that state ('_' = waiting, '.' = open slot,
# 'R' = reading request, etc.; 'rs' = requests per second)
my $total_slots = $stats->{'_'} + $stats->{'S'} + $stats->{'R'} + $stats->{'W'}
    + $stats->{'K'}+ $stats->{'D'} + $stats->{'C'} + $stats->{'L'}
    + $stats->{'G'} + $stats->{'I'} + $stats->{q{.}};
my $total_free = $stats->{'_'} + $stats->{q{.}};
my $percent_free = nearest(0.01, $total_free/$total_slots * 100);
my $percent_r = nearest(0.01, $stats->{'R'}/$total_slots * 100);
# requests/sec can come back as '.5' - pad a leading zero for readability
my $req_per_sec = $stats->{'rs'};
if($req_per_sec =~ m/^[.]/sx){
    $req_per_sec = '0'.$req_per_sec;
}
my $performance_data = " | free_slots=${percent_free}%;${warning};${critical} r_slots=${percent_r}%;${r_warning} requests_per_sec=$req_per_sec";
# first deal with the critical state (too few free slots)
if($percent_free < $critical){
    my $out = "CRITICAL - only ${percent_free}% ($total_free) of $total_slots slots free (";
    if($percent_r > $r_warning){
        $out .= 'WARNING - '
    }
    $out .= $stats->{'R'}." in R state) ${performance_data}\n";
    print $out;
    exit 2;
}

# then deal with the number of free slots being in a warning stage
if($percent_free < $warning){
    my $out = "WARNING - only ${percent_free}% ($total_free) of $total_slots slots free (";
    if($percent_r > $r_warning){
        $out .= 'WARNING - '
    }
    $out .= $stats->{'R'}." in R state) ${performance_data}\n";
    print $out;
    exit 1;
}

# finally deal with too many Rs (symptom of a slowloris-style attack)
if($percent_r > $r_warning){
    print 'WARNING - '.$percent_r.'% of slots in R state ('.$stats->{'R'}." slots) - potential slowloris attack! (${percent_free}% = $total_free out of $total_slots slots free) ${performance_data}\n";
    exit 1;
}

# finally, if we got this far, all is well, so return success
print "OK - ${percent_free}% of slots free ($total_free out of $total_slots) - ${percent_r}% in R state ($stats->{R} slots) ${performance_data}\n";
exit 0;
-- Does a very basic test of the rule engine.
--
-- © 2006 David Given.
-- Prime Mover is licensed under the MIT open source license. Search
-- for 'MIT' in this file to find the full license text.
--
-- $Id$
-- Create output files.
-- Create output files.
io.open("DATA1", "w"):write("one\n")
-- Run the collector twice so the file handle above is finalized
-- (flushed and closed) before the build rules read the file.
collectgarbage()
collectgarbage()
-- Wrap the data file in a prime-mover 'file' node.
test1 = file "DATA1"
-- A trivial rule class that copies its inputs to its output via cat.
copy = simple {
	class = "copy",
	outputs = {"%U%"},
	command = {
		"cat %in% > %out%"
	}
}
-- Default target: apply the copy rule twice to the same input and
-- install the group's first output as RESULT.
default = group {
	copy { file "DATA1" },
	copy { file "DATA1" },
	install = pm.install("%out[1]%", "RESULT")
}
| davidgiven/primemover | tests/group-in-out.pm | Perl | mit | 534 |
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
package BMS::ForkCritter;
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
=head1 DESCRIPTION
=head1 SYNOPSIS
=head1 AUTHOR
Charles Tilford <podmail@biocode.fastmail.fm>
//Subject __must__ include 'Perl' to escape mail filters//
=head1 LICENSE
Copyright 2014 Charles Tilford
http://mit-license.org/
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
=cut
use strict;
use Bio::SeqIO;
use BMS::FriendlySAX;
use POSIX;
use BMS::ErrorInterceptor;
use BMS::Utilities::Benchmark;
use vars qw(@ISA);
@ISA = qw(BMS::ErrorInterceptor BMS::Utilities::Benchmark);
my $VERSION = ' $Id: ForkCritter.pm,v 1.37 2014/03/20 18:19:19 tilfordc Exp $ ';
our $scriptStartTime = time;
our $sortTmpDir = "/tmp/";
# Constructor.
#
# Recognized arguments (underscores in key names are ignored, so
# -init_meth and -initmeth are equivalent):
#   -method    required per-record callback (code ref)
#   -type      input type: sequence formats, tsv/csv, xml/sax, array, basic, user
#   -input     file name / array reference to process
#   -limit, -progress, -verbose, -refork, -colmap, -inputargs, -exitcode,
#   -initmethod, -finishmethod, -groupmethod, -skipmethod, -lastitemmethod
# Returns the blessed ForkCritter object with its task directory created.
sub new {
    my $proto = shift;
    my $class = ref($proto) || $proto;
    my $self = {
        METHOD   => \&_bogus_method,
        PIDS     => [], # pid array of child process IDs
        PIDNUM   => {}, # keyed to pid, points to child number
        INPUT    => '',
        INPUTS   => [],
        TYPE     => '', # The type of the input file (sequence, csv, etc)
        INPTARGS => [], # Arguments to pass to the input method (used by SAX)
        LASTREC  => '', # The last item recovered by next_task
        RECORD   => 0,  # Total number of records observed
        COUNT    => 0,  # Number of records accepted by this child
        LIMIT    => 0,  # Limit the number of records read
        PROG     => 0,  # Progress notification interval in seconds
        DOALL    => 0,  # Flag to parse all records
        REFORK   => 0,  # Child number to allow re-running of failed child
        REFORKOK => 0,  # Can program re-try crashed children?
        FHNUM    => 0,  # Increment for file handles
        CHILD    => 0,  # The child count for this process
        LASTKID  => 0,  # The last child spawned
        LOWKID   => 1,  # The lowest child count for the set
        FORKNUM  => 1,  # The total number of children being spawned
        COUNTTAG  => '', # What are we using to measure progress?
        COUNTWHAT => 'lines', # What are the things we are counting?
        HISTORY  => {}, # Number of records previously read
        VERBOSE  => 0,
        EXITCODE => 0,
        TERMINATED => 0,
        FORCE_EXIT => 0,
    };
    bless ($self, $class);
    my $args = $self->parseparams
        ( -method => undef,
          -type   => 'ARRAY',
          @_ );
    # Standardize arguments to no underscore.  NOTE: the previous
    # implementation added keys to %{$args} while iterating it with
    # each(), which is undefined behavior in Perl; iterate over a
    # snapshot of the keys instead.
    foreach my $key (grep { /_/ } keys %{$args}) {
        (my $normKey = $key) =~ s/_//g;
        $args->{$normKey} = $args->{$key};
    }
    $self->intercept_errors();
    $self->ignore_error("Bio/Root/IO.pm line 505");
    $self->method($args->{METHOD});
    $self->unstable_input( $args->{UNSTABLE} );
    $self->init_method( $args->{INITMETH} || $args->{INITMETHOD} );
    $self->finish_method( $args->{FINISHMETH} || $args->{FINISHMETHOD} ||
                          $args->{FINALMETH}  || $args->{FINALMETHOD} );
    $self->group_method($args->{GROUPMETH} || $args->{GROUPMETHOD});
    $self->skip_record_method( $args->{SKIPMETH} || $args->{SKIP_METH} ||
                               $args->{SKIPMETHOD} );
    $self->last_item_method( $args->{LAST_ITEM_METH} || 
                             $args->{LAST_ITEM_METHOD} );
    $self->input_type( $args->{INPUTTYPE} || $args->{TYPE} );
    $self->input_args( $args->{INPUTARGS} || $args->{INPUT_ARGS} );
    $self->exit_code( $args->{EXITCODE} || $args->{EXIT_CODE} );
    $self->input( $args->{INPUT} );
    $self->progress( $args->{PROGRESS} || $args->{PROG});
    $self->verbose( $args->{VERBOSE} || $args->{DEBUG});
    $self->limit( $args->{LIMIT});
    $self->allow_refork( $args->{REFORK} );
    $self->set_colmap($args->{COLMAP} || $args->{REMAP});
    foreach my $key (qw(QUIETLIMIT LIMITMSG)) {
        $self->{uc($key)} = $args->{uc($key)};
    }
    # Initialize the task directory:
    $self->_task_directory();
    return $self;
}
# Return the object to a reusable state after a batch has completed:
# close handles, clear per-run bookkeeping, and bump LOWKID so a new
# batch of children numbers itself after the previous one.
sub reset {
    my $self = shift;
    # Reset the number for the 'first' kid
    $self->close_all();
    $self->{LOWKID}     = $self->fork_count + 1;
    $self->{FILES}      = {};
    $self->{PIDS}       = [];
    $self->{PIDNUM}     = {};
    $self->{REFORK}     = 0;
    $self->{INPUTS}     = [];
    $self->{FORCE_EXIT} = 0;
    $self->{TERMINATED} = 0;
    $self->{INITIALIZED} = 0; # bad idea? trying to get handles working
    map { $self->{$_} = undef; } qw(TOTAL_COUNT TASKDIR COL_MAP IOSEP IOSTRIP);
    # Caller may name additional keys to purge entirely
    foreach my $extra (map { uc($_) } @_) {
        delete $self->{ $extra };
    }
    $self->_clear_task;
    # Restart the clock used by unstable_input() modification checks
    $scriptStartTime = time;
}
# Bulk-register column-name remappings; each (input => output) pair in
# the supplied hash reference is passed to remap_header_name().
sub set_colmap {
    my $self    = shift;
    my $mapping = shift || {};
    foreach my $source (keys %{$mapping}) {
        $self->remap_header_name( $source, $mapping->{$source} );
    }
}
# Set / get the column separator (and optional strip flag) used for
# tabular I/O.  Passing an empty string clears both settings; passing
# undef leaves them untouched.  Returns (separator, strip) in list
# context, just the separator in scalar context.
sub set_column_separator {
    my ($self, $sep, $strip) = @_;
    if (defined $sep && $sep eq '') {
        # Empty string explicitly clears the configuration
        delete $self->{IOSEP};
        delete $self->{IOSTRIP};
    } elsif (defined $sep) {
        $self->{IOSEP}   = $sep;
        $self->{IOSTRIP} = $strip if (defined $strip);
    }
    return wantarray ? ($self->{IOSEP}, $self->{IOSTRIP}) : $self->{IOSEP};
}
# Set / get the remapped name for a column header.  Header names are
# normalized (uppercased, runs of underscore/dash/whitespace collapsed
# to a single space) before lookup, so 'Gene_Symbol' and 'gene symbol'
# share one mapping.  An empty-string value deletes the mapping; an
# unmapped name is returned in the caller's original spelling.
sub remap_header_name {
    my ($self, $rawName, $replacement) = @_;
    my $key = defined($rawName) ? uc($rawName) : '';
    $key =~ s/[_\-\s]+/ /g;
    if (defined $replacement) {
        if ($replacement eq '') {
            # Empty string removes any existing mapping
            delete $self->{COL_MAP}{$key};
        } else {
            $self->{COL_MAP}{$key} = $replacement;
        }
    }
    my $mapped = $self->{COL_MAP}{$key};
    return defined($mapped) ? $mapped : $rawName;
}
# Return the list of parsed header columns (empty list when no header
# has been captured yet).
sub header {
    my $self = shift;
    my $cols = $self->{SUBTYPES}{HEADER} || [];
    return @{$cols};
}
# Destructor.  A child flushes its own work via _finish(); the parent
# clears and removes the shared task directory.  Exits with exit_code()
# when one has been set.  nice_date() is presumably provided by a parent
# class or import -- not visible here; TODO confirm.
sub DESTROY {
    my $self = shift;
    if (my $child = $self->child) {
        # This is a child
        $self->err("[+]", "Child $child DESTROY - " . &nice_date())
            if ($self->verbose > 2);
        $self->_finish;
        $self->err("[+]", "Child $child finished - " . &nice_date())
            if ($self->verbose > 1);
    } else {
        # This is the parent
        $self->_clear_task;
        my $dir = $self->_task_directory;
        if ($dir && -d $dir) {
            #print ">>>>CLEARING $dir\n". $self->stack_trace()."\n";
            delete $self->{TASKDIR};
            rmdir($dir);
        }
        $self->msg("[+]", "Parent ForkCritter finished - " . &nice_date())
            if ($self->verbose);
    }
    if (my $ec = $self->{EXITCODE}) { exit($ec) }
}

# Report a fatal condition (with the last item seen for context), shut
# down cleanly, then die.
sub graceful_death {
    my $self = shift;
    my @msg = $#_ == -1 ? ("ForkCritter fatal error!") : @_;
    if (my $child = $self->child) {
        push @msg, "Child $child";
    } else {
        push @msg, "Parent object";
    }
    if (my $li = $self->last_item) {
        unshift @msg, "Last Item: $li";
    }
    $self->err( @msg );
    $self->graceful_shutdown();
    die;
}

# Non-fatal cleanup entry point: just close all open output handles.
sub graceful_shutdown {
    my $self = shift;
    return unless ($self);
    $self->close_all();
}
# Close every registered output handle.  Fragment files that requested
# the 'sort' option are sorted in place (optionally with a memory hint,
# e.g. 'sort4' => sort -S 4G); empty fragments are deleted.  Also closes
# any open Bio::SeqIO reader.
sub close_all {
    my $self = shift;
    return unless ($self);
    while (my ($tag, $data) = each %{$self->{FILES} || {}}) {
        my $fh = $data->{FH};
        close $fh if ($fh);
        delete $data->{FH};
        my $file = $data->{FRAG};
        next unless ($file);
        if (-e $file && -s $file) {
            # The file exists and has data
            if ($data->{OPTS} =~ /sort(\d+)?/) {
                # The user wants the file sorted
                my $mem = $1 || 0;
                my $cmd = "sort ";
                $cmd   .= "-S ${mem}G " if ($mem);
                $cmd   .= "-T $sortTmpDir " if ($sortTmpDir);
                $cmd   .= " $file -o $file";
                system($cmd);
            }
        } else {
            # The file exists but is empty
            unlink($file);
        }
    }
    if (my $io = $self->{SEQIO}) {
        $io->close();
    }
}
# Set / get the per-record callback.  A non-code-ref argument is
# rejected (with an error report) and the placeholder _bogus_method is
# installed so later execution fails loudly rather than silently.
sub method {
    my $self = shift;
    my ($callback) = @_;
    if ($callback) {
        if (ref($callback) ne 'CODE') {
            $self->err("'$callback' is not a code reference",
                       "method() NOT set");
            $self->{METHOD} = \&_bogus_method;
        } else {
            $self->{METHOD} = $callback;
        }
    }
    return $self->{METHOD};
}
# Set / get the callback run once in each child before record
# processing begins.  Passing a false-but-defined value clears it.
sub init_method {
    my $self = shift;
    if (defined $_[0]) {
        my $meth = $_[0];
        if (!$meth) {
            $self->{INIT_METHOD} = undef;
        } elsif (ref($meth) eq 'CODE') {
            $self->{INIT_METHOD} = $meth;
        } else {
            $self->err("'$meth' is not a code reference",
                       "init_method() NOT set");
        }
    }
    return $self->{INIT_METHOD};
}

# Set / get the callback run once in each child after all records are
# processed.  Passing a false-but-defined value clears it.
sub finish_method {
    my $self = shift;
    if (defined $_[0]) {
        my $meth = $_[0];
        if (!$meth) {
            $self->{FINISH_METHOD} = undef;
        } elsif (ref($meth) eq 'CODE') {
            $self->{FINISH_METHOD} = $meth;
        } else {
            $self->err("'$meth' is not a code reference",
                       "finish_method() NOT set");
        }
    }
    return $self->{FINISH_METHOD};
}

# Set / get the callback that fetches the next record from the input
# stream (normally installed by the _init_* helpers per input type).
sub next_record_method {
    my $self = shift;
    if (my $meth = $_[0]) {
        if (ref($meth) eq 'CODE') {
            $self->{NEXTFUNC} = $meth;
        } else {
            $self->err("'$meth' is not a code reference",
                       "next_record_method() NOT set");
        }
    }
    return $self->{NEXTFUNC};
}

# Set / get an optional callback that, given a record, returns true when
# the record should be skipped.  Passing a false-but-defined value
# clears it.
sub skip_record_method {
    my $self = shift;
    if (defined $_[0]) {
        my $meth = $_[0];
        if (!$meth) {
            $self->{SKIPFUNC} = undef;
        } elsif (ref($meth) eq 'CODE') {
            $self->{SKIPFUNC} = $meth;
        } else {
            $self->err("'$meth' is not a code reference",
                       "skip_record_method() NOT set");
        }
    }
    return $self->{SKIPFUNC};
}
=head2 group_method
Title : group_method
Usage : my $codeRef = $fc->group_method( $newval )
Function: Sets / gets a code callback for grouping records
Returns : The current callback
Args : Optional new value, should be a code reference
A group method is optional logic that will collect multiple records
together. This is useful when your data source should be analyzed in
logical sets, rather than one by one. The method will ONLY work if all
members of a group are in uninterupted sequential order.
The code reference will be provided with a single argument,
representing the currently encountered record. It is up to your code
to maintain the growing 'current' group, and to determine when a group
is complete. When you know you have a complete group (presumably
because the passed record is NOT in the growing group), return the
group object. Otherwise, return a false value (eg undef).
If the passed argument is itself undef, it indicates that ForkCritter
has reached the end of the data stream. Typically, your method will
then still have the 'current' group, which it should return at this
time (otherwise return undef).
=cut
# Set / get the record-grouping callback documented in the POD above.
# Only code references are accepted; anything else is reported and the
# previous setting is left in place.
sub group_method {
    my $self = shift;
    my ($callback) = @_;
    if ($callback) {
        if (ref($callback) ne 'CODE') {
            $self->err("'$callback' is not a code reference",
                       "group_method() NOT set");
        } else {
            $self->{GROUPFUNC} = $callback;
        }
    }
    return $self->{GROUPFUNC};
}
# Set / get an explicit record-counting callback (used by _create_count
# to estimate total work).  Setting one suppresses the automatic
# per-type counter selection done in input_type().
sub count_method {
    my $self = shift;
    if (my $meth = $_[0]) {
        if (ref($meth) eq 'CODE') {
            $self->{COUNTFUNC} = $meth;
            $self->{MANUALCOUNTFUNC} = 1;
        } else {
            $self->err("'$meth' is not a code reference",
                       "count_method() NOT set");
        }
    }
    return $self->{COUNTFUNC};
}

# Set / get the current input source.  Each (input, type) pair is also
# appended to INPUTS so that multiple inputs processed through one
# object can each be counted later.
sub input {
    my $self = shift;
    if (my $input = $_[0]) {
        my $type = $self->input_type( $_[1] );
        $self->{INPUT} = $input;
        $self->{TYPE}  = $type;
        $self->{TOTAL_COUNT} = undef;
        push @{$self->{INPUTS}}, [$input, $type];
    }
    return $self->{INPUT};
}

# Default record-counting function for each standardized input type.
my $typeCounters = {
    basic   => \&_count_basic,
    array   => \&_count_array,
    fasta   => \&_count_fasta,
    fastq   => \&_count_fastq,
    genbank => \&_count_genbank,
    xml     => \&_count_xml,
    tsv     => \&_count_tab,
    maf     => \&_count_tab,
    csv     => \&_count_tab,
};

# type() is an alias for input_type()
*type = \&input_type;
# Set / get the input type.  The free-text request is standardized via
# _standard_types() and mined for option keywords (head/maf/hash/clean,
# lookfor_X, groupby_X, sequence format, 'pair' for paired reads); the
# findings populate the SUBTYPES hash and select a default counter.
sub input_type {
    my $self = shift;
    if (my $req = lc($_[0] || "")) {
        my $stnd = $self->{TYPE} = $self->_standard_types($req);
        my $st   = $self->{SUBTYPES} = {};
        if ($req =~ /(head|maf)/) {
            # First row of tabular input is a header
            $st->{HEADER} = 1;
            if ($req =~ /hash/) {
                $st->{HASH} = 1;
            }
            if ($req =~ /maf/) {
                $st->{LOOKFOR} = 'NCBI';
                $st->{MAF}     = 1;
            }
        }
        $st->{CLEANASCII} = 1 if ($req =~ /clean/);
        if ($req =~ /lookfor_(\S+)/) {
            $st->{LOOKFOR} = $1;
        }
        if ($req =~ /groupby_(\S+)/) {
            $st->{GROUPBY} = $1;
        }
        if ($stnd eq 'seq') {
            # Resolve the concrete Bio::SeqIO format for sequence input
            if ($req =~ /(gbk|genbank|gb)/) {
                $st->{SEQFORMAT} = 'genbank';
            } elsif ($req =~ /fastq|fq/) {
                $st->{SEQFORMAT} = 'fastq';
            } elsif ($req =~ /uniprot|swiss|sp/) {
                $st->{SEQFORMAT} = 'swiss';
            } elsif ($req =~ /ipi/) {
                $st->{SEQFORMAT} = 'ipi';
            } else {
                $st->{SEQFORMAT} = 'fasta';
            }
            $stnd = $st->{SEQFORMAT};
            if ($req =~ /pair/) {
                $st->{PAIRED} = 1;
            }
        }
        # Pick the format-appropriate record counter unless the user
        # installed one explicitly via count_method()
        unless ($self->{MANUALCOUNTFUNC}) {
            $self->{COUNTFUNC} = $typeCounters->{$stnd};
        }
    }
    return $self->{TYPE};
}
# Collapse a free-text input type request into one of the canonical
# tokens: 'seq', 'basic', 'user', 'array', 'tsv', 'csv', 'maf', 'xml'.
# An unrecognized (but non-empty) type is reported and returned as-is;
# an empty request yields "".  Uses $self->err() for consistency with
# the rest of the module (previously a bare warn).
sub _standard_types {
    my $self = shift;
    if (my $type = lc(shift || "")) {
        if ($type =~ /(seq|fasta|gbk|genbank|uniprot|swiss|sp)/) {
            return 'seq';
        } elsif ($type =~ /basic/) {
            return 'basic';
        } elsif ($type =~ /user/) {
            return 'user';
        } elsif ($type =~ /arr/) {
            return 'array';
        } elsif ($type =~ /tab/ || $type =~ /[ct]sv/) {
            return $type =~ /csv/ ? 'csv' : 'tsv';
        } elsif ($type =~ /maf/) {
            return 'maf';
        } elsif ($type =~ /sax/ || $type =~ /xml/) {
            return 'xml';
        }
        $self->err("Unrecognized input type '$type'");
        return $type;
    }
    return "";
}
# Set / get extra arguments handed to the input parser (used by the
# FriendlySAX constructor in _init_sax).
sub input_args {
    my $self = shift;
    if ($_[0]) {
        $self->{INPTARGS} = $_[0];
    }
    return $self->{INPTARGS};
}

# Boolean flag: when true, _next_task() verifies before every record
# that the input file still exists and has not been modified mid-run.
sub unstable_input {
    my $self = shift;
    if (defined $_[0]) {
        $self->{UNSTABLE_INPUT} = $_[0] ? 1 : 0;
    }
    return $self->{UNSTABLE_INPUT};
}

# Boolean flag: when true, wait() re-forks a child that exited badly.
sub allow_refork {
    my $self = shift;
    if (defined $_[0]) {
        $self->{REFORKOK} = $_[0] ? 1 : 0;
    }
    return $self->{REFORKOK};
}

# Flag checked by _next_task(); once set, no further records are read.
# Returns the pre-increment value.
sub force_exit {
    return shift->{FORCE_EXIT}++;
}

# Set / get the exit code children use when terminating, and which
# wait() expects from them.
sub exit_code {
    my $self = shift;
    if (defined $_[0]) {
        $self->{EXITCODE} = $_[0];
    }
    return $self->{EXITCODE};
}
# Set / get the progress-report interval in seconds (0 disables
# reporting).  Fixes two small defects: the error message previously
# named a nonexistent prog() method, and an explicit 0 argument was
# silently ignored (the guard now matches limit()'s defined-check).
sub progress {
    my $self = shift;
    if (defined $_[0]) {
        my $num = $_[0];
        if ($num =~ /^\d+$/) {
            $self->{PROG} = $num;
        } else {
            $self->err("progress() requires an integer argument, not '$num'");
        }
    }
    return $self->{PROG};
}
# Set / get an optional callback that converts a raw record into the
# short descriptor stored by last_item() (used in progress/error text).
sub last_item_method {
    # Allows customization of the object stored by last_item()
    my $self = shift;
    if (defined $_[0]) {
        $self->{LAST_ITEM_METH} = $_[0] ? $_[0] : undef;
    }
    return $self->{LAST_ITEM_METH};
}

# Record / report the most recent item seen by this process.
sub last_item {
    # Sets / Gets the last item recovered for this fork
    my $self = shift;
    if ($_[0]) {
        if (my $meth = $self->{LAST_ITEM_METH}) {
            $self->{LASTREC} = &{$meth}($_[0]) || '';
        } else {
            $self->{LASTREC} = $_[0];
        }
    }
    return $self->{LASTREC};
}

# Set / get the total number of children in the current batch (used as
# the modulus when partitioning records across children).
sub total_fork {
    # Set via execute(), or manually by user
    # The total number of child processes spawned in a batch
    my $self = shift;
    if (my $num = $_[0]) {
        if ($num =~ /^\d+$/) {
            $self->{FORKNUM} = $num;
        } else {
            $self->err("total_fork() requires an integer argument, not '$num'");
        }
    }
    return $self->{FORKNUM};
}

# Running total of children forked so far (across batches).
sub fork_count {
    # Always incremented via fork()
    # The total number of children forked so far
    my $self = shift;
    return $self->{LASTKID};
}

# This process's child number (0 in the parent).
sub child {
    # The child number of the current batch
    my $self = shift;
    return $self->{CHILD};
}

# True when record number $num belongs to this child: records are dealt
# round-robin by (num - 1) % total_fork.  Always true in doall mode.
sub match_modulus {
    my $self = shift;
    return 1 if ($self->doall);
    my ($num) = @_;
    # Child will be a value 0 to (totalFork -1)
    my $child = $self->child - $self->{LOWKID};
    my $mod   = ($num - 1) % $self->total_fork;
    return ($child == $mod) ? 1 : 0;
}

# UNFINISHED STUB: intended block-based (rather than round-robin)
# partitioning.  Currently computes the child offset and returns it
# implicitly; _next_task() dies if DO_BLOCK mode is ever enabled.
sub match_block {
    my $self = shift;
    my ($num) = @_;
    # Child will be a value 0 to (totalFork -1)
    my $child = $self->child - $self->{LOWKID};
}
# Boolean flag: when true every child processes every record, rather
# than partitioning them by match_modulus().
sub doall {
    my $self = shift;
    if (defined $_[0]) {
        $self->{DOALL} = $_[0] ? 1 : 0;
    }
    return $self->{DOALL};
}

# Set / get the maximum number of records to read (0 = unlimited).
sub limit {
    my $self = shift;
    if (defined $_[0]) {
        my $num = $_[0];
        if ($num =~ /^\d+$/) {
            $self->{LIMIT} = $num;
        } else {
            $self->err("limit() requires an integer argument, not '$num'");
        }
    }
    return $self->{LIMIT};
}
# Raise an already-set record limit by the given amount (default 1) and
# return the new limit.  A zero/unset limit means "unlimited" and is
# left untouched; 0 is returned in that case.
sub extend_limit {
    my $self = shift;
    my $increment = shift || 1;
    return 0 unless ($self->{LIMIT});
    $self->{LIMIT} += $increment;
    return $self->{LIMIT};
}
# Set / get the verbosity level (0 = quiet; higher values enable
# progressively chattier diagnostics).
sub verbose {
    my $self = shift;
    if (defined $_[0]) {
        $self->{VERBOSE} = $_[0];
    }
    return $self->{VERBOSE};
}

# Number of records of the current input already processed in a prior
# run, as loaded by history_file() (0 when no history is known).
sub history {
    my $self = shift;
    my $input = $self->input;
    return 0 unless ($input);
    return $self->{HISTORY}{$input} ? $self->{HISTORY}{$input} : 0;
}
# Set / get the path of the run-history file.  In a child this returns
# the child-specific path ("<base>_<childnum>").  When the parent sets a
# path that already exists, it is parsed (tab-separated: file, record
# number, child) and the lowest record number per file is stored in
# HISTORY as the resume point.
# Fix: use a lexical filehandle with three-arg open instead of a
# bareword global handle and 2-arg open on an interpolated path.
sub history_file {
    my $self = shift;
    if (my $num = $self->child) {
        return 0 unless ($self->{HFILE});
        return $self->{HFILE} . "_$num";
    }
    my ($file) = @_;
    if ($file) {
        $self->{HFILE}   = $file;
        $self->{HISTORY} = {};
        if (-e $file) {
            # Read in prior history
            open(my $histfh, '<', $file) || $self->
                graceful_death("Failed to read history file", $file, $!);
            my %nums;
            while (<$histfh>) {
                chomp;
                my ($hfile, $recnum, $child) = split("\t", $_);
                $nums{$hfile}{$recnum}++;
            }
            close $histfh;
            foreach my $hfile (keys %nums) {
                # The smallest record count is the safe resume point
                my ($lowest) = sort { $a <=> $b } keys %{$nums{$hfile}};
                $self->{HISTORY}{$hfile} = $lowest;
            }
        }
    }
    return $self->{HFILE};
}
# Register (or query) a named output destination.  A path beginning
# with '*' is treated as an already-open filehandle glob; otherwise the
# path is recorded and each child later opens its own fragment of it
# ("<path>_<childnum>", joined by join_files()).  Options (e.g. 'sort')
# are stored for close_all()/join_files().  Paths cannot be added after
# forking has begun.
sub output_file {
    my $self = shift;
    my $tag  = uc(shift);
    if ($tag) {
        if (my $path = shift) {
            my $opts = lc(shift || '');
            if ($path =~ /^\*/) {
                # User is passing a file handle
                $self->{FILES}{$tag} = {
                    FH   => $path,
                    PATH => '',
                    FRAG => '',
                };
            } elsif ($self->{INITIALIZED}) {
                $self->err("You can not set output_file() once forking has begun");
            } else {
                $self->{FILES}{ $tag } = {
                    PATH => $path,
                };
            }
            $self->{FILES}{ $tag }{OPTS} = $opts;
        }
        return $self->{FILES}{ $tag }{PATH} || '';
    }
    return '';
}

# Return the open filehandle registered under a tag (undef if none).
sub output_fh {
    my $self = shift;
    if ($_[0]) {
        return $self->{FILES}{ uc($_[0]) }{FH};
    }
    return undef;
}

# Print text to the output registered under $tag; complains (rather
# than dying) if the handle is missing or already closed.
sub write_output {
    my $self = shift;
    my $tag  = uc(shift);
    my $txt  = shift;
    return if (!defined $txt || $txt eq "");
    my $fh = $self->{FILES}{$tag}{FH};
    if ($fh) {
        print $fh $txt;
    } else {
        $self->err("Attempt to write to closed file handle on tag '$tag'",
                   "Perhaps you passed the wrong tag, or called reset()?",
                   $tag, $txt, $self->branch($self->{FILES}));
    }
}
# Main entry point: fork $num children (default 1), wait for them all,
# merge their output fragments, then reset for reuse.  Returns the
# number of children that failed.
sub execute {
    my $self = shift;
    my ($num) = @_;
    $num ||= 1;
    $self->total_fork($num);
    if ($self->verbose) {
        my @bits = (sprintf("Forking %d child%s - %s", $num, $num == 1 ? 
                            '':'ren', &nice_date()));
        if (my $lim = $self->limit) {
            push @bits, "User request to process only $lim records";
        }
        $self->msg(@bits);
    }
    $self->{REFORK} = 0;
    my @pids;
    for my $i (1..$num) {
        my $pid = $self->fork( 'quiet' );
        push @pids, $pid;
    }
    $self->msg(sprintf("  Spawned %d Child%s: %s\n", $#pids + 1, $#pids == 0 ?
                       '' : 'ren', join(', ', @pids))) if ($self->verbose > 1);
    my $failed = $self->wait;
    $self->join_files();
    $self->msg(sprintf("All tasks completed - %s", &nice_date()))
        if ($self->verbose);
    $self->reset();
    return $failed;
}
# Fork one child.  The parent records the pid and returns immediately.
# The child initializes itself, pulls records via _next_task() in a
# loop (reporting progress at the configured interval), finishes, and
# then exits -- either with the configured exit_code(), or via
# KILL/CORE::dump() to skip slow interpreter cleanup.  Returns
# (pid, child_number) in list context, pid in scalar context.
sub fork {
    my $self = shift;
    my ($bequiet) = @_;
    # REFORK reuses a crashed child's number; otherwise take the next one
    my $num = $self->{REFORK} || ++$self->{LASTKID};
    my $pid;
    if ($pid = CORE::fork) {
        # parent $pid = pid of child...
        push @{$self->{PIDS}}, $pid;
        $self->{PIDNUM}{$pid} = $num;
        $self->msg(sprintf("%spawning Child %d PID %d\n",
                           $self->{REFORK} ? 'Res' : 'S', $num, $pid))
            if (! $bequiet && $self->verbose > 1);
    } elsif (defined $pid) {
        # $pid is zero but defined - this is the child
        # Each child calls one of the 'read' methods, eg read_info()
        $self->{CHILD} = $self->{REFORK} || $self->{LASTKID};
        my $lt = $self->{TIME}{START} = time;
        $self->_init;
        #if ($@) {
        #    $self->graceful_death
        #        ("[!!]", "Child $num failed to initialize prior to fork",
        #         $@);
        #    exit 1;
        #}
        my $meth = $self->method;
        my $prog = $self->progress;
        # Main record loop for this child
        while ( my $rec = $self->_next_task ) {
            &{$meth}($rec);
            if ($prog && time - $lt > $prog) {
                $self->_show_progress;
                $lt = time;
            }
        }
        $self->_finish;
        if ($prog && $self->verbose > 1) {
            my $elapsed = (time - $self->{TIME}{START}) / 60;
            my $rate    = $self->{COUNT} / ($elapsed || .01);
            $self->msg(sprintf("[%2d]", $self->{CHILD}), sprintf
                       ("Finished %d %s in %.1f min %.1f/min\n", 
                        $self->{COUNT}, $self->{COUNTWHAT},
                        $elapsed, $rate));
        }
        if (my $ec = $self->exit_code()) {
            exit $ec;
        } else {
            # Fastest way to exit child, without time-consuming clean-up:
            kill('KILL', $$);
            CORE::dump();
            exit 0;
        }
    } else {
        $self->graceful_death("Failure to fork process for iteration $num");
    }
    return wantarray ? ($pid, $num) : $pid;
}
# Wait for all spawned children, validating each exit status against
# the expected exit_code().  Core dumps are reported and their core
# files removed.  Failed children are re-forked when allow_refork() is
# set.  Returns the number of failures.
sub wait {
    my $self = shift;
    my @pidarray = @{$self->{PIDS}};
    # Count input records while children are running (for progress math)
    $self->_create_count();
    my $failed   = 0;
    my $expected = $self->exit_code();
    foreach my $pid (@pidarray) {
        waitpid($pid, 0);
        my $err = 0;
        my $exit_value  = $? >> 8;
        # print "EXIT VALUE $exit_value\n";
        if (defined $expected) {
            unless ($exit_value == $expected) {
                $self->err("STACK_+2 Child $pid exits with exit value $exit_value, expected $expected");
                $err++;
            }
        } elsif ($exit_value) {
            $self->err("STACK_+2 Child $pid exits with exit value $exit_value");
            $err++;
        }
        if ($!) {
            # Bogus "Inappropriate ioctl for device" errors
            # print "  Child $pid throws error [!] $!\n";
            # $err++;
        }
        if ($@) {
            # print "  Child $pid throws error [@] $!\n";
        }
        if ($? & 128) {
            $self->err("Child $pid exits with core dump");
            # Remove core file - obviously this is not always desirable
            my $core_file = "core.$pid";
            unlink($core_file) if (-e $core_file);
            $err++;
        }
        if ($err) {
            $failed++;
            if ($self->allow_refork) {
                # The user wants to re-try the child when it fails
                $self->{REFORK} = $self->{PIDNUM}{$pid};
                $self->fork;
            }
        }
    }
    return $failed;
}
# Merge each child's output fragments ("<path>_NNN") back into the
# registered output path.  A single fragment is moved/appended with
# shell mv/cat; sorted outputs are combined with 'sort -m'; otherwise
# fragments are concatenated with sysread/syswrite.  Fragments are only
# deleted after an error-free join.
sub join_files {
    my $self = shift;
    my $fork = $self->fork_count;
    my %tobuild;
    my %opts;
    while (my ($tag, $data) = each %{$self->{FILES}}) {
        my $path = $data->{PATH};
        next unless ($path);
        $path = '>' . $path unless ($path =~ /^\>/);
        next unless ($path);
        my @files;
        for my $i (1..$fork) {
            # Iteratively check for all likely files
            my $file = sprintf("%s_%03d", $path, $i);
            $file =~ s/^\>+//;
            if (-e $file) {
                if (-s $file) {
                    push @files, $file;
                } else {
                    # Empty fragments are simply discarded
                    unlink($file);
                }
            }
        }
        next if ($#files < 0);
        $tobuild{$path} = \@files;
        $opts{$path}    = $data->{OPTS};
    }
    my @paths = keys %tobuild;
    return if ($#paths <0);
    $self->msg(sprintf("Assembling %d file%s - %s", $#paths + 1, $#paths == 0 ?
                       '' : 's', &nice_date())) if ($self->verbose > 1);
    while (my ($path, $files) = each %tobuild) {
        # Split the '>'/'>>' mode token from the target file name
        my ($ftok,$fname);
        if ($path =~ /^(\>+)(.+)$/) {
            ($ftok,$fname) = ($1, $2);
        } else {
            $self->err("The path '$path' is not set for write operations");
            next;
        }
        my (%errors, @to_kill);
        if ($#{$files} == 0) {
            # We just have a single file, we should be able to just rename it
            my $src = $files->[0];
            $src =~ s/ /\\ /g;
            my $trg = $fname;
            $trg =~ s/ /\\ /g;
            if ($ftok eq '>') {
                system("mv $src $trg");
            } else {
                system("cat $src >> $trg");
            }
            push @to_kill, $files->[0];
        } elsif ($opts{$path} =~ /sort/) {
            # We have sorted the files, now we need to merge them
            $self->bench_start('Merge sort');
            my $cmd = "sort ";
            $cmd .= "-T $sortTmpDir " if ($sortTmpDir);
            $cmd .= "-m ".join(' ', @{$files})." $path";
            system($cmd);
            push @to_kill, @{$files};
            $self->bench_end('Merge sort');
        } else {
            # We have multiple files, we will need to concatenate
            $self->msg(sprintf("%s = %d fragment%s\n", $fname, $#{$files} + 1,
                               $#{$files} == 0 ? '' : 's'))
                if ($self->verbose > 1);
            if ($ftok eq '>') {
                sysopen(TOFILE, $fname, O_WRONLY | O_TRUNC | O_CREAT) ||
                    $self->graceful_death("Failed to generate output file",
                                          $fname, $!);
            } else {
                sysopen(TOFILE, $fname, O_WRONLY | O_APPEND | O_CREAT) ||
                    $self->graceful_death("Failed to concatenate output",
                                          $fname, $!);
            }
            foreach my $file (@{$files}) {
                sysopen(FROMFILE, $file, O_RDONLY) ||
                    $self->graceful_death
                    ("Failed to read from output fragment",
                     $file, $!);
                # Copy in filesystem-block-sized chunks
                my $blksize = (stat FROMFILE)[11] || 16384;
                my $buffer;
                while (my $len = sysread(FROMFILE, $buffer, $blksize)) {
                    $self->graceful_death("sysread error", $file, $!)
                        if (!defined $len);
                    my $offset = 0;
                    while ($len) {
                        my $written = syswrite
                            (TOFILE, $buffer, $len, $offset);
                        if ($written) {
                            $offset += $written;
                            $len    -= $written;
                        } else {
                            # Record bytes we failed to write for this file
                            $errors{$file} += $len;
                        }
                    }
                }
                push @to_kill, $file;
            }
            close TOFILE;
        }
        my @errs = sort keys %errors;
        unless ($#errs == -1) {
            $self->err("OUTPUT ERROR: $path",
                       "Failed to join information from component temp files",
                       (map {sprintf("%s : %d bytes", $_, $errors{$_})} @errs),
                       "Temporary files have NOT been removed:", @to_kill);
        } else {
            foreach my $kf (@to_kill) {
                unlink($kf);
            }
        }
    }
}
# Open a lexical filehandle for $file.  The mode is deliberately
# embedded in the string ('<', '>', '>>'; default read), and '.gz'
# files are transparently piped through gunzip.  NOTE(review): this is
# effectively a 2-arg open on an interpolated path, so mode characters
# in untrusted file names would be honored -- confirm callers only pass
# trusted paths.  Dies via graceful_death on failure.
sub get_fh {
    my $self = shift;
    my ($file) = @_;
    if ($file !~ /^\>/) {
        # The user plans to read this file
        my $base = $file;
        $base =~ s/^[\<\>]+//;
        unless (-e $base) {
            $self->graceful_death("Could not read '$base'", "Does not exist");
        }
    }
    my ($fh, $ftype);
    # gunzip pipes emit harmless ioctl/seek errors that we suppress
    $self->ignore_error('Inappropriate ioctl for device');
    undef $!;
    undef $@;
    if ($file =~ /\.gz$/) {
        $file =~ s/^[\<\>]+//;
        $ftype = 'gz';
        $self->ignore_error('Illegal seek');
        open($fh, "gunzip -c $file |");
        $self->ignore_error('Illegal seek', 'StopIgnoring');
    } else {
        unless ($file =~ /^[\<\>]/) {
            $file = "<$file";
        }
        $ftype = '';
        open($fh, $file);
    }
    if (!$fh || ($! && $! ne 'Illegal seek')) {
        if ($fh) {
            $self->err
                ("Failed to recover file handle glob", $file,
                 $! ? '$! = '.$! : undef, $@ ? '$@ = '.$@ : undef);
        } else {
            $self->graceful_death
                ("Failed to open file handle", $file,
                 $! ? '$! = '.$! : undef, $@ ? '$@ = '.$@ : undef);
        }
    }
    return $fh;
}
# Count the records in every registered input (using the type-specific
# counter) and write "<input>\t<count>" lines to the shared count file,
# which _show_progress() reads to estimate completion.
sub _create_count {
    my $self = shift;
    my @cdat;
    my $cf        = $self->{COUNT_FILE};
    my $limit     = $self->limit;
    my $countFunc = $self->count_method();
    open(CF, ">$cf") || $self->graceful_death
        ("Failed to write count file", $cf, $!);
    foreach my $idat (@{$self->{INPUTS}}) {
        # It is possible that we have forked analysis of multiple files,
        # We need to count each seperately
        my ($input, $type) = @{$idat};
        # Without a counter for this type the total is recorded as 0
        my $num = $countFunc ? &{$countFunc}( $self, $input, $type ) : 0;
        print CF join("\t", $input, $num || 0)."\n";
    }
    close CF;
}
# Emit a progress line for the whole batch.  Each child writes its own
# count to a History file; only the lowest-numbered live child reports,
# summing all History files and comparing against the total from the
# count file to estimate time remaining.
sub _show_progress {
    my $self  = shift;
    my $child = $self->child;
    my $hf    = $self->{PROG_HIST};
    # Update the history file to note how many records were parsed:
    open(HF, ">$hf") || $self->graceful_death
        ("Failed to write history file", $hf, $!);
    print HF $self->{COUNT} . "\n";
    close HF;
    my @tfs = $self->_task_files;
    foreach my $file (@tfs) {
        if ($file =~ /^Child_(\d+)$/) {
            # There is a lower numbered task still running - let *it*
            # report progress
            return if ($1 < $child);
        }
    }
    # Calculate the total number of tasks done:
    my $dir   = $self->_task_directory;
    my $count = 0;
    foreach my $file (@tfs) {
        if ($file =~ /^History/) {
            # Read the history
            open(HF, "<$dir/$file") || $self->graceful_death
                ("Failed to read history file", "$dir/$file", $!);
            while (<HF>) {
                chomp;
                $count += $_;
            }
            close HF;
        }
    }
    # Lazily load the expected total for the current input
    if (!defined $self->{TOTAL_COUNT} && -e $self->{COUNT_FILE}) {
        my $cf    = $self->{COUNT_FILE};
        my $input = $self->input;
        my %counts;
        open(CF, "<$cf") || $self->graceful_death
            ("Failed to read count file", $cf, $!);
        while (<CF>) {
            chomp;
            my ($file, $count) = split(/\t/, $_);
            $counts{$file} = $count;
        }
        $self->{TOTAL_COUNT} = $counts{$input};
        close CF;
    }
    my $total   = $self->{TOTAL_COUNT};
    my $elapsed = (time - $self->{TIME}{START});
    my $rate    = $count / ($elapsed || .01);
    my $remain  = '';
    if ($total && $rate) {
        # Scale the remaining-time estimate to a human-friendly unit
        my ($r, $u) = (($total - $count) / $rate, 'sec');
        if ($r > 60) {
            $r /= 60;
            $u  = 'min';
            if ($r > 60) {
                $r /= 60;
                $u  = 'hr';
                if ($r > 24) {
                    $r /= 24;
                    $u  = 'day';
                }
            }
        }
        $remain = sprintf(", %.1f %s remain", $r, $u);
    }
    my $li = substr($self->last_item, 0, 50);
    $self->msg(sprintf("[%2d]",$child),sprintf
               ("%4d %s, %.1f min, %.1f per min%s - %s", $count,
                $self->{COUNTWHAT}, $elapsed / 60, $rate * 60, $remain, $li));
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# _task_dir() is an alias for _task_directory()
*_task_dir = \&_task_directory;

# Lazily create and return the world-writable per-process scratch
# directory (/tmp/ForkCritter/PID_<pid>) used for task, history and
# count files shared between parent and children.
sub _task_directory {
    my $self = shift;
    unless ($self->{TASKDIR}) {
        my $uniqTag = $$;
        if ($self =~ /x([\da-f]+)/) {
            # Use the hash key as a unique tag
            # $uniqTag = $1; # I forget why I was doing this...
            # $uniqTag =~ s/0+/0/g;
        }
        my $pdir = "/tmp/ForkCritter";
        unless (-d $pdir) {
            mkdir($pdir);
            chmod(0777, $pdir);
        }
        my $dir = $self->{TASKDIR} = "$pdir/PID_$uniqTag";
        if (-d $dir) {
            # Stale directory from a previous run: empty it
            $self->_clear_task;
            # print "<<<<EXISTS: $dir\n". $self->stack_trace()."\n";
        } else {
            # print "<<<<MAKING: $dir\n". $self->stack_trace()."\n";
            mkdir( $dir, 0777 );
            chmod( 0777, $dir );
        }
        $self->{COUNT_FILE} = "$dir/RecordCount";
    }
    return $self->{TASKDIR};
}

# List the file names currently present in the task directory.
sub _task_files {
    my $self = shift;
    my $dir  = $self->_task_directory;
    my @files;
    if (-d $dir) {
        opendir(TMPDIR, $dir) || $self->graceful_death
            ("Failed to read task file directory", $dir, $!);
        foreach my $file (readdir TMPDIR) {
            push @files, $file;
        }
        closedir TMPDIR;
    }
    return @files;
}

# Delete every file in the task directory (the directory itself is
# removed by DESTROY).
sub _clear_task {
    my $self = shift;
    my $dir  = $self->_task_directory;
    foreach my $file ($self->_task_files) {
        unlink("$dir/$file");
    }
}
# Placeholder installed as the default METHOD; dies with instructions
# if the caller never supplied a real record callback.
sub _bogus_method {
    shift->graceful_death
        ("You never set the callback method()!",
         "Be sure to specify -method when calling new()",
         "Or explicitly call \$fc->method()");
}
# Per-child setup, run once after fork: prepare the type-specific
# reader, create this child's task/history files, open per-child output
# fragments, and run the user's init_method.  SAX input is initialized
# last because FriendlySAX drives the whole parse from its constructor.
sub _init {
    my $self = shift;
    return 0 if ($self->{INITIALIZED});
    $self->{FHNUM} = 0;
    my $type = $self->input_type;
    if ($type eq 'user') {
        # The user is initializing themselves
    } elsif ($type eq 'seq') {
        $self->_init_seq;
    } elsif ($type eq 'basic') {
        $self->_init_basic;
    } elsif ($type eq 'array') {
        $self->_init_array;
    } elsif ($type eq 'csv' || $type eq 'tsv' || $type eq 'maf') {
        $self->_init_tab;
    } elsif ($type eq 'xml') {
        # Wait - do it last
    } else {
        $self->graceful_death
            ( "Unknown input type", $type || '-NOT SPECIFIED-');
    }
    my $ctag = sprintf("_%03d", $self->child);
    my $fork = $self->fork_count;
    my $dir  = $self->_task_directory();
    my $cf = $self->{TASK_FILE} = "$dir/Child$ctag";
    my $hf = $self->{PROG_HIST} = "$dir/History$ctag";
    # History file starts at zero records processed
    open(HF, ">$hf") || $self->graceful_death
        ("Failed to write history file", $hf, $!);
    print HF "0\n";
    close HF;
    # Task file records this child's pid while it is alive
    open(TF, ">$cf") || $self->graceful_death
        ("Failed to write task file", $cf, $!);
    print TF "$$\n";
    close TF;
    while (my ($tag, $data) = each %{$self->{FILES}}) {
        my $path = $data->{PATH};
        next unless ($path);
        my $file = $path;
        # Each child writes to its own fragment (except /dev/null)
        $file .= $ctag unless ($file eq '/dev/null');
        $file  = '>' . $file unless ($file =~ /^\>/);
        my $fh = $self->get_fh($file);
        $data->{FH}   = $fh;
        $data->{FRAG} = $file;
        $data->{FRAG} =~ s/^\>+//;
    }
    if (my $initmeth = $self->init_method) {
        &{$initmeth};
    }
    $self->{INITIALIZED} = 1;
    if ($type eq 'xml') {
        $self->_init_sax;
    }
}
# If this method is changed, _sax_wrapper() should also be changed
my $secPerDay = 60 * 60 * 24;

# Fetch the next record belonging to this child: pull records via the
# type-specific NEXTFUNC, apply optional grouping and skip callbacks,
# and keep cycling until a record number matches this child's modulus.
# Returns undef at end of input, at the record limit, or after
# termination/force_exit.
sub _next_task {
    my $self = shift;
    return undef if ($self->{TERMINATED} || $self->{FORCE_EXIT});
    if ($self->unstable_input) {
        # Verify that the input has not changed since execution started
        my $file = $self->input;
        $file =~ s/^\<+//;
        $self->graceful_death("Unstable Input - file no longer exists!",
                              $file) unless (-e $file);
        $self->graceful_death("Unstable Input - file is now zero size!",
                              $file) unless (-s $file);
        my $runTime = $self->{TIME}{START};
        my $modTime = $scriptStartTime - int((-M $file) * $secPerDay);
        my $diff    = $modTime - $runTime;
        $self->graceful_death("Unstable Input - file modified $diff seconds ".
                              "after analysis started!", $file) if ($diff > 30)
    }
    my $func  = $self->next_record_method;
    my $skip  = $self->skip_record_method;
    my $grpm  = $self->group_method();
    my $limit = $self->limit;
    my $retval;
    while (1) {
        # Return a null entry if we have exceeded the number of requested recs
        return undef if ($limit && $self->{RECORD} >= $limit);
        # Get the next record in the stream
        $retval = &{$func}( $self );
        if ($grpm) {
            # The user is grouping records
            my $record = $retval;
            $retval = &{$grpm}( $record );
            # An undefined return value indicates an incomplete group
            next if ($record && !$retval);
        }
        # undef always indicates the end of the task
        return undef if (!defined $retval);
        next if ( $skip && &{$skip}( $retval ) );
        my $num = ++$self->{RECORD};
        # Keep cycling until we reach the appropriate modulus for this child,
        # unless this child is tasked to doall ($da) records in the input
        if ($self->{DO_BLOCK}) {
            # Block partitioning is not implemented (see match_block stub)
            $self->graceful_death("Charles needs to write match_block()!");
            last if ($self->match_block($num));
        } else {
            last if ($self->match_modulus($num));
        }
    }
    # printf("%s = %d %% %d\n", $retval->[0][0], $self->{RECORD},$self->child);
    $self->{COUNT}++;
    return $retval;
}
# Per-child teardown (idempotent): run the user's finish_method, the
# type-specific TERMFUNC, close outputs, and remove this child's task
# file so _show_progress knows the child is done.
sub _finish {
    my $self = shift;
    return 0 if ($self->{TERMINATED}++);
    if (my $finishmeth = $self->finish_method()) {
        &{$finishmeth};
    }
    if (my $func = $self->{TERMFUNC}) {
        &{$func}( $self ) if ($func);
    }
    my $fork = $self->fork_count;
    $self->close_all();
    # Remove the task file
    unlink($self->{TASK_FILE});
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 'basic' input: just open the input file and hand the raw filehandle
# to the user (no NEXTFUNC is installed here; the user is expected to
# provide one or read IO themselves).
sub _init_basic {
    my $self  = shift;
    my $input = $self->input;
    my $fh    = $self->get_fh($input);
    $self->{IO} = $fh;
    $self->{COUNTTAG}   = 'LINECOUNT';
    $self->{TERMFUNC} ||= \&_finish_basic;
}

# Drop the raw filehandle at end of run.
sub _finish_basic {
    my $self = shift;
    $self->{IO} = undef;
}

# No record count can be estimated for user-driven 'basic' input.
sub _count_basic {
    my $self = shift;
    my ($input, $type) = @_;
    # User defined methods, can not count
    return 0;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 'array' input: the input() is an array reference; records are its
# elements, iterated by index.
sub _init_array {
    my $self  = shift;
    my $array = $self->input;
    if (!$array || !ref($array) || ref($array) ne 'ARRAY') {
        $array ||= '-UNDEF-';
        $self->graceful_death("Input() '$array' is not an array reference");
    }
    $self->{INDEX}      = 0;
    $self->{COUNTTAG}   = 'INDEX';
    $self->{COUNTWHAT}  = 'records';
    $self->{NEXTFUNC} ||= \&_next_array;
    $self->{TERMFUNC} ||= \&_finish_basic;
}

# Return the next array element (undef past the end) and note it as the
# last item seen.
sub _next_array {
    my $self  = shift;
    my $array = $self->input;
    my $index = $self->{INDEX}++;
    return undef if ($index > $#{$array});
    my $retval = $array->[ $index ];
    $self->last_item( $retval );
    return $retval;
}
# Record count for array input is simply the number of elements.
sub _count_array {
    my ($self, $list) = @_;
    return scalar @{$list};
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Sequence input: open a Bio::SeqIO reader on the input (via get_fh so
# .gz works).  With the 'pair' option, a second reader is opened on the
# mate file derived by replacing the '1' before the extension with '2'.
sub _init_seq {
    my $self  = shift;
    my $input = $self->input;
    # Use a filehandle to allow analysis of .gz fasta files
    my $fh  = $self->get_fh("<$input");
    my $st  = $self->{SUBTYPES} || {};
    my $fmt = $st->{SEQFORMAT};
    $self->{IO}    = $fh;
    $self->{SEQIO} = Bio::SeqIO->new( -fh => $fh, -format => $fmt );
    if ($st->{PAIRED}) {
        if ($input =~ /(.+)1.([^\.]{2,7})$/) {
            my $inp2 = $1.'2.'.$2;
            if (-s $inp2) {
                my $fh = $self->get_fh("<$inp2");
                $self->{SEQIO2} = Bio::SeqIO->new
                    ( -fh => $fh, -format => $fmt );
            } else {
                $self->death("Paired SeqIO request, but failed to find pair",
                             $inp2);
            }
        } else {
            $self->death("Paired SeqIO request, but can not interpret primary",
                         $input);
        }
    }
    $self->{NEXTFUNC} ||= \&_next_seq;
    $self->{TERMFUNC} ||= \&_finish_seq;
    $self->{COUNTTAG}   = 'RECORD';
    $self->{COUNTWHAT}  = 'records';
    # Default last-item descriptor is the sequence display id
    unless ($self->last_item_method) {
        $self->last_item_method( sub {
            my $seq = shift;
            return $seq->display_id;
        });
    }
}

# Return the next sequence; for paired input, an array ref of
# [primary, mate] when the mate stream still has a record.
sub _next_seq {
    my $self = shift;
    my $retval = $self->{SEQIO}->next_seq;
    return undef unless ($retval);
    $self->last_item( $retval );
    if (my $sio2 = $self->{SEQIO2}) {
        if (my $bs = $sio2->next_seq()) {
            $retval = [ $retval, $bs ];
        }
    }
    return $retval;
}

# Detach BioPerl readers from their filehandles so close_all() can
# manage the handles without Bio::Root::IO complaining.
sub _finish_seq {
    my $self = shift;
    foreach my $key ('SEQIO', 'SEQIO2') {
        if (my $reader = $self->{$key}) {
            # We are going to handle filehandles on our own
            # BioPerl spews out a bunch of annoying errors via Bio::Root::IO
            $reader->{_filehandle} = undef;
            #$self->{IO} = undef;
        }
    }
}
# Count fasta records by counting '>' description lines, stopping once
# the configured limit (if any) is reached.
sub _count_fasta {
    my $self = shift;
    my ($input) = @_;
    my $num   = 0;
    my $fh    = $self->get_fh($input);
    my $limit = $self->limit;
    while (<$fh>) {
        $num++ if (/^\>/);
        last if ($limit && $num >= $limit);
    }
    close $fh;
    return $num;
}
# Count fastq records.  A fastq record is exactly 4 lines (header,
# sequence, '+', quality).  Fix: the previous code counted EVERY line
# beginning with '@', but quality strings may legitimately start with
# '@' (it is a valid Phred character), which over-counted records.
# Only the first line of each 4-line record is now examined.
sub _count_fastq {
    my $self = shift;
    my ($input) = @_;
    my $num   = 0;
    my $fh    = $self->get_fh($input);
    my $limit = $self->limit;
    my $lineInRec = 0;
    while (<$fh>) {
        if ($lineInRec == 0) {
            # Header line of the 4-line record
            $num++ if (/^\@/);
        }
        $lineInRec = ($lineInRec + 1) % 4;
        last if ($limit && $num >= $limit);
    }
    close $fh;
    return $num;
}
# Count GenBank records by counting 'LOCUS ' header lines, stopping
# once the configured limit (if any) is reached.
sub _count_genbank {
    my $self = shift;
    my ($input) = @_;
    my $num   = 0;
    my $fh    = $self->get_fh($input);
    my $limit = $self->limit;
    while (<$fh>) {
        $num++ if (/^LOCUS /);
        last if ($limit && $num >= $limit);
    }
    close $fh;
    return $num;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# SAX parsing is callback-driven, so _sax_wrapper() needs access to the
# object through this file-scoped variable rather than an argument.
my $globalSelf;

# XML input: unlike the other readers, FriendlySAX drives the entire
# parse from its constructor, dispatching each node to _sax_wrapper().
# The record limit is enforced inside the wrapper by die()-ing with a
# recognizable message, which is caught and ignored here.
sub _init_sax {
    my $self  = shift;
    my $input = $self->input;
    my $args  = $self->input_args() || [];
    $globalSelf = $self;
    $self->{NEXTFUNC} ||= \&_next_sax;
    $self->{TERMFUNC} ||= \&_finish_sax;
    $self->{LASTTIME}   = time;
    $self->{COUNTTAG}   = 'RECORD';
    $self->{COUNTWHAT}  = 'records';
    unless ($self->last_item_method) {
        if ($self->group_method) {
            $self->last_item_method( sub {
                return "Node Group " . $globalSelf->{RECORD};
            });
        } else {
            $self->last_item_method( sub {
                my $rec = shift;
                return sprintf("<%s> %d", $rec->{NAME}, $globalSelf->{RECORD});
            });
        }
    }
    eval {
        # Now get all the nodes
        my $fs = BMS::FriendlySAX->new
            ( -file    => $input,
              # -limit   => $self->limit,
              -verbose => 0,
              -method  => \&_sax_wrapper,
              -limitmsg => $self->{LIMITMSG},
              @{$args},
              );
    };
    if (my $grpm = $self->group_method) {
        # Grouping was being used, and we need to deal with the final group
        my $residual = &{$grpm}();
        # Do not want to trigger grouping logic when we pass the group object
        # (rather than a FriendlySAX node)
        $self->{NOGROUP} = 1;
        &_sax_wrapper($residual) if ($residual);
        $self->{NOGROUP} = 0;
    }
    if ($@) {
        # Only re-raise parse errors that are not the expected
        # limit-reached message (or explicitly ignorable)
        my $expected = $self->{LIMITMSG} || 'user limit';
        unless ($@ =~ /\Q$expected\E|\[IGNORE\]/i) {
            $self->{EXITCODE} = 1;
            $self->graceful_death("FriendlySAX error", $@);
        }
    }
}
# FriendlySAX callback invoked for every parsed XML node. Enforces the user
# record limit (by die()-ing out of the parse), applies optional grouping
# and skip filters, honours modulus selection, and finally hands the node
# to the user's per-record method.
sub _sax_wrapper {
    my ($node) = @_;
    # Recover the object stashed by _init_sax (FriendlySAX passes no context).
    my $self = $globalSelf;
    my $limit = $self->limit;
    my $grpm = $self->group_method;
    if ($limit && $self->{RECORD} >= $limit) {
        my $num = $self->{RECORD};
        my $what = $grpm ? "Node Group $num" : sprintf
            ("<%s> %d", $node->{NAME} || "??", $num );
        my $msg = $self->{LIMITMSG} ||
            "User limit ($limit) halts processing on $what";
        if ($self->{NOGROUP}) {
            # Called with the residual group after the parse finished; just
            # report (once, from the lowest child) rather than dying.
            $self->msg($msg) if ($self->{CHILD} == $self->{LOWKID}
                                 && !$self->{QUIETLIMIT});
            return 0;
        } else {
            # die() is the mechanism used to abort FriendlySAX mid-parse;
            # _init_sax checks $@ against the limit message.
            if ($self->{QUIETLIMIT}) {
                # $self->graceful_shutdown();
                die;
            } else {
                # $self->graceful_shutdown();
                die $msg;
            }
        }
    }
    if ($grpm) {
        # The user is grouping records
        $node = &{$grpm}( $node ) unless ($self->{NOGROUP});
        # An undefined return value indicates an incomplete group
        return unless ($node);
    }
    if (my $skip = $self->skip_record_method) {
        return if (&{$skip}( $node ));
    }
    my $num = ++$self->{RECORD};
    return unless ($self->match_modulus($num));
    $self->last_item( $node );
    $self->{COUNT}++;
    my $meth = $self->method;
    my $prog = $self->progress;
    &{$meth}($node);
    # Emit a progress message at most once per $prog seconds.
    if ($prog && time - $self->{LASTTIME} > $prog) {
        $self->_show_progress;
        $self->{LASTTIME} = time;
    }
}
# SAX mode has no pull-style "next record": BMS::FriendlySAX drives the
# iteration itself through callbacks, so this hook always yields nothing.
sub _next_sax {
    return undef;
}
# Release the input handle reference once SAX processing has completed so
# it can be garbage collected.
sub _finish_sax {
    my ($obj) = @_;
    $obj->{IO} = undef;
}
# Estimate the number of records in an XML input by scanning lines for the
# opening tags named in the input_args (TAG/TAGS). If no tags are specified
# the file is treated as a single record.
# NOTE(review): this is a cheap line scan, not a parse -- it only sees the
# first tag on each line, and the pattern ([^\/]\S+) requires tag names of
# at least two characters; confirm that matches the expected inputs.
sub _count_xml {
    my $self = shift;
    my ($input) = @_;
    my $num = 0;
    my $args = $self->input_args() || [];
    my $pargs = $self->parseparams( @{$args} );
    my $tags = $pargs->{TAG} || $pargs->{TAGS};
    my $limit = $self->limit;
    if ($tags) {
        # Normalise a single tag name into a list, then hash for lookup.
        $tags = [ $tags ] unless (ref($tags));
        my %thash = map { $_ => 1 } @{$tags};
        my $fh = $self->get_fh($input);
        while (<$fh>) {
            if ( /\<\s*([^\/]\S+)/) {
                my $tag = $1; $tag =~ s/\>//;
                if ($thash{$tag}) {
                    $num++;
                    last if ($limit && $num >= $limit);
                }
            }
        }
        close $fh;
    } else {
        $num = 1;
    }
    return $num;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Configure iteration over tabular input (tab-separated or CSV): choose the
# field separator and edge-strip pattern, consume an optional header row
# (possibly scanning forward for one matching LOOKFOR), and switch to
# grouped iteration if GROUPBY was requested.
sub _init_tab {
    my $self = shift;
    my $input = $self->input;
    my $type = $self->input_type;
    my $fh = $self->get_fh($input);
    $self->{IO} = $fh;
    if ( $self->{IOSEP}) {
        # Separator has been manually defined
    } elsif ($type eq 'csv') {
        # Naive CSV: split on '","' style boundaries, strip edge quotes.
        # NOTE(review): this does not handle embedded commas or escaped
        # quotes inside fields -- confirm inputs are simple CSV.
        $self->{IOSEP} = '[\\\'\\"],[\\\'\\"]';
        $self->{IOSTRIP} = '[\\\'\\"]';
    } else {
        $self->{IOSEP} = "\t";
        $self->{IOSTRIP} = "";
    }
    my $func = $self->{NEXTFUNC} ||= \&_next_tab;
    $self->{TERMFUNC} ||= \&_finish_basic;
    if ($self->{SUBTYPES}{HEADER}) {
        # There is a header attached to the file
        my $header;
        my $lookFor = $self->{SUBTYPES}{LOOKFOR};
        while (1) {
            $header = &{$func}( $self );
            if (!$header) {
                last;
            } elsif ($lookFor) {
                # NOTE(review): '\t' in single quotes is a literal
                # backslash-t, not a tab; presumably harmless here since it
                # only serves as a field delimiter for the regex match, but
                # worth confirming against LOOKFOR patterns in use.
                my $chk = join('\t', @{$header});
                last if ($chk =~ /$lookFor/i);
            } else {
                last;
            }
        }
        $self->death("Failed to recover header") unless ($header);
        my @mapped = map { $self->remap_header_name( $_ ) } @{$header};
        $self->{HEAD_ARRAY} = \@mapped;
        $self->{SUBTYPES}{HEADER} = \@mapped;
        # HEADER also doubles as the "return rows as hashes" flag store.
        $self->{HEADER} = \@mapped if ($self->{SUBTYPES}{HASH});
        #for my $i (0..$#{$header}) { printf("[%3d] %s\n", $i, $header->[$i]);}
    }
    $self->{COUNTTAG} = 'LINECOUNT';
    if (my $gb = $self->{SUBTYPES}{GROUPBY}) {
        # The user wants results grouped by a column
        $self->{GROUP} = $gb;
        $self->{STACK} = [];
        $self->{PUSHBACK} = [];
        $self->{NEXTFUNC} = \&_next_tab_group;
        # $self->{COUNTTAG} = 'RECORD';
    }
    unless ($self->last_item_method) {
        # Default "last item" description: the raw line, trimmed of newlines.
        $self->last_item_method( sub {
            my $line = shift;
            $line = "" unless (defined $line);
            $line =~ s/[\n\r]+$//;
            return $line;
        });
    }
}
# Read and parse the next row of tabular input.
# Returns an array ref of fields, or -- when a header was captured and HASH
# mode is on -- a hash ref keyed by header name. Returns undef at EOF.
# Fixes over the previous version:
#   * split(..., -1) keeps trailing empty fields; without the -1 limit,
#     rows ending in one or more empty columns came back short, silently
#     shifting the header-to-column mapping.
#   * EOF is now detected with "defined" so a final unterminated "0" line
#     is not mistaken for end-of-file.
sub _next_tab {
    my $self = shift;
    my $fh = $self->{IO};
    my $line = <$fh>;
    return undef unless (defined $line);
    $self->{LINECOUNT}++;
    $self->last_item( $line );
    chomp $line;
    if (my $strip = $self->{IOSTRIP}) {
        # Clean edges:
        $line =~ s/^$strip//;
        $line =~ s/$strip$//;
    }
    # Remove non-ascii
    $line =~ s/\P{IsASCII}//g if ($self->{SUBTYPES}{CLEANASCII});
    my $sep = $self->{IOSEP};
    # -1 limit preserves trailing empty fields (see note above).
    my @list = split($sep, $line, -1);
    my $retval = \@list;
    if (my $head = $self->{HEADER}) {
        # The user wants the data back as a hash
        my %hash;
        for my $i (0..$#{$head}) {
            $hash{ $head->[$i] } = $list[$i];
        }
        $retval = \%hash;
    }
    return $retval;
}
# Return the next *group* of consecutive rows that share the same value in
# the GROUPBY column (a hash key when rows are hashes, otherwise an array
# index). The first row of the following group is parked in PUSHBACK so it
# is not lost between calls. Returns an array ref of rows, or undef at EOF.
sub _next_tab_group {
    my $self = shift;
    my $in = $self->{GROUP};
    my $ishash = $self->{HEADER};
    my @group;
    while (1) {
        # Prefer a previously parked row over reading a fresh one.
        my $rec = shift @{$self->{PUSHBACK}} || $self->_next_tab;
        last unless ($rec);
        if ($#group < 0) {
            # First record - we need to seed the group
            push @group, $rec;
            next;
        }
        if ($ishash) {
            # We are grouping by hash key
            if ($group[-1]{$in} eq $rec->{$in}) {
                push @group, $rec;
                next;
            }
        } elsif ($group[-1][$in] eq $rec->[$in]) {
            # We are grouping by array index
            push @group, $rec;
            next;
        }
        # This record should be assigned to the next group
        push @{$self->{PUSHBACK}}, $rec;
        last;
    }
    return undef if ($#group < 0);
    $self->last_item('');
    return \@group;
}
# Count records in tabular input: every line is one record. The count is
# capped at the user-configured limit, if one is set.
sub _count_tab {
    my $self = shift;
    my ($input) = @_;
    my $fh    = $self->get_fh($input);
    my $max   = $self->limit;
    my $rows  = 0;
    while (<$fh>) {
        $rows++;
        last if $max && $rows >= $max;
    }
    close $fh;
    return $rows;
}
# Return the current local time formatted like "05 Feb 13:04:22".
# Previously this shelled out to the external `date` program via backticks;
# POSIX::strftime produces the same format without forking a shell, works
# on systems lacking `date`, and is immune to $PATH tampering.
sub nice_date {
    require POSIX;    # core module; loaded lazily, no symbols imported
    return POSIX::strftime('%d %b %H:%M:%S', localtime);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
1;
| VCF/MapLoc | BMS/ForkCritter.pm | Perl | mit | 53,301 |
#!/usr/bin/perl -w
# Copyright 2001, 2002 Rob Edwards
# For updates, more information, or to discuss the scripts
# please contact Rob Edwards at redwards@utmem.edu or via http://www.salmonella.org/
#
# This file is part of The Phage Proteome Scripts developed by Rob Edwards.
#
# These scripts are free software; you can redistribute and/or modify
# them under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# They are distributed in the hope that they will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# in the file (COPYING) along with these scripts; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Convert a CLUSTAL-style alignment into PHYLIP format, writing the result
# to "<input>.phylip". Sequence names are shortened/padded to exactly 10
# characters as PHYLIP requires. Improvements over the original: lexical
# filehandles with three-argument open (the old two-argument open on a
# user-supplied name was unsafe), declared variables, and a checked close.
my $file = shift;
open my $in, '<', $file or die "Can't open $file: $!";
open my $out, '>', "$file.phylip" or die "Can't open $file.phylip: $!";
# Copy the first (header) line through unchanged, minus its newline.
my $line = <$in>;
chomp($line);
print {$out} $line;
while ($line = <$in>) {
    chomp($line);
    unless ($line =~ /^\s/) {
        # A sequence row: first token is the name, the rest is alignment.
        print {$out} "\n";
        my @fields = split /\s+/, $line;
        if (length($fields[0]) > 10) {
            # Shrink long names (drop "contig" and trailing _N.N version).
            $fields[0] =~ s/contig//i;
            $fields[0] =~ s/_\d+\.\d+$//;
        }
        if (length($fields[0]) < 10) {
            # Pad short names out to the fixed 10-character PHYLIP field.
            $fields[0] .= " " x (10 - length($fields[0]));
        }
        print {$out} join (" ", @fields);
    }
    else {
        # Continuation line: collapse leading whitespace to a single space.
        $line =~ s/^\s+/ /;
        print {$out} $line;
    }
}
close $out or die "Can't close $file.phylip: $!";
| linsalrob/bioinformatics | phage_tree/clustal2phylip.pl | Perl | mit | 1,605 |
use strict;
use Data::Dumper;
use Carp;
#
# This is a SAS Component
#
=head1 NAME
get_relationship_HasFunctional
=head1 SYNOPSIS
get_relationship_HasFunctional [-c N] [-a] [--fields field-list] < ids > table.with.fields.added
=head1 DESCRIPTION
This relationship connects a role with the features in which
it plays a functional part.
Example:
get_relationship_HasFunctional -a < ids > table.with.fields.added
would read in a file of ids and add a column for each field in the relationship.
The standard input should be a tab-separated table (i.e., each line
is a tab-separated set of fields). Normally, the last field in each
line would contain the id. If some other column contains the id,
use
-c N
where N is the column (from 1) that contains the id.
This is a pipe command. The input is taken from the standard input, and the
output is to the standard output.
=head1 COMMAND-LINE OPTIONS
Usage: get_relationship_HasFunctional [arguments] < ids > table.with.fields.added
=over 4
=item -c num
Select the identifier from column num
=item -from field-list
Choose a set of fields from the Feature
entity to return. Field-list is a comma-separated list of strings. The
following fields are available:
=over 4
=item id
=item feature_type
=item source_id
=item sequence_length
=item function
=item alias
=back
=item -rel field-list
Choose a set of fields from the relationship to return. Field-list is a comma-separated list of
strings. The following fields are available:
=over 4
=item from_link
=item to_link
=back
=item -to field-list
Choose a set of fields from the Role entity to return. Field-list is a comma-separated list of
strings. The following fields are available:
=over 4
=item id
=item hypothetical
=back
=back
=head1 AUTHORS
L<The SEED Project|http://www.theseed.org>
=cut
use Bio::KBase::Utilities::ScriptThing;
use Bio::KBase::CDMI::CDMIClient;
use Getopt::Long;
#Default fields
my @all_from_fields = ( 'id', 'feature_type', 'source_id', 'sequence_length', 'function', 'alias' );
my @all_rel_fields = ( 'from_link', 'to_link', );
my @all_to_fields = ( 'id', 'hypothetical' );
my %all_from_fields = map { $_ => 1 } @all_from_fields;
my %all_rel_fields = map { $_ => 1 } @all_rel_fields;
my %all_to_fields = map { $_ => 1 } @all_to_fields;
my @default_fields = ('from-link', 'to-link');
my @from_fields;
my @rel_fields;
my @to_fields;
our $usage = <<'END';
Usage: get_relationship_HasFunctional [arguments] < ids > table.with.fields.added
--show-fields
List the available fields.
-c num
Select the identifier from column num
--from field-list
Choose a set of fields from the Feature
entity to return. Field-list is a comma-separated list of strings. The
following fields are available:
id
feature_type
source_id
sequence_length
function
alias
--rel field-list
Choose a set of fields from the relationship to return. Field-list is a comma-separated list of
strings. The following fields are available:
from_link
to_link
--to field-list
Choose a set of fields from the Role entity to
return. Field-list is a comma-separated list of strings. The following fields are available:
id
hypothetical
END
my $column;
my $input_file;
my $a;
my $f;
my $r;
my $t;
my $help;
my $show_fields;
my $i = "-";
my $geO = Bio::KBase::CDMI::CDMIClient->new_get_entity_for_script("c=i" => \$column,
"h" => \$help,
"show-fields" => \$show_fields,
"a" => \$a,
"from=s" => \$f,
"rel=s" => \$r,
"to=s" => \$t,
'i=s' => \$i);
if ($help) {
print $usage;
exit 0;
}
if ($show_fields)
{
print "from fields:\n";
print " $_\n" foreach @all_from_fields;
print "relation fields:\n";
print " $_\n" foreach @all_rel_fields;
print "to fields:\n";
print " $_\n" foreach @all_to_fields;
exit 0;
}
if ($a && ($f || $r || $t)) {die $usage};
if ($a) {
@from_fields = @all_from_fields;
@rel_fields = @all_rel_fields;
@to_fields = @all_to_fields;
} elsif ($f || $t || $r) {
my $err = 0;
if ($f) {
@from_fields = split(",", $f);
$err += check_fields(\@from_fields, %all_from_fields);
}
if ($r) {
@rel_fields = split(",", $r);
$err += check_fields(\@rel_fields, %all_rel_fields);
}
if ($t) {
@to_fields = split(",", $t);
$err += check_fields(\@to_fields, %all_to_fields);
}
if ($err) {exit 1;}
} else {
@rel_fields = @default_fields;
}
my $ih;
if ($input_file)
{
open $ih, "<", $input_file or die "Cannot open input file $input_file: $!";
}
else
{
$ih = \*STDIN;
}
# Main loop: read identifiers from the input in batches, fetch the
# HasFunctional relationship for each batch, bucket the result rows by
# their "from" id, then echo every input line with the selected fields
# appended -- one output line per matching relationship row.
while (my @tuples = Bio::KBase::Utilities::ScriptThing::GetBatch($ih, undef, $column)) {
    my @h = map { $_->[0] } @tuples;
    my $h = $geO->get_relationship_HasFunctional(\@h, \@from_fields, \@rel_fields, \@to_fields);
    my %results;
    for my $result (@$h) {
        # Each $result is a triple: [from-entity, relationship, to-entity].
        my @from;
        my @rel;
        my @to;
        my $from_id;
        my $res = $result->[0];
        for my $key (@from_fields) {
                push (@from,$res->{$key});
        }
        $res = $result->[1];
        $from_id = $res->{'from_link'};
        for my $key (@rel_fields) {
                push (@rel,$res->{$key});
        }
        $res = $result->[2];
        for my $key (@to_fields) {
                push (@to,$res->{$key});
        }
        if ($from_id) {
                push @{$results{$from_id}}, [@from, @rel, @to];
        }
    }
    # Emit results in input order, preserving each original line's text.
    for my $tuple (@tuples)
    {
        my($id, $line) = @$tuple;
        my $resultsForId = $results{$id};
        if ($resultsForId) {
            for my $result (@$resultsForId) {
                print join("\t", $line, @$result) . "\n";
            }
        }
    }
}
# Validate a list of requested field names against the set of known fields.
# Prints a diagnostic to STDERR listing any unrecognized names.
# Returns 1 when at least one field is unknown, 0 when all are valid.
sub check_fields {
    my ($fields, %all_fields) = @_;
    my @err = grep { !$all_fields{$_} } @{$fields};
    return 0 unless @err;
    my @f = keys %all_fields;
    print STDERR "get_relationship_HasFunctional: unknown fields @err. Valid fields are @f\n";
    return 1;
}
| kbase/kb_seed | scripts/get_relationship_HasFunctional.pl | Perl | mit | 5,988 |
package O2::Template::Node;
# Base class for nodes in the template parse tree. Every node carries a
# value (its text/tag payload), an optional parent and an ordered list of
# children. "Anonymous" nodes (O2::Template::Node::Anonymous) are treated
# as transparent: they are flattened when listing children and skipped when
# walking up to a parent. The root node is O2::Template::Node::Root.
use strict;

#-----------------------------------------------------------------------------
# Constructor. $value is the node's payload.
sub new {
  my ($package, $value) = @_;
  my $obj = bless {
    children => [],
    parent   => undef,
    value    => $value,
  }, $package;
  return $obj;
}
#-----------------------------------------------------------------------------
# Return this node's children, transparently flattening Anonymous nodes
# (their children are spliced in at the Anonymous node's position).
sub getChildren {
  my ($obj) = @_;
  my @children;
  my @_children = @{ $obj->{children} };
  foreach my $child (@_children) {
    if (ref($child) eq 'O2::Template::Node::Anonymous') {
      push @children, $child->getChildren();
    }
    else {
      push @children, $child;
    }
  }
  return @children;
}
#-----------------------------------------------------------------------------
# Append $node as the last child and set its parent pointer back to us.
sub addChild {
  my ($obj, $node) = @_;
  push @{$obj->{children}}, $node;
  $node->setParent( $obj );
  return;
}
#-----------------------------------------------------------------------------
# Every node except the Root has a parent.
sub hasParent {
  my ($obj) = @_;
  return ref($obj) ne 'O2::Template::Node::Root';
}
#-----------------------------------------------------------------------------
# Return the nearest non-Anonymous ancestor.
sub getParent {
  my ($obj) = @_;
  return $obj->{parent} if ref( $obj->{parent} ) ne 'O2::Template::Node::Anonymous';
  return $obj->{parent}->getParent();
}
#-----------------------------------------------------------------------------
sub setParent {
  my ($obj, $node) = @_;
  $obj->{parent} = $node;
  return;
}
#-----------------------------------------------------------------------------
# Return the sibling immediately before this node, or nothing for the
# first child (or the Root, which has no siblings at all).
sub getPreviousSibling {
  my ($obj) = @_;
  return if ref($obj) eq 'O2::Template::Node::Root'; # Root has no siblings
  my @siblings = $obj->getParent()->getChildren();
  my $prev;
  foreach my $node (@siblings) {
    return $prev if $node eq $obj;
    $prev = $node;
  }
  return;
}
#-----------------------------------------------------------------------------
sub getValue {
  my ($obj) = @_;
  return $obj->{value};
}
#-----------------------------------------------------------------------------
sub setValue {
  my ($obj, $value) = @_;
  $obj->{value} = $value;
  return;
}
#-----------------------------------------------------------------------------
# Append $value to the node's current value (or set it when still undef).
# Bug fix: the previous implementation tested truthiness of the current
# value, so a stored "0" was discarded instead of being appended to.
sub appendValue {
  my ($obj, $value) = @_;
  $obj->{value} = defined $obj->{value} ? $obj->{value} . $value : $value;
  return;
}
#-----------------------------------------------------------------------------
# Return the sibling immediately after this node, or nothing for the last
# child (or the Root).
sub getNextSibling {
  my ($obj) = @_;
  return if ref($obj) eq 'O2::Template::Node::Root'; # Root has no siblings
  my @siblings = $obj->getParent()->getChildren();
  my $current;
  foreach my $node (@siblings) {
    return $node if $current;
    $current = $node if $node eq $obj;
  }
  return;
}
#-----------------------------------------------------------------------------
# Like getNextSibling, but skip over nodes whose isSignificant() is false.
sub getNextSignificantSibling {
  my ($obj) = @_;
  my $sibling = $obj->getNextSibling();
  while ($sibling) {
    return $sibling if $sibling->isSignificant();
    $sibling = $sibling->getNextSibling();
  }
  return;
}
#-----------------------------------------------------------------------------
# Like getPreviousSibling, but skip over insignificant nodes.
sub getPreviousSignificantSibling {
  my ($obj) = @_;
  my $sibling = $obj->getPreviousSibling();
  while ($sibling) {
    return $sibling if $sibling->isSignificant();
    $sibling = $sibling->getPreviousSibling();
  }
  return;
}
#-----------------------------------------------------------------------------
# Return the (line, column) position recorded for this node in the source.
sub getLocation {
  my ($obj) = @_;
  return ($obj->{line}, $obj->{column});
}
#-----------------------------------------------------------------------------
sub setLocation {
  my ($obj, $line, $column) = @_;
  $obj->{line}   = $line;
  $obj->{column} = $column;
  return 1;
}
#-----------------------------------------------------------------------------
# Debugging aid: pretty-print this subtree, one indented line per node,
# with embedded newlines in values shown as "\n".
sub toString {
  my ($obj, $indent) = @_;
  $indent ||= '';
  my $value = $obj->getValue();
  $value =~ s{ \n }{\\n}xmsg;
  my $str = $indent . ref($obj) . " $value\n";
  foreach my $child ($obj->getChildren()) {
    $str .= $child->toString("  $indent");
  }
  return $str;
}
#-----------------------------------------------------------------------------
# Base nodes are always significant; subclasses may override.
sub isSignificant {
  return 1;
}
#-----------------------------------------------------------------------------
# True when any ancestor is an o2 <comment> tag.
sub isWithinComment {
  my ($obj) = @_;
  my $node = $obj;
  while ($node->hasParent()) {
    my $parent = $node->getParent();
    return 1 if ref($parent) eq 'O2::Template::Node::O2Tag' && $parent->getTagName() eq 'comment';
    $node = $parent;
  }
  return 0;
}
#-----------------------------------------------------------------------------
1;
| haakonsk/O2-Framework | lib/O2/Template/Node.pm | Perl | mit | 4,569 |
package omnitool::applications::otadmin::datatypes::access_roles;
# OmniClass subclass backing the "Access Roles" datatype in the OT Admin
# application; see the usage examples after __END__.
# is a sub-class of OmniClass
use parent 'omnitool::omniclass';
# primary key of datatype
$omnitool::applications::otadmin::datatypes::access_roles::dt = '12_1';
use strict;
# any special new() routines
sub init {
    my $self = shift;
}
# routine to generate a list of Applications for which this Access Role might be used
# Returns ($options, $options_keys): a hash ref of app primary key => app
# name, plus an array ref of the keys in name-sorted order (the order comes
# from the sort_field on the query below).
sub options_used_in_applications {
    my $self = shift;
    my ($data_code) = @_; # primary key for recording updating, if applicable
    my ($application_omniclass_object, $app, $options, $options_keys);
    # first load up all apps in this OT Admin DB
    # (skip_hooks/skip_metainfo keep the load lightweight; only 'name' is
    # fetched since that is all the option list needs)
    $application_omniclass_object = $self->{luggage}{object_factory}->omniclass_object(
        'dt' => '1_1',
        'skip_hooks' => 1,
        'skip_metainfo' => 1,
        'load_fields' => 'name',
        'sort_field' => 'name',
        'data_codes' => ['all']
    );
    # then just load them in
    foreach $app (@{ $application_omniclass_object->{records_keys} }) {
        $$options{$app} = $application_omniclass_object->{records}{$app}{name};
        push(@$options_keys,$app);
    }
    # return results
    return ($options,$options_keys);
}
1;
__END__
Possible / example routines are below. Please copy and paste them above the '1;' above
to make use of them. See Pod documentation at the end of omniclass.pm to see usage suggestions.
Also, please be sure to save this file as $OTHOME/code/omnitool/applications/otadmin/datatypes/.pm
# routine to run at start of load() before loading records from the database.
sub pre_load {
my $self = shift;
my ($args) = @_; # args passed to load()
}
# virtual field hook subroutine; should be named 'field_' + 'table_column' val
# likely will have more than one of these
sub field_XYZ {
my $self = shift;
my ($args) = @_; # args passed to load()
=cut
Quick example - not for real use.
foreach $r (@{$self->{records_keys}}) {
$self->{records}{$r}{enhanced_name}[0] = {
'text' => $self->{records}{$r}{name},
'uri' => '#/tools/ot_admin/tools_mgr/'.$self->{metainfo}{$r}{altcode},
'glyph' => $self->{records}{$r}{icon_fa_glyph}
};
}
=cut
}
# routine to run at the end of load(), after loading records from the database.
sub post_load {
my $self = shift;
my ($args) = @_; # args passed to load()
}
# routine to run at the start of save(), before creating or updating the record(s).
sub pre_save {
my $self = shift;
my ($args) = @_; # args passed to save()
}
# routine to run at the end of save(). Good for any clean-up actions or sending notices.
sub post_save {
my $self = shift;
my ($args) = @_; # args passed to save()
}
# routine to run at the start of search(), before setting up and executing a search
sub pre_search {
my $self = shift;
my ($args) = @_; # args passed to search()
# uncomment to stop the search
# $$args{cancel_search} = 1;
}
# routine to run towards the end of search(), after the search was executed but before
# the records are auto-loaded
sub post_search {
my $self = shift;
my ($args) = @_; # args passed to search()
# where primary keys of matched records will be
# @{ $self->{search_results} }
}
# routine to run before a record is deleted, but after the data lock is checked
sub pre_delete {
my $self = shift;
my ($args) = @_; # args passed to delete()
# uncomment to stop the delete
# $$args{cancel_delete} = 1;
}
# routine to run after the deletion has occurred
sub post_delete {
my $self = shift;
my ($args) = @_; # args passed to delete()
}
# routine to run in form() before we prepare the individual fields
sub prepare_for_form_fields {
my $self = shift;
my ($form) = @_; # arguments passed to form() plus the current form structure data
}
# example routine to generate options for a select / radio / checkboxes field
# should be named 'options_' + 'table_column' val, where table_column is from target Datatype Field
# likely will have more than one of these
sub options_XYZ {
my $self = shift;
	my ($data_code) = @_; # primary key for recording updating, if applicable
=cut
Example of setting options from JavaScript files in from directory
my $js_directory = $ENV{OTHOME}.'/code/omnitool/applications/'.$self->{parent_application_code_dir}.'/javascript/';
($options,$options_keys) = $self->options_from_directory($js_directory,'js');
Example of just hard-setting it
$options = {
'ginger' => 'Perfect',
'pepper' => 'Loyal',
'lorelei' => 'Brilliant',
};
	$options_keys = ['ginger','pepper','lorelei'];
=cut
# return results
return ($options,$options_keys);
}
# example of hook to call after running form() and building a create/update form
# useful for modifying that form structure
sub post_form_operations {
my $self = shift;
my ($form) = @_; # the complete form structure
# easy / uncessary change:
# $$form{instructions} .= 'This text was added to the form instrutions.';
}
# example hook to run after create/update form was submitted and it passed basic
# validation; perform additional validation logic
sub post_validate_form {
my $self = shift;
my ($form) = @_; # the complete form structure
# to stop the form submission from going through:
# first, mark the field as error:
# $$form{fields}{$field}{field_error} = 1;
# then, give a reason in the offending form field
# $$form{fields}{$field}{error_instructions} = 'Some reason';
# then return a '1'
# return 1;
# to let it submit without problem, return 0;
}
| ericschernoff/omnitool | omnitool/applications/otadmin/datatypes/access_roles.pm | Perl | mit | 5,394 |
package O2::DB::LimitSth;

use strict;

# Statement handler class for emulating "sql limit".
# (And supplying the warm & cozy "next()" method;-)
# Wraps a DBI statement handle: skips the first `start` rows, caps output at
# `rowCount` rows, optionally serves rows from / records rows into the
# O2 DB query cache, and offers next()/nextHash()-style fetch methods.

#-------------------------------------------------------------------------------
# %init : sth - dbi statement handler, start - move forward to this row index, rowCount - only return this many rows
sub new {
  my ($pkg, %init) = @_;
  
  # skip first rows manually?
  if ($init{start} && $init{start} > 0) {
    $init{rowCountStart} = $init{start};
    $init{sth}->fetchrow_array() while $init{start}-- > 0;
  }
  
  return bless({
    dbh             => $init{dbh},
    sth             => $init{sth},
    rowCount        => $init{rowCount},
    rowCountStart   => ($init{rowCountStart} || 0),
    useRowCount     => ($init{rowCount} && $init{rowCount} > 0),
    profilingId     => $init{profilingId}, # used if profiling mode is set
    sql             => ($init{sql}             || undef),
    cachedSQLResult => ($init{cachedSQLResult} || undef),
    _tmpArray       => [],
  }, $pkg);
}
#-------------------------------------------------------------------------------
# Discard the next $num rows (default 1), from the cache if one is active,
# otherwise from the live statement handle.
sub skip {
  my ($obj, $num) = @_;
  $num ||= 1;
  if ($#{ $obj->{cachedSQLResult} } > -1) {
    shift @{ $obj->{cachedSQLResult} } while $num-- > 0;
  }
  else {
    $obj->{sth}->fetchrow_array() while $num-- > 0;
  }
}
#-------------------------------------------------------------------------------
# Execute the prepared statement with the given placeholders. If the fully
# expanded SQL has a cached result, serve from the cache and skip the DB
# round trip entirely.
sub execute {
  my ($obj, @placeHolders) = @_;
  @placeHolders = $obj->_getDbUtil()->encodePlaceHolders(@placeHolders);
  my $tmpSql = $obj->{dbh}->_expandPH($obj->{sql}, @placeHolders);
  my $cacheVal = $obj->{dbh}->getCachedSql($tmpSql, \@placeHolders);
  $obj->{executeSql} = $tmpSql;
  $obj->{_tmpArray} = [];
  if ($cacheVal) {
    $obj->{cachedSQLResult} = $cacheVal;
    return $obj;
  }
  my $pid;
  $pid = $obj->{dbh}->setupSubProfilingOfProfilingId($obj->{profilingId}, 'execute', \@placeHolders, caller) if $obj->{profilingId};
  if ( !eval { $obj->{sth}->execute(@placeHolders) } || $@ ) {
    die "Error executing '$tmpSql': " . $obj->{sth}->errstr();
  }
  $obj->{hasExecutedSql} = 1;
  $obj->{dbh}->endProfiling($pid) if $pid;
  return $obj;
}
#-------------------------------------------------------------------------------
# Fetch methods: all delegate to _fetchrow with the desired return shape.
sub next {
  my ($obj) = @_;
  return $obj->_fetchrow('array');
}
#-------------------------------------------------------------------------------
sub nextArray {
  my ($obj) = @_;
  return $obj->_fetchrow('array');
}
#-------------------------------------------------------------------------------
sub nextArrayRef {
  my ($obj) = @_;
  return $obj->_fetchrow('arrayref');
}
#-------------------------------------------------------------------------------
sub nextHash {
  my ($obj) = @_;
  return $obj->_fetchrow('hash');
}
#-------------------------------------------------------------------------------
sub nextHashRef {
  my ($obj) = @_;
  return $obj->_fetchrow('hashref');
}
#-------------------------------------------------------------------------------
# Core fetch: serve the next row from the cache when available, otherwise
# from DBI (always fetched as a hashref so the name => value structure can
# be cached), enforcing the row limit and converting to the requested
# $datatype ('array', 'arrayref', 'hash' or 'hashref').
sub _fetchrow {
  my ($obj, $datatype) = @_;
  if ($obj->{dbh}->{o2DBCacheEnabled} && $obj->{sql} && $obj->{cachedSQLResult} ) {
    if ($#{ $obj->{cachedSQLResult} } >= 0) {
      my $cacheVal;
      $cacheVal = shift @{ $obj->{cachedSQLResult} };
      if (defined $cacheVal) {
        return @{ $obj->{dbh}->_asArrayRef($obj->{sql},$cacheVal) } if $datatype eq 'array';
        return    $obj->{dbh}->_asArrayRef($obj->{sql},$cacheVal)   if $datatype eq 'arrayref';
        return    $cacheVal                                         if $datatype eq 'hashref';
        return %{$cacheVal}                                         if $datatype eq 'hash';
      }
    }
    return unless $obj->{hasExecutedSql} && $obj->{sth} && $obj->{executeSql}; #if we have an sth and we are in prepare execute mode
  }
  
  # return empty list and close statement handle when limit is reached.
  if ( $obj->{useRowCount} ) {
    $obj->{sth}->finish() if $obj->{rowCount}-- == 0;
    return if $obj->{rowCount} < 0;
  }
  
  # because of caching we always have to it out in hashref so we can keep the name => value structue
  my $result = $obj->{sth}->fetchrow_hashref();
  $result = $obj->_getDbUtil()->decodeResult($result);
  $obj->_cacheResult($result) if $obj->{dbh}->{o2DBCacheEnabled};
  
  return unless defined $result;
  
  return @{ $obj->{dbh}->_asArrayRef($obj->{sql},$result) } if $datatype eq 'array';
  return    $obj->{dbh}->_asArrayRef($obj->{sql},$result)   if $datatype eq 'arrayref';
  return    $result                                         if $datatype eq 'hashref';
  if ($datatype eq 'hash') {
    return if ref($result) ne 'HASH';
    return %{ $result };
  }
  die "Illegal datatype: $datatype";
}
#-------------------------------------------------------------------------------
# Accumulate a *copy* of each fetched row in _tmpArray so the complete
# result set can be written to the query cache once the statement finishes.
# When limitSelect is active the rows are nested under the start offset.
sub _cacheResult {
  my ($obj, $valToCache) = @_;
  if (ref $valToCache eq 'ARRAY' && $#{$valToCache} > -1) {
    my @copy = @{$valToCache};
    if ($obj->{rowCountStart} && $obj->{useRowCount}) { # <-- limitSelect is used
      push @{ $obj->{_tmpArray}->[ $obj->{rowCountStart} ] }, \@copy;
    }
    else {
      push @{ $obj->{_tmpArray} }, \@copy;
    }
  }
  elsif (ref $valToCache eq 'HASH') {
    my %copy = %{$valToCache};
    if ($obj->{rowCountStart} && $obj->{useRowCount}) { # <-- limitSelect is used
      push @{ $obj->{_tmpArray}->[ $obj->{rowCountStart} ] },\%copy;
    }
    else {
      push @{ $obj->{_tmpArray} }, \%copy;
    }
  }
  if (!$valToCache) {
    # An undef row marks end-of-results: flush what we gathered so far.
    $obj->setResultsOnSQL( $obj->{_tmpArray} ); # incremantal set
  }
}
#-------------------------------------------------------------------------------
sub _getDbUtil {
  my ($obj) = @_;
  return $obj->{dbh}->_getDbUtil();
}
#-------------------------------------------------------------------------------
# Close the underlying statement handle, stop profiling, and persist the
# collected rows to the query cache when caching is enabled.
sub finish {
  my ($obj) = @_;
  $obj->{sth}->finish() if defined $obj->{sth}; # If we are in "cache mode", sth is undefined
  $obj->{dbh}->endProfiling( $obj->{profilingId} ) if $obj->{profilingId};
  if ( !$obj->{cachedSQLResult} && $obj->{sth} && $obj->{dbh}->{o2DBCacheEnabled} && $obj->{_tmpArray} && ( $obj->{executeSql} || $obj->{sql} ) ) {
    $obj->setResultsOnSQL( $obj->{_tmpArray} );
    $obj->{_tmpArray} = undef;
  }
}
#-------------------------------------------------------------------------------
sub DESTROY {
  my ($obj) = @_;
  $obj->finish();
}
#-----------------------------------------------------------------------------------
# O2 DB caching logic
# Store $results in the cache keyed on the executed SQL, unless we are
# replaying a cached result already or the SQL is marked non-cachable.
sub setResultsOnSQL {
  my ($obj, $results) = @_;
  if (defined $obj->{cachedSQLResult} && !$obj->{sth}) {
    return;
  }
  my $sqlToCacheOn = $obj->{executeSql} || $obj->{sql};
  if ($obj->{dbh}->{o2DBCacheHandler}->sqlIsCachable($sqlToCacheOn)) {
    $obj->{dbh}->{o2DBCacheHandler}->setSQL($sqlToCacheOn, $results);
  }
}
#-------------------------------------------------------------------------------
1;
| haakonsk/O2-Framework | lib/O2/DB/LimitSth.pm | Perl | mit | 6,931 |
#!/usr/bin/perl
use warnings;
use strict;    # catch undeclared variables and other compile-time mistakes
#print "Hello world!\n";
# Print a plain-English breakdown of the regex used to find printf-style
# named substitutions such as "%(name)" (a '%', a parenthesized identifier).
use YAPE::Regex::Explain;
print YAPE::Regex::Explain->new('%\(([a-zA-Z_]\w*)\)')->explain;
| niieani/napkin | tests/test-expression.pl | Perl | mit | 198 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::inin::mediaserver::snmp::mode::component;

use base qw(centreon::plugins::templates::hardware);

use strict;
use warnings;

# Declare which hardware sections this mode handles and how each raw SNMP
# status string maps to a Centreon severity; overridable at runtime via
# --threshold-overload (see the POD below).
sub set_system {
    my ($self, %options) = @_;
    
    $self->{regexp_threshold_overload_check_section_option} = '^(device)$';
    
    # snmp_execute() below is invoked by the hardware template as hook 2.
    $self->{cb_hook2} = 'snmp_execute';
    
    $self->{thresholds} = {
        device => [
            ['unknown', 'UNKNOWN'],
            ['up', 'OK'],
            ['down', 'CRITICAL'],
            ['congested', 'WARNING'],
            ['restarting', 'OK'],
            ['quiescing', 'OK'],
            ['testing', 'OK'],
        ],
    };
    
    $self->{components_path} = 'apps::inin::mediaserver::snmp::mode::components';
    $self->{components_module} = ['device'];
}

# Constructor: delegates to the hardware template with absent/performance
# tracking and eager component loading disabled.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, no_absent => 1, no_performance => 1, no_load_components => 1);
    bless $self, $class;
    
    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                });
    
    return $self;
}

# Fetch all OID tables queued up by the component modules in one SNMP call.
sub snmp_execute {
    my ($self, %options) = @_;
    
    $self->{snmp} = $options{snmp};
    $self->{results} = $self->{snmp}->get_multiple_table(oids => $self->{request});
}

1;
=head1 MODE
Check hardware devices.
=over 8
=item B<--component>
Which component to check (Default: '.*').
Can be: 'device'.
=item B<--no-component>
Return an error if no compenents are checked.
If total (with skipped) is 0. (Default: 'critical' returns).
=item B<--threshold-overload>
Set to overload default threshold values (syntax: section,[instance,]status,regexp)
It used before default thresholds (order stays).
Example: --threshold-overload='device,WARNING,restarting'
=back
=cut
package apps::inin::mediaserver::snmp::mode::components::device;

use strict;
use warnings;

# i3MsGeneralInfoOperStatus integer values -> human-readable states.
my %map_status = (1 => 'unknown', 2 => 'up', 3 => 'down', 4 => 'congested',
    5 => 'restarting', 6 => 'quiescing', 7 => 'testing'
);

my $mapping = {
    i3MsGeneralInfoOperStatus => { oid => '.1.3.6.1.4.1.2793.8227.1.2', map => \%map_status },
};

# Queue the device-status OID for the bulk SNMP request made in
# snmp_execute() by the parent mode.
sub load {
    my ($self) = @_;
    
    push @{$self->{request}}, { oid => $mapping->{i3MsGeneralInfoOperStatus}->{oid} };
}

# Evaluate the fetched device status against the severity thresholds and
# raise a non-OK message when warranted. Respects --filter for this section.
sub check {
    my ($self) = @_;
    
    $self->{output}->output_add(long_msg => "Checking devices");
    $self->{components}->{device} = {name => 'devices', total => 0, skip => 0};
    return if ($self->check_filter(section => 'device'));
    
    my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$mapping->{i3MsGeneralInfoOperStatus}->{oid}}, instance => '0');
    return if (!defined($result->{i3MsGeneralInfoOperStatus}));
    
    $self->{components}->{device}->{total}++;
    $self->{output}->output_add(long_msg => sprintf("device status is '%s' [instance = %s]",
                                                    $result->{i3MsGeneralInfoOperStatus}, '0'));
    my $exit = $self->get_severity(section => 'device', value => $result->{i3MsGeneralInfoOperStatus});
    if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
        $self->{output}->output_add(severity => $exit,
                                    short_msg => sprintf("Device status is '%s'", $result->{i3MsGeneralInfoOperStatus}));
    }
}

1;
| wilfriedcomte/centreon-plugins | apps/inin/mediaserver/snmp/mode/component.pm | Perl | apache-2.0 | 4,123 |
package Api::PriceList;

use strict;
use warnings;

use HTTP::Tiny;
use Data::Dumper;
use HTTP::Request;
use LWP::UserAgent;
use JSON::Parse 'valid_json';
use JSON qw( decode_json encode_json );

############################################################################################
# POST a price-list definition to the remote API at <$uri>/prices/save.
#
# Parameters:
#   $uri      - base URI of the service
#   $name     - price-list name (serialized as a JSON string)
#   $number   - price-list number (serialized as a JSON number)
#   $net      - 'on' when prices are net; anything else (or undef) means gross
#   $currency - currency identifier; the original code emitted it unquoted,
#               i.e. as a JSON number, so it is numified here as well
#
# Returns the HTTP::Response object so callers can inspect the outcome.
sub main {
    my ($self, $uri, $name, $number, $net, $currency) = @_;

    # encode_json escapes quotes/backslashes in $name; the previous raw string
    # interpolation produced invalid JSON (or allowed injection) for such values.
    my $json = encode_json({
        number    => 0 + $number,     # force numeric, matching the old unquoted form
        name      => "$name",
        currency  => 0 + $currency,   # unquoted in the old template - presumably an id; verify against API
        netPrices => (defined $net && $net eq 'on') ? JSON::true : JSON::false,
    });

    my $req = HTTP::Request->new( 'POST', $uri.'/prices/save' );
    $req->header(
        'Content-Type' => 'application/json',
        'charset'      => 'utf-8',
    );
    $req->content( $json );

    my $lwp = LWP::UserAgent->new;
    my $response = $lwp->request( $req );
    return $response;   # previously returned implicitly; make it explicit
}
############################################################################################
1;
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Adds GRC genomic mapping to the dna align feature table
# in the HAP pipeline with their cigar strings. Note that
# in some cases there is more than one alignment per patch.
#
# Example:
#
# perl add_GRC_align_features.pl -dbhost genebuildn \
# -dbname homo_sapiens_core_nn_nn -dbuser user -dbpass pass \
# -gca_patch_release GCA_000001405.20_GRCh38.p5 -verbose
use strict;
use warnings;
use Getopt::Long;
use Net::FTP;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Slice;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::DBSQL::SliceAdaptor;
use Bio::EnsEMBL::DBSQL::AssemblyExceptionFeatureAdaptor;
use Bio::EnsEMBL::AssemblyExceptionFeature;
use Bio::EnsEMBL::DnaDnaAlignFeature;
use Bio::EnsEMBL::Analysis;
$| = 1;    # autoflush STDOUT so progress messages appear immediately

# Connection and behaviour defaults; all overridable on the command line.
my $dbname = '';
my $dbhost = '';
my $dbuser = 'ensro';
my $dbpass = '';
my $dbport = '3306';
my $gca_patch_release = '';
my $store = 0;                    # dry-run unless -write is given
my $external_db_id = '50692'; # GRC_alignment_import
my $syn_external_db_id = '50710'; # seq_region_synonym slice type - i.e. INSDC
my $verbose = 0;
my @patch_types = ('PATCH_FIX','PATCH_NOVEL','HAP');
my @ftpdir_types = ('ALT_REF_LOCI_','PATCHES'); # all ALT_REF_LOCI_* will be included
my @dna_align_features = ();      # features accumulated here, stored at the end

&GetOptions(
            'dbhost:s'            => \$dbhost,
            'dbuser:s'            => \$dbuser,
            'dbpass:s'            => \$dbpass,
            'dbname:s'            => \$dbname,
            'dbport:n'            => \$dbport,
            'gca_patch_release:s' => \$gca_patch_release,
            'external_db_id:n'    => \$external_db_id,
            'write!'              => \$store,
            'verbose!'            => \$verbose,
           );

# The GCA accession (e.g. GCA_000001405.20_GRCh38.p5) is mandatory: it names
# the NCBI FTP directory everything else is fetched from.
if(!$gca_patch_release){
  throw ("Need to specify gca accession and assembly version with -gca_patch_release.\n");
}
# Download alt_scaffold_placement.txt from each patch/haplotype directory on
# the NCBI FTP site to learn which GFF alignment file belongs to each patch,
# then fetch those GFF files. Raw alignment lines end up in %align_str.
my %ftp_filename=();
my ($content, $remote_file_handle) = "";
# In-memory filehandle: every FTP get() below lands in the $content scalar.
open($remote_file_handle, '>', \$content);
my $ftp = Net::FTP->new('ftp.ncbi.nlm.nih.gov', Debug => 0)
  or die "Can't connect to NCBI FTP: $@";
$ftp->login('anonymous', '-anonymous@')
  or die 'Cannot login ', $ftp->message;
chomp $gca_patch_release;
# NOTE(review): the path is hard-coded to Mus_musculus even though the usage
# example mentions homo_sapiens - confirm the species directory is intended.
my $ncbi_patch_release_wd = "/genomes/genbank/vertebrate_mammalian/Mus_musculus/all_assembly_versions/".$gca_patch_release."/".$gca_patch_release."_assembly_structure";
my $ncbi_wd = $ncbi_patch_release_wd;
$ftp->cwd($ncbi_wd);
# get the list of ftp dirs which contain patches and haplotypes data
my @patches_ftpdirs = ();
my @ftpdirs = $ftp->ls();
foreach my $ftpdir (@ftpdirs) {
  foreach my $ftpdir_type (@ftpdir_types) {
    if ($ftpdir =~ m/$ftpdir_type/) {
      push(@patches_ftpdirs,$ftpdir);
    }
  }
}
my %align_str = ();   # patch name -> arrayref of raw GFF alignment lines
foreach my $patches_ftpdir (@patches_ftpdirs) {
  %ftp_filename = ();
  print "---Processing directory $patches_ftpdir\n";
  my $ncbi_wd = $ncbi_patch_release_wd."/".$patches_ftpdir."/alt_scaffolds/";
  $ftp->cwd($ncbi_wd)
    or die 'Cannot change working directory ', $ftp->message;
  close $remote_file_handle;
  open($remote_file_handle, '>', \$content);   # reset the in-memory buffer
  $ftp->get('alt_scaffold_placement.txt', $remote_file_handle)
    or die "get failed ", $ftp->message;
  my @asp_lines = split /\n/, $content;
  foreach my $asp_line (@asp_lines) {
    next if $asp_line =~ /^#/;   # skip header lines
    my @elem = split /\t/, $asp_line;
    # Column 2 is taken as the patch name; the GFF filename is built from
    # columns 3 and 6 (presumably alt-scaffold and parent accessions - verify
    # against the placement-file header if in doubt).
    my $patch_name = $elem[2];
    my $file_name = $elem[3]."_".$elem[6].".gff";
    print "Filename: $file_name\t\tPatchname: $patch_name\n" if $verbose;
    $ftp_filename{$patch_name} = $file_name;
  }
  # change directory to where the GRC alignments are kept:
  $ncbi_wd = "alignments";
  $ftp->cwd($ncbi_wd) or die 'Cannot change working directory ', $ftp->message;
  # hash of arrays - there may be more than one alignment per file if they
  # have been manually annotated. However the GRC may change all to one
  # line in the near future; in the meantime, we need to deal with them.
  foreach my $patch (keys %ftp_filename) {
    close $remote_file_handle;
    open($remote_file_handle, '>', \$content);
    $ftp->get($ftp_filename{$patch}, $remote_file_handle)
      or die "get failed ", $ftp->message;
    my @lines = split "\n", $content;
    foreach my $line (@lines) {
      next if $line =~ /^\#/;
      # We'll parse the data later because we need most of it.
      push @{$align_str{$patch}},$line;
      # In GRCh37, the HAP names were shortened like HSCHR17_1 instead of HSCHR17_1_CTG5
      # In GRCh38 and GRCm38, the HAP and PATCHES names were extended like CHR_HSCHR17_1 instead of HSCHR17_1
      # so I'll add a 'duplicated' line associated to the new name too
      # so that when the HAP and PATCHES names are fetched from our DB, there can be a match
      my $new_hap_name = "CHR_".$patch;
      push @{$align_str{$new_hap_name}},$line;
    }
  }
} # endif patches_ftpdir
# Connect to the core database the alignment features will be written to.
my $db = new Bio::EnsEMBL::DBSQL::DBAdaptor( -host => $dbhost,
                                             -user => $dbuser,
                                             -pass => $dbpass,
                                             -port => $dbport,
                                             -dbname => $dbname );
my $sa = $db->get_SliceAdaptor();
# All stored features share one analysis, versioned by the patch release.
my $analysis = new Bio::EnsEMBL::Analysis( -logic_name => "grc_alignment_import",
                                           -db_version => $gca_patch_release);
# TODO - leave $asm_exc_adaptor in - that way we can compare
# information from file with what we already know as a sanity check.
# Now get the patches, they come in pairs, the assembly exception and the reference
print "Getting patches...\n" if $verbose;
my $asm_exc_adaptor = $db->get_AssemblyExceptionFeatureAdaptor();
my @exceptions = @{$asm_exc_adaptor->fetch_all()};
my @patches;
# Keep only the assembly exceptions whose type matches one of @patch_types.
EXC: foreach my $exc (@exceptions){
  foreach my $type (@patch_types){
    if($exc->type() =~ m/$type/){
      push(@patches, $exc);
      next EXC;
    }
  }
}
# Assuming that AssemblyExceptionFeatureAdaptor's fetch_all will always
# return 2 entries for each patch and that those two entries are adjacent
my $num_patches = scalar(@patches)/2;
print "Have ".$num_patches." patches.\n";
# For each patch: pair up its two assembly-exception entries (patch + ref),
# parse the GRC GFF alignment lines collected earlier, and build one
# DnaDnaAlignFeature per alignment line.
for (my $i = 0; $i < $num_patches; $i++) {
  # get the two slices
  my $ref_slice;
  my $patch_slice;
  for(my $j = 0; $j < 2; $j++) {
    my $exc = pop(@patches);
    # if this is the ref version
    if($exc->type =~ m/REF/){
      # alt is only the patch slice
      $patch_slice = $exc->alternate_slice();
    }
    else{
      # alt is replaced region of ref
      $ref_slice = $exc->alternate_slice();
    }
  }
  if(!($patch_slice and $ref_slice)){
    throw("Something is wrong, the patch and ref slices were not set correctly.\n");
  }
  # Take the third ':'-separated field of the slice display_id as the patch
  # name; this is the key used when the GFF lines were collected.
  my @patch_vals = split /:/, $patch_slice->display_id;
  my $patch_name = $patch_vals[2];
  foreach my $string ( @{ $align_str{$patch_name}}) {
    my @el = split /\t/, $string;
    my $num = $#el;
    # A GFF alignment line has exactly 9 tab-separated columns.
    throw ("Incorrect number of elements in gtf file: $num") unless $num == 8;
    my ($seq_id, $source, $type, $start, $end, $score, $strand, $phase, $attr) = split /\t/, $string;
    $strand = fix_strand($strand);
    # Column 9: semicolon-separated key=value attribute pairs.
    my %attribute = ();
    foreach my $kvp (split ';', $attr) {
      my ($key, $value) = split '=', $kvp;
      $attribute{$key} = $value;
    }
    # The Target attribute carries "<id> <start> <end> <strand>" for the hit side.
    my $target = $attribute{"Target"};
    my ($hseqname, $hstart, $hend, $hstrand ) = split " ", $target;
    $hstrand = fix_strand($hstrand);
    my $length = ($hend - $hstart) + 1;
    my $cigar_line;
    $cigar_line = $attribute{"Gap"};
    if (defined $cigar_line) {
      sanity_check_cigar_line($cigar_line, $length);
      $cigar_line = reformat_cigar_line($cigar_line);
    } else {
      # No Gap attribute: treat the alignment as a single ungapped match block.
      $cigar_line = $length."M";
    }
    print $cigar_line."\n";
    # need the seq_region_id from seq_region_synonym (INSDC synonym of the
    # reference slice) ...
    my @synonyms = @{$ref_slice->get_all_synonyms()};
    my $seq_region_id = '';
    foreach my $syn (@synonyms) {
      if ($syn->external_db_id() == $syn_external_db_id) {
        $seq_region_id = $syn->seq_region_id();
        last();
      }
    }
    print "about to print seq_region_id\n";
    print "seq_region_id is $seq_region_id\n";
    # ...to obtain the slice:
    my $slice = $sa->fetch_by_seq_region_id($seq_region_id);
    my $daf = new Bio::EnsEMBL::DnaDnaAlignFeature(
      -slice          => $slice,
      -start          => $start,
      -end            => $end,
      -strand         => $strand,
      -analysis       => $analysis,
      -score          => $score,
      -hstart         => $hstart,
      -hend           => $hend,
      -hstrand        => $hstrand,
      -hseqname       => $hseqname,
      -hcoverage      => $attribute{"pct_coverage"},
      -percent_id     => $attribute{"pct_identity_ungap"},
      -external_db_id => $external_db_id,
      -cigar_string   => $cigar_line,
    );
    push @dna_align_features, $daf;
  }
}
# now store all the dna_align features
if (scalar(@dna_align_features) > 0) {
  write_dna_align_features_to_db($db,\@dna_align_features,$store)
}
print "There are ".scalar (@dna_align_features)." new dna_align_features.\n";
# Store each DnaDnaAlignFeature via the adaptor when $store is true; with
# $store false, only report (dry-run). Returns 1 on completion and throws
# if any individual store fails.
sub write_dna_align_features_to_db {
  my ($db,$dna_align_features,$store) = @_;

  DAF: foreach my $dna_align_feat (@$dna_align_features) {
    if ($store) {
      # The original checked $@ without an eval, so store() failures could
      # never reach the error branch; wrap the store so it actually can.
      eval { $db->get_DnaAlignFeatureAdaptor->store($dna_align_feat); };
      if ($@) {
        throw("ERROR: Can't write dna_align_feat ".$dna_align_feat->hseqname." [$@]");
      } else {
        print "Written ".$dna_align_feat->hseqname." on chr ".$dna_align_feat->slice->name
             ." strand ".$dna_align_feat->hstrand." with start ".$dna_align_feat->start
             ." end ".$dna_align_feat->end."\n" if $verbose;
      }
    } else {
      print "Not storing ".$dna_align_feat->hseqname."\n" if $verbose;
    }
  } # DAF
  return 1;
}
# Convert a '+'/'-' strand symbol from the GFF into the 1/-1 integer
# convention used by Ensembl features; any other symbol is an error.
sub fix_strand {
  my ($symbol) = @_;

  return 1  if $symbol eq '+';
  return -1 if $symbol eq '-';
  throw("Strand problem :".$symbol);
}
# Sanity check the GRC's GFF3-style Gap attribute (e.g. "M10 I2 D3"):
# the sum of match (M) and insert (I) lengths must equal the Target length
# ($len). A mismatch is reported with warn(), as before, not a throw.
sub sanity_check_cigar_line {
  my ($line, $len) = @_;
  my $cl_length = 0;

  throw("Can only sanity check cigar lines with whitespace") unless $line =~ /\s/;

  foreach my $el (split /\s/, $line) {
    # "my (...) = (...) if COND" is undefined behaviour in Perl and silently
    # reused stale values for malformed elements; match first, then unpack.
    if ($el =~ /^(\w)(\d+)$/) {
      my ($operator, $num) = ($1, $2);
      if ($operator =~ /[MI]/) {
        $cl_length += $num;
      } elsif ($operator eq 'D') {
        # deletions do not consume the target sequence - nothing to add
      } else {
        throw("Unknown alignment operator: $operator acting on $num");
      }
    } else {
      throw("Malformed cigar element: $el");
    }
  }
  if ($cl_length != $len) {
    warn("Cigar_line length: $cl_length does not match length: $len for this line:\n$line\n\n");
  }
}
# Convert the GRC's operator-first cigar elements ("M10 I2 D3") into the
# Ensembl number-first convention ("10M2I3D"). The GRC format turned out to
# be back-to-front, hence this retrospective reformat.
sub reformat_cigar_line {
  my ($line) = @_;

  my $reformatted = '';
  foreach my $el (split /\s/, $line) {
    # Match first, then unpack: the old "my (...) = (...) if COND" idiom is
    # undefined behaviour and silently reused stale captures on a non-match.
    if ($el =~ /^(\w)(\d+)$/) {
      $reformatted .= $2.$1;
    } else {
      throw("Malformed cigar element: $el");
    }
  }
  return $reformatted;
}
exit;
| james-monkeyshines/ensembl-analysis | scripts/assembly_patches/add_GRC_align_features.pl | Perl | apache-2.0 | 12,096 |
# Auto-generated Paws service class for Amazon Mechanical Turk.
package Paws::MTurk;
  use Moose;
  # Static service metadata consumed by the Paws request-building plumbing.
  sub service { 'mturk-requester' }
  sub version { '2017-01-17' }
  sub target_prefix { 'MTurkRequesterServiceV20170117' }
  sub json_version { "1.1" }
  # Retry policy: up to 5 attempts with randomised exponential backoff;
  # no operation-specific retriable errors are declared for this service.
  has max_attempts => (is => 'ro', isa => 'Int', default => 5);
  has retry => (is => 'ro', isa => 'HashRef', default => sub {
    { base => 'rand', type => 'exponential', growth_factor => 2 }
  });
  has retriables => (is => 'ro', isa => 'ArrayRef', default => sub { [
  ] });

  # Transport/signing roles: V4-signed JSON calls with JSON responses.
  with 'Paws::API::Caller', 'Paws::API::EndpointResolver', 'Paws::Net::V4Signature', 'Paws::Net::JsonCaller', 'Paws::Net::JsonResponse';
  # ---------------------------------------------------------------------------
  # API operations (auto-generated - do not hand-edit the pattern).
  # Every method below is identical in shape: coerce the caller's arguments
  # into the matching Paws::MTurk::<Operation> call object, then dispatch it
  # through the transport layer via caller->do_call and return the response.
  # ---------------------------------------------------------------------------
  sub AcceptQualificationRequest {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::AcceptQualificationRequest', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ApproveAssignment {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ApproveAssignment', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub AssociateQualificationWithWorker {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::AssociateQualificationWithWorker', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub CreateAdditionalAssignmentsForHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::CreateAdditionalAssignmentsForHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub CreateHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::CreateHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub CreateHITType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::CreateHITType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub CreateHITWithHITType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::CreateHITWithHITType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub CreateQualificationType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::CreateQualificationType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub CreateWorkerBlock {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::CreateWorkerBlock', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub DeleteHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::DeleteHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub DeleteQualificationType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::DeleteQualificationType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub DeleteWorkerBlock {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::DeleteWorkerBlock', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub DisassociateQualificationFromWorker {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::DisassociateQualificationFromWorker', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub GetAccountBalance {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::GetAccountBalance', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub GetAssignment {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::GetAssignment', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub GetFileUploadURL {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::GetFileUploadURL', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub GetHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::GetHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub GetQualificationScore {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::GetQualificationScore', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub GetQualificationType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::GetQualificationType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListAssignmentsForHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListAssignmentsForHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListBonusPayments {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListBonusPayments', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListHITs {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListHITs', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListHITsForQualificationType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListHITsForQualificationType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListQualificationRequests {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListQualificationRequests', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListQualificationTypes {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListQualificationTypes', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListReviewableHITs {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListReviewableHITs', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListReviewPolicyResultsForHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListReviewPolicyResultsForHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListWorkerBlocks {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListWorkerBlocks', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub ListWorkersWithQualificationType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::ListWorkersWithQualificationType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub NotifyWorkers {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::NotifyWorkers', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub RejectAssignment {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::RejectAssignment', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub RejectQualificationRequest {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::RejectQualificationRequest', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub SendBonus {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::SendBonus', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub SendTestEventNotification {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::SendTestEventNotification', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub UpdateExpirationForHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::UpdateExpirationForHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub UpdateHITReviewStatus {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::UpdateHITReviewStatus', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub UpdateHITTypeOfHIT {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::UpdateHITTypeOfHIT', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub UpdateNotificationSettings {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::UpdateNotificationSettings', @_);
    return $self->caller->do_call($self, $call_object);
  }
  sub UpdateQualificationType {
    my $self = shift;
    my $call_object = $self->new_with_coercions('Paws::MTurk::UpdateQualificationType', @_);
    return $self->caller->do_call($self, $call_object);
  }
  # Names of every operation exposed by this service class (used by Paws tooling).
  sub operations { qw/AcceptQualificationRequest ApproveAssignment AssociateQualificationWithWorker CreateAdditionalAssignmentsForHIT CreateHIT CreateHITType CreateHITWithHITType CreateQualificationType CreateWorkerBlock DeleteHIT DeleteQualificationType DeleteWorkerBlock DisassociateQualificationFromWorker GetAccountBalance GetAssignment GetFileUploadURL GetHIT GetQualificationScore GetQualificationType ListAssignmentsForHIT ListBonusPayments ListHITs ListHITsForQualificationType ListQualificationRequests ListQualificationTypes ListReviewableHITs ListReviewPolicyResultsForHIT ListWorkerBlocks ListWorkersWithQualificationType NotifyWorkers RejectAssignment RejectQualificationRequest SendBonus SendTestEventNotification UpdateExpirationForHIT UpdateHITReviewStatus UpdateHITTypeOfHIT UpdateNotificationSettings UpdateQualificationType / }

1;
### main pod documentation begin ###
=head1 NAME
Paws::MTurk - Perl Interface to AWS Amazon Mechanical Turk
=head1 SYNOPSIS
use Paws;
my $obj = Paws->service('MTurk');
my $res = $obj->Method(
Arg1 => $val1,
Arg2 => [ 'V1', 'V2' ],
# if Arg3 is an object, the HashRef will be used as arguments to the constructor
# of the arguments type
Arg3 => { Att1 => 'Val1' },
# if Arg4 is an array of objects, the HashRefs will be passed as arguments to
# the constructor of the arguments type
Arg4 => [ { Att1 => 'Val1' }, { Att1 => 'Val2' } ],
);
=head1 DESCRIPTION
Amazon Mechanical Turk API Reference
=head1 METHODS
=head2 AcceptQualificationRequest(QualificationRequestId => Str, [IntegerValue => Int])
Each argument is described in detail in: L<Paws::MTurk::AcceptQualificationRequest>
Returns: a L<Paws::MTurk::AcceptQualificationRequestResponse> instance
The C<AcceptQualificationRequest> operation approves a Worker's request
for a Qualification.
Only the owner of the Qualification type can grant a Qualification
request for that type.
A successful request for the C<AcceptQualificationRequest> operation
returns with no errors and an empty body.
=head2 ApproveAssignment(AssignmentId => Str, [OverrideRejection => Bool, RequesterFeedback => Str])
Each argument is described in detail in: L<Paws::MTurk::ApproveAssignment>
Returns: a L<Paws::MTurk::ApproveAssignmentResponse> instance
The C<ApproveAssignment> operation approves the results of a completed
assignment.
Approving an assignment initiates two payments from the Requester's
Amazon.com account
=over
=item *
The Worker who submitted the results is paid the reward specified in
the HIT.
=item *
Amazon Mechanical Turk fees are debited.
=back
If the Requester's account does not have adequate funds for these
payments, the call to ApproveAssignment returns an exception, and the
approval is not processed. You can include an optional feedback message
with the approval, which the Worker can see in the Status section of
the web site.
You can also call this operation for assignments that were previous
rejected and approve them by explicitly overriding the previous
rejection. This only works on rejected assignments that were submitted
within the previous 30 days and only if the assignment's related HIT
has not been deleted.
=head2 AssociateQualificationWithWorker(QualificationTypeId => Str, WorkerId => Str, [IntegerValue => Int, SendNotification => Bool])
Each argument is described in detail in: L<Paws::MTurk::AssociateQualificationWithWorker>
Returns: a L<Paws::MTurk::AssociateQualificationWithWorkerResponse> instance
The C<AssociateQualificationWithWorker> operation gives a Worker a
Qualification. C<AssociateQualificationWithWorker> does not require
that the Worker submit a Qualification request. It gives the
Qualification directly to the Worker.
You can only assign a Qualification of a Qualification type that you
created (using the C<CreateQualificationType> operation).
Note: C<AssociateQualificationWithWorker> does not affect any pending
Qualification requests for the Qualification by the Worker. If you
assign a Qualification to a Worker, then later grant a Qualification
request made by the Worker, the granting of the request may modify the
Qualification score. To resolve a pending Qualification request without
affecting the Qualification the Worker already has, reject the request
with the C<RejectQualificationRequest> operation.
=head2 CreateAdditionalAssignmentsForHIT(HITId => Str, NumberOfAdditionalAssignments => Int, [UniqueRequestToken => Str])
Each argument is described in detail in: L<Paws::MTurk::CreateAdditionalAssignmentsForHIT>
Returns: a L<Paws::MTurk::CreateAdditionalAssignmentsForHITResponse> instance
The C<CreateAdditionalAssignmentsForHIT> operation increases the
maximum number of assignments of an existing HIT.
To extend the maximum number of assignments, specify the number of
additional assignments.
=over
=item *
HITs created with fewer than 10 assignments cannot be extended to have
10 or more assignments. Attempting to add assignments in a way that
brings the total number of assignments for a HIT from fewer than 10
assignments to 10 or more assignments will result in an
C<AWS.MechanicalTurk.InvalidMaximumAssignmentsIncrease> exception.
=item *
HITs that were created before July 22, 2015 cannot be extended.
Attempting to extend HITs that were created before July 22, 2015 will
result in an C<AWS.MechanicalTurk.HITTooOldForExtension> exception.
=back
=head2 CreateHIT(AssignmentDurationInSeconds => Int, Description => Str, LifetimeInSeconds => Int, Reward => Str, Title => Str, [AssignmentReviewPolicy => L<Paws::MTurk::ReviewPolicy>, AutoApprovalDelayInSeconds => Int, HITLayoutId => Str, HITLayoutParameters => ArrayRef[L<Paws::MTurk::HITLayoutParameter>], HITReviewPolicy => L<Paws::MTurk::ReviewPolicy>, Keywords => Str, MaxAssignments => Int, QualificationRequirements => ArrayRef[L<Paws::MTurk::QualificationRequirement>], Question => Str, RequesterAnnotation => Str, UniqueRequestToken => Str])
Each argument is described in detail in: L<Paws::MTurk::CreateHIT>
Returns: a L<Paws::MTurk::CreateHITResponse> instance
The C<CreateHIT> operation creates a new Human Intelligence Task (HIT).
The new HIT is made available for Workers to find and accept on the
Amazon Mechanical Turk website.
This operation allows you to specify a new HIT by passing in values for
the properties of the HIT, such as its title, reward amount and number
of assignments. When you pass these values to C<CreateHIT>, a new HIT
is created for you, with a new C<HITTypeID>. The HITTypeID can be used
to create additional HITs in the future without needing to specify
common parameters such as the title, description and reward amount each
time.
An alternative way to create HITs is to first generate a HITTypeID
using the C<CreateHITType> operation and then call the
C<CreateHITWithHITType> operation. This is the recommended best
practice for Requesters who are creating large numbers of HITs.
CreateHIT also supports several ways to provide question data: by
providing a value for the C<Question> parameter that fully specifies
the contents of the HIT, or by providing a C<HitLayoutId> and
associated C<HitLayoutParameters>.
If a HIT is created with 10 or more maximum assignments, there is an
additional fee. For more information, see Amazon Mechanical Turk
Pricing.
=head2 CreateHITType(AssignmentDurationInSeconds => Int, Description => Str, Reward => Str, Title => Str, [AutoApprovalDelayInSeconds => Int, Keywords => Str, QualificationRequirements => ArrayRef[L<Paws::MTurk::QualificationRequirement>]])
Each argument is described in detail in: L<Paws::MTurk::CreateHITType>
Returns: a L<Paws::MTurk::CreateHITTypeResponse> instance
The C<CreateHITType> operation creates a new HIT type. This operation
allows you to define a standard set of HIT properties to use when
creating HITs. If you register a HIT type with values that match an
existing HIT type, the HIT type ID of the existing type will be
returned.
=head2 CreateHITWithHITType(HITTypeId => Str, LifetimeInSeconds => Int, [AssignmentReviewPolicy => L<Paws::MTurk::ReviewPolicy>, HITLayoutId => Str, HITLayoutParameters => ArrayRef[L<Paws::MTurk::HITLayoutParameter>], HITReviewPolicy => L<Paws::MTurk::ReviewPolicy>, MaxAssignments => Int, Question => Str, RequesterAnnotation => Str, UniqueRequestToken => Str])
Each argument is described in detail in: L<Paws::MTurk::CreateHITWithHITType>
Returns: a L<Paws::MTurk::CreateHITWithHITTypeResponse> instance
The C<CreateHITWithHITType> operation creates a new Human Intelligence
Task (HIT) using an existing HITTypeID generated by the
C<CreateHITType> operation.
This is an alternative way to create HITs from the C<CreateHIT>
operation. This is the recommended best practice for Requesters who are
creating large numbers of HITs.
CreateHITWithHITType also supports several ways to provide question
data: by providing a value for the C<Question> parameter that fully
specifies the contents of the HIT, or by providing a C<HitLayoutId> and
associated C<HitLayoutParameters>.
If a HIT is created with 10 or more maximum assignments, there is an
additional fee. For more information, see Amazon Mechanical Turk
Pricing.
=head2 CreateQualificationType(Description => Str, Name => Str, QualificationTypeStatus => Str, [AnswerKey => Str, AutoGranted => Bool, AutoGrantedValue => Int, Keywords => Str, RetryDelayInSeconds => Int, Test => Str, TestDurationInSeconds => Int])
Each argument is described in detail in: L<Paws::MTurk::CreateQualificationType>
Returns: a L<Paws::MTurk::CreateQualificationTypeResponse> instance
The C<CreateQualificationType> operation creates a new Qualification
type, which is represented by a C<QualificationType> data structure.
=head2 CreateWorkerBlock(Reason => Str, WorkerId => Str)
Each argument is described in detail in: L<Paws::MTurk::CreateWorkerBlock>
Returns: a L<Paws::MTurk::CreateWorkerBlockResponse> instance
The C<CreateWorkerBlock> operation allows you to prevent a Worker from
working on your HITs. For example, you can block a Worker who is
producing poor quality work. You can block up to 100,000 Workers.
=head2 DeleteHIT(HITId => Str)
Each argument is described in detail in: L<Paws::MTurk::DeleteHIT>
Returns: a L<Paws::MTurk::DeleteHITResponse> instance
The C<DeleteHIT> operation is used to delete HIT that is no longer
needed. Only the Requester who created the HIT can delete it.
You can only dispose of HITs that are in the C<Reviewable> state, with
all of their submitted assignments already either approved or rejected.
If you call the DeleteHIT operation on a HIT that is not in the
C<Reviewable> state (for example, that has not expired, or still has
active assignments), or on a HIT that is Reviewable but without all of
its submitted assignments already approved or rejected, the service
will return an error.
=over
=item *
HITs are automatically disposed of after 120 days.
=item *
After you dispose of a HIT, you can no longer approve the HIT's
rejected assignments.
=item *
Disposed HITs are not returned in results for the ListHITs operation.
=item *
Disposing HITs can improve the performance of operations such as
ListReviewableHITs and ListHITs.
=back
=head2 DeleteQualificationType(QualificationTypeId => Str)
Each argument is described in detail in: L<Paws::MTurk::DeleteQualificationType>
Returns: a L<Paws::MTurk::DeleteQualificationTypeResponse> instance
The C<DeleteQualificationType> deletes a Qualification type and deletes
any HIT types that are associated with the Qualification type.
This operation does not revoke Qualifications already assigned to
Workers because the Qualifications might be needed for active HITs. If
there are any pending requests for the Qualification type, Amazon
Mechanical Turk rejects those requests. After you delete a
Qualification type, you can no longer use it to create HITs or HIT
types.
DeleteQualificationType must wait for all the HITs that use the deleted
Qualification type to be deleted before completing. It may take up to
48 hours before DeleteQualificationType completes and the unique name
of the Qualification type is available for reuse with
CreateQualificationType.
=head2 DeleteWorkerBlock(WorkerId => Str, [Reason => Str])
Each argument is described in detail in: L<Paws::MTurk::DeleteWorkerBlock>
Returns: a L<Paws::MTurk::DeleteWorkerBlockResponse> instance
The C<DeleteWorkerBlock> operation allows you to reinstate a blocked
Worker to work on your HITs. This operation reverses the effects of the
CreateWorkerBlock operation. You need the Worker ID to use this
operation. If the Worker ID is missing or invalid, this operation fails
and returns the message E<ldquo>WorkerId is invalid.E<rdquo> If the
specified Worker is not blocked, this operation returns successfully.
=head2 DisassociateQualificationFromWorker(QualificationTypeId => Str, WorkerId => Str, [Reason => Str])
Each argument is described in detail in: L<Paws::MTurk::DisassociateQualificationFromWorker>
Returns: a L<Paws::MTurk::DisassociateQualificationFromWorkerResponse> instance
The C<DisassociateQualificationFromWorker> revokes a previously granted
Qualification from a user.
You can provide a text message explaining why the Qualification was
revoked. The user who had the Qualification can see this message.
=head2 GetAccountBalance()
Each argument is described in detail in: L<Paws::MTurk::GetAccountBalance>
Returns: a L<Paws::MTurk::GetAccountBalanceResponse> instance
The C<GetAccountBalance> operation retrieves the amount of money in
your Amazon Mechanical Turk account.
=head2 GetAssignment(AssignmentId => Str)
Each argument is described in detail in: L<Paws::MTurk::GetAssignment>
Returns: a L<Paws::MTurk::GetAssignmentResponse> instance
The C<GetAssignment> operation retrieves the details of the specified
Assignment.
=head2 GetFileUploadURL(AssignmentId => Str, QuestionIdentifier => Str)
Each argument is described in detail in: L<Paws::MTurk::GetFileUploadURL>
Returns: a L<Paws::MTurk::GetFileUploadURLResponse> instance
The C<GetFileUploadURL> operation generates and returns a temporary
URL. You use the temporary URL to retrieve a file uploaded by a Worker
as an answer to a FileUploadAnswer question for a HIT. The temporary
URL is generated the instant the GetFileUploadURL operation is called,
and is valid for 60 seconds. You can get a temporary file upload URL
any time until the HIT is disposed. After the HIT is disposed, any
uploaded files are deleted, and cannot be retrieved.
=head2 GetHIT(HITId => Str)
Each argument is described in detail in: L<Paws::MTurk::GetHIT>
Returns: a L<Paws::MTurk::GetHITResponse> instance
The C<GetHIT> operation retrieves the details of the specified HIT.
=head2 GetQualificationScore(QualificationTypeId => Str, WorkerId => Str)
Each argument is described in detail in: L<Paws::MTurk::GetQualificationScore>
Returns: a L<Paws::MTurk::GetQualificationScoreResponse> instance
The C<GetQualificationScore> operation returns the value of a Worker's
Qualification for a given Qualification type.
To get a Worker's Qualification, you must know the Worker's ID. The
Worker's ID is included in the assignment data returned by the
C<ListAssignmentsForHIT> operation.
Only the owner of a Qualification type can query the value of a
Worker's Qualification of that type.
=head2 GetQualificationType(QualificationTypeId => Str)
Each argument is described in detail in: L<Paws::MTurk::GetQualificationType>
Returns: a L<Paws::MTurk::GetQualificationTypeResponse> instance
The C<GetQualificationType> operation retrieves information about a
Qualification type using its ID.
=head2 ListAssignmentsForHIT(HITId => Str, [AssignmentStatuses => ArrayRef[Str|Undef], MaxResults => Int, NextToken => Str])
Each argument is described in detail in: L<Paws::MTurk::ListAssignmentsForHIT>
Returns: a L<Paws::MTurk::ListAssignmentsForHITResponse> instance
The C<ListAssignmentsForHIT> operation retrieves completed assignments
for a HIT. You can use this operation to retrieve the results for a
HIT.
You can get assignments for a HIT at any time, even if the HIT is not
yet Reviewable. If a HIT requested multiple assignments, and has
received some results but has not yet become Reviewable, you can still
retrieve the partial results with this operation.
Use the AssignmentStatus parameter to control which set of assignments
for a HIT are returned. The ListAssignmentsForHIT operation can return
submitted assignments awaiting approval, or it can return assignments
that have already been approved or rejected. You can set
AssignmentStatus=Approved,Rejected to get assignments that have already
been approved and rejected together in one result set.
Only the Requester who created the HIT can retrieve the assignments for
that HIT.
Results are sorted and divided into numbered pages and the operation
returns a single page of results. You can use the parameters of the
operation to control sorting and pagination.
=head2 ListBonusPayments([AssignmentId => Str, HITId => Str, MaxResults => Int, NextToken => Str])
Each argument is described in detail in: L<Paws::MTurk::ListBonusPayments>
Returns: a L<Paws::MTurk::ListBonusPaymentsResponse> instance
The C<ListBonusPayments> operation retrieves the amounts of bonuses you
have paid to Workers for a given HIT or assignment.
=head2 ListHITs([MaxResults => Int, NextToken => Str])
Each argument is described in detail in: L<Paws::MTurk::ListHITs>
Returns: a L<Paws::MTurk::ListHITsResponse> instance
The C<ListHITs> operation returns all of a Requester's HITs. The
operation returns HITs of any status, except for HITs that have been
deleted with the DeleteHIT operation or that have been auto-deleted.
=head2 ListHITsForQualificationType(QualificationTypeId => Str, [MaxResults => Int, NextToken => Str])
Each argument is described in detail in: L<Paws::MTurk::ListHITsForQualificationType>
Returns: a L<Paws::MTurk::ListHITsForQualificationTypeResponse> instance
The C<ListHITsForQualificationType> operation returns the HITs that use
the given Qualification type for a Qualification requirement. The
operation returns HITs of any status, except for HITs that have been
deleted with the C<DeleteHIT> operation or that have been auto-deleted.
=head2 ListQualificationRequests([MaxResults => Int, NextToken => Str, QualificationTypeId => Str])
Each argument is described in detail in: L<Paws::MTurk::ListQualificationRequests>
Returns: a L<Paws::MTurk::ListQualificationRequestsResponse> instance
The C<ListQualificationRequests> operation retrieves requests for
Qualifications of a particular Qualification type. The owner of the
Qualification type calls this operation to poll for pending requests,
and accepts them using the AcceptQualification operation.
=head2 ListQualificationTypes(MustBeRequestable => Bool, [MaxResults => Int, MustBeOwnedByCaller => Bool, NextToken => Str, Query => Str])
Each argument is described in detail in: L<Paws::MTurk::ListQualificationTypes>
Returns: a L<Paws::MTurk::ListQualificationTypesResponse> instance
The C<ListQualificationTypes> operation returns a list of Qualification
types, filtered by an optional search term.
=head2 ListReviewableHITs([HITTypeId => Str, MaxResults => Int, NextToken => Str, Status => Str])
Each argument is described in detail in: L<Paws::MTurk::ListReviewableHITs>
Returns: a L<Paws::MTurk::ListReviewableHITsResponse> instance
The C<ListReviewableHITs> operation retrieves the HITs with Status
equal to Reviewable or Status equal to Reviewing that belong to the
Requester calling the operation.
=head2 ListReviewPolicyResultsForHIT(HITId => Str, [MaxResults => Int, NextToken => Str, PolicyLevels => ArrayRef[Str|Undef], RetrieveActions => Bool, RetrieveResults => Bool])
Each argument is described in detail in: L<Paws::MTurk::ListReviewPolicyResultsForHIT>
Returns: a L<Paws::MTurk::ListReviewPolicyResultsForHITResponse> instance
The C<ListReviewPolicyResultsForHIT> operation retrieves the computed
results and the actions taken in the course of executing your Review
Policies for a given HIT. For information about how to specify Review
Policies when you call CreateHIT, see Review Policies. The
ListReviewPolicyResultsForHIT operation can return results for both
Assignment-level and HIT-level review results.
=head2 ListWorkerBlocks([MaxResults => Int, NextToken => Str])
Each argument is described in detail in: L<Paws::MTurk::ListWorkerBlocks>
Returns: a L<Paws::MTurk::ListWorkerBlocksResponse> instance
The C<ListWorkerBlocks> operation retrieves a list of Workers who are
blocked from working on your HITs.
=head2 ListWorkersWithQualificationType(QualificationTypeId => Str, [MaxResults => Int, NextToken => Str, Status => Str])
Each argument is described in detail in: L<Paws::MTurk::ListWorkersWithQualificationType>
Returns: a L<Paws::MTurk::ListWorkersWithQualificationTypeResponse> instance
The C<ListWorkersWithQualificationType> operation returns all of the
Workers that have been associated with a given Qualification type.
=head2 NotifyWorkers(MessageText => Str, Subject => Str, WorkerIds => ArrayRef[Str|Undef])
Each argument is described in detail in: L<Paws::MTurk::NotifyWorkers>
Returns: a L<Paws::MTurk::NotifyWorkersResponse> instance
The C<NotifyWorkers> operation sends an email to one or more Workers
that you specify with the Worker ID. You can specify up to 100 Worker
IDs to send the same message with a single call to the NotifyWorkers
operation. The NotifyWorkers operation will send a notification email
to a Worker only if you have previously approved or rejected work from
the Worker.
=head2 RejectAssignment(AssignmentId => Str, RequesterFeedback => Str)
Each argument is described in detail in: L<Paws::MTurk::RejectAssignment>
Returns: a L<Paws::MTurk::RejectAssignmentResponse> instance
The C<RejectAssignment> operation rejects the results of a completed
assignment.
You can include an optional feedback message with the rejection, which
the Worker can see in the Status section of the web site. When you
include a feedback message with the rejection, it helps the Worker
understand why the assignment was rejected, and can improve the quality
of the results the Worker submits in the future.
Only the Requester who created the HIT can reject an assignment for the
HIT.
=head2 RejectQualificationRequest(QualificationRequestId => Str, [Reason => Str])
Each argument is described in detail in: L<Paws::MTurk::RejectQualificationRequest>
Returns: a L<Paws::MTurk::RejectQualificationRequestResponse> instance
The C<RejectQualificationRequest> operation rejects a user's request
for a Qualification.
You can provide a text message explaining why the request was rejected.
The Worker who made the request can see this message.
=head2 SendBonus(AssignmentId => Str, BonusAmount => Str, Reason => Str, WorkerId => Str, [UniqueRequestToken => Str])
Each argument is described in detail in: L<Paws::MTurk::SendBonus>
Returns: a L<Paws::MTurk::SendBonusResponse> instance
The C<SendBonus> operation issues a payment of money from your account
to a Worker. This payment happens separately from the reward you pay to
the Worker when you approve the Worker's assignment. The SendBonus
operation requires the Worker's ID and the assignment ID as parameters
to initiate payment of the bonus. You must include a message that
explains the reason for the bonus payment, as the Worker may not be
expecting the payment. Amazon Mechanical Turk collects a fee for bonus
payments, similar to the HIT listing fee. This operation fails if your
account does not have enough funds to pay for both the bonus and the
fees.
=head2 SendTestEventNotification(Notification => L<Paws::MTurk::NotificationSpecification>, TestEventType => Str)
Each argument is described in detail in: L<Paws::MTurk::SendTestEventNotification>
Returns: a L<Paws::MTurk::SendTestEventNotificationResponse> instance
The C<SendTestEventNotification> operation causes Amazon Mechanical
Turk to send a notification message as if a HIT event occurred,
according to the provided notification specification. This allows you
to test notifications without setting up notifications for a real HIT
type and trying to trigger them using the website. When you call this
operation, the service attempts to send the test notification
immediately.
=head2 UpdateExpirationForHIT(ExpireAt => Str, HITId => Str)
Each argument is described in detail in: L<Paws::MTurk::UpdateExpirationForHIT>
Returns: a L<Paws::MTurk::UpdateExpirationForHITResponse> instance
The C<UpdateExpirationForHIT> operation allows you to update the
expiration time of a HIT. If you update it to a time in the past, the
HIT will be immediately expired.
=head2 UpdateHITReviewStatus(HITId => Str, [Revert => Bool])
Each argument is described in detail in: L<Paws::MTurk::UpdateHITReviewStatus>
Returns: a L<Paws::MTurk::UpdateHITReviewStatusResponse> instance
The C<UpdateHITReviewStatus> operation updates the status of a HIT. If
the status is Reviewable, this operation can update the status to
Reviewing, or it can revert a Reviewing HIT back to the Reviewable
status.
=head2 UpdateHITTypeOfHIT(HITId => Str, HITTypeId => Str)
Each argument is described in detail in: L<Paws::MTurk::UpdateHITTypeOfHIT>
Returns: a L<Paws::MTurk::UpdateHITTypeOfHITResponse> instance
The C<UpdateHITTypeOfHIT> operation allows you to change the HITType
properties of a HIT. This operation disassociates the HIT from its old
HITType properties and associates it with the new HITType properties.
The HIT takes on the properties of the new HITType in place of the old
ones.
=head2 UpdateNotificationSettings(HITTypeId => Str, [Active => Bool, Notification => L<Paws::MTurk::NotificationSpecification>])
Each argument is described in detail in: L<Paws::MTurk::UpdateNotificationSettings>
Returns: a L<Paws::MTurk::UpdateNotificationSettingsResponse> instance
The C<UpdateNotificationSettings> operation creates, updates, disables
or re-enables notifications for a HIT type. If you call the
UpdateNotificationSettings operation for a HIT type that already has a
notification specification, the operation replaces the old
specification with a new one. You can call the
UpdateNotificationSettings operation to enable or disable notifications
for the HIT type, without having to modify the notification
specification itself by providing updates to the Active status without
specifying a new notification specification. To change the Active
status of a HIT type's notifications, the HIT type must already have a
notification specification, or one must be provided in the same call to
C<UpdateNotificationSettings>.
=head2 UpdateQualificationType(QualificationTypeId => Str, [AnswerKey => Str, AutoGranted => Bool, AutoGrantedValue => Int, Description => Str, QualificationTypeStatus => Str, RetryDelayInSeconds => Int, Test => Str, TestDurationInSeconds => Int])
Each argument is described in detail in: L<Paws::MTurk::UpdateQualificationType>
Returns: a L<Paws::MTurk::UpdateQualificationTypeResponse> instance
The C<UpdateQualificationType> operation modifies the attributes of an
existing Qualification type, which is represented by a
QualificationType data structure. Only the owner of a Qualification
type can modify its attributes.
Most attributes of a Qualification type can be changed after the type
has been created. However, the Name and Keywords fields cannot be
modified. The RetryDelayInSeconds parameter can be modified or added to
change the delay or to enable retries, but RetryDelayInSeconds cannot
be used to disable retries.
You can use this operation to update the test for a Qualification type.
The test is updated based on the values specified for the Test,
TestDurationInSeconds and AnswerKey parameters. All three parameters
specify the updated test. If you are updating the test for a type, you
must specify the Test and TestDurationInSeconds parameters. The
AnswerKey parameter is optional; omitting it specifies that the updated
test does not have an answer key.
If you omit the Test parameter, the test for the Qualification type is
unchanged. There is no way to remove a test from a Qualification type
that has one. If the type already has a test, you cannot update it to
be AutoGranted. If the Qualification type does not have a test and one
is provided by an update, the type will henceforth have a test.
If you want to update the test duration or answer key for an existing
test without changing the questions, you must specify a Test parameter
with the original questions, along with the updated values.
If you provide an updated Test but no AnswerKey, the new test will not
have an answer key. Requests for such Qualifications must be granted
manually.
You can also update the AutoGranted and AutoGrantedValue attributes of
the Qualification type.
=head1 PAGINATORS
Paginator methods are helpers that repeatedly call methods that return partial results
=head1 SEE ALSO
This service class forms part of L<Paws>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/MTurk.pm | Perl | apache-2.0 | 37,960 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::video::openheadend::snmp::mode::operationstatus;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold);
sub custom_status_output {
    my ($self, %options) = @_;

    # Human-readable line shown for each operation; driven by the
    # dependency-status value copied in by custom_status_calc().
    return 'status : ' . $self->{result_values}->{dep_status};
}
sub custom_status_calc {
    my ($self, %options) = @_;

    # Copy the per-instance SNMP values into result_values so the output
    # and threshold closures can reference them as %{...} macros.
    my $datas  = $options{new_datas};
    my $prefix = $self->{instance};

    $self->{result_values}->{status}     = $datas->{$prefix . '_operationOpStatus'};
    $self->{result_values}->{dep_status} = $datas->{$prefix . '_operationDepStatus'};
    $self->{result_values}->{display}    = $datas->{$prefix . '_display'};
    return 0;
}
sub set_counters {
    my ($self, %options) = @_;

    # One counter group keyed per SNMP instance (type => 1), with a single
    # summary line when all instances are OK; skipped_code -10 silences
    # counters whose key values are missing for an instance.
    $self->{maps_counters_type} = [
        { name => 'operation', type => 1, cb_prefix_output => 'prefix_operation_output', message_multiple => 'All operations are ok', skipped_code => { -10 => 1 } }
    ];

    # The 'status' counter emits no perfdata (closure returns 0) and is
    # evaluated purely through the --warning-status/--critical-status
    # expressions via catalog_status_threshold.
    $self->{maps_counters}->{operation} = [
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'operationOpStatus' }, { name => 'operationDepStatus' }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
    ];
}
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        "filter-id:s"       => { name => 'filter_id' },
        # Bug fix: manage_selection() and the POD both use --filter-type,
        # but the option was never registered here, so it could not take
        # effect. Registering it makes the documented filter work.
        "filter-type:s"     => { name => 'filter_type' },
        "warning-status:s"  => { name => 'warning_status', default => '' },
        # NOTE(review): the POD advertises a default of
        # '%{dep_status} =~ /false/i' while the code default uses
        # %{status}; kept as-is to preserve behavior — confirm intent.
        "critical-status:s" => { name => 'critical_status', default => '%{status} =~ /false/i' },
    });

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    # Expand %{...} macros inside the status expressions so the threshold
    # closures can evaluate them against result_values at check time.
    $self->change_macros(macros => ['warning_status', 'critical_status']);
}
sub prefix_operation_output {
    my ($self, %options) = @_;

    # Prefix printed before each per-operation status line.
    my $name = $options{instance_value}->{display};
    return "Operation '" . $name . "' ";
}
# Numeric operationType values mapped to human-readable operation names
# (these names are what --filter-type matches against).
my %map_type = (1 => 'demux', 2 => 'monitor-pid', 3 => 'monitor-type',
    4 => 'playout-file', 5 => 'playout-circular-file', 6 => 'monitor-sid',
    7 => 'playout-directory', 8 => 'hint', 9 => 'transcode-avc', 10 => 'transcode-mp2',
    11 => 'transcode-aac', 12 => 'transmit-input', 13 => 'transmit-output',
    14 => 'transcode-a52', 15 => 'grid-input', 16 => 'grid-acquire-mono',
    17 => 'grid-acquire-stereo', 18 => 'acquire-mono', 19 => 'acquire-stereo',
    20 => 'mux-input', 21 => 'remap-pid', 22 => 'remap-sid',
    23 => 'mosaic', 24 => 'hint-scte35',
);

# Boolean-style mapping shared by both status columns.
my %map_status = (1 => 'true', 2 => 'false');

# SNMP columns fetched per operation row (enterprise subtree
# .1.3.6.1.4.1.35902 — presumably the OpenHeadend MIB; verify against MIB).
my $mapping = {
    operationType => { oid => '.1.3.6.1.4.1.35902.1.6.1.1.3', map => \%map_type },
    operationDepStatus => { oid => '.1.3.6.1.4.1.35902.1.6.1.1.5', map => \%map_status },
    operationOpStatus => { oid => '.1.3.6.1.4.1.35902.1.6.1.1.7', map => \%map_status },
};
sub manage_selection {
    my ($self, %options) = @_;

    $self->{operation} = {};

    # Fetch all three operation columns in one pass; nothing_quit makes the
    # plugin exit if the agent returned no data at all.
    my $snmp_result = $options{snmp}->get_multiple_table(
        oids => [
            { oid => $mapping->{operationType}->{oid} },
            { oid => $mapping->{operationDepStatus}->{oid} },
            { oid => $mapping->{operationOpStatus}->{oid} },
        ],
        return_type => 1,
        nothing_quit => 1
    );

    OID: foreach my $oid (keys %$snmp_result) {
        # Iterate on the operational-status column only; the OID suffix is
        # the per-operation instance id.
        next OID unless $oid =~ /^$mapping->{operationOpStatus}->{oid}\.(.*)$/;
        my $instance = $1;
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);

        # Apply the user filters: [option name, value the regexp runs on].
        foreach my $filter ([ 'filter_id', $instance ], [ 'filter_type', $result->{operationType} ]) {
            my ($opt, $value) = @$filter;
            my $pattern = $self->{option_results}->{$opt};
            next if (!defined($pattern) || $pattern eq '');
            if ($value !~ /$pattern/) {
                $self->{output}->output_add(long_msg => "skipping '" . $value . "': no matching filter.", debug => 1);
                next OID;
            }
        }

        $self->{operation}->{$instance} = {
            display => $instance,
            %$result
        };
    }

    if (!scalar(keys %{$self->{operation}})) {
        $self->{output}->add_option_msg(short_msg => "No operation found.");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check operation status.
=over 8
=item B<--filter-id>
Filter by operation ID (can be a regexp).
=item B<--filter-type>
Filter by operation type (can be a regexp).
=item B<--warning-status>
Set warning threshold for status.
Can used special variables like: %{status}, %{dep_status}, %{display}
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} =~ /false/i').
Can used special variables like: %{status}, %{dep_status}, %{display}
=back
=cut
| Sims24/centreon-plugins | apps/video/openheadend/snmp/mode/operationstatus.pm | Perl | apache-2.0 | 6,315 |
package VMOMI::ArrayOfHostLowLevelProvisioningManagerFileDeleteResult;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# No ancestors beyond the implicit ComplexType base.
our @class_ancestors = ();

# Single repeated member holding the per-file delete results; the trailing
# flags follow the VMOMI::ComplexType member-descriptor convention.
our @class_members = (
    ['HostLowLevelProvisioningManagerFileDeleteResult', 'HostLowLevelProvisioningManagerFileDeleteResult', 1, 1],
);

# Ancestor list for (de)serialization.
sub get_class_ancestors {
    return @class_ancestors;
}

# Member descriptors: the base class's members followed by this class's own.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
package Venn::SchemaBase::ResultSet;
=head1 NAME
Venn::SchemaBase::ResultSet
=head1 DESCRIPTION
Base DBIC ResultSet
=head1 AUTHOR
Venn Engineering
Josh Arenberg, Norbert Csongradi, Ryan Kupfer, Hai-Long Nguyen
=head1 LICENSE
Copyright 2013,2014,2015 Morgan Stanley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
use v5.14;
use Moose;
use MooseX::NonMoose;
use namespace::autoclean;
extends 'DBIx::Class::ResultSet';
with 'Venn::Role::Logging';
use DBIx::Class::ResultClass::HashRefInflator;
use Data::Dumper;
use Venn::Exception qw( API::InvalidSortPair );
__PACKAGE__->load_components(qw{
Helper::ResultSet::SetOperations
Helper::ResultSet::CorrelateRelationship
Helper::ResultSet::Random
});
=head1 METHODS

=head2 BUILDARGS(...)

Fix for MooseX::NonMoose and DBIx::Class combo

return : (array) Args for BUILD

=cut

# DBIx::Class::ResultSet::new() is invoked as ->new($source, \%attrs); the
# third positional argument (index 2) is the attrs hashref that Moose's
# constructor expects, so forward it unchanged.
sub BUILDARGS { $_[2] } ## no critic (RequireArgUnpacking,RequireFinalReturn)
=head2 as_hash()

Switches this resultset to inflate rows as plain hash references
(DBIx::Class::ResultClass::HashRefInflator) instead of row objects.

return : (ResultSet) $self, for chaining

=cut

sub as_hash {
    my $self = shift;
    $self->result_class('DBIx::Class::ResultClass::HashRefInflator');
    return $self;
}
=head2 as_hash_round()

Same as as_hash(), but uses a custom inflator that converts floats
(including DB2-style scientific notation) to a rounded two-decimal format.

return : (ResultSet) $self, for chaining

=cut

sub as_hash_round {
    my $self = shift;
    $self->result_class('Venn::ResultClass::HashRefInflatorFloatConvert');
    return $self;
}
=head2 search_readonly($cond, $attrs)

Like search(), but on DB2 storage appends a "FOR READ ONLY WITH UR"
(uncommitted read) clause so the query does not take row locks.
On other storage engines this is a plain search().

param $cond  : (hashref) search conditions
param $attrs : (hashref) search attributes (optional, created if needed)
return : (ResultSet)

=cut

sub search_readonly {
    my ($self, $cond, $attrs) = @_;

    # Only DB2 understands this isolation hint.
    if ($self->result_source->schema->storage_type =~ /db2/i) {
        $attrs //= {};
        # Scalar ref so DBIC emits the clause verbatim.
        $attrs->{for} = \'READ ONLY WITH UR';
    }
    return $self->search($cond, $attrs);
}
=head2 related_resultset_chain(@relationships)

Follows a path of relationship names and returns the resultset at the end
of the chain.

param @relationships : (list) relationship names to traverse, in order
return : (ResultSet) resultset of the final relationship

=cut

sub related_resultset_chain {
    my ($self, @path) = @_;

    my $current = $self;
    $current = $current->related_resultset($_) for @path;
    return $current;
}
=head2 prefetch(\@tables)

Adds a prefetch attribute for the given related tables, pulling their
values in with the main query.

param \@tables : (arrayref) related tables/relationships to prefetch
return : (ResultSet)

=cut

sub prefetch {
    my ($self, $related) = @_;
    my %attrs = ( prefetch => $related );
    return $self->search(undef, \%attrs);
}
=head2 join_tables(\@tables)

Adds a join attribute for the given related tables.

param \@tables : (arrayref) related tables/relationships to join
return : (ResultSet)

=cut

sub join_tables {
    my ($self, $related) = @_;
    my %attrs = ( join => $related );
    return $self->search(undef, \%attrs);
}
=head2 as_alias($alias_name)

Aliases the current resultset's table to $alias_name in the generated SQL.

param $alias_name : (Str) alias to use
return : (ResultSet)

=cut

sub as_alias {
    my ($self, $alias_name) = @_;
    my %attrs = ( alias => $alias_name );
    return $self->search(undef, \%attrs);
}
=head2 search_with_query_params($c, \%opts)

Parses query parameters to form the where clause and attributes of the
query. Any C<filter_*> parameter becomes a condition on the corresponding
column (using C<-like> when the value contains C<%>). Sorting comes either
from C<sort>/C<dir> or from a comma-separated C<order_by> list of
"column asc|desc" pairs.

param $c     : (object) Catalyst context
param \%opts : (hashref) Options for parsing query params
return : (resultset) ResultSet with appropriate conds/attrs
throws : API::InvalidSortPair, ...

=cut

sub search_with_query_params {
    my ( $self, $c, $opts ) = @_;

    $opts->{attrs}{allowed} = [qw(
        order_by
        columns
        join
        prefetch
        page
        rows
        offset
        group_by
        having
        distinct
    )];

    my $me = $self->current_source_alias;

    # Build where-clause conditions from filter_* query parameters.
    # NOTE: this used to be a map with a `next` inside it, which warned
    # ("Exiting subroutine via next") for keys that contained but did not
    # start with "filter_", and relied on mutating $_; a plain loop is safer.
    my %conditions;
    for my $param (grep { /^filter_/ } keys %{ $c->req->params }) {
        my ($column) = $param =~ /^filter_(.+)$/s;
        my $alias = sprintf("%s.%s", $me, $column);
        my $value = $c->req->params->{$param};
        $conditions{$alias} = $value =~ /%/ ? { -like => $value } : $value;
    }

    # Copy over whitelisted (and not explicitly disallowed) attributes.
    my %attributes;
    for my $attr (@{$opts->{attrs}->{allowed}}) {
        next if exists $opts->{attrs}->{disallowed}->{$attr};
        $attributes{$attr} = $c->req->params->{$attr} if defined $c->req->params->{$attr};
    }

    # special cases: ordering
    if (exists $c->req->params->{sort} && exists $c->req->params->{dir}) {
        my $dir = lc $c->req->params->{dir};
        if ($dir eq 'asc' || $dir eq 'desc') {
            $attributes{order_by} = { "-$dir" => $c->req->params->{sort} };
        }
    }
    elsif (exists $c->req->params->{order_by}) {
        my @sort_pairs = split /,/, $c->req->params->{order_by};
        # BUGFIX: initialise once, outside the loop. The original reset
        # order_by to [] on every iteration, so only the last sort pair
        # survived.
        $attributes{order_by} = [];
        for my $pair (@sort_pairs) {
            my ($column, $direction) = ( $pair =~ /^(\S+) (asc|desc)$/i );
            if ($column && $direction) {
                # Lower-case the direction so "ASC"/"DESC" produce valid
                # -asc/-desc DBIC keys.
                push @{$attributes{order_by}}, { '-' . lc($direction) => $column };
            }
            else {
                Venn::Exception::API::InvalidSortPair->throw({ c => $c, pair => $pair });
            }
        }
    }

    #$c->log->debug("Conditions: " . Dumper(\%conditions));
    #$c->log->debug("Attributes: " . Dumper(\%attributes));

    return $self->search(\%conditions, \%attributes);
}
=head2 lock_table($mode)

Locks this resultset's table if the schema runs on DB2; a no-op elsewhere.

param $mode : (Str) lock mode, defaults to 'EXCLUSIVE'
return : result of dbh_do() on DB2, nothing otherwise

=cut

sub lock_table {
    my ($self, $mode) = @_;

    $mode //= 'EXCLUSIVE';

    my $schema = $self->result_source->schema;
    my $table  = $self->result_source->name;

    # Table locking is only meaningful (and only attempted) on DB2.
    return unless $schema->storage_type =~ /db2/i;

    return $schema->storage->dbh_do(
        sub {
            my (undef, $dbh) = @_;
            $dbh->do("LOCK TABLE $table IN $mode MODE");
        }
    );
}
=head2 find_by_primary_field($value)

Retrieves the single record whose primary field (as declared by the result
class) equals the given value.

param $value : (Str) value of the primary field to search on
return : (Row|undef)

=cut

sub find_by_primary_field {
    my ($self, $value) = @_;

    my $field = $self->result_class->primary_field;
    return $self->single({ $field => $value });
}
# Derive a short "container" name from this resultset's class name:
# the last package component, minus an optional single-letter prefix
# such as "A_" (e.g. "...::A_Memory" and "...::Memory" both give "Memory").
# Returns the empty string when the class name has no '::' separator.
sub container_name {
    my ($self) = @_;

    my ($short) = ref($self) =~ /::(?:[A-Z]_)?([^:]+)$/;
    return defined $short ? $short : '';
}
=head2 agt_definition([$agt_name])

Returns the definition of an AssignmentGroup type.

param $agt_name : (string) AGT name (example: zlight); falls back to
                  $self->agt_name when not supplied
return : (hash) Definition for the AGT

=cut

sub agt_definition {
    my ($self, $agt_name) = @_;

    $agt_name //= $self->agt_name // die "Can't find out agt_name";

    my $agt_rs  = $self->result_source->schema->resultset('AssignmentGroup_Type');
    my $agt_row = $agt_rs->single({ assignmentgroup_type_name => $agt_name });
    return $agt_row->definition;
}
1;
| Morgan-Stanley/venn-core | lib/Venn/SchemaBase/ResultSet.pm | Perl | apache-2.0 | 7,615 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Compara::RunnableDB::GeneTrees::OverallGroupsetQC
=head1 DESCRIPTION
This analysis compares the current gene-tree clustering against the
clustering in a previous ('reuse') database: it cross-tabulates cluster
memberships, maps cluster names between the two databases, stores the
mapping statistics as tags on the clusterset tree, and fails the job when
the proportion of novel (unmapped) clusters exceeds the unmap tolerance.
=head1 SYNOPSIS
my $db = Bio::EnsEMBL::Compara::DBAdaptor->new($locator);
my $sillytemplate = Bio::EnsEMBL::Compara::RunnableDB::GeneTrees::OverallGroupsetQC->new
(
-db => $db,
-input_id => $input_id,
-analysis => $analysis
);
$sillytemplate->fetch_input(); #reads from DB
$sillytemplate->run();
$sillytemplate->write_output(); #writes to DB
=head1 AUTHORSHIP
Ensembl Team. Individual contributions can be found in the GIT log.
=head1 APPENDIX
The rest of the documentation details each of the object methods.
Internal methods are usually preceded with an underscore (_)
=cut
package Bio::EnsEMBL::Compara::RunnableDB::GeneTrees::OverallGroupsetQC;
use strict;
use warnings;
use Time::HiRes qw(time gettimeofday tv_interval);
use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');
# Default pipeline parameters: allow up to 20% of clusters to be novel
# (unmapped) before the QC check fails the job.
sub param_defaults {
    my %defaults = (
        unmap_tolerance => 0.2,
    );
    return { %defaults };
}
=head2 fetch_input

    Title   :   fetch_input
    Usage   :   $self->fetch_input
    Function:   Fetches the default clusterset tree and stores it in the
                'groupset_tree' parameter
    Returns :   none
    Args    :   none

=cut

sub fetch_input {
    my $self = shift;

    my $clusterset_trees = $self->compara_dba->get_GeneTreeAdaptor->fetch_all(
        -tree_type     => 'clusterset',
        -clusterset_id => 'default',
    );
    $self->param('groupset_tree', $clusterset_trees->[0])
        or die "Could not fetch groupset tree";
}
=head2 run

    Title   :   run
    Usage   :   $self->run
    Function:   Runs the groupset QC against the reuse database, if one is
                configured via the 'reuse_db' parameter
    Returns :   none
    Args    :   none

=cut

sub run {
    my $self = shift;

    my $reuse_db = $self->param('reuse_db');
    $self->overall_groupset_qc($reuse_db) if $reuse_db;
}
##########################################
#
# internal methods
#
##########################################

# Build a unique identifier for a compara database from its connection
# details, as "host.port.dbname".
sub generate_dbname {
    my ($self, $compara_dba) = @_;

    my $dbc = $compara_dba->dbc;
    return join('.', $dbc->host, $dbc->port, $dbc->dbname);
}
=head2 overall_groupset_qc

Runs the whole QC: cross-tabulates cluster memberships against the reuse
database, maps cluster names, archives the intermediate .xtb/.map files in
the cluster directory, and quantifies the mapping.

param $reuse_db : (string|hashref) locator of the reuse compara database

=cut

sub overall_groupset_qc {
    my $self     = shift;
    my $reuse_db = shift;

    my $reuse_compara_dba = Bio::EnsEMBL::Compara::DBSQL::DBAdaptor->go_figure_compara_dba($reuse_db);    # may die if bad parameters

    my $xtb_filename      = $self->join_one_pair( $reuse_compara_dba, $self->compara_dba );
    my $xtb_copy_filename = $self->param('cluster_dir') . "/" . "groupset_qc.xtb";
    $self->_copy_file($xtb_filename, $xtb_copy_filename);

    my $map_filename      = $self->cluster_mapping($xtb_filename, $reuse_compara_dba, $self->compara_dba);
    my $map_copy_filename = $self->param('cluster_dir') . "/" . "groupset_qc.map";
    $self->_copy_file($map_filename, $map_copy_filename);

    $self->quantify_mapping($map_filename, $reuse_compara_dba);
}

# Copy $src to $dst without spawning a shell (the original interpolated the
# paths into a `system("cp ...")` command line). Failures are non-fatal,
# matching the original best-effort behaviour.
sub _copy_file {
    my ($self, $src, $dst) = @_;

    require File::Copy;
    File::Copy::copy($src, $dst)
        or warn "failed to copy $src to $dst: $!\n";
}
# ---------------------------------------------------------------------------------------------------------------
# The following 3 subroutines have been re-written using proper classes in Bio::EnsEMBL::Compara::StableId,
# We should be using those instead of copy-pasting.
# Also see Bio::EnsEMBL::Compara::RunnableDB::StableIdMapper on proper manupulating those objects.
# ---------------------------------------------------------------------------------------------------------------

# Fetch the cluster membership of every gene member in the given database.
# Returns a hashref of the form:
#   { membership  => { gene_stable_id => root_id },
#     clustername => { root_id        => 'Node_<root_id>' } }
sub fetch_groupset {  # see Bio::EnsEMBL::Compara::StableId::Adaptor::load_compara_ncs
    my $self = shift;
    my $given_compara_dba = shift;

    my $starttime = time();

    my $default_noname = 'NoName';
    my $dataset;

    # One row per (tree, gene): the tree root id and the gene's stable id.
    my $sql = "SELECT gtn.root_id, m2.stable_id FROM gene_tree_node gtn JOIN seq_member m1 USING (seq_member_id) JOIN gene_member m2 USING (gene_member_id)";
    my $sth = $given_compara_dba->dbc->prepare($sql);
    $sth->execute();
    printf("%1.3f secs to fetch entries\n", (time()-$starttime)) if ($self->debug);

    my $counter = 0;

    while(my($cluster_id, $member_stable_id)=$sth->fetchrow()) {
        # print STDERR "ID=$cluster_id NAME=$cluster_name MEM=$member_stable_id\n" if ($self->debug);
        my $cluster_name;
        if (defined($cluster_id)) {
            $cluster_name = 'Node_' . $cluster_id;
        } else {
            $cluster_name = $default_noname; # we need some name here however bogus (for formatting purposes)
        }

        if ($member_stable_id) {
            $dataset->{membership}{$member_stable_id} = $cluster_id;
            $dataset->{clustername}{$cluster_id} = $cluster_name;
        } else {
            $self->throw("Missing stable_id for $cluster_id\n");
        }
        # Progress indicator every 50k rows when debugging.
        if ($self->debug && ($counter++ % 50000 == 0)) { printf("%10d loaded\n", $counter); }
    }
    return $dataset;
}
=head2 join_one_pair

Cross-tabulates cluster memberships between two databases and writes the
cross-table (.xtb) file consumed by cluster_mapping(). Each line is:
from_id, from_name, from_size, to_id, to_name, to_size, shared_count.

(see Bio::EnsEMBL::Compara::StableId::NamedClusterSetLink::compute_stats)

param $from_dba : (DBAdaptor) previous-release compara DBA
param $to_dba   : (DBAdaptor) current compara DBA
return : (string) path to the written .xtb file

=cut

sub join_one_pair {
    my ($self, $from_dba, $to_dba) = @_;

    my $from_dbname = $self->generate_dbname( $from_dba );
    print STDERR "Fetching groupset for $from_dbname\n" if ($self->debug);
    my $from_dataset     = $self->fetch_groupset( $from_dba );
    my $from_membership  = $from_dataset->{membership};
    my $from_clustername = $from_dataset->{clustername};

    my $to_dbname = $self->generate_dbname( $to_dba );
    print STDERR "Fetching groupset for $to_dbname\n" if ($self->debug);
    my $to_dataset     = $self->fetch_groupset( $to_dba );
    my $to_membership  = $to_dataset->{membership};
    my $to_clustername = $to_dataset->{clustername};

    my %direct     = ();
    my %reverse    = ();
    my %from_size  = ();
    my %to_size    = ();
    my %xto_size   = ();
    my %xfrom_size = ();

    my $total_count  = 0;
    my $common_count = 0;

    # Members of the old clustering: either shared with the new one
    # (feeding the direct/reverse cross-tables) or strictly disappeared.
    while (my ($from_member, $from_family) = each %$from_membership) {
        if (my $to_family = $to_membership->{$from_member}) {
            $direct{$from_family}{$to_family}++;
            $reverse{$to_family}{$from_family}++;
            $from_size{$from_family}++;
            $to_size{$to_family}++;
            $common_count++;
        } else { # strictly disappeared members (disappearing either with or without the families)
            $xfrom_size{$from_family}++;
        }
        $total_count++;
    }

    # Strictly new members (going either into existing or new families).
    while (my ($to_member, $to_family) = each %$to_membership) {
        if (not exists $from_membership->{$to_member}) {
            $xto_size{$to_family}++;
            $total_count++;
        }
    }

    print STDERR "Total number of keys: $total_count\n" if ($self->debug);
    print STDERR "Number of common keys: $common_count\n" if ($self->debug);

    my $xtb_filename = $self->worker_temp_directory . $from_dbname . "-" . $to_dbname . '.xtb';
    # Lexical filehandle + three-arg open (the original used a global
    # bareword handle with two-arg open).
    open(my $xtb_fh, '>', $xtb_filename) or die "Could not open '$xtb_filename' for writing : $!";

    # Shared families: one line per (from, to) pair, biggest contribution first.
    foreach my $from_id (sort {$a <=> $b} keys %direct) {
        my $from_name = $from_clustername->{$from_id};
        my $subhash   = $direct{$from_id};

        foreach my $to_id (sort { $subhash->{$b} <=> $subhash->{$a} } keys %$subhash) {
            my $to_name = $to_clustername->{$to_id};
            my $cnt     = $direct{$from_id}{$to_id};

            print $xtb_fh join("\t", $from_id, $from_name, $from_size{$from_id}, $to_id, $to_name, $to_size{$to_id}, $cnt)."\n";
        }
    }

    # Strictly-new families (contain new members only).
    foreach my $to_id (sort {$a <=> $b} keys %xto_size) {
        next if($reverse{$to_id}); # skip the ones that also have old members
        my $to_name = $to_clustername->{$to_id};
        print $xtb_fh join("\t", 0, '-', 0, $to_id, $to_name, $xto_size{$to_id}, $xto_size{$to_id})."\n";
    }

    # Strictly-disappearing families (lost all of their members).
    foreach my $from_id (sort {$a <=> $b} keys %xfrom_size) {
        next if($direct{$from_id}); # skip the families that retained some members
        my $from_name = $from_clustername->{$from_id};
        print $xtb_fh join("\t", $from_id, $from_name, $xfrom_size{$from_id}, 0, '-', 0, $xfrom_size{$from_id})."\n";
    }

    # Check close on a write handle: buffered write errors surface here.
    close($xtb_fh) or die "Could not close '$xtb_filename' : $!";

    return $xtb_filename;
}
=head2 cluster_mapping

Assigns a name to every cluster of the current database: reuses the old
name on an exact match, bumps its version on a majority (>=67%) match, and
mints a fresh ENSGT-style name otherwise. Writes a .map file of
"to_id <TAB> to_name <TAB> given_name <TAB> matchscore" lines.

(see Bio::EnsEMBL::Compara::StableId::NamedClusterSetLink::maximum_name_reuse)

param $link_filename : (string) .xtb cross-table written by join_one_pair()
param $from_dba      : (DBAdaptor) previous-release compara DBA
param $to_dba        : (DBAdaptor) current compara DBA
return : (string) path to the written .map file

=cut

sub cluster_mapping {
    my ($self, $link_filename, $from_dba, $to_dba) = @_;

    my $premap          = ''; # premap will always be empty here
    my $threshold       = 0.67;
    my $maj_label       = $threshold ? sprintf("Major_%d", int($threshold*100) ) : 'Majority';
    my $default_version = 1;
    my $prefix          = 'ENSGT';
    my $to_rel          = 2;
    my $stid_counter    = 0;

    my $revcontrib;
    my $from2size;
    my $to2size;
    my $xfrom2size;
    my $xto2size;
    my $from2name;
    my $to2name;

    # Load the cross-table (lexical handle + three-arg open; the original
    # used a global bareword handle and would also stop reading at the
    # first blank line because its loop condition was the split itself).
    open(my $link_fh, '<', $link_filename) or die "Cannot open '$link_filename' file $!";
    while (defined(my $link_line = <$link_fh>)) {
        my ($from_id, $from_name, $from_size, $to_id, $to_name, $to_size, $contrib) = split(/\s/, $link_line);
        next unless(defined $contrib and $contrib=~/^\d+$/); # skip the header line if present

        if($from_size and $to_size) {       # Shared family
            $from2name->{$from_id} = $premap ? ($premap->{$from_id} || die "Premap does not contain mapping for '$from_name' (id=$from_id)") : $from_name;
            $to2name->{$to_id} = $to_name;
            $revcontrib->{$to_id}{$from_id} = $contrib;
            $from2size->{$from_id} = $from_size;
            $to2size->{$to_id} = $to_size;
        } elsif($to_size) {                 # Newborn family
            $to2name->{$to_id} = $to_name;
            $xto2size->{$to_id} = $to_size;
        } elsif($from_size) {               # Disappearing family
            $from2name->{$from_id} = $premap ? ($premap->{$from_id} || die "Premap does not contain mapping for '$from_name' (id=$from_id)") : $from_name;
            $xfrom2size->{$from_id} = $from_size;
        }
    }
    close($link_fh);

    # Now we run through the hashes
    my %matchtype_counter = ();
    my %from_taken = (); # indicates the 'from' name has been taken
    my %postmap    = (); # maps each 'to' cluster id to its given name

    my $from_dbname  = $self->generate_dbname( $from_dba );
    my $to_dbname    = $self->generate_dbname( $to_dba );
    my $map_filename = $self->worker_temp_directory . $from_dbname . "-" . $to_dbname. '.map';
    open(my $map_fh, '>', $map_filename) or die "Could not open '$map_filename' for writing : $!";

    # Visit 'to' clusters largest-first so big clusters get first pick of names.
  TOPAIR: foreach my $topair (sort { $b->[1] <=> $a->[1] } map { [$_,$to2size->{$_}] } keys %$to2size ) {
        my ($to_id, $to_size) = @$topair;

        my $subhash = $revcontrib->{$to_id};

        my $td_counts  = 0;
        my $matchtype  = '';
        my $matchscore = 0;
        my $given_name = ''; # serves both as logical flag and the actual name

        # Candidate 'from' clusters, largest contribution first.
      FROMPAIR: foreach my $frompair (sort { $b->[1] <=> $a->[1] } map { [$_,$subhash->{$_}] } keys %$subhash ) {
            my ($from_id, $contrib) = @$frompair;

            my $from_size = $from2size->{$from_id};
            my $from_name = $from2name->{$from_id};

            if (!defined $from_taken{$from_name}) { # the 'from' name is still unused, so we can reuse it now
                if ($contrib==$from_size and $contrib==$to_size) {
                    $matchtype = 'Exact';
                } elsif ($threshold>0) { # either the majority rule applies or we don't bother looking further (contributions only get smaller)
                    if ($contrib/$from_size>=$threshold and $contrib/$to_size>=$threshold) {
                        $matchtype = $maj_label;
                    } # otherwise we have an implicit 'NewName' case
                } else { # non-threshold mode
                    $matchtype = $td_counts ? 'NextBest' : $maj_label;
                }

                if ($matchtype) {
                    if ($matchtype eq 'Exact') {
                        $given_name = $from_name;
                    } else {
                        # bump the version if the match is not exact (or set it if previously unset)
                        $from_name =~ /^(\w+)(?:\.(\d+))?/;
                        $given_name = $1.'.'. ((defined($2) ? $2 : $default_version)+1);
                    }
                    $from_taken{$from_name} = 1;
                    $matchscore = int(100*$contrib/$to_size);
                }
                last FROMPAIR;
            } # if name not taken

            $td_counts++; # counts all attempts, not only the ones where the 'from' name was unused
        } # FROMPAIR

        # We arrive here either from 'last FROMPAIR' (possibly an implicit
        # 'NewName') or by exhausting all FROMPAIRs (all names were taken).
        $matchtype  ||= 'NewName';
        $given_name ||= sprintf("%s%04d%010d.%d",$prefix, $to_rel, ++$stid_counter, $default_version);

        print $map_fh (join("\t", $to_id, $to2name->{$to_id}, $given_name, $matchscore)."\n");
        $postmap{$to_id} = $given_name;

        if ($to_size == 1) {
            $matchtype .= '_o'; # orphan (singleton) variant of the match type
        }
        $matchtype_counter{$matchtype}++;
    } # TOPAIR

    # Strictly-new families always get a freshly minted name.
    while (my ($to_id, $to_size) = each %$xto2size) {
        my $given_name = sprintf("%s%04d%010d.%d",$prefix, $to_rel, ++$stid_counter, $default_version);
        print $map_fh join("\t", $to_id, $to2name->{$to_id}, $given_name, 0)."\n";
        $postmap{$to_id} = $given_name;

        my $matchtype = ($to_size == 1) ? 'NewFam_o' : 'NewFam';
        $matchtype_counter{$matchtype}++;
    }
    close($map_fh) or die "Could not close '$map_filename' : $!";

    return $map_filename;
}
=head2 quantify_mapping

Reads the .map file, tags every mapped current-release tree with the reused
node id, its alignment runtime and the mapping contribution, stores summary
statistics as tags on the clusterset tree, and fails the job (permanently)
when the proportion of novel clusters exceeds 'unmap_tolerance'.

param $map_filename      : (string) .map file written by cluster_mapping()
param $reuse_compara_dba : (DBAdaptor) previous-release compara DBA

=cut

sub quantify_mapping {
    my ($self, $map_filename, $reuse_compara_dba) = @_;

    my %mapping_stats = ();

    # Freshly minted names contain "ENSGT" (novel clusters); reused names
    # keep the old "Node_<reuse_node_id>" form.
    open(my $map_fh, '<', $map_filename) or die "Could not open '$map_filename' for reading : $!";
    my $tag_count = 0;
    while (my $map_line = <$map_fh>) {
        my ($cluster_id, $from_cluster_name, $to_cluster_name, $contribution) = split(" ", $map_line);
        if ($to_cluster_name =~ /ENSGT/) {
            $mapping_stats{novel}{$cluster_id} = 1;
        } else {
            $to_cluster_name =~ /\_(\d+)/;
            my $reuse_node_id = $1;
            $mapping_stats{mapped}{$cluster_id} = $contribution;
            $mapping_stats{mapped_tagging}{$cluster_id} = $reuse_node_id;
        }
        if ($self->debug && ($tag_count++ % 100 == 0)) { print STDERR "[$tag_count] mapped clusters\n"; }
    }
    close($map_fh);

    my $current_gene_tree_adaptor = $self->compara_dba->get_GeneTreeAdaptor;
    my $reuse_gene_tree_adaptor   = $reuse_compara_dba->get_GeneTreeAdaptor;

    # Tag each mapped tree with provenance from the reused database.
    foreach my $mapped_cluster_id (keys %{$mapping_stats{mapped_tagging}}) {
        my $reuse_node_id = $mapping_stats{mapped_tagging}{$mapped_cluster_id};
        next unless (defined($reuse_node_id));
        my $reuse_node = $reuse_gene_tree_adaptor->fetch_by_dbID($reuse_node_id);
        next unless (defined($reuse_node));
        next unless $reuse_node->has_tag('aln_runtime');
        my $reuse_aln_runtime_value = $reuse_node->get_value_for_tag('aln_runtime');

        my $this_node = $current_gene_tree_adaptor->fetch_by_dbID($mapped_cluster_id);
        next unless (defined($this_node));
        $this_node->store_tag('reuse_node_id', $reuse_node_id);
        $this_node->store_tag('reuse_aln_runtime', $reuse_aln_runtime_value);
        my $contribution = $mapping_stats{mapped}{$mapped_cluster_id};
        next unless (defined($contribution));
        $this_node->store_tag('reuse_contribution', $contribution);
    }

    my $num_novel_clusters  = scalar keys %{$mapping_stats{novel}};
    my $num_mapped_clusters = scalar keys %{$mapping_stats{mapped}};

    my $sum_contrib = 0;
    foreach my $mapped_cluster_id (keys %{$mapping_stats{mapped}}) {
        $sum_contrib += $mapping_stats{mapped}{$mapped_cluster_id};
    }

    if ($num_mapped_clusters == 0) {
        $self->warning('No mapped clusters');
        return;
    }

    my $average_mapped_contribution = $sum_contrib / $num_mapped_clusters;
    my $proportion_novel_clusters   = $num_novel_clusters/($num_novel_clusters+$num_mapped_clusters);

    print STDERR "# Proportion novel clusters = $proportion_novel_clusters [$num_novel_clusters $num_mapped_clusters]\n";
    print STDERR "# Average contribution mapped clusters = $average_mapped_contribution\n";

    my $groupset_tree = $self->param('groupset_tree');

    $groupset_tree->store_tag('sid_map_novel_cls', $num_novel_clusters);
    $groupset_tree->store_tag('sid_map_mapped_cls', $num_mapped_clusters);
    $groupset_tree->store_tag('sid_map_summary_contrib', $sum_contrib);
    $groupset_tree->store_tag('sid_map_average_contrib', $average_mapped_contribution);
    $groupset_tree->store_tag('sid_prop_novel_cls', $proportion_novel_clusters);

    my $unmap_tolerance = $self->param('unmap_tolerance');
    print STDERR "# Unmap tolerance parameter set to $unmap_tolerance\n";

    if ($proportion_novel_clusters > $unmap_tolerance) {
        # transient_error(0) marks the failure as permanent (no retry).
        $self->input_job->transient_error(0);
        die "Quality Check FAILED: Proportion of novel clusters $proportion_novel_clusters > $unmap_tolerance\n";
    }

    return;
}
1;
| danstaines/ensembl-compara | modules/Bio/EnsEMBL/Compara/RunnableDB/GeneTrees/OverallGroupsetQC.pm | Perl | apache-2.0 | 17,875 |
#!/usr/bin/env perl
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <ensembl-dev@ebi.ac.uk>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=head1 NAME
import_array_from_fasta.pl
=head1 SYNOPSIS
import_array_from_fasta.pl [options] file
This is a short cut import with little validation. Expects a pre-process non-redundant fasta file,
otherwise any probe mapping done based on the output of this script will generate duplicate probes
and features.
Options:
Mandatory
Optional
=head1 OPTIONS
=over 8
=item B<-name|n>
Mandatory: Instance name for the data set, this is the directory where the native data files are located
=item B<-format|f>
Mandatory: The format of the data files e.g. nimblegen
=over 8
=item B<-group|g>
Mandatory: The name of the experimental group
=over 8
=item B<-data_root>
The root data dir containing native data and pipeline data, default = $ENV{'EFG_DATA'}
=over 8
=item B<-fasta>
Flag to turn on dumping of all probe_features in fasta format for the remapping pipeline
=item B<-norm>
Normalisation method, default is the Bioconductor vsn package which performs generalised log ratio transformations
=item B<-species|s>
Species name for the array.
=item B<-debug>
Turns on and defines the verbosity of debugging output, 1-3, default = 0 = off
=over 8
=item B<-help>
Print a brief help message and exits.
=item B<-man>
Prints the manual page and exits.
=back
=head1 DESCRIPTION
B<This program> takes an input redundant probe name fasta file and generates an NR probe dbID fasta file.
=cut
#add @INC stuff here, or leave to .bashrc/.efg?

# Ensure the eFG environment is initialised before compilation continues:
# EFG_DATA must be set, either already in the environment or by sourcing
# the .efg setup file shipped with ensembl-functgenomics.
# NOTE(review): `system(". ...")` runs in a child shell, so it cannot
# export EFG_DATA into this process, and `-f "~/..."` does not expand the
# tilde -- confirm this fallback ever worked as intended.
BEGIN{
    if (! defined $ENV{'EFG_DATA'}) {
        if (-f "~/src/ensembl-functgenomics/scripts/.efg") {
            system (". ~/src/ensembl-functgenomics/scripts/.efg");
        } else {
            die ("This script requires the .efg file available from ensembl-functgenomics\n".
                 "Please source it before running this script\n");
        }
    }
}
#use Bio::EnsEMBL::Root; #Only used for rearrange see pdocs
#Roll own Root object to handle debug levels, logging, dumps etc.
### MODULES ###
use Getopt::Long;
#use Carp;#For dev only? cluck not exported by default Remove this and implement in Helper
use Pod::Usage;
#POSIX? File stuff
use File::Path;
#use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Utils::Exception qw( throw warning );
use Bio::EnsEMBL::Funcgen::Utils::EFGUtils qw (open_file run_system_cmd backup_file);
use Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Funcgen::Array;
use Bio::EnsEMBL::Funcgen::ArrayChip;
use Bio::EnsEMBL::Funcgen::Probe;
use strict;

$| = 1; #autoflush

# Command-line state; populated by GetOptions below.
my ($pass, $dbname, $help, $man, $array_name, $line);
my ($format, $clobber, $vendor, $desc, $file);
#my $reg = "Bio::EnsEMBL::Registry";

# Defaults, overridable on the command line.
my $data_dir = $ENV{'EFG_DATA'};
my $user = "ensadmin";
my $host = 'ens-genomics1';
my $port = '3306';
my $out_dir = '.';

#this should import using the API
#taking array name vendor args to populate the appropriate array/arary_chip records
#or parse them from the info line?
#currently just generates and imports flat file
#should also build cache and generate nr file?
#this depends on id/name field refering to unique seq
#same name can't refer to more than one seq
# Parse command-line options; abort with usage output on unknown or
# malformed options (the original ignored GetOptions' return value).
GetOptions (
    "file|f=s"      => \$file,
    "pass|p=s"      => \$pass,
    "port=s"        => \$port,
    "host|h=s"      => \$host,
    "user|u=s"      => \$user,
    "dbname|d=s"    => \$dbname,
    "help|?"        => \$help,
    "man|m"         => \$man,
    "array|a=s"     => \$array_name,
    "vendor|v=s"    => \$vendor,
    'clobber'       => \$clobber,
    "format=s"      => \$format,
    "description=s" => \$desc,
    "outdir|o=s"    => \$out_dir,
) or pod2usage(2);

pod2usage(1) if $help;
pod2usage(-exitstatus => 0, -verbose => 2) if $man;

# Mandatory parameters: the input fasta file is also required, otherwise
# the script would only fail later when trying to open it.
if(!($array_name && $dbname && $vendor && $pass && $file)){
    throw('Mandatory parameters not met: -array, -dbname, -vendor, -pass and -file are all required');
}
# Connect to the target funcgen database.
my $db = Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor->new(
    -dbname => $dbname,
    -port   => $port,
    -pass   => $pass,
    -host   => $host,
    -user   => $user,
);

my $array_a      = $db->get_ArrayAdaptor();
my $array_chip_a = $db->get_ArrayChipAdaptor();
my $probe_a      = $db->get_ProbeAdaptor();

#we cant use the same methodology too store the array/arraychips as they are not in the
#original format i.e. there will be no status control on array chip level

# Refuse to re-import an existing array unless -clobber was given.
# NOTE(review): the -clobber branch only warns (message contains typos);
# actual clobbering is not implemented, so the old array is reused as-is.
my $array = $array_a->fetch_by_name_vendor($array_name, $vendor);

if($array){
    if(! $clobber){
        throw("Array already exists, specify -clobber only if you are aboslutely sure you want to'");
    }else{
        warn "clobber not yer implementd";
    }
}

# Create and store the Array record for this import.
$array = Bio::EnsEMBL::Funcgen::Array->new(
    -NAME        => $array_name,
    -FORMAT      => uc($format),
    -VENDOR      => uc($vendor),
    -TYPE        => 'OLIGO',
    -DESCRIPTION => $desc,
);

($array) = @{$array_a->store($array)};

# Single ArrayChip per array; the array name doubles as the design id.
my $array_chip = Bio::EnsEMBL::Funcgen::ArrayChip->new(
    -ARRAY_ID  => $array->dbID(),
    -NAME      => $array_name,
    -DESIGN_ID => $array_name,
);

($array_chip) = @{$array_chip_a->store($array_chip)};

#don't really need to do this?
$array->add_ArrayChip($array_chip);
#set up the nr fasta file
my $nr_fasta = open_file($out_dir."/${array_name}_nr.fasta", '>');

#open in file
my $in = open_file($file);

#do the biz
my ($aname, $pid);
my $ac_id = $array_chip->dbID();

# Walk the input fasta: header lines carry the array/probe identifiers,
# the following line is stored as a new Probe and re-emitted with the
# probe dbID as the fasta header (for the remapping pipeline).
# NOTE(review): assumes headers of the form ">x:array_name:probe_id" and
# single-line (unwrapped) sequences -- a wrapped sequence would be stored
# as several probes. Confirm against the pre-processing step.
while($line = <$in>){
    chomp $line;

    if($line =~ /^>/){
        (undef, $aname, $pid) = split/\:/, $line;
    }
    else{#found seq line
        my $probe = Bio::EnsEMBL::Funcgen::Probe->new(
            -NAME          => $aname.':'.$pid,
            -LENGTH        => length($line),
            -ARRAY         => $array,
            -ARRAY_CHIP_ID => $ac_id,
            -CLASS         => 'EXPERIMENTAL',
        );

        ($probe) = @{$probe_a->store($probe)};
        print $nr_fasta '>'.$probe->dbID()."\n${line}\n";
    }
}

close($in);
close($nr_fasta);
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-functgenomics/scripts/import/import_array_from_fasta.pl | Perl | apache-2.0 | 6,434 |
package Model::R::ToPay::ContractorApiFunctionAccess;

use strict;

use base qw(Model::R::ToPay);

# Rose::DB::Object mapping for the contractor_api_function_access join
# table: links a contractor's API access record to an individual external
# API function, with an active flag and a creation timestamp.
__PACKAGE__->meta->setup(
    table   => 'contractor_api_function_access',

    columns => [
        contractor_api_access_id => { type => 'integer', not_null => 1 },
        external_api_function_id => { type => 'integer', not_null => 1 },
        active                   => { type => 'boolean', default => 'true', not_null => 1 },
        date_created             => { type => 'timestamp', default => 'now()', not_null => 1 },
    ],

    # Composite primary key: one row per (access, function) pair.
    primary_key_columns => [ 'contractor_api_access_id', 'external_api_function_id' ],

    # Lets defaults like 'now()' be passed through as inline SQL.
    allow_inline_column_values => 1,

    foreign_keys => [
        contractor_api_access => {
            class       => 'Model::R::ToPay::ContractorApiAccess',
            key_columns => { contractor_api_access_id => 'id' },
        },

        external_api_function => {
            class       => 'Model::R::ToPay::ExternalApiFunction',
            key_columns => { external_api_function_id => 'id' },
        },
    ],
);

# Generates the ::Manager class with get_/count_ query helpers.
__PACKAGE__->meta->make_manager_class('contractor_api_function_access');

1;
| ant-i/db-crud | dbs/Model/R/ToPay/ContractorApiFunctionAccess.pm | Perl | apache-2.0 | 1,129 |
=head1 NAME
Bio::EnsEMBL::Analysis::Config::ProbeAlign
=head1 SYNOPSIS
use Bio::EnsEMBL::Analysis::Config::ProbeAlign;
=head1 DESCRIPTION
This contains the configuration for step 2 of the
process which maps probes to a Genome. This step
is an alignment of probes (dna) against a genome (dna)
using exonerate. So this config looks very similar to that
of any other exonerate-driving config.
The layout of the configuration is a set of hashes,
each one keyed by logic name. There is also a DEFAULT hash,
which is used as the default for all logic names.
There are genomic and transcript based logic names and config
hashes for each discrete format of array.
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2018] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
package Bio::EnsEMBL::Analysis::Config::ProbeAlign;
use warnings ;
use strict;
use vars qw( %Config );
# Master configuration hash for the ProbeAlign / ProbeTranscriptAlign
# pipeline.  DEFAULT holds settings shared by every analysis; each other
# key overrides DEFAULT for one array-format-specific logic name.
%Config =
  (
   #This entire hash is exported as the global $PROBE_CONFIG var
   #each key will be exported as $PROBE_CONFIG->{'_CONFIG_'.$key}
   #Dependant on logic name of RunnableDB
   PROBE_CONFIG =>
   {
    # Settings inherited by every analysis unless overridden below.
    DEFAULT =>
    {
     # path to softmasked, dusted genomic sequence or transcript seqs on the farm
     # #'/data/blastdb/Ensembl/Rmacaque/MMUL_2/genome/softmasked_dusted.fa',
     #/data/blastdb/Ensembl/Human/NCBI35/softmasked_dusted/', #allowed to be a dir.
     TARGETSEQS => $ENV{'GENOMICSEQS'},#or $ENV{'TRANSCRIPTSEQS'}

     QUERYTYPE => 'dna',

     # must be a single file containing all (non-redundant) probes indexed by affy_probe_id
     # QUERYSEQS refers to the value of the parameter NON_REDUNDANT_PROBE_SEQS
     # in the config-file ensembl-analysis/Config/CollapseAffyProbes.pm
     #QUERYSEQS => $ENV{'NR_FASTA'},
     #Removed this now as we want to run different analyses at the same time so we have to hardcode below

     # must supply one, since the queryseqs MUST be a single file
     #InputIDREGEXP this is used to infer chunk number from the headers of a single fasta file
     #Therefore we cannot have mixed type in the same file, must be in a different array set
     #If not related, or reformated prior to Import if they are related
     IIDREGEXP => '(\d+):(\d+)',

     #DNADB is not essential, but we need this if we are going to define a DNADB not on ensembldb
     #e.g. new release on staging
     #Add species and group here?
     DNADB => {
               -dbname          => $ENV{'DNADB_NAME'},
               -host            => $ENV{'DNADB_HOST'},
               -port            => $ENV{'DNADB_PORT'},
               -user            => $ENV{'DNADB_USER'},
               -pass            => $ENV{'DNADB_PASS'},
               -species         => $ENV{'SPECIES'},
               -multispecies_db => $ENV{'DNADB_MULTISPECIES_DB'},
               -species_id      => $ENV{'DNADB_SPECIES_ID'}
              },

     OUTDB => {
               -dbname          => $ENV{'DB_NAME'},
               -host            => $ENV{'DB_HOST'},
               -port            => $ENV{'DB_PORT'},
               -user            => $ENV{'DB_USER'},
               -pass            => $ENV{'DB_PASS'},
               -species         => $ENV{'SPECIES'},#required for auto generation fo DNADB
               -multispecies_db => $ENV{'MULTISPECIES_DB'},
               -species_id      => $ENV{'SPECIES_ID'}
              },

     #25 mers
     OPTIONS => ' --bestn 100 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 25 --dnawordlimit 11 ',
     #50 mers
     #OPTIONS => ' --bestn 100 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 50 --dnawordthreshold 11 ',

     # if the number of hits reaches or exceeds the figure below, we reject
     # all hits to that probe
     HIT_SATURATION_LEVEL => 100,#2 for unique tiling arrays mappings

     MAX_MISMATCHES => 1,#No way to dynamically test method prerequisite config vars without setting a FILTER_METHOD hash?
     #This would not be bullet proof a the hash could then be edited
     #Would need to add another method to Runnable::ExonerateProbe to change this
     #FILTER_METHOD => 'filter_mismatches',
     #Or we can pass a code ref here to allow easy extension without editing Runnables etc.
     #Can't name code ref subs!
     #This is used in ExonerateProbe.pm
     #e.g.
     #FILTER_METHOD => sub {
     #  my ($self, $query_match_length, $q_length, $score) = @_;
     #  my $mismatch;
     #  my $full_score = $q_length * 5;
     #  if($query_match_length == $q_length){
     #    if($score == $full_score){
     #      $mismatch = 0;
     #    }
     #  }
     #  if(! defined $mismatch){
     #    my $max_mismatch = $self->allowed_mismatches;
     #    for my $i(1..$max_mismatch){
     #      my $mismatch_length = $q_length - $i;
     #      my $mismatch_score = $mismatch_length * 5;
     #      if($query_match_length == $q_length){
     #        if ($score == ($full_score - ($i*9))) {
     #          $mismatch = $i;
     #        }
     #      }
     #      elsif($query_match_length == $mismatch_length){
     #        $mismatch = $i if ($score == $mismatch_score);
     #      }
     #    }
     #  }
     #  return $mismatch;
     #},
    },#end of DEFAULT

    # Per-array-format analyses follow.  Keys are RunnableDB logic names:
    # *_PROBEALIGN maps against the genome, *_PROBETRANSCRIPTALIGN against
    # transcript sequences.
    #IIDREGEXP, DNADB, OUTDB and QUERYSEQS and QUERYTYPE should be same for all these configs
    #Need to add ILLUMINA_PROBE_ALIGN, ILLUMINA_PROBE_TRANSCRIPT_ALIGN, CODELINK, AGILENT etc
    #There is no point in using a % threshold here as bestn value will most likely cause only high quality
    #hits to be returned. However there is a possiblity with longer sequences that we may get a duff alignment.
    #We could set -percent to a conservative 95%, but this entirely depends on the length and number of allowed
    #mismatches.
    #Define QUERYSEQS here instead of in Runnable to prevent convolution
    #of RunnableDB and environment,
    #i.e. we can still run ProbeAlign so long as we change this config
    #The only downfall is lack of validation of QUERYSEQS files
    #WARNING CHECK YOUR QUERYSEQS!
    #Remember: Exonerate scoring is +5 for match -4 for mismatch

    AFFY_UTR_PROBEALIGN =>
    {
     MAX_MISMATCHES => 1,
     TARGETSEQS     => $ENV{'GENOMICSEQS'},
     QUERYSEQS      => $ENV{'WORK_DIR'}.'/arrays_nr.AFFY_UTR.fasta',
     #at least 25 mers allowing 1bp mismatch
     #this will still work in the worst case where the mismatch is at the centre of a 25bp probe
     OPTIONS => ' --bestn 101 --fsmmemory 256 --dnawordlen 12 --seedrepeat 2 --dnahspthreshold 118 --dnawordlimit 0',
     #Perfect matches only for Jing
     #MAX_MISMATCHES => 0,
     #OPTIONS => ' --bestn 101 --fsmmemory 256 --dnawordlen 25 --seedrepeat 1 --dnahspthreshold 125 --dnawordlimit 0',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 14 --dnawordlimit 11 ',
     HIT_SATURATION_LEVEL => 100,
    },

    #Essentially same as AFFY but with different NR_FASTA
    AFFY_ST_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.AFFY_ST.fasta',
     #25 mers
     #OPTIONS => ' --bestn 101 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 14 --dnawordlimit 11 ',
     OPTIONS => ' --bestn 101 --fsmmemory 256 --dnawordlen 12 --seedrepeat 2 --dnahspthreshold 118 --dnawordlimit 0',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    NIMBLEGEN_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     #Need to define this dynamically based on oligo length (40-60mers)
     #50mers
     #Can we up the dnaword limit 10 50 here if we only want unique matches?
     OPTIONS => ' --bestn 2 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 50 --dnawordlimit 11 ',
     HIT_SATURATION_LEVEL => 2, #We only want unique mappings for tiling probes
     MAX_MISMATCHES => 0, #Unique mappings for tiling probes
    },

    #ILLUMINA_WG are 50mers. These settings allow for at least 1bp mismatch
    ILLUMINA_WG_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.ILLUMINA_WG.fasta',
     #50mers
     OPTIONS => ' --bestn 101 --dnahspthreshold 246 --fsmmemory 256 --dnawordlen 25 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    ILLUMINA_WG_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.ILLUMINA_WG.fasta',
     #50mers
     OPTIONS => ' --bestn 101 --dnahspthreshold 246 --fsmmemory 256 --dnawordlen 25 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    #ILLUMINA_INFINIUM are 50mers. These settings allow for at least 1bp mismatch
    ILLUMINA_INFINIUM_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.ILLUMINA_INFINIUM.fasta',
     #50mers
     OPTIONS => ' --bestn 101 --dnahspthreshold 246 --fsmmemory 256 --dnawordlen 25 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,#This may need changing?
     MAX_MISMATCHES => 1,
    },

    #CODELINK are 30mers
    CODELINK_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.CODELINK.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 141 --fsmmemory 256 --dnawordlen 15 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    CODELINK_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.CODELINK.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 141 --fsmmemory 256 --dnawordlen 15 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    #PHALANX are 60mers
    PHALANX_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.PHALANX.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 30 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    PHALANX_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.PHALANX.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 30 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    AFFY_UTR_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.AFFY_UTR.fasta',
     #25 mers
     OPTIONS => ' --bestn 101 --fsmmemory 256 --dnawordlen 12 --seedrepeat 2 --dnahspthreshold 118 --dnawordlimit 0',
     MAX_MISMATCHES => 1,
     #Perfect matches only for Jing
     #MAX_MISMATCHES => 0,
     #OPTIONS => ' --bestn 101 --fsmmemory 256 --dnawordlen 25 --seedrepeat 1 --dnahspthreshold 125 --dnawordlimit 0',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 14 --dnawordlimit 11 ',
     #HIT_SATURATION_LEVEL => 100,#I don't think we want this for the transcript mappings
     #Defaults to 100 anyway, but not used
     #FILTER_METHOD => 'filter_mismatches',#Would need to add another method to Runnable::Exonerate
     #MAX_MISMATCHES => 1,
    },

    #Essentially same as AFFY but with different NR_FASTA
    AFFY_ST_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.AFFY_ST.fasta',
     #25 mers
     OPTIONS => ' --bestn 101 --fsmmemory 256 --dnawordlen 12 --seedrepeat 2 --dnahspthreshold 118 --dnawordlimit 0',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 116 --fsmmemory 256 --dnawordlen 14 --dnawordlimit 11 ',
     #HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    #AGILENT 60 mers
    #Min length 45bp
    AGILENT_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.AGILENT.fasta',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 216 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ', #ORIG
     HIT_SATURATION_LEVEL => 100,
     #MAX_MISMATCHES => 1,
     #Danio Zv7 params
     #Do we need a way of setting these in env so we don't have to edit here?
     #Can we do a $ENV{'PARAM'} || ref self default?
     #These would have to be set before this hash by reading ini into hash?
     OPTIONS => ' --bestn 101 --dnahspthreshold 216 --fsmmemory 256 --dnawordlen 15 --seedrepeat 4 --dnawordlimit 0 ',
     MAX_MISMATCHES => 3,
    },

    AGILENT_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.AGILENT.fasta',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 216 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ', #ORIG
     HIT_SATURATION_LEVEL => 100,
     #MAX_MISMATCHES => 1,
     #Danio Zv7 params
     #Do we need a way of setting these in env so we don't have to edit here?
     #Can we do a $ENV{'PARAM'} || ref self default?
     #These would have to be set before this hash by reading ini into hash?
     OPTIONS => ' --bestn 101 --dnahspthreshold 216 --fsmmemory 256 --dnawordlen 15 --seedrepeat 4 --dnawordlimit 0 ',
     MAX_MISMATCHES => 3,
    },

    CATMA_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.CATMA.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 59 --seedrepeat 2 --dnawordlimit 0 ', #ORIG
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    CATMA_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.CATMA.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 59 --seedrepeat 2 --dnawordlimit 0 ', #ORIG
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    NSF_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.NSF.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ', #ORIG
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    NSF_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.NSF.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ', #ORIG
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    #LEIDEN 50 mers
    #Actually some are 50 some are 60.
    #3 mismatches for Zv7 due to low quality assembly
    LEIDEN_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.LEIDEN.fasta',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 241 --fsmmemory 256 --dnawordlen 25 --seedrepeat 2 --dnawordlimit 0 ',
     OPTIONS => ' --bestn 101 --dnahspthreshold 223 --fsmmemory 256 --dnawordlen 13 --seedrepeat 4 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 3,
    },

    LEIDEN_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.LEIDEN.fasta',
     #OPTIONS => ' --bestn 101 --dnahspthreshold 241 --fsmmemory 256 --dnawordlen 25 --seedrepeat 2 --dnawordlimit 0 ',
     OPTIONS => ' --bestn 101 --dnahspthreshold 223 --fsmmemory 256 --dnawordlen 13 --seedrepeat 4 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES =>3,
    },

    #STEMPLE_LAB_SANGER 65 mers
    #3 mismatches due to low quality assembly
    STEMPLE_LAB_SANGER_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.STEMPLE_LAB_SANGER.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 298 --fsmmemory 256 --dnawordlen 17 --seedrepeat 4 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 3,
    },

    STEMPLE_LAB_SANGER_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.STEMPLE_LAB_SANGER.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 298 --fsmmemory 256 --dnawordlen 17 --seedrepeat 4 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES =>3,
    },

    #WUSTL Custom arrays (only used for C.elegans AFAIK) 60 mers
    WUSTL_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.WUSTL.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    WUSTL_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.WUSTL.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    #UCSF Custom arrays (only used for C.elegans) 50-70 mers
    UCSF_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.UCSF.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    UCSF_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.UCSF.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    #SLRi Custom arrays (only used for C.elegans) 50-70 mers
    SLRI_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.SLRI.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    SLRI_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.SLRI.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    # NIMBLEGen modENCODE arrays (only used for C.elegans) 60 mers
    NIMBLEGEN_MODENCODE_PROBEALIGN =>
    {
     TARGETSEQS => $ENV{'GENOMICSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.NIMBLEGEN_MODENCODE.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },

    NIMBLEGEN_MODENCODE_PROBETRANSCRIPTALIGN =>
    {
     TARGETSEQS => $ENV{'TRANSCRIPTSEQS'},
     QUERYSEQS  => $ENV{'WORK_DIR'}.'/arrays_nr.NIMBLEGEN_MODENCODE.fasta',
     OPTIONS => ' --bestn 101 --dnahspthreshold 291 --fsmmemory 256 --dnawordlen 22 --seedrepeat 2 --dnawordlimit 0 ',
     HIT_SATURATION_LEVEL => 100,
     MAX_MISMATCHES => 1,
    },
   }
  );
# Export requested config entries into the caller's namespace as scalar
# package variables (like Exporter, but aliasing scalars instead of subs).
# With no arguments, every key of %Config is exported.
sub import {
    my ($caller_pkg) = caller(0);    # package that issued the 'use'
    shift;                           # drop our own package name from @_

    my @requested = @_;
    @requested = keys %Config unless @requested;
    return if !@requested;

    # Predeclare the scalars in the caller so 'use strict' is satisfied.
    my $declaration = "package $caller_pkg; use vars qw("
                    . join(' ', map { '$' . $_ } @requested) . ")";
    eval $declaration;
    die $@ if $@;

    for my $name (@requested) {
        die "Error: Config: $name not known\n"
            if !defined $Config{$name};
        no strict 'refs';
        # Alias the caller's scalar directly to our config entry.
        *{"${caller_pkg}::${name}"} = \$Config{$name};
    }
}
1;
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Config/ProbeAlign.pm | Perl | apache-2.0 | 20,989 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::CustomInterestService::CustomInterestOperation;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Construct a CustomInterestOperation from a hashref of arguments.
# Only the create/update/updateMask fields are recognised.
sub new {
    my ($class, $args) = @_;

    # Copy across the supported fields (absent ones come through as undef).
    my $self = {};
    $self->{$_} = $args->{$_} for qw(create update updateMask);

    # Delete the unassigned fields in this object for a more concise JSON payload
    remove_unassigned_fields($self, $args);

    return bless $self, $class;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/CustomInterestService/CustomInterestOperation.pm | Perl | apache-2.0 | 1,131 |
=head1 LICENSE
Copyright [2009-2014] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=head1 NAME
Bio::EnsEMBL::EGPipeline::GetOrthologs::RunnableDB::GetOrthologs
=head1 DESCRIPTION
Hive RunnableDB that dumps all orthologue pairs for a given Compara
MethodLinkSpeciesSet to a tab-separated file, one row per homology
member pair, including gene stable IDs, UniProt accessions and percent
identity for both species.
=head1 AUTHOR
ckong
=cut
package Bio::EnsEMBL::EGPipeline::GetOrthologs::RunnableDB::GetOrthologs;
use strict;
use warnings;
use Data::Dumper;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Utils::SqlHelper;
use base ('Bio::EnsEMBL::Hive::Process');
use Bio::EnsEMBL::Utils::Exception qw(throw);
use File::Path qw(make_path);
use File::Spec::Functions qw(catdir);
# No analysis-specific defaults: every parameter this module uses is
# either required (checked in fetch_input) or supplied by the pipeline
# configuration.
sub param_defaults {
    return {
    };
}
# Validate the job and analysis parameters before run() starts talking
# to databases.  Dies (via param_required) if any mandatory parameter is
# missing.
sub fetch_input {
    my ($self) = @_;

    # Mandatory parameters; these are already stored under the names that
    # run() reads, so no re-assignment is needed (the original code
    # redundantly wrote each value back under its own key).
    $self->param_required($_) for qw(mlss_id compara from_sp output_dir);

    # run() reads the method link type under the shorter 'ml_type' key,
    # so copy it across under that name.
    $self->param('ml_type', $self->param_required('method_link_type'));

    return;
}
# Fetch all homologies for the configured MethodLinkSpeciesSet and write
# one TSV row per (from-member, to-member, uniprot x uniprot) combination
# to orthologs-<from>-<to>.tsv in the output directory.
# Fix over original: three-arg open with a lexical filehandle (was an
# unsafe two-arg bareword open) and a checked close on the write handle.
sub run {
    my ($self) = @_;

    # --- Compara adaptors -------------------------------------------------
    my $compara = $self->param('compara');
    my $mlssa = Bio::EnsEMBL::Registry->get_adaptor($compara, 'compara', 'MethodLinkSpeciesSet');
    my $ha    = Bio::EnsEMBL::Registry->get_adaptor($compara, 'compara', 'Homology');
    my $gdba  = Bio::EnsEMBL::Registry->get_adaptor($compara, 'compara', 'GenomeDB');
    die "Can't connect to Compara database specified by $compara - check command-line and registry file settings" if (!$mlssa || !$ha || !$gdba);

    # --- Work out the 'to' species from the mlss species set --------------
    # The species set holds both genomes; the one whose name does not match
    # from_sp is the projection target.
    my $mlss_id = $self->param('mlss_id');
    my $mlss    = $mlssa->fetch_by_dbID($mlss_id);
    my $gdbs    = $mlss->species_set_obj->genome_dbs();
    my $from_sp = $self->param('from_sp');
    my $to_sp;
    foreach my $gdb (@$gdbs) {
        # NOTE(review): $from_sp is interpolated unescaped into the regex;
        # assumes production names contain no regex metacharacters.
        $to_sp = $gdb->name() if ($gdb->name() !~ /$from_sp/);
    }

    # --- Core adaptors for production names --------------------------------
    my $from_meta      = Bio::EnsEMBL::Registry->get_adaptor($from_sp, 'core', 'MetaContainer');
    my ($from_prod_sp) = @{ $from_meta->list_value_by_key('species.production_name') };
    my $to_meta        = Bio::EnsEMBL::Registry->get_adaptor($to_sp, 'core', 'MetaContainer');
    my ($to_prod_sp)   = @{ $to_meta->list_value_by_key('species.production_name') };
    die("Problem getting DBadaptor(s) - check database connection details\n") if (!$from_meta || !$to_meta);

    # --- Compara GenomeDB objects ------------------------------------------
    my $ml_type  = $self->param('ml_type');
    my $from_gdb = $gdba->fetch_by_registry_name($from_sp);
    my $to_gdb   = $gdba->fetch_by_registry_name($to_sp);

    my $output_dir  = $self->param('output_dir');
    my $output_file = $output_dir."/orthologs-$from_prod_sp-$to_prod_sp.tsv";
    my $datestring  = localtime();
    my $division    = 'Ensembl';
    if ($from_meta->get_division()) {
        $division = $from_meta->get_division();
    }

    # Three-arg open with a lexical handle; the original two-arg bareword
    # open would misbehave if the path ever began with a mode character.
    open my $out_fh, '>', $output_file or die "couldn't open file " . $output_file . " $!";
    print $out_fh "## " . $datestring . "\n";
    print $out_fh "## orthologs from $from_prod_sp to $to_prod_sp\n";
    print $out_fh "## compara db " . $mlssa->dbc->dbname() . "\n";
    print $out_fh "## division " . $division . "\n";

    # --- Fetch homologies ---------------------------------------------------
    my $from_sp_alias = $gdba->fetch_by_registry_name($from_sp)->name();
    my $homologies    = $ha->fetch_all_by_MethodLinkSpeciesSet($mlss);
    my $homologies_ct = scalar(@$homologies);
    $self->warning("Retrieving $homologies_ct homologies of method link type $ml_type for mlss_id $mlss_id\n");

    foreach my $homology (@{$homologies}) {
        # 'from' member of the homology pair.
        my $from_member  = $homology->get_Member_by_GenomeDB($from_gdb)->[0];
        my $from_perc_id = $from_member->perc_id();
        my $from_gene    = $from_member->get_Transcript->get_Gene();

        # Fully qualified identifiers with annotation source.
        # Havana genes are merged, so their source is reported as Ensembl.
        my $from_mod_identifier = $from_gene->source();
        if ($from_mod_identifier =~ /havana/) { $from_mod_identifier = 'ensembl'; }
        my $from_stable_id   = $from_mod_identifier . ":" . $from_member->stable_id();
        my $from_translation = $from_member->get_Translation();
        if (!$from_translation) { next; }    # no translation -> no row
        my $from_uniprot = get_uniprot($from_translation);
        $self->warning("Warning: can't find stable ID corresponding to 'from' species ($from_sp_alias)\n") if (!$from_stable_id);

        # 'to' members (there may be several, e.g. one2many homologies).
        my $to_members = $homology->get_Member_by_GenomeDB($to_gdb);
        foreach my $to_member (@$to_members) {
            my $to_perc_id = $to_member->perc_id();
            my $to_gene    = $to_member->get_Transcript->get_Gene();
            my $to_mod_identifier = $to_gene->source();
            if ($to_mod_identifier =~ /havana/) { $to_mod_identifier = 'ensembl'; }
            my $to_stable_id   = $to_mod_identifier . ":" . $to_member->stable_id();
            my $to_translation = $to_member->get_Translation();
            next if (!$to_translation);
            my $to_uniprot = get_uniprot($to_translation);

            my $from_identifier = $from_mod_identifier . ":" . $from_gene->stable_id;
            my $to_identifier   = $to_mod_identifier . ":" . $to_gene->stable_id;

            # One row per uniprot x uniprot combination; 'no_uniprot'
            # placeholder when a side carries no UniProt xref.
            if (scalar(@$from_uniprot) == 0 && scalar(@$to_uniprot) == 0) {
                print $out_fh "$from_prod_sp\t" . $from_identifier . "\t$from_stable_id\tno_uniprot\t$from_perc_id\t";
                print $out_fh "$to_prod_sp\t" . $to_identifier . "\t$to_stable_id\tno_uniprot\t$to_perc_id\t" . $homology->description . "\n";
            } elsif (scalar(@$from_uniprot) == 0) {
                foreach my $to_xref (@$to_uniprot) {
                    print $out_fh "$from_prod_sp\t" . $from_identifier . "\t$from_stable_id\tno_uniprot\t$from_perc_id\t";
                    print $out_fh "$to_prod_sp\t" . $to_identifier . "\t$to_stable_id\t$to_xref\t$to_perc_id\t" . $homology->description . "\n";
                }
            } elsif (scalar(@$to_uniprot) == 0) {
                foreach my $from_xref (@$from_uniprot) {
                    print $out_fh "$from_prod_sp\t" . $from_identifier . "\t$from_stable_id\t$from_xref\t$from_perc_id\t";
                    print $out_fh "$to_prod_sp\t" . $to_identifier . "\t$to_stable_id\tno_uniprot\t$to_perc_id\t" . $homology->description . "\n";
                }
            } else {
                foreach my $to_xref (@$to_uniprot) {
                    foreach my $from_xref (@$from_uniprot) {
                        print $out_fh "$from_prod_sp\t" . $from_identifier . "\t$from_stable_id\t$from_xref\t$from_perc_id\t";
                        print $out_fh "$to_prod_sp\t" . $to_identifier . "\t$to_stable_id\t$to_xref\t$to_perc_id\t" . $homology->description . "\n";
                    }
                }
            }
        }
    }

    # Buffered write errors only surface at close, so check it.
    close $out_fh or die "couldn't close file " . $output_file . " $!";

    $self->dbc->disconnect_if_idle();
    $from_meta->dbc->disconnect_if_idle();
    $to_meta->dbc->disconnect_if_idle();
    $mlssa->dbc->disconnect_if_idle();
    $ha->dbc->disconnect_if_idle();
    $gdba->dbc->disconnect_if_idle();

    return;
}
# Intentionally empty: run() writes its results straight to the output
# file, so there is nothing to flow into the Hive tables.
sub write_output {
    my ($self) = @_;
}
############
# Subroutine
############
# Get the uniprot entries associated with the canonical translation
# Return an arrayref of UniProt primary accessions attached to the given
# (canonical) translation; empty arrayref when there are no UniProt xrefs.
sub get_uniprot {
    my ($translation) = @_;
    my $xrefs      = $translation->get_all_DBEntries('Uniprot%');
    my @accessions = map { $_->primary_id() } @{$xrefs};
    return \@accessions;
}
1;
| navygit/ncRNA_Pipeline | modules/Bio/EnsEMBL/EGPipeline/GetOrthologs/RunnableDB/GetOrthologs.pm | Perl | apache-2.0 | 8,241 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveProcessGeneClusters;
use strict;
use warnings;
use feature 'say';
use Bio::EnsEMBL::Analysis::Tools::Algorithms::ClusterUtils;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::GeneUtils qw(empty_Gene);
use parent ('Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBaseRunnableDB');
# This module is built with the view of in some ways being the opposite to our current
# set of modules which include TranscriptConsensus, LayerAnnotation and GeneBuilder.
# These modules have always been run with the view of reducing a large set of transcripts
# to one or very few well supported transcripts. This is not biologically realistic. This
# module will instead attempt to find as many well supported transcripts as possible
sub fetch_input {
my $self = shift;
# Sets up the analysis object, the dna and output db connections, then loads
# the unprocessed genes according to iid_type ('slice', 'gene_id' or
# 'cluster') and stores the logic_name weight matrix for scoring.
#
# This will take a set of input dbs and a set of logic_name and allowed biotypes
# It will then retrieve all the transcripts that fit this set. If no allowed biotypes
# hash exists for the set it will retrieve all the transcripts for that logic_name from
# every db it exists in (though I would hope for logic_names to not be spread across
# several dbs, as the set associated with each logic_name should be non-redundant).
# Also I guess if no logic names are provided then just take all from the input dbs
# Once fetch input is done there should be a geneset associated with some of the major
# groupings
my $analysis = Bio::EnsEMBL::Analysis->new(
-logic_name => $self->param('logic_name'),
-module => $self->param('module'),
);
$self->analysis($analysis);
my $dna_dba = $self->hrdb_get_dba($self->param('dna_db'));
$self->hrdb_set_con($dna_dba,'dna_db');
my $out_dba = $self->hrdb_get_dba($self->param('cluster_output_db'));
$out_dba->dnadb($dna_dba);
$self->hrdb_set_con($out_dba,'cluster_output_db');
my $input_id = $self->param('iid');
my $unprocessed_genes = [];
my $input_id_type = $self->param('iid_type');
# If the the input is a slice, then fetch everything on the slice, but remove any gene that crosses
# the 3' boundary, as this will be picked up on another slice
if($input_id_type eq 'slice') {
my $in_dba = $self->hrdb_get_dba($self->param('cluster_db'));
$in_dba->dnadb($dna_dba);
$self->hrdb_set_con($in_dba,'cluster_db');
my $slice = $self->fetch_sequence($input_id,$dna_dba);
$self->query($slice);
my $gene_adaptor = $in_dba->get_GeneAdaptor();
my $unfiltered_genes = $gene_adaptor->fetch_all_by_Slice($slice);
$unprocessed_genes = $self->remove_3_prime_boundry_genes($unfiltered_genes);
}
# If the input is an array of gene ids then loop through them and fetch them from the input db
elsif($input_id_type eq 'gene_id') {
my $in_dba = $self->hrdb_get_dba($self->param('cluster_db'));
$in_dba->dnadb($dna_dba);
$self->hrdb_set_con($in_dba,'cluster_db');
my $gene_adaptor = $in_dba->get_GeneAdaptor();
for my $db_id (@{$input_id}) {
my $unprocessed_gene = $gene_adaptor->fetch_by_dbID($db_id);
push(@{$unprocessed_genes},$unprocessed_gene);
}
}
# If the input is a cluster hash (adaptor name -> transcript dbIDs) then load
# all of its transcripts into a single in-memory gene via load_cluster
elsif($input_id_type eq 'cluster') {
my $unprocessed_gene = $self->load_cluster($input_id);
push(@{$unprocessed_genes},$unprocessed_gene);
}
$self->unprocessed_genes($unprocessed_genes);
my $logic_name_weights = $self->param('logic_name_weights');
$self->logic_name_weights($logic_name_weights);
my $single_exon_support_penalty = 1;
# NOTE(review): a configured penalty of 0 is falsy, so this check ignores it
# and the default of 1 stays in place — confirm that is intended
if($self->param('single_exon_support_penalty')) {
$single_exon_support_penalty = $self->param('single_exon_support_penalty');
}
$self->single_exon_support_penalty($single_exon_support_penalty);
return 1;
}
sub run {
my ($self) = @_;
# Score and filter the clustered transcripts, regroup the survivors into
# final genes, and stash the result for write_output.
my $scored_genes = $self->process_rough_genes();
$self->output_genes($self->recluster_genes($scored_genes));
return 1;
}
sub write_output {
my $self = shift;
# Detach each final gene from its source database (empty_Gene) and persist
# it into the cluster output database.
my $gene_adaptor = $self->hrdb_get_con('cluster_output_db')->get_GeneAdaptor;
say "Writing genes to output db";
for my $out_gene (@{$self->output_genes}) {
empty_Gene($out_gene);
$gene_adaptor->store($out_gene);
}
say "...finished writing genes to output db";
return 1;
}
sub process_rough_genes {
my ($self) = @_;
# This sub takes in a set of cluster genes (basically where each gene is set of transcripts that have been clustered
# together based on exon overlap. Transcripts may have come from many input dbs/sources)
# For each gene: 1) calculate the set of unique exon pairs (coordinates of consecutive exons pairs)
# 2) score the transcripts against these pairs, transcripts where all the exon pairs pass the support
# cut-off are inlcuded in the candidate transcript set
# 3) remove redundant transcripts from the candidate set. For all transcripts that pass the support
# cut-off values calculate when any transcripts are redundant and remove them. A transcript is
# redundant if the structure is completely contained in another transcript. It must match exactly
# in terms of being a contiguous subset of exons.
my $output_genes = [];
my $unprocessed_genes = $self->unprocessed_genes();
foreach my $unprocessed_gene (@{$unprocessed_genes}) {
my $input_transcript_count = scalar(@{$unprocessed_gene->get_all_Transcripts});
my $exon_pairs = $self->generate_exon_pairs($unprocessed_gene);
my $candidate_transcripts = $self->score_transcript_support($unprocessed_gene,$exon_pairs);
my $candidate_transcript_count = scalar(@{$candidate_transcripts});
# my $near_final_transcripts = $self->remove_redundant_transcripts($candidate_transcripts);
my $final_transcripts = $self->compare_transcripts($candidate_transcripts); #$near_final_transcripts);
# If scoring removed every transcript, fall back to the single
# best-supported one (tagged with a "_br" biotype suffix)
unless(scalar(@{$final_transcripts})) {
$final_transcripts = $self->find_best_remaining_transcript($unprocessed_gene,$exon_pairs);
}
# Nothing passed even the backup selection: drop this cluster entirely
unless(scalar(@{$final_transcripts})) {
next;
}
my $final_transcript_count = scalar(@{$final_transcripts});
say "Inital transcript count for gene: ".$input_transcript_count;
say "Candidate transcript count for gene: ".$candidate_transcript_count;
say "Final transcript count for gene: ".$final_transcript_count;
my $output_gene = Bio::EnsEMBL::Gene->new();
$output_gene->analysis($self->analysis);
$output_gene->biotype($self->analysis->logic_name);
foreach my $final_transcript (@{$final_transcripts}) {
$output_gene->add_Transcript($final_transcript);
}
push(@{$output_genes},$output_gene);
}
return($output_genes);
}
sub generate_exon_pairs {
my ($self,$gene) = @_;
# This is a sub to loop through every transcript in our initial transcript cluster (the 'gene') and
# generate a set of exon pairs. Exon pairs are a bit like intron supporting evidence, we want to
# use them as evidence of support for sections of different transcripts. The idea is that you take
# the start and end coords of each exon in the pair and define that as a single structure. A bit
# like a stricter version of an intron. Then as you loop over the transcripts in the cluster you
# count the number of times each exon pair exists. Maybe it's in every single transcript, maybe it's
# unique to one. After it's done you have a list of pairs of exons for the gene and a list of times
# each pair was observed. Later on the code will score the transcripts against this list and any
# transcript that does now meet the observation cut-off for its associated logic_name/biotype for
# every exon pair it contains will be droppped. The count itself is done by assigning all transcript
# dbIDs associated with the pair to that pair as a set of keys. Later the count of these keys it
# used to determine the support for a particular exon pair across the transcript cluster
#
# Returned structure: { "els:ele:ers:ere" => { internal_transcript_id => 1, ... } }
# Note: only coding structure counts — pairs are built from translateable exons.
my $exon_pairs = {};
my $transcripts = $gene->get_all_Transcripts();
foreach my $transcript (@{$transcripts}) {
my $exons = $transcript->get_all_translateable_Exons();
if(scalar(@{$exons}) == 1) {
my $exon = shift(@{$exons});
my $exon_start = $exon->seq_region_start;
my $exon_end = $exon->seq_region_end;
# For a single exon model we will just build a pair with the exon coords repeated
# Later, in the scoring phase we can add a penalty to the cut off for these
my $coord_string = $exon_start.":".$exon_end.":".$exon_start.":".$exon_end;
$exon_pairs->{$coord_string}->{$transcript->{'internal_transcript_id'}} = 1;
} else {
my $i=0;
for($i=0; $i<(scalar(@{$exons})-1); $i++) {
my $exon_left = ${$exons}[$i];
my $exon_right = ${$exons}[$i+1];
my $els = $exon_left->seq_region_start;
my $ele = $exon_left->seq_region_end;
my $ers = $exon_right->seq_region_start;
my $ere = $exon_right->seq_region_end;
my $coord_string = $els.":".$ele.":".$ers.":".$ere;
$exon_pairs->{$coord_string}->{$transcript->{'internal_transcript_id'}} = 1;
}
} # end else
}
return($exon_pairs);
}
sub score_transcript_support {
my ($self,$gene,$exon_pairs) = @_;
# Scores every transcript in the cluster against the observed exon pairs and
# returns those where each of the transcript's own pairs was observed at
# least transcript_weight_threshold times across the whole cluster. Support
# may come from transcripts of any logic_name/biotype in the cluster.
#
# The threshold comes from the logic_name_weights config matrix, which maps a
# logic_name either directly to a weight:
# 'logic_name_weights' => { 'rnaseq_blast' => 4 },
# or to a per-biotype hash with an optional fallback key:
# 'logic_name_weights' => { 'rnaseq_blast' => { 'rnaseq_80_100' => 1,
# 'default_biotype_weight' => 4 } },
# A logic_name/biotype with no configured weight defaults to a threshold of 1
# (i.e. always kept), so always define 'default_biotype_weight' once you have
# started listing specific biotypes for a logic_name.
#
# Single coding-exon transcripts must clear an extra
# single_exon_support_penalty observations on top of their threshold.
my $output_transcripts = [];
my $logic_name_weights = $self->logic_name_weights();
my $transcripts = $gene->get_all_Transcripts();
foreach my $transcript (@{$transcripts}) {
my $logic_name = $transcript->analysis->logic_name();
my $biotype = $transcript->biotype();
# NOTE(review): when no weight is configured the threshold stays 1 but
# $transcript->{'transcript_weight_threshold'} is never set, so
# find_best_remaining_transcript later reads undef for it — confirm
my $transcript_weight_threshold = 1;
my $single_exon_support_penalty = $self->single_exon_support_penalty();
# Check if the logic_name points to a hashref
if(ref($logic_name_weights->{$logic_name}) eq 'HASH') {
# If we find a weight for the biotype of the transcript, then assign it
if(exists($logic_name_weights->{$logic_name}->{$biotype})) {
$transcript_weight_threshold = $logic_name_weights->{$logic_name}->{$biotype};
$transcript->{'transcript_weight_threshold'} = $transcript_weight_threshold;
}
# Else if we don't find a weight for that biotype then look for a default weight key
# If we don't find it then the biotype will just a default of 1
elsif (exists($logic_name_weights->{$logic_name}->{'default_biotype_weight'})) {
$transcript_weight_threshold = $logic_name_weights->{$logic_name}->{'default_biotype_weight'};
$transcript->{'transcript_weight_threshold'} = $transcript_weight_threshold;
}
}
# Else the logic name (hopefully) points to a weight, so set the weight cut off to that
elsif($logic_name_weights->{$logic_name}) {
$transcript_weight_threshold = $logic_name_weights->{$logic_name};
$transcript->{'transcript_weight_threshold'} = $transcript_weight_threshold;
}
# Just in case someone sets something weird in the config, like pairing a biotype to undef in the hash...
# This will cause the transcript to be included in the final set
unless($transcript_weight_threshold) {
$transcript_weight_threshold = 1;
$transcript->{'transcript_weight_threshold'} = $transcript_weight_threshold;
}
my $keep_transcript = 1;
# FIX: use translateable exons so the single-exon test agrees with how
# generate_exon_pairs classified the transcript. Previously this called
# get_all_Exons(), so a transcript with one coding exon plus UTR exons
# received the doubled-coordinate pair key but escaped the single-exon
# penalty below.
my $exons = $transcript->get_all_translateable_Exons();
# Loop through all exon pairs in the exon pair hash for the gene
foreach my $exon_pair_key (keys(%$exon_pairs)) {
my $exon_pair = $exon_pairs->{$exon_pair_key};
# If this exon pair was present in the transcript then count the number to times it was observed in
# total by counting the keys for it (which is the set of transcript dbIDs it was found for)
if($exon_pair->{$transcript->{'internal_transcript_id'}}) {
my $support_count = scalar(keys(%$exon_pair));
# If we have a single exon transcript we add a penalty for the support threshold
if(scalar(@{$exons}) == 1) {
unless($support_count >= ($transcript_weight_threshold + $single_exon_support_penalty)) {
$keep_transcript = 0;
}
}
else {
# If the any exon pair fails to pass the threshold then drop the transcript
unless($support_count >= $transcript_weight_threshold) {
$keep_transcript = 0;
}
}
}
}
if($keep_transcript) {
say "Keeping transcript";
push(@{$output_transcripts},$transcript);
}
}
return($output_transcripts);
}
sub remove_redundant_transcripts {
my ($self,$transcripts) = @_;
# Removes transcripts whose exact coding-exon structure is a contiguous,
# coordinate-identical subset of another transcript's (see exon_subset).
# NOTE(review): the only call to this sub within this module is commented
# out in process_rough_genes; compare_transcripts is used instead.
my $final_transcripts = [];
my $transcript_redundancy = {};
my $i=0;
for($i=0; $i<scalar@{$transcripts}; $i++) {
my $t1 = ${$transcripts}[$i];
my $t1_exons = $t1->get_all_translateable_Exons;
my $j = $i+1;
for($j=$i+1; $j<scalar@{$transcripts}; $j++) {
my $t2 = ${$transcripts}[$j];
my $t2_exons = $t2->get_all_translateable_Exons;
# This section is too simple at the moment. I want a way of keeping models with UTR when the the models
# are identical except for UTR. This will probably work, but doesn't seem like a great solution. The
# other issue is figuring out heuristically when to skip the comparison. There's probably some way to
# do it. Less important as this is relatively fast anyway
if(scalar(@{$t1_exons}) > scalar(@{$t2_exons})) {
my $is_redundant = $self->exon_subset($t1,$t2,$t1_exons,$t2_exons);
if($is_redundant) {
$transcript_redundancy->{$t2->{'internal_transcript_id'}} = 1;
}
}
elsif(scalar(@{$t1_exons}) == scalar(@{$t2_exons})) {
my $is_redundant = 0;
# Same exon count: keep the longer transcript of an identical pair
if($t1->length >= $t2->length) {
$is_redundant = $self->exon_subset($t1,$t2,$t1_exons,$t2_exons);
if($is_redundant) {
$transcript_redundancy->{$t2->{'internal_transcript_id'}} = 1;
}
} else {
$is_redundant = $self->exon_subset($t2,$t1,$t2_exons,$t1_exons);
if($is_redundant) {
$transcript_redundancy->{$t1->{'internal_transcript_id'}} = 1;
}
}
}
else {
my $is_redundant = $self->exon_subset($t2,$t1,$t2_exons,$t1_exons);
if($is_redundant) {
$transcript_redundancy->{$t1->{'internal_transcript_id'}} = 1;
}
}
}
}
foreach my $transcript (@{$transcripts}) {
unless($transcript_redundancy->{$transcript->{'internal_transcript_id'}}) {
push(@{$final_transcripts},$transcript)
}
}
return $final_transcripts;
}
sub compare_transcripts {
my ($self,$transcripts) = @_;
# Pairwise-compares the candidate transcripts. When neither transcript of a
# pair has a coding exon the other lacks (no unique exons on either side),
# one of the two is chosen as redundant via choose_best_transcript and
# excluded from the returned set.
my $final_transcripts = [];
my $transcript_redundancy = {};
my $i=0;
for($i=0; $i<scalar@{$transcripts}; $i++) {
my $t1 = ${$transcripts}[$i];
my $j = $i+1;
for($j=$i+1; $j<scalar@{$transcripts}; $j++) {
my $t2 = ${$transcripts}[$j];
say "COMPARING: ".$t1->{'internal_transcript_id'}." TO ".$t2->{'internal_transcript_id'};
my $unique_exons_t1 = $self->find_unique_exons($t1,$t2);
my $unique_exons_t2 = $self->find_unique_exons($t2,$t1);
# In this case all exons overlap, so select one of the two and mark the other as redundant
unless($unique_exons_t1 || $unique_exons_t2) {
my $redundant_internal_transcript_id = $self->choose_best_transcript($t1,$t2);
$transcript_redundancy->{$redundant_internal_transcript_id} = 1;
}
}
}
foreach my $transcript (@{$transcripts}) {
unless($transcript_redundancy->{$transcript->{'internal_transcript_id'}}) {
push(@{$final_transcripts},$transcript)
}
}
return $final_transcripts;
}
sub choose_best_transcript {
my ($self,$transcript_a,$transcript_b) = @_;
# Given two fully-overlapping transcripts, decide which one is redundant and
# return its internal_transcript_id. The transcript from the more trusted
# source (lower weight threshold) wins; on equal weights the one with the
# higher combined coverage + percent identity wins (a wins an exact tie).
my $redundant_internal_transcript_id;
my $transcript_a_cov = $transcript_a->{'cov'};
my $transcript_b_cov = $transcript_b->{'cov'};
my $transcript_a_pid = $transcript_a->{'pid'};
# BUG FIX: this previously read $transcript_a->{'pid'}, so transcript b's
# support score was computed with transcript a's percent identity
my $transcript_b_pid = $transcript_b->{'pid'};
my $transcript_a_support = $transcript_a_cov + $transcript_a_pid;
my $transcript_b_support = $transcript_b_cov + $transcript_b_pid;
my $transcript_a_weight_threshold = $transcript_a->{'transcript_weight_threshold'};
my $transcript_b_weight_threshold = $transcript_b->{'transcript_weight_threshold'};
# First pick the one with the lowest weight as this should correspond to the most reliable source
# Then if the weights are the same pick the one with the best combined coverage and pid score
if($transcript_a_weight_threshold < $transcript_b_weight_threshold) {
$redundant_internal_transcript_id = $transcript_b->{'internal_transcript_id'};
say "Removing transcript ".$transcript_b->{'internal_transcript_id'}."\nCov: ".$transcript_b_cov.
"\nPid: ".$transcript_b_pid."\n\n in favour of transcript ".$transcript_a->{'internal_transcript_id'}.
"\nCov: ".$transcript_a_cov."\nPid: ".$transcript_a_pid."\n";
} elsif($transcript_a_weight_threshold > $transcript_b_weight_threshold) {
$redundant_internal_transcript_id = $transcript_a->{'internal_transcript_id'};
say "Removing transcript ".$transcript_a->{'internal_transcript_id'}."\nCov: ".$transcript_a_cov.
"\nPid: ".$transcript_a_pid."\n\n in favour of transcript ".$transcript_b->{'internal_transcript_id'}.
"\nCov: ".$transcript_b_cov."\nPid: ".$transcript_b_pid."\n";
} elsif($transcript_a_weight_threshold == $transcript_b_weight_threshold) {
if($transcript_a_support >= $transcript_b_support) {
$redundant_internal_transcript_id = $transcript_b->{'internal_transcript_id'};
say "Removing transcript ".$transcript_b->{'internal_transcript_id'}."\nCov: ".$transcript_b_cov.
"\nPid: ".$transcript_b_pid."\n\n in favour of transcript ".$transcript_a->{'internal_transcript_id'}.
"\nCov: ".$transcript_a_cov."\nPid: ".$transcript_a_pid."\n";
} else {
$redundant_internal_transcript_id = $transcript_a->{'internal_transcript_id'};
say "Removing transcript ".$transcript_a->{'internal_transcript_id'}."\nCov: ".$transcript_a_cov.
"\nPid: ".$transcript_a_pid."\n\n in favour of transcript ".$transcript_b->{'internal_transcript_id'}.
"\nCov: ".$transcript_b_cov."\nPid: ".$transcript_b_pid."\n";
}
}
return($redundant_internal_transcript_id);
}
sub features_overlap {
my ($self, $feat_one, $feat_two) = @_;
# Standard closed-interval overlap test on seq_region coordinates.
return 0 if $feat_one->seq_region_start() > $feat_two->seq_region_end();
return 0 if $feat_one->seq_region_end() < $feat_two->seq_region_start();
return 1;
}
sub find_unique_exons {
my ($self,$transcript_a,$transcript_b) = @_;
# Returns 1 as soon as transcript_a has a coding exon that overlaps no coding
# exon of transcript_b; returns 0 when every coding exon of a overlaps at
# least one exon of b. The say statements are debug output.
foreach my $exon_a (@{$transcript_a->get_all_translateable_Exons()}) {
my $overlap = 0;
say "E_A: ".$exon_a->start().", ".$exon_a->end();
foreach my $exon_b (@{$transcript_b->get_all_translateable_Exons()}) {
say "E_B: ".$exon_b->start().", ".$exon_b->end();
$overlap = $self->features_overlap($exon_a,$exon_b);
if($overlap) {
say "OVERLAP FOUND!!!!";
say "OVERLAP INNER: ".$overlap;
last;
}
}
say "OVERLAP OUTER: ".$overlap;
unless($overlap) {
say "NO OVERLAP FOUND, UNIQUE EXON PRESENT";
return(1);
}
}
return 0;
}
sub exon_subset {
my ($self,$transcript_a,$transcript_b,$exons_a,$exons_b) = @_;
# Returns 1 when transcript_b's coding exon structure is a contiguous,
# coordinate-identical subset of transcript_a's (b is then redundant to a).
# The first exon of b is located in a, then subsequent exons of b must match
# a's exons in order; b is a subset when every one of its exons matched.
# NOTE(review): if b's first exon matches more than one position in a, match
# counts accumulate across anchor points, which could over-count — confirm
# this cannot occur with real, strictly-increasing exon coordinates.
my $is_subset = 0;
my $start_exon_b = ${$exons_b}[0];
my $exon_match_count = 0;
say "Transcript A: ".$transcript_a->analysis->logic_name().", ".$transcript_a->{'internal_transcript_id'};
say "Transcript B: ".$transcript_b->analysis->logic_name().", ".$transcript_b->{'internal_transcript_id'};
my $i=0;
for($i=0; $i<scalar(@{$exons_a}); $i++) {
if(${$exons_a}[$i]->seq_region_start == $start_exon_b->seq_region_start &&
${$exons_a}[$i]->seq_region_end == $start_exon_b->seq_region_end) {
$exon_match_count++;
my $j=1;
for($j=1; $j<scalar(@{$exons_b}) && ($i+$j)<scalar(@{$exons_a}); $j++) {
if(${$exons_a}[$i+$j]->seq_region_start == ${$exons_b}[$j]->seq_region_start &&
${$exons_a}[$i+$j]->seq_region_end == ${$exons_b}[$j]->seq_region_end) {
$exon_match_count++;
}
}
}
}
if($exon_match_count == scalar(@{$exons_b})) {
say "Model ".$transcript_b->{'internal_transcript_id'}." is redundant to model ".$transcript_a->{'internal_transcript_id'};
$is_subset = 1;
}
return $is_subset;
}
sub recluster_genes {
my ($self,$processed_genes) = @_;
# Splits every multi-transcript gene into single-transcript genes, reclusters
# them by exon overlap (cluster_Genes) and rebuilds the final genes from the
# resulting clusters. The result is stored via output_genes() and now also
# returned explicitly — previously the caller in run() relied on the
# accessor's incidental return value.
my $single_transcript_genes = [];
my $output_genes = [];
foreach my $processed_gene (@{$processed_genes}) {
my $transcripts = $processed_gene->get_all_Transcripts();
# If the gene has a single transcript then there is no need to recluster
if(scalar(@{$transcripts}) == 1) {
push(@{$single_transcript_genes},$processed_gene);
next;
}
# If there is more than one transcript, then make a new gene for each
elsif(scalar(@{$transcripts}) > 1) {
foreach my $transcript (@{$transcripts}) {
my $single_transcript_gene = Bio::EnsEMBL::Gene->new();
$single_transcript_gene->add_Transcript($transcript);
push(@{$single_transcript_genes},$single_transcript_gene);
}
}
else {
$self->throw("Got a gene without any transcripts");
}
}
my $biotypes_hash = $self->get_all_biotypes($single_transcript_genes);
my $biotypes_array = [keys(%$biotypes_hash)];
my $types_hash;
$types_hash->{genes} = $biotypes_array;
say "Reclustering gene models based on final transcript set";
my ($clusters, $unclustered) = cluster_Genes($single_transcript_genes,$types_hash);
say "...finished reclustering genes models";
say "Found clustered sets: ".scalar(@{$clusters});
say "Found unclustered sets: ".scalar(@{$unclustered});
say "Processing gene clusters into single gene models...";
$output_genes = $self->process_clusters($clusters,$unclustered);
say "...finished processing gene clusters into single gene models";
say "Made output genes: ".scalar(@{$output_genes});
$self->output_genes($output_genes);
return($output_genes);
}
sub process_clusters {
my ($self,$clustered,$unclustered) = @_;
# Converts cluster objects back into genes: each unclustered singleton
# becomes its own one-transcript gene, and each cluster of overlapping
# single-transcript genes is merged into one multi-transcript gene. All
# output genes take this runnable's analysis and its logic_name as biotype.
my $output_genes = [];
foreach my $single_cluster (@{$unclustered}) {
my $cluster_genes = $single_cluster->get_Genes();
foreach my $single_gene (@{$cluster_genes}) {
my $output_gene = Bio::EnsEMBL::Gene->new();
$output_gene->slice($self->query());
$output_gene->analysis($self->analysis);
$output_gene->biotype($self->analysis->logic_name);
my $transcripts = $single_gene->get_all_Transcripts();
my $single_transcript = shift(@{$transcripts});
$output_gene->add_Transcript($single_transcript);
push(@{$output_genes},$output_gene);
}
}
foreach my $single_cluster (@{$clustered}) {
my $combined_gene = Bio::EnsEMBL::Gene->new();
$combined_gene->slice($self->query());
$combined_gene->analysis($self->analysis);
$combined_gene->biotype($self->analysis->logic_name);
my $cluster_genes = $single_cluster->get_Genes();
foreach my $single_gene (@{$cluster_genes}) {
my $transcripts = $single_gene->get_all_Transcripts();
my $single_transcript = shift(@{$transcripts});
$combined_gene->add_Transcript($single_transcript);
}
push(@{$output_genes},$combined_gene);
}
return($output_genes);
}
sub load_cluster {
my ($self,$cluster) = @_;
# Builds a single in-memory gene holding every transcript of a pre-computed
# cluster. $cluster maps adaptor names to arrays of transcript dbIDs; each
# adaptor name must have a matching db hash in the input_gene_dbs param.
# Coverage/percent-id from the first supporting feature and a unique
# internal id are cached on each transcript for later scoring.
my $gene = new Bio::EnsEMBL::Gene();
my $dna_dba = $self->hrdb_get_con('dna_db');
my $input_dbs = $self->param('input_gene_dbs');
# Use the var below to give each transcript an internal id, since they are coming from multiple input dbs
my $internal_transcript_id = 0;
foreach my $adaptor_name (keys(%{$cluster})) {
unless($input_dbs->{$adaptor_name}) {
$self->throw("You are using a cluster type input id, but one of the the adaptor names in the input id did not match a corresponding db hash".
"All adaptor names must have a correspondingly named db hash passed in. Offending adaptor name:\n".$adaptor_name);
}
my $dba = $self->hrdb_get_dba($input_dbs->{$adaptor_name});
unless($dba) {
$self->throw("You are using a cluster type input id, but one of the the adaptor names in the input id did not match a corresponding db hash".
"All adaptor names must have a correspondingly named db hash passed in. Offending adaptor name:\n".$adaptor_name);
}
$dba->dnadb($dna_dba);
$self->hrdb_set_con($dba,$adaptor_name);
my $transcript_adaptor = $dba->get_TranscriptAdaptor();
my $transcript_id_array = $cluster->{$adaptor_name};
foreach my $transcript_id (@{$transcript_id_array}) {
my $transcript = $transcript_adaptor->fetch_by_dbID($transcript_id);
my $transcript_sf = $transcript->get_all_supporting_features();
my $transcript_cov = ${$transcript_sf}[0]->hcoverage();
my $transcript_pid = ${$transcript_sf}[0]->percent_id();
# These are some internal values that get used elsewhere for convenience. The internal id is needed to avoid id conflicts
$transcript->{'cov'} = $transcript_cov;
$transcript->{'pid'} = $transcript_pid;
$transcript->{'internal_transcript_id'} = $internal_transcript_id;
$internal_transcript_id++;
$gene->add_Transcript($transcript);
}
}
say "Created a new gene, transcript count: ".scalar(@{$gene->get_all_Transcripts()});
return $gene;
}
sub remove_3_prime_boundry_genes {
my ($self,$unfiltered_genes) = @_;
# Drops any gene extending past the 3' end of the current slice: such a gene
# will be fetched again (in full) on the adjoining slice, so keeping it here
# would create duplicate output.
my $filtered_genes = [];
my $slice_3_prime_end = $self->query()->end;
foreach my $gene (@{$unfiltered_genes}) {
# Restructured from the original unless/else; log message grammar fixed
if($gene->seq_region_end > $slice_3_prime_end) {
say "Skipping gene as it will be picked up on the adjoining slice";
} else {
push(@{$filtered_genes},$gene);
}
}
return($filtered_genes);
}
# Simple getter/setters backed by Hive params.
# NOTE(review): the set path uses if($val), so a false value (0, '' or undef)
# can never overwrite a previously stored value — relevant in particular for
# single_exon_support_penalty, where 0 would be a meaningful setting.
sub unprocessed_genes {
my ($self,$val) = @_;
if($val) {
$self->param('_unprocessed_genes',$val);
}
return($self->param('_unprocessed_genes'));
}
# Getter/setter for the final gene set produced by run()
sub output_genes {
my ($self,$val) = @_;
if($val) {
$self->param('_output_genes',$val);
}
return($self->param('_output_genes'));
}
# Getter/setter for the logic_name weight matrix (see score_transcript_support)
sub logic_name_weights {
my ($self,$val) = @_;
if($val) {
$self->param('_logic_name_weights',$val);
}
return($self->param('_logic_name_weights'));
}
# Getter/setter for the extra support required by single coding-exon models
sub single_exon_support_penalty {
my ($self,$val) = @_;
if($val) {
$self->param('_single_exon_support_penalty',$val);
}
return($self->param('_single_exon_support_penalty'));
}
# NOTE(review): the "_old" suffix and the existence of
# find_best_remaining_transcript below suggest this version is superseded;
# it appears unused within this file. Retained verbatim for reference.
sub find_best_remaining_transcript_old {
my ($self,$gene,$exon_pairs) = @_;
my $best_transcripts = [];
my $max_support_score = 0;
my $min_allowed_score = $self->param('min_backup_score');
unless(defined($min_allowed_score)) {
$min_allowed_score = 10;
}
# This will go through all the transcripts and pick the one with the highest level of support
# I'm not sure how that would work. I want to avoid biasing it towards two exon genes, but
# also don't want to pick long models just because they're long. There might be the possibility
# of averaging across exon pairs and then adding on some constant based on exon pair count. The
# constant could be small, like 0.5 * num_exon_pairs. But maybe by adding it like that I could
# select longer models over shorter models, without always picking the longer model. I think if
# a longer model has say 5 exons pairs and 4 have one support and the other has 5 then average
# is 9/5 = 1.8. Now 1.8 + (5 * 0.5) = 4.3, whereas if you had a model that had 1 pair and 5
# observations it would be 5 + (1 * 0.5) = 5.5, I think that's fair. What I want are longer
# medium to well supported models to be chosen over shorter medium to well supported models
# I might make the constant smaller than 0.5 though
my $transcripts = $gene->get_all_Transcripts();
foreach my $transcript (@{$transcripts}) {
my $support_score = $self->calculate_support_average($transcript,$exon_pairs);
# Side effect: every scored transcript gets a "_br" (backup-rescued) biotype suffix
my $biotype = $transcript->biotype();
$biotype .= "_br";
$transcript->biotype($biotype);
if($support_score == $max_support_score && $support_score >= $min_allowed_score) {
push(@{$best_transcripts},$transcript);
} elsif($support_score > $max_support_score && $support_score >= $min_allowed_score) {
$best_transcripts = [];
push(@{$best_transcripts},$transcript);
$max_support_score = $support_score;
}
}
return $best_transcripts;
}
sub find_best_remaining_transcript {
my ($self,$gene,$exon_pairs) = @_;
# Backup selection for clusters where no transcript passed the exon-pair
# thresholds. Among transcripts whose averaged exon-pair support (see
# calculate_support_average) reaches min_backup_score (default 10) and whose
# weight threshold does not exceed max_backup_weight (default 10), collect
# those with the LOWEST weight threshold, then break ties on combined
# coverage + percent id, and finally on translation length. The winner is
# returned in a one-element array ref with a "_br" biotype suffix; an empty
# array ref means nothing qualified.
my $lowest_weight_transcripts = [];
my $lowest_weight_threshold = 999;
my $min_allowed_score = $self->param('min_backup_score');
my $max_allowed_weight = $self->param('max_backup_weight');
unless(defined($min_allowed_score)) {
$min_allowed_score = 10;
}
unless(defined($max_allowed_weight)) {
$max_allowed_weight = 10;
}
my $transcripts = $gene->get_all_Transcripts();
foreach my $transcript (@{$transcripts}) {
my $support_score = $self->calculate_support_average($transcript,$exon_pairs);
unless($support_score >= $min_allowed_score) {
next;
}
# An unset threshold is treated as 0 (matching the original numeric
# behaviour, minus the undef warnings)
my $transcript_weight_threshold = $transcript->{'transcript_weight_threshold'} // 0;
next unless($transcript_weight_threshold <= $max_allowed_weight);
# FIX: reset the candidate list when a strictly lower threshold is found.
# Previously transcripts were only ever pushed, so earlier, higher-weight
# transcripts stayed in $lowest_weight_transcripts.
if($transcript_weight_threshold < $lowest_weight_threshold) {
$lowest_weight_threshold = $transcript_weight_threshold;
$lowest_weight_transcripts = [$transcript];
} elsif($transcript_weight_threshold == $lowest_weight_threshold) {
push(@{$lowest_weight_transcripts},$transcript);
}
}
my $best_length = 0;
my $best_combined_cov_and_pid = 0;
my $final_transcript;
foreach my $transcript (@{$lowest_weight_transcripts}) {
my $combined_cov_and_pid = $transcript->{'cov'} + $transcript->{'pid'};
if($combined_cov_and_pid > $best_combined_cov_and_pid) {
$best_combined_cov_and_pid = $combined_cov_and_pid;
$best_length = length($transcript->translate());
$final_transcript = $transcript;
} elsif($combined_cov_and_pid == $best_combined_cov_and_pid) {
# BUG FIX: original read length($transcript->translate() > $best_length),
# i.e. the length of a boolean; parenthesis corrected so longer
# translations actually win ties.
# NOTE(review): translate() presumably stringifies to the peptide for
# length() to be meaningful — confirm against the Transcript API.
if(length($transcript->translate()) > $best_length) {
$best_length = length($transcript->translate());
$final_transcript = $transcript;
}
}
}
if($final_transcript) {
$final_transcript->biotype($final_transcript->biotype."_br");
return [$final_transcript];
}
return [];
}
sub calculate_support_average {
my ($self,$transcript,$exon_pairs) = @_;
# Returns an averaged exon-pair support score for the transcript:
# (total observations over the transcript's pairs / number of pairs) plus a
# small bonus of 0.2 per pair, so longer well-supported models edge out
# shorter ones. Single coding-exon transcripts score 0, so they can never
# pass the min_backup_score cut-off in find_best_remaining_transcript.
my $total_support_amount = 0;
my $total_exon_pairs = 0;
my $exon_pair_count_bonus = 0.2;
my $final_support = 0;
my $exons = $transcript->get_all_translateable_Exons();
if(scalar(@{$exons}) == 1) {
# single exon transcript
} else {
# Loop through all exon pairs in the exon pair hash for the gene
foreach my $exon_pair_key (keys(%$exon_pairs)) {
my $exon_pair = $exon_pairs->{$exon_pair_key};
if($exon_pair->{$transcript->{'internal_transcript_id'}}) {
$total_exon_pairs++;
# Support = number of transcripts in the cluster sharing this exact pair
$total_support_amount += scalar(keys(%$exon_pair));
}
}
# FIX: guard the division — if the transcript shares no pairs with the
# supplied hash (e.g. pairs built from a different gene) the original
# code divided by zero
if($total_exon_pairs) {
my $average_support = $total_support_amount / $total_exon_pairs;
$final_support = $average_support + ($total_exon_pairs * $exon_pair_count_bonus);
}
} # end else
return($final_support);
}
sub get_all_biotypes {
my ($self,$master_genes_array) = @_;
# Collect the distinct biotypes across the gene set as a hash ref of
# biotype => 1 flags.
my %seen_biotypes = map { $_->biotype => 1 } @{$master_genes_array};
return(\%seen_biotypes);
}
1;
| james-monkeyshines/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/RunnableDB/HiveProcessGeneClusters.pm | Perl | apache-2.0 | 36,851 |
#!/usr/bin/perl
use DBI;
use Getopt::Long;
use File::Basename;
use strict;
use warnings;
my $username = "test";
my $password = "test";
my $db       = "irida_test";
my $host     = "localhost";

GetOptions(
	'u|username=s' => \$username,
	'p|password=s' => \$password,
	'd|db=s'       => \$db,
	'h|host=s'     => \$host
);

my $totalFiles = 0;    # total number of files found
my $totalPairs = 0;    # total number of paired files found

# Connect to the DB
my $dbstring = "dbi:mysql:$db:$host";
my $dbh =
  DBI->connect( $dbstring, $username, $password,
	{ RaiseError => 1, AutoCommit => 0 } )
  or die "Cannot connect to database: $DBI::errstr";

# Get the sequence files joined with sequencing_run
my $sql = "SELECT r.id, f.id, f.file_path
FROM sequencing_run r INNER JOIN sequence_file f ON r.id=f.sequencing_run_id";
my $sth = $dbh->prepare($sql);
my $rv  = $sth->execute();

my %runs;    # run id => { file id => file basename }

# Compile the runs, files, and path names
my ( $runId, $fileId, $filePath );
$sth->bind_columns( undef, \$runId, \$fileId, \$filePath );
while ( $sth->fetch ) {
	$runs{$runId}{$fileId} = basename($filePath);
}

# Prepare the insertion queries
my $pairQuery = "INSERT INTO sequence_file_pair (created_date) VALUES (now())";
my $filesQuery =
  "INSERT INTO sequence_file_pair_files (pair_id,files_id) VALUES (?,?)";
my $pairSth  = $dbh->prepare($pairQuery);
my $filesSth = $dbh->prepare($filesQuery);

# Loop through each run and find the pairs for its files.
# NOTE: the loop variable is renamed from $runId to $rid so it does not
# mask the earlier "my $runId" declaration above.
for my $rid ( keys %runs ) {
	my $run = $runs{$rid};

	# Find pair matches for the run's files
	my $pairs = findPairs($run);

	# For each pair found, record it in the database.
	# "keys" must be given a real hash: "keys $hashref" is deprecated
	# and a fatal error from Perl 5.24 onwards.
	foreach my $p ( keys %$pairs ) {

		# Insert into sequence_file_pair
		$pairSth->execute();

		# Get the id inserted
		my $insertedId =
		  $dbh->last_insert_id( "", "", "sequence_file_pair", "id" );

		# Insert each file of the pair into sequence_file_pair_files
		$filesSth->execute( $insertedId, $p );
		$filesSth->execute( $insertedId, $pairs->{$p} );
	}

	# Increment the totals
	$totalFiles += scalar keys %$run;
	$totalPairs += ( scalar keys %$pairs ) * 2;
}

# Update the sequencing runs to reflect the paired end runs
my $runPairsQuery =
  'UPDATE sequencing_run m SET m.layout_type="PAIRED_END" WHERE m.id IN (SELECT DISTINCT f.sequencing_run_id FROM sequence_file f INNER JOIN sequence_file_pair_files p ON f.id=p.files_id);';
$sth = $dbh->prepare($runPairsQuery);
print "Updated paired runs: " . $sth->execute() . "\n";

$dbh->commit();
$dbh->disconnect();

print "Paired $totalPairs of $totalFiles files.\n";
#find pairs of files
# Find mate pairs among a run's files based on Illumina-style
# "<sample>_R1_*" / "<sample>_R2_*" fastq file naming.
#
# Arg:     hashref { file id => file basename }
# Returns: hashref mapping one file id of each pair to its mate's id.
#          Files that look paired but have no visible mate get a warning
#          on STDERR and are omitted.
sub findPairs {
	my ($files) = @_;

	my %pairs;
	my @fileIds = keys %$files;

	# For each file, scan ahead in the id list for its mate.
  FILE: for ( my $i = 0 ; $i < @fileIds ; $i++ ) {

		# Skip files already claimed as a mate in an earlier iteration
		next FILE if defined $pairs{ $fileIds[$i] };

		my $fname   = $files->{ $fileIds[$i] };
		my $matched = 0;    # flag if matched

		# Parse the filename: capture the sample/run prefix and pair number
		if ( $fname =~ /^(.+)_R(\d+)_\S+\.fastq.*$/ ) {
			my $base   = $1;    # the sample name and run info
			my $thisid = $2;    # the pair number

			# The mate file carries the complementary pair number
			my $pairId = 0;
			if ( $thisid == 1 ) {
				$pairId = 2;
			}
			elsif ( $thisid == 2 ) {
				$pairId = 1;
			}

			# Look ahead in the list to see if we can find the match
			for ( my $j = $i + 1 ; $j < @fileIds ; $j++ ) {

				# Get the other filename
				my $innerFile = $files->{ $fileIds[$j] };

				# \Q...\E quotes regex metacharacters that may occur in
				# the file name (e.g. "+", "("); the original code
				# interpolated $base unescaped, which could mis-match or
				# die on such names.
				if ( $innerFile =~ /^\Q$base\E_R\Q$pairId\E/ ) {
					$pairs{ $fileIds[$j] } = $fileIds[$i];
					$matched = 1;
					last;
				}
			}

			if ( !$matched ) {
				print STDERR "Warning: $fileIds[$i] - $fname not matched\n";
			}
		}
	}

	return \%pairs;
}
| phac-nml/irida | src/main/resources/scripts/paired-files/paired-files.pl | Perl | apache-2.0 | 4,255 |
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <dev@ensembl.org>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=head1 NAME
Bio::EnsEMBL::Compara::RunnableDB::EpoLowCoverage::ImportAlignment
=head1 SYNOPSIS
=head1 DESCRIPTION
This module imports a specified alignment. This is used in the low coverage genome alignment pipeline for importing the high coverage alignment which is used to build the low coverage genomes on.
=head1 APPENDIX
The rest of the documentation details each of the object methods.
Internal methods are usually preceded with a _
=cut
package Bio::EnsEMBL::Compara::RunnableDB::EpoLowCoverage::ImportAlignment;
use strict;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Compara::Production::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Utils::Exception qw(throw);
use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');
=head2 fetch_input

    Title   :   fetch_input
    Usage   :   $self->fetch_input
    Function:   Fetches input data from the database: creates a Compara
                DBAdaptor sharing the Hive DBI handle and loads the
                registry from the source database URL
    Returns :   none
    Args    :   none

=cut
sub fetch_input {
  my ($self) = @_;

  # Build a Compara production DBAdaptor that reuses the DBI connection
  # of the Hive DBAdaptor ($self->db), and keep that connection open.
  my $compara_dba = Bio::EnsEMBL::Compara::Production::DBSQL::DBAdaptor->new(-DBCONN => $self->db->dbc);
  $compara_dba->dbc->disconnect_when_inactive(0);
  $self->param('comparaDBA', $compara_dba);

  # Register the databases available at the source URL.
  Bio::EnsEMBL::Registry->load_registry_from_url($self->param('from_db_url'));
}
=head2 run

    Title   :   run
    Usage   :   $self->run
    Function:   Imports the specified alignment data into this database
                (full chunked import, or quick whole-table copy when the
                'quick' parameter is set)
    Returns :   none
    Args    :   none

=cut
sub run {
  my ($self) = @_;

  # 'quick' mode assumes both databases live on the same MySQL server
  # and copies whole tables directly; otherwise do the chunked import.
  my $import_method = $self->param('quick') ? 'importAlignment_quick' : 'importAlignment';
  $self->$import_method();
}
=head2 write_output
Title : write_output
Usage : $self->write_output
Function: Write results to the database
Returns : 1
Args : none
=cut
sub write_output {
  my $self = shift;

  # All database writes happen during run(); nothing further to store.
  return 1;
}
#Uses copy_data method from copy_data.pl script
#
# Chunked import of one alignment data set (selected by the
# 'method_link_species_set_id' parameter) from the source compara
# database into this one. Copies method_link_species_set, species_set,
# genomic_align_block, genomic_align, genomic_align_group and
# genomic_align_tree rows. Rows for genome_db_id 63 are excluded below
# ("Don't copy over ancestral genomic_aligns").
sub importAlignment {
    my $self = shift;

    #if the database name is defined in the url, then open that
    if ($self->param('from_db_url') =~ /mysql:\/\/.*@.*\/.+/) {
        $self->param('from_comparaDBA', new Bio::EnsEMBL::Compara::DBSQL::DBAdaptor(-url=>$self->param('from_db_url')));
    } else {
        #open the most recent compara database
        $self->param('from_comparaDBA', Bio::EnsEMBL::Registry->get_DBAdaptor("Multi", "compara"));
    }

    my $analysis = $self->db->get_AnalysisAdaptor->fetch_by_logic_name("ImportAlignment");
    my $dbname = $self->param('from_comparaDBA')->dbc->dbname;
    my $analysis_id = $analysis->dbID;
    my $mlss_id = $self->param('method_link_species_set_id');

    ##Find min and max of the relevant internal IDs in the FROM database
    # (used below to chunk the copies by primary-key range)
    my $sth = $self->param('from_comparaDBA')->dbc->prepare("SELECT
        MIN(gab.genomic_align_block_id), MAX(gab.genomic_align_block_id),
        MIN(ga.genomic_align_id), MAX(ga.genomic_align_id),
        MIN(gag.node_id), MAX(gag.node_id),
        MIN(gat.root_id), MAX(gat.root_id)
      FROM genomic_align_block gab
        LEFT JOIN genomic_align ga using (genomic_align_block_id)
        LEFT JOIN genomic_align_group gag using (genomic_align_id)
        LEFT JOIN genomic_align_tree gat ON gat.node_id = gag.node_id
      WHERE
        gab.method_link_species_set_id = ?");

    $sth->execute($mlss_id);
    my ($min_gab, $max_gab, $min_ga, $max_ga, $min_gag, $max_gag,
	$min_root_id, $max_root_id) =
	  $sth->fetchrow_array();
    $sth->finish();

    #HACK to just copy over one chr (22) for testing purposes
    #my $dnafrag_id = 905407;
    # When $dnafrag_id is set, every copy below restricts to that dnafrag.
    my $dnafrag_id;

    #Copy the method_link_species_set
    copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
	      "method_link_species_set",
	      undef, undef, undef,
	      "SELECT * FROM method_link_species_set WHERE method_link_species_set_id = $mlss_id");

    #Copy the species_set
    copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
	      "species_set",
	      undef, undef, undef,
	      "SELECT species_set.* FROM species_set JOIN method_link_species_set USING (species_set_id) WHERE method_link_species_set_id = $mlss_id");

    #copy genomic_align_block table
    if ($dnafrag_id) {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align_block",
		  "genomic_align_block_id",
		  $min_gab, $max_gab,
		  "SELECT gab.* FROM genomic_align_block gab LEFT JOIN genomic_align ga USING (genomic_align_block_id) WHERE ga.method_link_species_set_id = $mlss_id AND dnafrag_id=$dnafrag_id");
    } else {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align_block",
		  "genomic_align_block_id",
		  $min_gab, $max_gab,
		  "SELECT * FROM genomic_align_block WHERE method_link_species_set_id = $mlss_id");
    }

    #copy genomic_align table
    if ($dnafrag_id) {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align",
		  "genomic_align_id",
		  $min_ga, $max_ga,
		  "SELECT ga.*".
		  " FROM genomic_align ga ".
		  " WHERE method_link_species_set_id = $mlss_id AND dnafrag_id=$dnafrag_id");
    } else {
# 	copy_data($self->{'from_comparaDBA'}, $self->{'comparaDBA'},
# 		  "genomic_align",
# 		  "genomic_align_id",
# 		  $min_ga, $max_ga,
# 		  "SELECT *".
# 		  " FROM genomic_align".
# 		  " WHERE method_link_species_set_id = $mlss_id");
	#Don't copy over ancestral genomic_aligns
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align",
		  "genomic_align_id",
		  $min_ga, $max_ga,
		  "SELECT genomic_align.*".
		  " FROM genomic_align JOIN dnafrag USING (dnafrag_id)".
		  " WHERE method_link_species_set_id = $mlss_id AND genome_db_id != 63");
    }

    #copy genomic_align_group table
    if ($dnafrag_id) {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align_group",
		  "gag.node_id",
		  $min_gag, $max_gag,
		  "SELECT gag.*".
		  " FROM genomic_align_group gag LEFT JOIN genomic_align USING (genomic_align_id)".
		  " WHERE gag.node_id IS NOT NULL AND method_link_species_set_id = $mlss_id AND dnafrag_id=$dnafrag_id");
    } else {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align_group",
		  "gag.node_id",
		  $min_gag, $max_gag,
		  "SELECT gag.*".
		  " FROM genomic_align ga ".
		  " JOIN dnafrag USING (dnafrag_id)".
		  " LEFT JOIN genomic_align_group gag USING (genomic_align_id)".
		  " WHERE gag.node_id IS NOT NULL AND ga.method_link_species_set_id = $mlss_id AND genome_db_id != 63");
    }

    #copy genomic_align_tree table
    if ($dnafrag_id) {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align_tree",
		  "root_id",
		  $min_root_id, $max_root_id,
		  "SELECT gat.*".
		  " FROM genomic_align_tree gat LEFT JOIN genomic_align_group USING (node_id)".
		  " LEFT JOIN genomic_align USING (genomic_align_id)".
		  " WHERE node_id IS NOT NULL AND method_link_species_set_id = $mlss_id AND dnafrag_id=$dnafrag_id");
    } else {
	copy_data($self->param('from_comparaDBA'), $self->param('comparaDBA'),
		  "genomic_align_tree",
		  "root_id",
		  $min_root_id, $max_root_id,
		  "SELECT gat.*".
		  " FROM genomic_align ga".
		  " JOIN dnafrag USING (dnafrag_id)".
		  " LEFT JOIN genomic_align_group gag USING (genomic_align_id)".
		  " LEFT JOIN genomic_align_tree gat USING (node_id) WHERE gag.node_id IS NOT NULL AND ga.method_link_species_set_id = $mlss_id AND genome_db_id != 63");
    }
}
=head2 copy_data

  Arg[1]      : Bio::EnsEMBL::Compara::DBSQL::DBAdaptor $from_dba
  Arg[2]      : Bio::EnsEMBL::Compara::DBSQL::DBAdaptor $to_dba
  Arg[3]      : string $table_name
  Arg[4]      : string $index_name (column used to chunk the copy; may be undef)
  Arg[5]      : int $min_id (lowest $index_name value to copy; may be undef)
  Arg[6]      : int $max_id (highest $index_name value to copy; may be undef)
  Arg[7]      : string $query (SELECT statement fetching the rows to copy)
  Description : copy data in this table using this SQL query.
  Returns     :
  Exceptions  : throw if argument test fails

=cut
sub copy_data {
  my ($from_dba, $to_dba, $table_name, $index_name, $min_id, $max_id, $query) = @_;

  print "Copying data in table $table_name\n";

  # Inspect the source table's columns: tables containing binary-typed
  # columns cannot be copied via the tab-separated text dump below.
  my $sth = $from_dba->dbc->db_handle->column_info($from_dba->dbc->dbname, undef, $table_name, '%');
  $sth->execute;
  my $all_rows = $sth->fetchall_arrayref;
  my $binary_mode = 0;
  foreach my $this_col (@$all_rows) {
    if (($this_col->[5] eq "BINARY") or ($this_col->[5] eq "VARBINARY") or
        ($this_col->[5] eq "BLOB") or ($this_col->[5] eq "BIT")) {
      $binary_mode = 1;
      last;
    }
  }

  #speed up writing of data by disabling keys, write the data, then enable
  $to_dba->dbc->do("ALTER TABLE `$table_name` DISABLE KEYS");

  if ($binary_mode) {
    # Binary-mode copying is not implemented here; warn instead of
    # silently skipping the table so the omission is visible in the logs.
    warn "Table $table_name has binary columns and was NOT copied (binary-mode copy not implemented)\n";
    #copy_data_in_binary_mode($from_dba, $to_dba, $table_name, $query);
  } else {
    copy_data_in_text_mode($from_dba, $to_dba, $table_name, $index_name, $min_id, $max_id, $query);
  }
  $to_dba->dbc->do("ALTER TABLE `$table_name` ENABLE KEYS");
}
=head2 copy_data_in_text_mode

  Arg[1]      : Bio::EnsEMBL::Compara::DBSQL::DBAdaptor $from_dba
  Arg[2]      : Bio::EnsEMBL::Compara::DBSQL::DBAdaptor $to_dba
  Arg[3]      : string $table_name
  Arg[4]      : string $index_name (column used to chunk the copy; may be undef)
  Arg[5]      : int $min_id (lowest $index_name value to copy; may be undef)
  Arg[6]      : int $max_id (highest $index_name value to copy; may be undef)
  Arg[7]      : string $query (SELECT statement fetching the rows to copy)
  Description : copy data in this table using this SQL query, dumping each
                chunk of rows to a temporary tab-separated file and loading
                it with mysqlimport.
  Returns     :
  Exceptions  : throw if argument test fails

=cut
sub copy_data_in_text_mode {
  my ($from_dba, $to_dba, $table_name, $index_name, $min_id, $max_id, $query) = @_;

  my $user = $to_dba->dbc->username;
  my $pass = $to_dba->dbc->password;
  my $host = $to_dba->dbc->host;
  my $port = $to_dba->dbc->port;
  my $dbname = $to_dba->dbc->dbname;

  my $use_limit = 0;
  my $start = $min_id;
  my $step = 10000;   # rows per chunk

  # If no index/range information was supplied, fall back to LIMIT paging.
  if (!defined $index_name && !defined $min_id && !defined $max_id) {
    $use_limit = 1;
    $start = 0;
  }

  while (1) {
    my $end = $start + $step - 1;
    my $sth;

    if (!$use_limit) {
      $sth = $from_dba->dbc->prepare($query." AND $index_name BETWEEN $start AND $end");
    } else {
      $sth = $from_dba->dbc->prepare($query." LIMIT $start, $step");
    }
    $start += $step;
    $sth->execute();
    my $all_rows = $sth->fetchall_arrayref;

    ## EXIT CONDITION: no more rows in this chunk
    return if (!@$all_rows);

    # Dump the chunk as tab-separated text ('\N' for NULL), then bulk-load
    # it with mysqlimport. Use a lexical filehandle and three-arg open
    # (the original used a bareword handle and an unchecked 2-arg open).
    my $filename = "/tmp/$table_name.copy_data.$$.txt";
    open(my $temp_fh, '>', $filename)
      or die "Cannot open $filename for writing: $!";
    foreach my $this_row (@$all_rows) {
      print $temp_fh join("\t", map {defined($_)?$_:'\N'} @$this_row), "\n";
    }
    # Check close on the write handle: buffered write errors surface here.
    close($temp_fh) or die "Cannot close $filename: $!";

    if ($pass) {
      unless (system("mysqlimport", "-u$user", "-p$pass", "-h$host", "-P$port", "-L", "-l", "-i", $dbname, $filename) == 0) {
        throw("Failed mysqlimport -u$user -p$pass -h$host -P$port -L -l -i $dbname $filename");
      }
    } else {
      unless (system("mysqlimport", "-u$user", "-h$host", "-P$port", "-L", "-l", "-i", $dbname, $filename) ==0) {
        throw("Failed mysqlimport -u$user -h$host -P$port -L -l -i $dbname $filename");
      }
    }
    unlink("$filename");
  }
}
#Assumes the from and to databases are on the same server and downloads all
#entries from genomic_align_block, genomic_align, genomic_align_group and
#genomic_align_tree. Quick and dirty; useful for debugging.
sub importAlignment_quick {
    my $self = shift;

    #if the database name is defined in the url, then open that
    if ($self->param('from_db_url') =~ /mysql:\/\/.*@.*\/.+/) {
	$self->param('from_comparaDBA', new Bio::EnsEMBL::Compara::DBSQL::DBAdaptor(-url=>$self->param('from_db_url')));
    } else {
	#open the most recent compara database
	$self->param('from_comparaDBA', Bio::EnsEMBL::Registry->get_DBAdaptor("Multi", "compara"));
    }

    my $analysis = $self->db->get_AnalysisAdaptor->fetch_by_logic_name("ImportAlignment");
    my $dbname = $self->param('from_comparaDBA')->dbc->dbname;
    my $analysis_id = $analysis->dbID;
    my $mlss_id = $self->param('method_link_species_set_id');

    # INSERT ... SELECT straight across the two schemas; this only works
    # when both databases live on the same MySQL server.
    # The statement handle is declared once and reused: the original code
    # re-declared "my $sth" three times in this scope, triggering
    # "masks earlier declaration" warnings.
    my $sql = "INSERT INTO genomic_align_block SELECT * FROM $dbname.genomic_align_block\n";
    my $sth = $self->param('comparaDBA')->dbc->prepare($sql);
    $sth->execute();
    $sth->finish();

    $sql = "INSERT INTO genomic_align SELECT * FROM $dbname.genomic_align\n";
    $sth = $self->param('comparaDBA')->dbc->prepare($sql);
    $sth->execute();
    $sth->finish();

    $sql = "INSERT INTO genomic_align_group SELECT * FROM $dbname.genomic_align_group\n";
    $sth = $self->param('comparaDBA')->dbc->prepare($sql);
    $sth->execute();
    $sth->finish();

    $sql = "INSERT INTO genomic_align_tree SELECT * FROM $dbname.genomic_align_tree\n";
    $sth = $self->param('comparaDBA')->dbc->prepare($sql);
    $sth->execute();
    $sth->finish();
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-compara/modules/Bio/EnsEMBL/Compara/RunnableDB/EpoLowCoverage/ImportAlignment.pm | Perl | apache-2.0 | 14,212 |
#!/usr/bin/perl
#===============================================================================
#
# FILE: calcula-uptime.pl
#
# USAGE: ./calcula-uptime.pl [uptime string]
#
# DESCRIPTION: uptime calculation
#
# OPTIONS: if there's an argument, considers it the uptime string. If there
# isn't, return the uptime of the current machine.
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Cláudio Sampaio <patola@gmail.com>
# COMPANY: IBM
# VERSION: 0.1
# CREATED: 20-05-2012 00:45:36
# REVISION: ---
#===============================================================================
use strict;
use warnings;
##############################################
## toDate($seconds)
## Convert a duration in whole seconds to the string
## "D days, H hours, M minutes, S seconds."
sub toDate
{
	my ($total_seconds) = @_;   # no prototype: "($)" added nothing but parse quirks

	my $days    = int( $total_seconds / 86400 );   # 86400 = 3600 s * 24 h
	my $rest    = $total_seconds % 86400;          # remainder within the day
	my $hours   = int( $rest / 3600 );
	$rest       = $rest % 3600;
	my $minutes = int( $rest / 60 );
	my $seconds = $rest % 60;

	return "$days days, $hours hours, $minutes minutes, $seconds seconds.";
}
# Use the first command-line argument as the uptime string when given;
# otherwise read this machine's uptime from /proc/uptime.
# (The original also had a dead, immediately-overwritten ternary whose
# condition was inverted -- "$#ARGV==0" means an argument WAS supplied,
# yet it shelled out to read /proc/uptime. Removed.)
my $uptimestring;
if ($#ARGV < 0) {    # no arguments: query the local machine
	$uptimestring = `cat /proc/uptime`;
} else {
	$uptimestring = $ARGV[0];
}
$uptimestring =~ s/\s.*//;          # keep only the first field (uptime in seconds)
$uptimestring = int($uptimestring); # truncate fractional seconds
printf "%s\n", toDate($uptimestring);
| Patola/patolascripts | calcula-uptime.pl | Perl | apache-2.0 | 1,507 |
#!/usr/bin/env perl
# Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File name: CreateComparaTestDatabase.pl
#
# Given a source and destination database this script will create
# a ensembl compara database on the same mysql server as the source and populate it
#
use strict;
use warnings;
use Getopt::Long;
use DBI;
my ($help, $srcDB, $destDB, $host, $user, $pass, $port, $seq_region_file);
my ($srcAncDB, $destAncDB);
my $ref_genome_db_name = "homo_sapiens";
#Genomes I want to extract
my @other_genome_db_names = ("pan_troglodytes", "gorilla_gorilla", "tarsius_syrichta", "mus_musculus", "rattus_norvegicus", "oryctolagus_cuniculus", "canis_familiaris", "felis_catus", "equus_caballus", "sus_scrofa", "loxodonta_africana", "bos_taurus", "macaca_mulatta", "pongo_abelii", "callithrix_jacchus");
#Need all low-coverage genome for pairwise
my @pairwise_genome_db_names = ("pan_troglodytes", "mus_musculus", "tarsius_syrichta", "oryctolagus_cuniculus", "felis_catus", "loxodonta_africana");
my $pairwise_method_link_type = '"BLASTZ_NET", "LASTZ_NET"';
my $epo_alignment_method_link_type = "EPO";
my $epo_low_coverage_alignment_method_link_type = "EPO_LOW_COVERAGE";
my $pecan_alignment_method_link_type = "PECAN";
my $constrained_element_method_link_type = "GERP_CONSTRAINED_ELEMENT";
my $conservation_score_method_link_type = "GERP_CONSERVATION_SCORE";
my $pecan_species_set_name = "amniotes";
my $epo_species_set_name = "mammals";
my $ancestral_coord_system_name = "ancestralsegment";
my $do_pairwise = 1;
my $do_pecan = 1;
my $do_epo = 1;
my $do_epo_low_coverage = 1;
my $do_proteins = 0;
GetOptions('help' => \$help,
's=s' => \$srcDB,
'sa=s' => \$srcAncDB,
'd=s' => \$destDB,
'da=s' => \$destAncDB,
'h=s' => \$host,
'u=s' => \$user,
'p=s' => \$pass,
'port=i' => \$port,
'seq_region_file=s' => \$seq_region_file);
my $usage = "Usage:
CreateComparaTestDatabase.pl -s srcDB -d destDB -h host -u user -p pass -seq_region_file file [--port port]\n";
if ($help) {
print $usage;
exit 0;
}
unless($port) {
$port = 3306;
}
# If needed command line args are missing print the usage string and quit
$srcDB and $destDB and $host and $user and $pass and $seq_region_file or die $usage;
my @seq_regions = @{do $seq_region_file};
my $dsn = "DBI:mysql:host=$host;port=$port";
# Connect to the mySQL host
my $dbh = DBI->connect( $dsn, $user, $pass, {RaiseError => 1})
or die "Could not connect to database host : " . DBI->errstr;
print "\nWARNING: If the $destDB database already exists the existing copy \n"
. "will be destroyed. Proceed (y/n)? ";
my $key = lc(getc());
unless( $key =~ /y/ ) {
$dbh->disconnect();
print "Test Genome Creation Aborted\n";
exit;
}
print "Proceeding with test genome database $destDB creation\n";
# dropping any destDB database if there
my $array_ref = $dbh->selectall_arrayref("SHOW DATABASES LIKE '$destDB'");
if (scalar @{$array_ref}) {
$dbh->do("DROP DATABASE $destDB");
}
# creating destination database
$dbh->do( "CREATE DATABASE " . $destDB )
or die "Could not create database $destDB: " . $dbh->errstr;
#Create ancestral db if required
if ($destAncDB) {
# dropping any destDB database if there
my $array_ref = $dbh->selectall_arrayref("SHOW DATABASES LIKE '$destAncDB'");
if (scalar @{$array_ref}) {
$dbh->do("DROP DATABASE $destAncDB");
}
# creating destination database
$dbh->do( "CREATE DATABASE " . $destAncDB )
or die "Could not create database $destAncDB: " . $dbh->errstr;
}
# Dump the source database table structure (w/o data) and use it to create
# the new database schema
# May have to eliminate the -p pass part... not sure
my $rc = 0xffff & system(
"mysqldump -p$pass -u $user -h $host -P $port --no-data $srcDB | " .
"mysql -p$pass -u $user -h $host -P $port $destDB");
if($rc != 0) {
$rc >>= 8;
die "mysqldump and insert failed with return code: $rc";
}
if ($destAncDB) {
my $rc = 0xffff & system(
"mysqldump -p$pass -u $user -h $host -P $port --no-data $srcAncDB | " .
"mysql -p$pass -u $user -h $host -P $port $destAncDB");
if($rc != 0) {
$rc >>= 8;
die "mysqldump and insert failed with return code: $rc";
}
}
$dbh->do("use $destDB");
#Populate method_link
$dbh->do("insert into method_link select * from $srcDB.method_link");
#Populate method_link_species_set (take all so more data can be added at a later date)
$dbh->do("insert into method_link_species_set select * from $srcDB.method_link_species_set");
#Populate method_link_species_set_tag (take all so more data can be added at a later date)
$dbh->do("INSERT INTO method_link_species_set_tag SELECT * FROM $srcDB.method_link_species_set_tag");
#Populate species_set (take all so more data can be added at a later date)
$dbh->do("INSERT INTO species_set SELECT * FROM $srcDB.species_set");
#Populate species_set_tag (take all so more data can be added at a later date)
$dbh->do("INSERT INTO species_set_tag SELECT * FROM $srcDB.species_set_tag");
#Populate genome_db
$dbh->do("insert into genome_db select * from $srcDB.genome_db");
$dbh->do("update genome_db set locator=NULL");
#Populate meta
$dbh->do("insert into meta select * from $srcDB.meta");
#Populate ncbi_taxa_node
$dbh->do("insert into ncbi_taxa_node select * from $srcDB.ncbi_taxa_node");
#Populate ncbi_taxa_name
$dbh->do("insert into ncbi_taxa_name select * from $srcDB.ncbi_taxa_name");
my $other_genome_db_ids = $dbh->selectcol_arrayref("
SELECT genome_db_id FROM genome_db
WHERE name IN (\"".join("\", \"", @other_genome_db_names)."\")
and assembly_default = 1");
#Take max of all the max_align values, used to select alignment blocks
$array_ref = $dbh->selectcol_arrayref("SELECT MAX(value) FROM method_link_species_set_tag WHERE tag='max_align'");
my $max_alignment_length = $array_ref->[0];
#my $method_link_id = $dbh->selectrow_array("
# SELECT method_link_id FROM method_link
# WHERE type = \"$method_link_type\"");
my $ref_genome_db_id = $dbh->selectrow_array("
SELECT genome_db_id FROM genome_db
WHERE name = \"$ref_genome_db_name\" and assembly_default = 1");
if ($do_pairwise) {
my $pairwise_genome_db_ids = $dbh->selectcol_arrayref("
SELECT genome_db_id FROM genome_db
WHERE name IN (\"".join("\", \"", @pairwise_genome_db_names)."\")
and assembly_default = 1");
foreach my $genome_db_id (@$pairwise_genome_db_ids) {
foreach my $seq_region (@seq_regions) {
my ($seq_region_name, $seq_region_start, $seq_region_end) = @{$seq_region};
my $lower_bound = $seq_region_start - $max_alignment_length;
my ($method_link_species_set_id, $species_set_id) = $dbh->selectrow_array(qq{
SELECT
mls.method_link_species_set_id, mls.species_set_id
FROM
$srcDB.method_link_species_set mls, $srcDB.species_set ss1, $srcDB.species_set ss2, method_link ml
WHERE
ss1.genome_db_id=$ref_genome_db_id AND
ss2.genome_db_id=$genome_db_id AND
ss1.species_set_id=ss2.species_set_id AND
mls.species_set_id=ss1.species_set_id AND
ml.method_link_id=mls.method_link_id AND
ml.type in ($pairwise_method_link_type)
});
if (!defined $method_link_species_set_id) {
print "No valid mlss found between $ref_genome_db_id and $genome_db_id\n";
next;
}
# Get dnafrag_id for the reference region
my ($dnafrag_id) = $dbh->selectrow_array(qq{
SELECT
dnafrag_id
FROM
$srcDB.dnafrag
WHERE
genome_db_id=$ref_genome_db_id AND
name=$seq_region_name
});
print "Dumping data for dnafrag $dnafrag_id (genome=$ref_genome_db_id; seq=$seq_region_name) vs. genome=$genome_db_id\n";
# Get the list of genomic_align_block_ids corresponding the the reference region
# Populate the genomic_align_block_id table
print " - dumping genomic_align entries $method_link_species_set_id\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
*
FROM
$srcDB.genomic_align
WHERE
method_link_species_set_id=$method_link_species_set_id AND
dnafrag_id = $dnafrag_id AND
dnafrag_start<=$seq_region_end AND
dnafrag_end>=$seq_region_start AND
dnafrag_start>=$lower_bound
});
# populate genomic_align_block table
print " - dumping genomic_align_block entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_block
SELECT
gab.*
FROM
$srcDB.genomic_align_block gab, genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
# populate genomic_align table
print " - dumping new genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
ga.*
FROM
genomic_align_block gab, $srcDB.genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
}
}
# populate dnafrag table
$dbh->do("insert ignore into dnafrag select d.* from genomic_align ga, $srcDB.dnafrag d where ga.dnafrag_id=d.dnafrag_id");
foreach my $genome_db_id (@$other_genome_db_ids) {
# populate synteny_region table
print "Dumping synteny data (genome=$ref_genome_db_id vs. genome=$genome_db_id)\n";
$dbh->do("insert into synteny_region select s.* from $srcDB.synteny_region s, $srcDB.dnafrag_region dr1, dnafrag d1, $srcDB.dnafrag_region dr2, dnafrag d2 where s.synteny_region_id=dr1.synteny_region_id and s.synteny_region_id=dr2.synteny_region_id and dr1.dnafrag_id=d1.dnafrag_id and dr2.dnafrag_id=d2.dnafrag_id and d1.genome_db_id=$ref_genome_db_id and d2.genome_db_id=$genome_db_id");
}
# populate dnafrag_region tables
$dbh->do("insert into dnafrag_region select dr.* from synteny_region s, $srcDB.dnafrag_region dr where s.synteny_region_id=dr.synteny_region_id");
}
if ($do_proteins) {
#These need setting
my $protein_genome_db_ids;
my $method_link_id;
foreach my $genome_db_id (@$protein_genome_db_ids) {
foreach my $seq_region (@seq_regions) {
my ($seq_region_name, $seq_region_start, $seq_region_end) = @{$seq_region};
my $lower_bound = $seq_region_start - $max_alignment_length;
my ($method_link_species_set_id, $species_set_id) = $dbh->selectrow_array(qq{
SELECT
mls.method_link_species_set_id, mls.species_set_id
FROM
$srcDB.method_link_species_set mls, $srcDB.species_set ss1, $srcDB.species_set ss2
WHERE
ss1.genome_db_id=$ref_genome_db_id AND
ss2.genome_db_id=$genome_db_id AND
ss1.species_set_id=ss2.species_set_id AND
mls.species_set_id=ss1.species_set_id AND
mls.method_link_id=$method_link_id
});
# populate peptide_align_feature table
print " - populating peptide_align_feature table\n";
$dbh->do("insert into peptide_align_feature select paf.* from $srcDB.peptide_align_feature paf, $srcDB.member m where paf.qmember_id = m.member_id and hgenome_db_id=$genome_db_id and m.genome_db_id=$ref_genome_db_id and m.chr_name=$seq_region_name and m.chr_start<$seq_region_end and m.chr_end>$seq_region_start");
$dbh->do("insert into peptide_align_feature select paf.* from $srcDB.peptide_align_feature paf, $srcDB.member m where paf.hmember_id = m.member_id and qgenome_db_id=$genome_db_id and m.genome_db_id=$ref_genome_db_id and m.chr_name=$seq_region_name and m.chr_start<$seq_region_end and m.chr_end>$seq_region_start");
# populate homology table for pairwise homologues
print " - populating homology table\n";
($method_link_species_set_id) = $dbh->selectrow_array(qq{
SELECT
mls.method_link_species_set_id
FROM
$srcDB.method_link_species_set mls, $srcDB.method_link ml
WHERE
mls.species_set_id=$species_set_id AND
mls.method_link_id=ml.method_link_id AND
ml.type="ENSEMBL_ORTHOLOGUES"
});
$dbh->do("insert into homology select h.* from $srcDB.homology h,$srcDB.homology_member hm1, $srcDB.member m1, $srcDB.homology_member hm2, $srcDB.member m2 where h.homology_id=hm1.homology_id and h.homology_id=hm2.homology_id and hm1.member_id=m1.member_id and hm2.member_id=m2.member_id and m1.genome_db_id=$ref_genome_db_id and m2.genome_db_id=$genome_db_id and m1.chr_name=$seq_region_name and m1.chr_start<$seq_region_end and m1.chr_end>$seq_region_start and h.method_link_species_set_id=$method_link_species_set_id");
# populate family table
print " - populating family table\n";
$dbh->do("insert ignore into family select f.* from $srcDB.family f, $srcDB.family_member fm, $srcDB.member m where f.family_id=fm.family_id and fm.member_id=m.member_id and m.genome_db_id=$ref_genome_db_id and m.chr_name=$seq_region_name and m.chr_start<$seq_region_end and m.chr_end>$seq_region_start");
print " - done\n";
}
}
}
if ($do_pecan) {
print "do pecan multiple alignment\n";
my $multi_alignment_mlss_id = _run_query_from_method_link_type_species_set_name($pecan_alignment_method_link_type, $pecan_species_set_name);
my $constrained_element_mlss_id = _run_query_from_method_link_type_species_set_name($constrained_element_method_link_type, $pecan_species_set_name);
foreach my $seq_region (@seq_regions) {
my ($seq_region_name, $seq_region_start, $seq_region_end) = @{$seq_region};
my $lower_bound = $seq_region_start - $max_alignment_length;
# Get dnafrag_id for the reference region
my ($dnafrag_id) = $dbh->selectrow_array(qq{
SELECT
dnafrag_id
FROM
$srcDB.dnafrag
WHERE
genome_db_id=$ref_genome_db_id AND
name=$seq_region_name
});
print "Dumping data for dnafrag $dnafrag_id (genome=$ref_genome_db_id; seq=$seq_region_name)\n";
# Get the list of genomic_align_block_ids corresponding the the reference region
# Populate the genomic_align_block_id table
print " - dumping genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
*
FROM
$srcDB.genomic_align
WHERE
method_link_species_set_id=$multi_alignment_mlss_id AND
dnafrag_id = $dnafrag_id AND
dnafrag_start<=$seq_region_end AND
dnafrag_end>=$seq_region_start AND
dnafrag_start>=$lower_bound
});
# populate constrained element genomic_align_block table
print " - dumping constrained element entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
constrained_element
SELECT
*
FROM
$srcDB.constrained_element
WHERE
method_link_species_set_id=$constrained_element_mlss_id AND
dnafrag_id = $dnafrag_id AND
dnafrag_start<=$seq_region_end AND
dnafrag_end>=$seq_region_start AND
dnafrag_start>=$lower_bound
});
}
# populate genomic_align_block table
print " - dumping genomic_align_block entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_block
SELECT
gab.*
FROM
$srcDB.genomic_align_block gab, genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
# populate genomic_align table
print " - dumping new genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
ga.*
FROM
genomic_align_block gab, $srcDB.genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
#populate conservation_score table
print " - dumping conservation_score entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
conservation_score
SELECT
cs.*
FROM
$srcDB.conservation_score cs, genomic_align_block gab
WHERE
gab.genomic_align_block_id=cs.genomic_align_block_id
AND
gab.method_link_species_set_id=$multi_alignment_mlss_id
});
}
# --- EPO multiple-alignment section --------------------------------------
# Copies the multiple-alignment data (extant and ancestral rows of the
# genomic_align* tables) that overlaps the requested seq_regions from the
# source database into the test database, then optionally populates the
# ancestral core database.
if ($do_epo) {
print "do EPO multiple alignment\n";
# Resolve the MLSS id for the EPO alignment from its method_link type and
# species-set name in the source database.
my $multi_alignment_mlss_id = _run_query_from_method_link_type_species_set_name($epo_alignment_method_link_type, $epo_species_set_name);
foreach my $seq_region (@seq_regions) {
my ($seq_region_name, $seq_region_start, $seq_region_end) = @{$seq_region};
# Alignments starting more than max_alignment_length before the region
# cannot overlap it, so this bounds the dnafrag_start scan below.
my $lower_bound = $seq_region_start - $max_alignment_length;
# Get dnafrag_id for the reference region
my ($dnafrag_id) = $dbh->selectrow_array(qq{
SELECT
dnafrag_id
FROM
$srcDB.dnafrag
WHERE
genome_db_id=$ref_genome_db_id AND
name=$seq_region_name
});
print "Dumping data for dnafrag $dnafrag_id (genome=$ref_genome_db_id; seq=$seq_region_name)\n";
# Populate the genomic_align table
print " - dumping genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
*
FROM
$srcDB.genomic_align
WHERE
method_link_species_set_id=$multi_alignment_mlss_id AND
dnafrag_id = $dnafrag_id AND
dnafrag_start<=$seq_region_end AND
dnafrag_end>=$seq_region_start AND
dnafrag_start>=$lower_bound
});
# populate genomic_align_block table with extant species
print " - dumping genomic_align_block entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_block
SELECT
gab.*
FROM
$srcDB.genomic_align_block gab, genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
# populate genomic_align table with extant species
# (brings in ALL genomic_aligns of the selected blocks, not just the
# reference-dnafrag ones copied above)
print " - dumping new genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
ga.*
FROM
genomic_align_block gab, $srcDB.genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
# populate genomic_align_tree table with extant and ancestral nodes
# (gat1 matches the already-copied leaves; gat2 pulls in every node that
# shares a root with them, i.e. the whole trees)
print " - dumping genomic_align_tree entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_tree
SELECT
gat2.*
FROM
genomic_align ga
JOIN
$srcDB.genomic_align_tree gat1
USING
(node_id)
JOIN
$srcDB.genomic_align_tree gat2
USING
(root_id)
GROUP BY
gat2.node_id
});
# populate genomic_align table with ancestral species
print " - dumping new genomic_align ancestral entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
ga.*
FROM
genomic_align_tree
JOIN
$srcDB.genomic_align ga
USING
(node_id)
});
# populate genomic_align_block table with ancestral species
print " - dumping genomic_align_block ancestral entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_block
SELECT
gab.*
FROM
genomic_align
JOIN
$srcDB.genomic_align_block gab
USING
(genomic_align_block_id)
WHERE
gab.method_link_species_set_id=$multi_alignment_mlss_id
GROUP BY
genomic_align_block_id
});
#populate the dnafrag table (ancestral dnafrags referenced above)
print " - dumping new dnafrag ancestral entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
dnafrag
SELECT
dnafrag.*
FROM
$srcDB.dnafrag
JOIN
genomic_align USING (dnafrag_id)
});
}
if ($destAncDB) {
#Populate ancestral core database with the seq_regions/dna that back the
#ancestral dnafrags just copied into the compara test database.
$dbh->do("use $destAncDB");
print " - populate the coord_system table\n";
$dbh->do(qq{
INSERT INTO coord_system
SELECT
coord_system.*
FROM
$srcAncDB.coord_system
});
print " - dumping seq_region entries\n";
$dbh->do(qq{
INSERT INTO seq_region
SELECT
seq_region.*
FROM
$srcAncDB.seq_region
JOIN
$destDB.dnafrag USING (name)
WHERE
coord_system_name = \"$ancestral_coord_system_name\";
});
print " - dumping dna entries\n";
$dbh->do(qq{
INSERT INTO dna
SELECT
dna.*
FROM
$srcAncDB.dna
JOIN
seq_region USING (seq_region_id)
});
#Back to compara db
$dbh->do("use $destDB");
}
}
# --- EPO Low Coverage multiple-alignment section --------------------------
# Same copy strategy as the EPO section, plus constrained elements and
# conservation scores for the low-coverage alignment MLSS.
if ($do_epo_low_coverage) {
# Fix: the progress message used to say "do EPO multiple alignment",
# copy-pasted from the $do_epo section above, which made logs ambiguous.
print "do EPO Low Coverage multiple alignment\n";
my $multi_alignment_mlss_id = _run_query_from_method_link_type_species_set_name($epo_low_coverage_alignment_method_link_type, $epo_species_set_name);
my $constrained_element_mlss_id = _run_query_from_method_link_type_species_set_name($constrained_element_method_link_type, $epo_species_set_name);
foreach my $seq_region (@seq_regions) {
my ($seq_region_name, $seq_region_start, $seq_region_end) = @{$seq_region};
# Alignments starting more than max_alignment_length before the region
# cannot overlap it, so this bounds the dnafrag_start scan below.
my $lower_bound = $seq_region_start - $max_alignment_length;
# Get dnafrag_id for the reference region
my ($dnafrag_id) = $dbh->selectrow_array(qq{
SELECT
dnafrag_id
FROM
$srcDB.dnafrag
WHERE
genome_db_id=$ref_genome_db_id AND
name=$seq_region_name
});
print "Dumping data for dnafrag $dnafrag_id (genome=$ref_genome_db_id; seq=$seq_region_name)\n";
# Populate the genomic_align table
print " - dumping genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
*
FROM
$srcDB.genomic_align
WHERE
method_link_species_set_id=$multi_alignment_mlss_id AND
dnafrag_id = $dnafrag_id AND
dnafrag_start<=$seq_region_end AND
dnafrag_end>=$seq_region_start AND
dnafrag_start>=$lower_bound
});
# populate constrained element genomic_align_block table
print " - dumping constrained element entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
constrained_element
SELECT
*
FROM
$srcDB.constrained_element
WHERE
method_link_species_set_id=$constrained_element_mlss_id AND
dnafrag_id = $dnafrag_id AND
dnafrag_start<=$seq_region_end AND
dnafrag_end>=$seq_region_start AND
dnafrag_start>=$lower_bound
});
}
# populate genomic_align_block table with extant species
print " - dumping genomic_align_block entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_block
SELECT
gab.*
FROM
$srcDB.genomic_align_block gab, genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
#Do not limit to just these species, or the ConservationScoreAdaptor tests will not work
#my $gdb_str = join ",", $ref_genome_db_id, @$other_genome_db_ids;
# populate genomic_align table with extant species
print " - dumping new genomic_align entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align
SELECT
ga.*
FROM
genomic_align_block gab, $srcDB.genomic_align ga
WHERE
gab.genomic_align_block_id=ga.genomic_align_block_id
});
# populate genomic_align_tree table with extant and ancestral nodes
# (every node sharing a root with a copied leaf, i.e. the whole trees)
print " - dumping genomic_align_tree entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
genomic_align_tree
SELECT
gat2.*
FROM
genomic_align ga
JOIN
$srcDB.genomic_align_tree gat1
USING
(node_id)
JOIN
$srcDB.genomic_align_tree gat2
USING
(root_id)
GROUP BY
gat2.node_id
});
#populate conservation_score table for the copied alignment blocks
print " - dumping conservation_score entries\n";
$dbh->do(qq{
INSERT IGNORE INTO
conservation_score
SELECT
cs.*
FROM
$srcDB.conservation_score cs, genomic_align_block gab
WHERE
gab.genomic_align_block_id=cs.genomic_align_block_id
AND
gab.method_link_species_set_id=$multi_alignment_mlss_id
});
}
# --- Protein-tree / homology section --------------------------------------
# Copies the protein trees, homologies, families, members and sequences
# relevant to members located on the requested seq_regions.
if ($do_proteins) {
foreach my $seq_region (@seq_regions) {
my ($seq_region_name, $seq_region_start, $seq_region_end) = @{$seq_region};
print "Dumping data for dnafrag (genome=$ref_genome_db_id; seq=$seq_region_name)\n";
print " - populating protein trees\n";
## First get all the leaves which correspond to members in this region
## NOTE(review): $seq_region_name is interpolated unquoted into the SQL;
## this presumably only works for numeric chromosome names — verify.
my $num = $dbh->do("insert ignore into protein_tree_node select ptn.* from $srcDB.protein_tree_node ptn, $srcDB.protein_tree_member ptm, $srcDB.member m WHERE ptn.node_id=ptm.node_id and ptm.member_id=m.member_id and m.genome_db_id=$ref_genome_db_id and m.chr_name=$seq_region_name and m.chr_start<$seq_region_end and m.chr_end>$seq_region_start");
## Walk upwards: INSERT IGNORE returns the number of NEW rows, so the
## loop stops once no new parent node is added.
while ($num > 0) {
## Add parent nodes until we hit the root
$num = $dbh->do("insert ignore into protein_tree_node select ptn1.* from $srcDB.protein_tree_node ptn1, protein_tree_node ptn2 WHERE ptn1.node_id=ptn2.parent_id and ptn2.parent_id > 1");
}
## Add all the nodes underlying the roots (nested-set containment check)
$dbh->do("insert ignore into protein_tree_node select ptn1.* from $srcDB.protein_tree_node ptn1, protein_tree_node ptn2 WHERE ptn2.parent_id = 1 and ptn1.left_index BETWEEN ptn2.left_index and ptn2.right_index");
## Add all relevant entries in the protein_tree_member table
$dbh->do("insert ignore into protein_tree_member select ptm.* from $srcDB.protein_tree_member ptm, protein_tree_node ptn2 WHERE ptn2.node_id = ptm.node_id");
## Add all relevant entries in the protein_tree_tag table
$dbh->do("insert ignore into protein_tree_tag select ptt.* from $srcDB.protein_tree_tag ptt, protein_tree_node ptn2 WHERE ptn2.node_id = ptt.node_id");
print " - populating homology table with self-data\n";
## Find the species_set that contains ONLY the reference genome
## (HAVING count(*) = 1 keeps singleton sets).
my ($species_set_id) = $dbh->selectrow_array(qq{
SELECT
ss1.species_set_id
FROM
$srcDB.species_set ss1, $srcDB.species_set ss2
WHERE
ss1.species_set_id=ss2.species_set_id AND
ss1.genome_db_id=$ref_genome_db_id
GROUP BY ss1.species_set_id HAVING count(*) = 1
});
## ... then the within-species paralogues MLSS for that species_set.
my ($method_link_species_set_id) = $dbh->selectrow_array(qq{
SELECT
mlss.method_link_species_set_id
FROM
$srcDB.method_link_species_set mlss, $srcDB.method_link ml
WHERE
mlss.species_set_id=$species_set_id AND
mlss.method_link_id=ml.method_link_id AND
ml.type="ENSEMBL_PARALOGUES"
});
## Copy self-species homologies where at least one member lies in the region.
$dbh->do("insert into homology select distinct h.* from $srcDB.homology h,$srcDB.homology_member hm1, $srcDB.member m1, $srcDB.homology_member hm2, $srcDB.member m2 where h.homology_id=hm1.homology_id and h.homology_id=hm2.homology_id and hm1.member_id=m1.member_id and hm2.member_id=m2.member_id and m1.genome_db_id=$ref_genome_db_id and m2.genome_db_id=$ref_genome_db_id and m1.chr_name=$seq_region_name and m1.chr_start<$seq_region_end and m1.chr_end>$seq_region_start and m1.member_id <> m2.member_id and h.method_link_species_set_id=$method_link_species_set_id");
print " - done\n";
}
# populate homology_member table
$dbh->do("insert into homology_member select hm.* from homology h, $srcDB.homology_member hm where h.homology_id=hm.homology_id");
# populate family_member table
$dbh->do("insert into family_member select fm.* from family f, $srcDB.family_member fm where f.family_id=fm.family_id");
# populate member table (family members, homology members and their peptides)
$dbh->do("insert ignore into member select m.* from family_member fm, $srcDB.member m where fm.member_id=m.member_id");
$dbh->do("insert ignore into member select m.* from homology_member hm, $srcDB.member m where hm.member_id=m.member_id");
$dbh->do("insert ignore into member select m.* from homology_member hm, $srcDB.member m where hm.peptide_member_id=m.member_id");
# populate sequence table
$dbh->do("insert ignore into sequence select s.* from member m, $srcDB.sequence s where m.sequence_id=s.sequence_id");
# populate taxon table
# $dbh->do("insert ignore into taxon select t.* from member m, $srcDB.taxon t where m.taxon_id=t.taxon_id");
# $dbh->do("insert ignore into taxon select t.* from genome_db g, $srcDB.taxon t where g.taxon_id=t.taxon_id");
# populate the method_link_species.....not it is needed with the current schema
# it will when moving to the multiple alignment enabled schema.
# need to do something a bit more clever here to just add what we really
# method_link_species_set entries from genomic_align_block
#$dbh->do("insert into method_link_species_set select mlss.* from $srcDB.method_link_species_set mlss, genomic_align_block gab where gab.method_link_species_set_id=mlss.method_link_species_set_id group by mlss.method_link_species_set_id");
# method_link_species_set entries from homology
#$dbh->do("insert into method_link_species_set select mlss.* from $srcDB.method_link_species_set mlss, homology h where h.method_link_species_set_id=mlss.method_link_species_set_id group by mlss.method_link_species_set_id");
# method_link_species_set entries from family
#$dbh->do("insert into method_link_species_set select mlss.* from $srcDB.method_link_species_set mlss, family f where f.method_link_species_set_id=mlss.method_link_species_set_id group by mlss.method_link_species_set_id");
# method_link_species_set entries from synteny_region/dnafrag_region
#$dbh->do("insert into method_link_species_set select mlss.* from $srcDB.method_link_species_set mlss, synteny_region sr where sr.method_link_species_set_id=mlss.method_link_species_set_id group by mlss.method_link_species_set_id");
# species_set entries
#$dbh->do("insert into species_set select ss.* from method_link_species_set mlss left join $srcDB.species_set ss using (species_set_id) group by ss.species_set_id, ss.genome_db_id");
#conservation_score entry
#$dbh->do("insert into method_link_species_set select mlss.* from $srcDB.method_link_species_set mlss, $srcDB.method_link ml where ml.method_link_id=mlss.method_link_id and ml.type = \"$conservation_score_method_link_type\"");
}
# populate dnafrag table from entries in the genomic_align table
$dbh->do("insert ignore into dnafrag select d.* from genomic_align ga, $srcDB.dnafrag d where ga.dnafrag_id=d.dnafrag_id");
# Now output the seq_region files needed to create the corresponding core databases. Also do the ref_species seq_region_id in case there are duplicated regions
foreach my $genome_db_id ($ref_genome_db_id, @$other_genome_db_ids) {
my $array_ref = $dbh->selectcol_arrayref("select name from genome_db where genome_db_id=$genome_db_id");
my $species_name = lc($array_ref->[0]);
$species_name =~ s/\s+/_/g;
my $file = $species_name . ".seq_region_file";
# Fix: use a lexical filehandle with 3-arg open instead of the old
# bareword/2-arg form (safer, and the handle cannot leak globally).
open my $fh, '>', $file or
die "can not open $file\n";
print $fh "[\n";
$array_ref = $dbh->selectall_arrayref("select d.name,g.dnafrag_start,g.dnafrag_end from dnafrag d, genomic_align g where d.dnafrag_id=g.dnafrag_id and d.genome_db_id=$genome_db_id order by d.name, g.dnafrag_start,g.dnafrag_end");
# Merge aligned segments on the same seq_region that are closer than
# 100kb apart into single ["name", start, end] entries.
my ($last_name, $last_start, $last_end);
foreach my $row (@{$array_ref}) {
my ($name, $start, $end) = @{$row};
unless (defined $last_name && defined $last_start && defined $last_end) {
# First row: just start the current segment.
($last_name, $last_start, $last_end) = ($name, $start, $end);
next;
}
if ($name eq $last_name && $start - $last_end < 100000) {
# Same seq_region and close enough: extend the current segment.
$last_end = $end;
} else {
# New seq_region, or a gap of >=100kb: flush the previous segment.
print $fh "[\"$last_name\", $last_start,$last_end],\n";
($last_name, $last_start, $last_end) = ($name, $start, $end);
}
}
# Fix: only print the trailing segment when at least one row was seen;
# the original unconditionally interpolated undef values (and produced
# "uninitialized value" warnings) for genomes with no alignments.
if (defined $last_name) {
print $fh "[\"$last_name\", $last_start,$last_end]\n";
}
print $fh "]\n";
# Fix: check close on a write handle so buffered-write errors are reported.
close $fh or die "can not write $file\n";
}
$dbh->disconnect();
print "Test genome database $destDB created\n";
#cmd to dump .sql and .txt files
#/usr/local/ensembl/mysql/bin/mysqldump -hia64f -uensadmin -p${ENSADMIN_PSW} -P3306 --socket=/mysql/data_3306/mysql.sock -T ./ abel_core_test
# Look up, in the source database, the method_link_species_set_id matching a
# given method_link type and a species-set name (as stored in species_set_tag
# under tag "name").  Returns undef when no matching row exists.
sub _run_query_from_method_link_type_species_set_name {
    my ($method_link_type, $species_set_name) = @_;

    # selectrow_array in scalar context yields the first column of the
    # first matching row, or undef for an empty result set.
    my $mlss_id = $dbh->selectrow_array(qq{
SELECT
method_link_species_set_id
FROM
$srcDB.method_link_species_set mlss
JOIN
$srcDB.species_set_tag USING (species_set_id)
JOIN
$srcDB.method_link USING (method_link_id)
WHERE
species_set_tag.tag = "name" AND
species_set_tag.value = \"$species_set_name\"
AND method_link.type = \"$method_link_type\"
});

    return $mlss_id;
}
#Based on routine from MethodLinkSpeciesSetAdaptor.pm
#Based on routine from MethodLinkSpeciesSetAdaptor.pm
# Resolve a method_link_species_set_id from a method_link_id plus the exact
# set of genome_db_ids.  Returns undef when either the species set or the
# MLSS entry cannot be found.
sub _run_query_from_method_link_id_genome_db_ids {
    my ($method_link_id, $genome_db_ids) = @_;

    # Guard clause: no matching species set means no MLSS can exist.
    my $species_set_id = _get_species_set_id_from_genome_db_ids($genome_db_ids);
    return undef unless ($species_set_id);

    my $mlss_id = $dbh->selectrow_array(qq{
SELECT
method_link_species_set_id
FROM
$srcDB.method_link_species_set mlss
WHERE
species_set_id = \"$species_set_id\"
AND method_link_id = \"$method_link_id\"
});

    return $mlss_id;
}
#Based on routine from MethodLinkSpeciesSetAdaptor.pm
#Based on routine from MethodLinkSpeciesSetAdaptor.pm
# Find the species_set_id that contains EXACTLY the given genome_db_ids
# (all of them and nothing else).  Returns undef when no such set exists;
# warns and picks the first one if several qualify.
sub _get_species_set_id_from_genome_db_ids {
my ($genome_db_ids) = @_;
my $species_set_id;
## Fetch all the species_sets which contain all these genome_db_ids
## (HAVING count = N keeps sets matching every requested id).
my $all_rows = $dbh->selectall_arrayref(qq{
SELECT
species_set_id,
COUNT(*) as count
FROM
$srcDB.species_set
WHERE
genome_db_id in (}.join(",", @$genome_db_ids).qq{)
GROUP BY species_set_id
HAVING count = }.(scalar(@$genome_db_ids)));
if (!@$all_rows) {
return undef;
}
my $species_set_ids = [map {$_->[0]} @$all_rows];
## Keep only the species_set which does not contain any other genome_db_id:
## counting ALL members of each candidate set, the total must still be N.
$all_rows = $dbh->selectall_arrayref(qq{
SELECT
species_set_id,
COUNT(*) as count
FROM
$srcDB.species_set
WHERE
species_set_id in (}.join(",", @$species_set_ids).qq{)
GROUP BY species_set_id
HAVING count = }.(scalar(@$genome_db_ids)));
if (!@$all_rows) {
return undef;
} elsif (@$all_rows > 1) {
# Ambiguity should not normally happen; report it and fall through.
warning("Several species_set_ids have been found for genome_db_ids (".
join(",", @$genome_db_ids)."): ".join(",", map {$_->[0]} @$all_rows));
}
$species_set_id = $all_rows->[0]->[0];
return $species_set_id;
}
exit 0;
| dbolser-ebi/ensembl-compara | modules/t/CreateComparaTestDatabase.pl | Perl | apache-2.0 | 37,234 |
#!/usr/bin/perl
# RVM: aggregator module that pulls in the modules implementing the
# machine's assembly functions (names suggest: arithmetic, jumps, tests,
# memory, stack and I/O).
package RVM v1.0.0;
use v5.14;
# Pragmas
use strict;
use warnings;
# Modules with the implemented assembly functions
use f_arit;
use f_jumps;
use f_tests;
use f_memo;
use f_stk;
use f_io;
# A required/used file must end by returning a true value.
return 1;
| renatocf/MAC0242-PROJECT | lib/RVM/functions.pm | Perl | apache-2.0 | 220 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::github::mode::pullrequests;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use centreon::plugins::http;
use JSON;
# Constructor: registers the command-line options of this mode and the
# HTTP backend used to query the GitHub REST API.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    # Option spec: Getopt-style keys mapped to option metadata.
    my $arguments = {
        "hostname:s"   => { name => 'hostname', default => 'api.github.com' },
        "port:s"       => { name => 'port', default => '443' },
        "proto:s"      => { name => 'proto', default => 'https' },
        "timeout:s"    => { name => 'timeout' },
        "proxyurl:s"   => { name => 'proxyurl' },
        "ssl-opt:s@"   => { name => 'ssl_opt' },
        "warning:s"    => { name => 'warning' },
        "critical:s"   => { name => 'critical' },
        "owner:s"      => { name => 'owner' },
        "repository:s" => { name => 'repository' },
    };
    $options{options}->add_options(arguments => $arguments);

    $self->{http} = centreon::plugins::http->new(output => $self->{output});

    return $self;
}
# Validate mandatory options and thresholds, then derive the API URL path
# and configure the HTTP backend.  Exits through option_exit() on error.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    # Both options are mandatory to build the /repos/<owner>/<repository>
    # URL path.  (DRY: the original duplicated this check verbatim; the
    # check order and the exact messages are preserved.)
    foreach my $opt ('repository', 'owner') {
        if (!defined($self->{option_results}->{$opt})) {
            $self->{output}->add_option_msg(short_msg => "Please set the $opt option");
            $self->{output}->option_exit();
        }
    }

    # Validate the syntax of both thresholds; messages are preserved.
    foreach my $label ('warning', 'critical') {
        if (($self->{perfdata}->threshold_validate(label => $label, value => $self->{option_results}->{$label})) == 0) {
            $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $self->{option_results}->{$label} . "'.");
            $self->{output}->option_exit();
        }
    }

    # Only open pull requests are counted; per_page is set high enough to
    # get them in a single call.
    $self->{option_results}->{url_path} = "/repos/".$self->{option_results}->{owner}."/".$self->{option_results}->{repository}."/pulls";
    $self->{option_results}->{get_param} = ['state=open', 'per_page=1000'];
    $self->{http}->set_options(%{$self->{option_results}});
}
# Fetch the open pull requests, compare the count against the thresholds
# and emit the plugin output plus perfdata.
sub run {
    my ($self, %options) = @_;

    my $json_content = $self->{http}->request();

    # Decode defensively: a non-JSON body must produce a clean exit.
    my $decoded;
    eval { $decoded = JSON->new->decode($json_content); };
    if ($@) {
        $self->{output}->add_option_msg(short_msg => "Cannot decode json response");
        $self->{output}->option_exit();
    }

    # The API returns one array element per open pull request.
    my $nb_pulls = scalar(@{$decoded});

    my $exit = $self->{perfdata}->threshold_check(
        value => $nb_pulls,
        threshold => [
            { label => 'critical', exit_litteral => 'critical' },
            { label => 'warning', exit_litteral => 'warning' },
        ]
    );
    $self->{output}->output_add(
        severity => $exit,
        short_msg => sprintf("%d pull requests are open", $nb_pulls)
    );
    $self->{output}->perfdata_add(
        label => 'pullrequests',
        value => $nb_pulls,
        warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
        critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
        min => 0
    );

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check GitHub's number of pull requests for a repository
=over 8
=item B<--hostname>
IP Addr/FQDN of the GitHub API (Default: api.github.com)
=item B<--port>
Port used by GitHub's API (Default: '443')
=item B<--proto>
Specify https if needed (Default: 'https')
=item B<--proxyurl>
Proxy URL if any
=item B<--timeout>
Threshold for HTTP timeout (Default: 5)
=item B<--ssl-opt>
Set SSL Options (--ssl-opt="SSL_version => TLSv1" --ssl-opt="SSL_verify_mode => SSL_VERIFY_NONE").
=item B<--warning>
Threshold warning.
=item B<--critical>
Threshold critical.
=item B<--owner>
Specify GitHub's owner
=item B<--repository>
Specify GitHub's repository
=back
=cut
| wilfriedcomte/centreon-plugins | apps/github/mode/pullrequests.pm | Perl | apache-2.0 | 5,183 |
%% Ciao Prolog package glue for the 'regtypes' (regular types) extension.
%% Load the translation module and register expand_regtypes/2 as a
%% sentence-level source-to-source transformation.
:- load_compilation_module(library('regtypes/regtypes_tr')).
:- add_sentence_trans(expand_regtypes/2).
%% Accept ':- regtype ...' declarations with one or two arguments.
:- new_declaration(regtype/1).
:- new_declaration(regtype/2).
%% Declare 'regtype' as prefix (fx) and infix (xfx) operators so that the
%% declaration syntax parses.
:- op(1150, fx,(regtype)).
:- op(1150,xfx,(regtype)).
%% in basic_props: :- meta_predicate regtype(goal).
| leuschel/ecce | www/CiaoDE/ciao/lib/regtypes/regtypes.pl | Perl | apache-2.0 | 280 |
# Auto-generated SOAP::WSDL element class for the 'ApiExceptionFault'
# SOAP fault element of the AdWords v201402 AdParamService.
package Google::Ads::AdWords::v201402::AdParamService::ApiExceptionFault;
use strict;
use warnings;
{ # BLOCK to scope variables
# XML namespace of this element (AdWords v201402 'cm' namespace).
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201402' }
# SOAP::WSDL typelib metadata: element name; the remaining XSD facets
# (nillable, minOccurs, maxOccurs, ref) are deliberately left unset.
__PACKAGE__->__set_name('ApiExceptionFault');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
# Behaves as an XSD element whose content type is ApiException.
use base qw(
SOAP::WSDL::XSD::Typelib::Element
Google::Ads::AdWords::v201402::ApiException
);
}
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201402::AdParamService::ApiExceptionFault
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
ApiExceptionFault from the namespace https://adwords.google.com/api/adwords/cm/v201402.
A fault element of type ApiException.
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201402::AdParamService::ApiExceptionFault->new($data);
Constructor. The following data structure may be passed to new():
$a_reference_to, # see Google::Ads::AdWords::v201402::ApiException
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201402/AdParamService/ApiExceptionFault.pm | Perl | apache-2.0 | 1,079 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated response wrapper for CampaignConversionGoalService.mutate.
package Google::Ads::GoogleAds::V9::Services::CampaignConversionGoalService::MutateCampaignConversionGoalsResponse;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Constructor.  $args is a hashref; only the 'results' key is used here.
# Returns a blessed response object.
sub new {
my ($class, $args) = @_;
my $self = {results => $args->{results}};
# Delete the unassigned fields in this object for a more concise JSON payload
remove_unassigned_fields($self, $args);
bless $self, $class;
return $self;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/CampaignConversionGoalService/MutateCampaignConversionGoalsResponse.pm | Perl | apache-2.0 | 1,072 |
#!/usr/bin/env perl
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 2017 - 2020, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# Usage:
# perl mk-lib1521.pl < ../../include/curl/curl.h > lib1521.c
# minimum and maximum long signed values (emitted as C macro bodies below)
my $minlong = "LONG_MIN";
my $maxlong = "LONG_MAX";
# maximum long unsigned value
my $maxulong = "ULONG_MAX";
# Emit the fixed preamble of the generated lib1521.c: license banner,
# helper callbacks, dummy variables and the start of test().  The heredoc
# interpolates $minlong/$maxlong/$maxulong, and every literal backslash in
# the C output is written as \\ here.
print <<HEADER
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \\| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \\___|\\___/|_| \\_\\_____|
*
* Copyright (C) 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include "test.h"
#include "memdebug.h"
#include <limits.h>
/* This source code is generated by mk-lib1521.pl ! */
struct data {
char *blaha;
};
#define LO $minlong
#define HI $maxlong
#define OFF_LO (curl_off_t) LO
#define OFF_HI (curl_off_t) $maxulong
#define OFF_NO (curl_off_t) 0
/* Unexpected error.
CURLE_NOT_BUILT_IN - means disabled at build
CURLE_UNKNOWN_OPTION - means no such option (anymore?)
CURLE_SSL_ENGINE_NOTFOUND - set unknown ssl engine
CURLE_UNSUPPORTED_PROTOCOL - set bad HTTP version
CURLE_BAD_FUNCTION_ARGUMENT - unsupported value
*/
#define UNEX(x) ((x) && \\
((x) != CURLE_NOT_BUILT_IN) && \\
((x) != CURLE_UNKNOWN_OPTION) && \\
((x) != CURLE_SSL_ENGINE_NOTFOUND) && \\
((x) != CURLE_UNSUPPORTED_PROTOCOL) && \\
((x) != CURLE_BAD_FUNCTION_ARGUMENT) )
static size_t writecb(char *buffer, size_t size, size_t nitems,
void *outstream)
{
(void)buffer;
(void)size;
(void)nitems;
(void)outstream;
return 0;
}
static size_t readcb(char *buffer,
size_t size,
size_t nitems,
void *instream)
{
(void)buffer;
(void)size;
(void)nitems;
(void)instream;
return 0;
}
static int err(const char *name, CURLcode val, int lineno)
{
printf("CURLOPT_%s returned %d, \\"%s\\" on line %d\\n",
name, val, curl_easy_strerror(val), lineno);
return (int)val;
}
static int geterr(const char *name, CURLcode val, int lineno)
{
printf("CURLINFO_%s returned %d, \\"%s\\" on line %d\\n",
name, val, curl_easy_strerror(val), lineno);
return (int)val;
}
static curl_progress_callback progresscb;
static curl_write_callback headercb;
static curl_debug_callback debugcb;
static curl_trailer_callback trailercb;
static curl_ssl_ctx_callback ssl_ctx_cb;
static curl_ioctl_callback ioctlcb;
static curl_sockopt_callback sockoptcb;
static curl_opensocket_callback opensocketcb;
static curl_seek_callback seekcb;
static curl_sshkeycallback ssh_keycb;
static curl_chunk_bgn_callback chunk_bgn_cb;
static curl_chunk_end_callback chunk_end_cb;
static curl_fnmatch_callback fnmatch_cb;
static curl_closesocket_callback closesocketcb;
static curl_xferinfo_callback xferinfocb;
static curl_resolver_start_callback resolver_start_cb;
int test(char *URL)
{
CURL *curl = NULL;
CURL *dep = NULL;
CURLSH *share = NULL;
char errorbuffer[CURL_ERROR_SIZE];
void *conv_from_network_cb = NULL;
void *conv_to_network_cb = NULL;
void *conv_from_utf8_cb = NULL;
void *interleavecb = NULL;
char *stringpointerextra = (char *)"moooo";
struct curl_slist *slist = NULL;
struct curl_httppost *httppost = NULL;
curl_mime *mimepost = NULL;
FILE *stream = stderr;
struct data object;
char *charp;
long val;
curl_off_t oval;
double dval;
curl_socket_t sockfd;
struct curl_certinfo *certinfo;
struct curl_tlssessioninfo *tlssession;
CURLcode res = CURLE_OK;
(void)URL; /* not used */
global_init(CURL_GLOBAL_ALL);
easy_init(dep);
easy_init(curl);
share = curl_share_init();
if(!share) {
res = CURLE_OUT_OF_MEMORY;
goto test_cleanup;
}
HEADER
;
# Parse curl.h from STDIN.  For each CURLOPT(...) macro line, emit one or
# more curl_easy_setopt() calls exercising representative values for that
# option's type; for each CURLINFO_ enum line, emit the matching
# curl_easy_getinfo() call.  (NB: the file does not 'use strict', so
# $infomode below is a package global.)
while(<STDIN>) {
if($_ =~ /^  CURLOPT\(([^ ]*), ([^ ]*), (\d*)\)/) {
my ($name, $type, $val)=($1, $2, $3);
my $w="  ";
# $pref is the common C call prefix; $check is the generated error check.
my $pref = "${w}res = curl_easy_setopt(curl, $name,";
my $i = ' ' x (length($w) + 23);
my $check = "  if(UNEX(res)) {\n    err(\"$name\", res, __LINE__);\n    goto test_cleanup;\n  }\n";
if($type eq "CURLOPTTYPE_STRINGPOINT") {
print "${pref} \"string\");\n$check";
print "${pref} NULL);\n$check";
}
elsif($type eq "CURLOPTTYPE_LONG") {
# Try zero, a small value and both extremes of a signed long.
print "${pref} 0L);\n$check";
print "${pref} 22L);\n$check";
print "${pref} LO);\n$check";
print "${pref} HI);\n$check";
}
elsif($type eq "CURLOPTTYPE_OBJECTPOINT") {
# Choose an argument variable that matches what the option expects.
if($name =~ /DEPENDS/) {
print "${pref} dep);\n$check";
}
elsif($name =~ "SHARE") {
# NOTE: a plain string used as a regex pattern match here.
print "${pref} share);\n$check";
}
elsif($name eq "CURLOPT_ERRORBUFFER") {
print "${pref} errorbuffer);\n$check";
}
elsif(($name eq "CURLOPT_POSTFIELDS") ||
($name eq "CURLOPT_COPYPOSTFIELDS")) {
# set size to zero to avoid it being "illegal"
print "  (void)curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, 0);\n";
print "${pref} stringpointerextra);\n$check";
}
elsif($name eq "CURLOPT_HTTPPOST") {
print "${pref} httppost);\n$check";
}
elsif($name eq "CURLOPT_MIMEPOST") {
print "${pref} mimepost);\n$check";
}
elsif($name eq "CURLOPT_STDERR") {
print "${pref} stream);\n$check";
}
else {
print "${pref} &object);\n$check";
}
print "${pref} NULL);\n$check";
}
elsif($type eq "CURLOPTTYPE_SLISTPOINT") {
print "${pref} slist);\n$check";
}
elsif($type eq "CURLOPTTYPE_FUNCTIONPOINT") {
# Derive the callback variable name from the option name
# (e.g. CURLOPT_WRITEFUNCTION -> writecb).
if($name =~ /([^ ]*)FUNCTION/) {
my $l=lc($1);
$l =~ s/^curlopt_//;
print "${pref}\n$i${l}cb);\n$check";
}
else {
print "${pref} &func);\n$check";
}
print "${pref} NULL);\n$check";
}
elsif($type eq "CURLOPTTYPE_OFF_T") {
# play conservative to work with 32bit curl_off_t
print "${pref} OFF_NO);\n$check";
print "${pref} OFF_HI);\n$check";
print "${pref} OFF_LO);\n$check";
}
else {
# Unknown option type: report on stderr, keep generating the rest.
print STDERR "\n---- $type\n";
}
}
elsif($_ =~ /^  CURLINFO_NONE/) {
# Marks the start of the CURLINFO enum in curl.h.
$infomode = 1;
}
elsif($infomode &&
($_ =~ /^  CURLINFO_([^ ]*) *= *CURLINFO_([^ ]*)/)) {
my ($info, $type)=($1, $2);
my $c = "  res = curl_easy_getinfo(curl, CURLINFO_$info,";
my $check = "  if(UNEX(res)) {\n    geterr(\"$info\", res, __LINE__);\n    goto test_cleanup;\n  }\n";
if($type eq "STRING") {
print "$c &charp);\n$check";
}
elsif($type eq "LONG") {
print "$c &val);\n$check";
}
elsif($type eq "OFF_T") {
print "$c &oval);\n$check";
}
elsif($type eq "DOUBLE") {
print "$c &dval);\n$check";
}
elsif($type eq "SLIST") {
print "$c &slist);\n$check";
# Returned slists are owned by the caller and must be freed.
print "  if(slist)\n    curl_slist_free_all(slist);\n";
}
elsif($type eq "SOCKET") {
print "$c &sockfd);\n$check";
}
elsif($type eq "PTR") {
if($info eq "CERTINFO") {
print "$c &certinfo);\n$check";
}
elsif(($info eq "TLS_SESSION") ||
($info eq "TLS_SSL_PTR")) {
print "$c &tlssession);\n$check";
}
else {
print STDERR "$info/$type is unsupported\n";
}
}
else {
print STDERR "$type is unsupported\n";
}
}
}
# Emit the fixed tail of the generated C file: one deliberately-bogus
# setopt call, then the cleanup label and teardown of all handles.
print <<FOOTER
  curl_easy_setopt(curl, 1, 0);
  res = CURLE_OK;
test_cleanup:
  curl_easy_cleanup(curl);
  curl_easy_cleanup(dep);
  curl_share_cleanup(share);
  curl_global_cleanup();
  return (int)res;
}
FOOTER
;
| LiberatorUSA/GUCEF | dependencies/curl/tests/libtest/mk-lib1521.pl | Perl | apache-2.0 | 9,762 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::evertz::AEA47721::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Constructor: delegate to the generic SNMP plugin base class, set the
# plugin version and register the single available mode ('stream-status').
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    # Repopulate the base class' modes hash in place (keeps any existing
    # reference to it valid) rather than assigning a fresh hashref.
    %{$self->{modes}} = (
        'stream-status' => 'network::evertz::AEA47721::snmp::mode::streamstatus',
    );

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Audio Embedder Card in SNMP.
=cut
| Tpo76/centreon-plugins | network/evertz/AEA47721/snmp/plugin.pm | Perl | apache-2.0 | 1,270 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::fujitsu::eternus::dx::ssh::mode::volumestats;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use centreon::plugins::values;
# Counter definitions for each volume ('vol' section). The numeric prefix of
# each key ('000_', '001_', ...) fixes the evaluation/display order; the part
# after the first underscore becomes the user-facing option name
# (--warning-<name>/--critical-<name>) and the perfdata label.
# Each 'set' hash is fed to centreon::plugins::values->set() in new().
my $maps_counters = {
    vol => {
        '000_read-iops' => {
            set => {
                key_values => [ { name => 'read_iops' }, { name => 'display' } ],
                output_template => 'Read IOPS : %d',
                perfdatas => [
                    { label => 'read_iops', value => 'read_iops_absolute', template => '%d',
                      unit => 'iops', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '001_write-iops' => {
            set => {
                key_values => [ { name => 'write_iops' }, { name => 'display' } ],
                output_template => 'Write IOPS : %d',
                perfdatas => [
                    { label => 'write_iops', value => 'write_iops_absolute', template => '%d',
                      unit => 'iops', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        # Traffic counters are stored in bits/s (see manage_selection, which
        # converts the MB/s columns); output_change_bytes renders them scaled.
        '002_read-traffic' => {
            set => {
                key_values => [ { name => 'read_throughput' }, { name => 'display' } ],
                output_template => 'Read Traffic : %s %s/s', output_change_bytes => 2,
                perfdatas => [
                    { label => 'read_throughput', value => 'read_throughput_absolute', template => '%d',
                      unit => 'b/s', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '003_write-traffic' => {
            set => {
                key_values => [ { name => 'write_throughput' }, { name => 'display' } ],
                output_template => 'Write Traffic : %s %s/s', output_change_bytes => 2,
                perfdatas => [
                    { label => 'write_throughput', value => 'write_throughput_absolute', template => '%d',
                      unit => 'b/s', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '004_read-response-time' => {
            set => {
                key_values => [ { name => 'read_response_time' }, { name => 'display' } ],
                output_template => 'Read Response Time : %d ms',
                perfdatas => [
                    { label => 'read_response_time', value => 'read_response_time_absolute', template => '%d',
                      unit => 'ms', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '005_write-response-time' => {
            set => {
                key_values => [ { name => 'write_response_time' }, { name => 'display' } ],
                output_template => 'Write Response Time : %d ms',
                perfdatas => [
                    { label => 'write_response_time', value => 'write_response_time_absolute', template => '%d',
                      unit => 'ms', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '006_read-processing-time' => {
            set => {
                key_values => [ { name => 'read_processing_time' }, { name => 'display' } ],
                output_template => 'Read Processing Time : %d ms',
                perfdatas => [
                    { label => 'read_processing_time', value => 'read_processing_time_absolute', template => '%d',
                      unit => 'ms', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '007_write-processing-time' => {
            set => {
                key_values => [ { name => 'write_processing_time' }, { name => 'display' } ],
                output_template => 'Write Processing Time : %d ms',
                perfdatas => [
                    { label => 'write_processing_time', value => 'write_processing_time_absolute', template => '%d',
                      unit => 'ms', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '008_read-cache-hit-rate' => {
            set => {
                key_values => [ { name => 'read_cache_hit_rate' }, { name => 'display' } ],
                output_template => 'Read Cache Hit Rate : %d %%',
                perfdatas => [
                    { label => 'read_cache_hit_rate', value => 'read_cache_hit_rate_absolute', template => '%d',
                      unit => '%', min => 0, max => 100, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
        '009_write-cache-hit-rate' => {
            set => {
                key_values => [ { name => 'write_cache_hit_rate' }, { name => 'display' } ],
                output_template => 'Write Cache Hit Rate : %d %%',
                perfdatas => [
                    { label => 'write_cache_hit_rate', value => 'write_cache_hit_rate_absolute', template => '%d',
                      unit => '%', min => 0, max => 100, label_extra_instance => 1, instance_use => 'display_absolute' },
                ],
            },
        },
    }
};
# Constructor: register the command-line options of this mode. Besides the
# static SSH/command options, one --warning-<name>/--critical-<name> pair is
# generated dynamically per counter defined in $maps_counters, and a
# centreon::plugins::values object is instantiated for each counter.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                  "hostname:s"        => { name => 'hostname' },
                                  "ssh-option:s@"     => { name => 'ssh_option' },
                                  "ssh-path:s"        => { name => 'ssh_path' },
                                  "ssh-command:s"     => { name => 'ssh_command', default => 'ssh' },
                                  "timeout:s"         => { name => 'timeout', default => 30 },
                                  "command:s"         => { name => 'command', default => 'show' },
                                  "command-path:s"    => { name => 'command_path' },
                                  "command-options:s" => { name => 'command_options', default => ' performance -type host-io' },
                                  "no-component:s"    => { name => 'no_component' },
                                  "filter-name:s"     => { name => 'filter_name' },
                                });
    $self->{no_components} = undef;

    foreach my $key (('vol')) {
        foreach (keys %{$maps_counters->{$key}}) {
            # Split '000_read-iops' into ordering id and option name; operates
            # on $_ (the counter key).
            my ($id, $name) = split /_/;
            if (!defined($maps_counters->{$key}->{$_}->{threshold}) || $maps_counters->{$key}->{$_}->{threshold} != 0) {
                $options{options}->add_options(arguments => {
                                                    'warning-' . $name . ':s'    => { name => 'warning-' . $name },
                                                    'critical-' . $name . ':s'    => { name => 'critical-' . $name },
                                               });
            }
            # Note: the values object is stored back into the shared
            # $maps_counters structure (package-level state).
            $maps_counters->{$key}->{$_}->{obj} = centreon::plugins::values->new(output => $self->{output}, perfdata => $self->{perfdata},
                                                      label => $name);
            $maps_counters->{$key}->{$_}->{obj}->set(%{$maps_counters->{$key}->{$_}->{set}});
        }
    }

    return $self;
}
# Post-parse option handling: enable remote (SSH) execution when a hostname
# was supplied, push the parsed options into every counter object, and decide
# which severity to emit when no component is found.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    my $opts = $self->{option_results};

    $opts->{remote} = 1
        if (defined($opts->{hostname}) && $opts->{hostname} ne '');

    for my $section ('vol') {
        for my $counter_key (keys %{$maps_counters->{$section}}) {
            $maps_counters->{$section}->{$counter_key}->{obj}->init(option_results => $opts);
        }
    }

    if (defined($opts->{no_component})) {
        $self->{no_components} = ($opts->{no_component} ne '')
            ? $opts->{no_component}
            : 'critical';
    }
}
# Entry point of the mode: collect the volume statistics, evaluate every
# counter against its thresholds and build the short/long plugin output.
# With more than one volume, per-volume details go to the long output and
# only non-OK volumes appear in the short message.
sub run {
    my ($self, %options) = @_;
    $self->manage_selection();

    # $multiple == 0 when exactly one volume matched; changes output layout
    # and disables the extra perfdata instance suffix.
    my $multiple = 1;
    if (scalar(keys %{$self->{vol}}) == 1) {
        $multiple = 0;
    }

    if ($multiple == 1) {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => 'All Volumes are ok');
    }

    foreach my $id (sort keys %{$self->{vol}}) {
        my ($short_msg, $short_msg_append, $long_msg, $long_msg_append) = ('', '', '', '');
        my @exits = ();
        # Counters iterate in key order, i.e. the '000_'..'009_' prefix order.
        foreach (sort keys %{$maps_counters->{vol}}) {
            my $obj = $maps_counters->{vol}->{$_}->{obj};
            $obj->set(instance => $id);

            my ($value_check) = $obj->execute(values => $self->{vol}->{$id});

            # Non-zero means the counter could not be computed (e.g. missing
            # value); report the error in the long output and move on.
            if ($value_check != 0) {
                $long_msg .= $long_msg_append . $obj->output_error();
                $long_msg_append = ', ';
                next;
            }
            my $exit2 = $obj->threshold_check();
            push @exits, $exit2;

            my $output = $obj->output();
            $long_msg .= $long_msg_append . $output;
            $long_msg_append = ', ';

            # Only non-OK counters are promoted to the short message.
            if (!$self->{output}->is_status(litteral => 1, value => $exit2, compare => 'ok')) {
                $short_msg .= $short_msg_append . $output;
                $short_msg_append = ', ';
            }

            $obj->perfdata(extra_instance => $multiple);
        }

        $self->{output}->output_add(long_msg => "Volume '$self->{vol}->{$id}->{display}' $long_msg");
        my $exit = $self->{output}->get_most_critical(status => [ @exits ]);
        if (!$self->{output}->is_status(litteral => 1, value => $exit, compare => 'ok')) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => "Volume '$self->{vol}->{$id}->{display}' $short_msg"
                                        );
        }

        if ($multiple == 0) {
            $self->{output}->output_add(short_msg => "Volume '$self->{vol}->{$id}->{display}' $long_msg");
        }
    }

    $self->{output}->display();
    $self->{output}->exit();
}
# Run the CLI command (locally or over SSH) and parse its tabular output into
# $self->{vol}: one entry per volume, keyed by volume number, holding the
# display name plus one value per counter column. The column layout is
# discovered dynamically from the header line.
sub manage_selection {
    my ($self, %options) = @_;
    my $stdout = centreon::plugins::misc::execute(output => $self->{output},
                                                  options => $self->{option_results},
                                                  ssh_pipe => 1,
                                                  command => $self->{option_results}->{command},
                                                  command_path => $self->{option_results}->{command_path},
                                                  command_options => $self->{option_results}->{command_options});
    #Volume                                                   IOPS(IOPS)            Throughput(MB/s)       Response Time(msec.)   Processing Time(msec.)  Cache Hit Rate(%)
    #No.   Name                                               Read      / Write     Read      / Write      Read       / Write     Read       / Write      Read       / Write     / Prefetch
    #----- -------------------------------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ---------- ----------
    #    1 VOL001                           6621       5192       589        379        17055      12056      10025      10010      41         37         36
    #    2 VOL002                           7791       6608       613        292        12148      11045      10005      10007      41         37         36
    $self->{vol} = {};
    # Most header groups have Read/Write sub-columns; cache hit rate also has
    # a Prefetch column.
    my %template_label = (cache_hit_rate => { labels => ['read', 'write', 'prefetch'] });
    my @template_values = ();
    foreach (split /\n/, $stdout) {
        # Header line: build the ordered list of column labels
        # (e.g. read_iops, write_iops, read_throughput, ...).
        if (/^Volume/) {
            while (/(\sIOPS|\sThroughput|\sResponse Time|\sProcessing Time|[^C]\sCache Hit Rate)/mgi) {
                my $value = lc(centreon::plugins::misc::trim($1));
                $value =~ s/ /_/g;
                my $labels = ['read', 'write'];
                if (defined($template_label{$value})) {
                    $labels = $template_label{$value}->{labels};
                }
                foreach (@{$labels}) {
                    push @template_values, { label => $_ . '_' . $value };
                }
            }
            next;
        }
        # Skip the separator line and the sub-header containing 'Name'.
        next if (/----|Name/i);
        my $value = centreon::plugins::misc::trim($_);
        # Data row: column 0 = volume number, 1 = name, rest = counter values.
        my @matches = split /\s+/, $value;
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $matches[1] !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "Skipping '" . $matches[1] . "': no matching filter name.");
            next;
        }
        my %counters = ();
        for (my $i = 0; $i < scalar(@template_values); $i++) {
            $counters{$template_values[$i]->{label}} = $matches[$i + 2];
        }
        # Convert the MB/s throughput columns to bits per second.
        $counters{read_throughput} *= 1000 * 1000 * 8 if (defined($counters{read_throughput}));
        $counters{write_throughput} *= 1000 * 1000 * 8 if (defined($counters{write_throughput}));
        $self->{vol}->{$matches[0]} = { display => $matches[1],
                                        %counters
                                      };
    }

    if (scalar(keys %{$self->{vol}}) <= 0) {
        $self->{output}->output_add(severity => defined($self->{no_components}) ? $self->{no_components} : 'unknown',
                                    short_msg => 'No components are checked.');
    }
}
1;
__END__
=head1 MODE
Check Volume statistics.
=over 8
=item B<--hostname>
Hostname to query.
=item B<--ssh-option>
Specify multiple options like the user (example: --ssh-option='-l=centreon-engine' --ssh-option='-p=52').
=item B<--ssh-path>
Specify ssh command path (default: none)
=item B<--ssh-command>
Specify ssh command (default: 'ssh'). Useful to use 'plink'.
=item B<--timeout>
Timeout in seconds for the command (Default: 30).
=item B<--command>
Command to get information (Default: 'show').
Can be changed if you have output in a file.
=item B<--command-path>
Command path (Default: none).
=item B<--command-options>
Command options (Default: 'performance -type host-io').
=item B<--no-component>
Define the status to return when no components are checked (default: 'unknown').
=item B<--filter-name>
Filter by name (regexp can be used).
=item B<--warning-*>
Threshold warning.
Can be: 'read-iops', 'write-iops', 'read-traffic', 'write-traffic',
'read-response-time', 'write-response-time', 'read-processing-time', 'write-processing-time',
'read-cache-hit-rate', 'write-cache-hit-rate'.
=item B<--critical-*>
Threshold critical.
Can be: 'read-iops', 'write-iops', 'read-traffic', 'write-traffic',
'read-response-time', 'write-response-time', 'read-processing-time', 'write-processing-time',
'read-cache-hit-rate', 'write-cache-hit-rate'.
=back
=cut
| wilfriedcomte/centreon-plugins | storage/fujitsu/eternus/dx/ssh/mode/volumestats.pm | Perl | apache-2.0 | 16,180 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package CrossMap;
# Process
# - Get binary, input file & type (gff, bed, vcf)
# - See if it has UCSC names; if so convert the names 1st before doing anything else (get the converting chain for the source assembly)
# - Run CrossMap on this (write to a temp file)
# - Get source and target assembly - convert to chain file location
# - Run CrossMap on this
# - Return the location of the file
use strict;
use warnings;

use File::Spec;

use base qw/Bio::EnsEMBL::Hive::Process/;
# Default pipeline parameters. The required parameters (species, input,
# output, chain_dir, source_assembly, target_assembly) carry no default and
# must be supplied by the pipeline configuration.
sub param_defaults {
    my ($self) = @_;
    my %defaults = (
        ucsc_names      => 0,          # input uses UCSC chromosome names; remap them first
        crossmap_binary => 'CrossMap', # location of the CrossMap executable
    );
    return \%defaults;
}
# Validate the 'type' parameter against the formats CrossMap understands and
# normalise 'gtf' to 'gff' (CrossMap handles both through its gff mode).
sub fetch_input {
    my ($self) = @_;
    my $type = $self->param_required('type');
    my @supported = qw/bam bed gff gtf vcf wig bigwig/;
    if (! grep { $type eq $_ } @supported) {
        $self->throw("Do not understand the type '${type}'");
    }
    $self->param('type', 'gff') if $type eq 'gtf';
    return 1;
}
# Main step: optionally remap UCSC chromosome names, then lift the file over
# to the target assembly and record the resulting path in 'output'.
sub run {
    my ($self) = @_;
    my $source_file = $self->param_required('input');
    my $remapped    = $self->convert_UCSC($source_file);
    $self->param('output', $self->convert($remapped));
    return 1;
}
# Remap UCSC-style chromosome names using the dedicated 'ucsc' chain file.
# Returns the input path unchanged unless the 'ucsc_names' flag is set;
# otherwise returns the path of the remapped "<input>.ucsc" file.
sub convert_UCSC {
    my ($self, $input) = @_;
    if (! $self->param('ucsc_names')) {
        return $input;
    }
    my $ucsc_output = $input . '.ucsc';
    $self->run_CrossMap($self->chain_file('ucsc'), $input, $ucsc_output);
    return $ucsc_output;
}
# Lift $input over to the target assembly. The output path is the 'output'
# parameter when defined, otherwise "<input>.<target_assembly>". Returns the
# output path.
sub convert {
    my ($self, $input) = @_;
    my $target_assembly = $self->param_required('target_assembly');
    my $output;
    if ($self->param_is_defined('output')) {
        $output = $self->param('output');
    }
    else {
        $output = "${input}.${target_assembly}";
    }
    $self->run_CrossMap($self->chain_file($target_assembly), $input, $output);
    return $output;
}
# Assumes paths like
# /path/to/chain_dir/species/sourceassembly/targetassembly.chain
# /nfs/chains/homo_sapiens/GRCh37/GRCh38.chain
# /nfs/chains/homo_sapiens/GRCh38/GRCh37.chain
# /nfs/chains/homo_sapiens/GRCh37/ucsc.chain
# Build the chain-file path following the layout
#   <chain_dir>/<species>/<source_assembly>/<target_assembly>.chain
# e.g. /nfs/chains/homo_sapiens/GRCh37/GRCh38.chain
sub chain_file {
    my ($self, $target_assembly) = @_;
    my $species         = $self->param_required('species');
    my $source_assembly = $self->param_required('source_assembly');
    my $chain_dir       = $self->param_required('chain_dir');
    my $directory = File::Spec->catdir($chain_dir, $species, $source_assembly);
    return File::Spec->catfile($directory, "${target_assembly}.chain");
}
# Run the CrossMap binary: "CrossMap <type> <chain> <input> <output>".
# Throws if the chain or input file is missing; returns run_cmd()'s
# ($return_code, $output) list.
# NOTE(review): the command is built by string interpolation, so paths
# containing spaces or shell metacharacters would break or be interpreted by
# the shell — presumably paths are controlled here, but worth confirming.
sub run_CrossMap {
  my ($self, $chain_file, $input_file, $output_file) = @_;
  $self->throw("Cannot do mapping. Chain file '${chain_file}' does not exist") if ! -f $chain_file;
  $self->throw("Cannot do mapping. Input file '${input_file}' does not exist") if ! -f $input_file;
  my $bin = $self->param_required('crossmap_binary');
  my $type = $self->param_required('type');
  # Command: CrossMap type chain input output
  my $cmd = "${bin} ${type} ${chain_file} ${input_file} ${output_file}";
  return $self->run_cmd($cmd);
}
# Run a shell command, capturing stdout and stderr together. Throws on a
# non-zero exit status; on success returns (0, captured output).
sub run_cmd {
    my ($self, $cmd) = @_;
    warn("About to run '${cmd}'");
    my $captured  = `$cmd 2>&1`;
    my $exit_code = $? >> 8;
    if ($exit_code) {
        $self->throw("Cannot run program '$cmd'. Return code was ${exit_code}. Program output was $captured");
    }
    return ($exit_code, $captured);
}
1; | andrewyatz/ens-sandbox | ensembl_chains/CrossMap.pm | Perl | apache-2.0 | 4,004 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::Runnable::Dust -
=head1 SYNOPSIS
my $dust = Bio::EnsEMBL::Analysis::Runnable::Dust->
new(
-query => $slice,
-program => 'tcdust',
);
$dust->run;
my @repeats = @{$dust->output};
=head1 DESCRIPTION
Dust is a wrapper for the tcdust program which runs the dust algorithm
to identify and mask simple repeats
=cut
package Bio::EnsEMBL::Analysis::Runnable::Dust;
use strict;
use warnings;
use Bio::EnsEMBL::Analysis::Runnable;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Analysis::Runnable);
=head2 new
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Dust
Arg [2] : int, masking threshold
Arg [3] : int, word size
Arg [4] : int, window size
Function : create a new Bio::EnsEMBL::Analysis::Runnable::Dust
Returntype: Bio::EnsEMBL::Analysis::Runnable::Dust
Exceptions:
Example :
=cut
# Construct a Dust runnable. In addition to the generic Runnable arguments it
# accepts -MASKING_THRESHOLD, -WORD_SIZE and -WINDOW_SIZE; the program
# defaults to 'tcdust' when none was given.
sub new {
    my ($class, @args) = @_;
    my $self = $class->SUPER::new(@args);

    my ($level, $word_size, $window_size) =
        rearrange([ 'MASKING_THRESHOLD', 'WORD_SIZE', 'WINDOW_SIZE' ], @args);

    # default program
    $self->program('tcdust') unless $self->program;

    $self->level($level)             if $level;
    $self->word_size($word_size)     if $word_size;
    $self->window_size($window_size) if $window_size;

    return $self;
}
#container methods
=head2 level
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Dust
Arg [2] : int for specified value (for more info see tcdust)
Function : container for specified variable. This pod refers to the
three methods below level, window size and word size. These are simple
containers which dont do more than hold and return an given value
nothing is defined
Returntype: int
Exceptions:
Example :
=cut
# Getter/setter for the dust masking threshold (tcdust -l option).
sub level {
    my ($self, @value) = @_;
    if (@value) {
        $self->{'level'} = $value[0];
    }
    return $self->{'level'};
}
=head2 window_size
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Dust
Arg [2] : int for specified value (for more info see tcdust)
Function : container for specified variable. This pod refers to the
three methods below level, window size and word size. These are simple
containers which dont do more than hold and return an given value
nothing is defined
Returntype: int
Exceptions:
Example :
=cut
# Getter/setter for the dust window size (tcdust -w option).
sub window_size {
    my ($self, @value) = @_;
    if (@value) {
        $self->{'window_size'} = $value[0];
    }
    return $self->{'window_size'};
}
=head2 word_size
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Dust
Arg [2] : int for specified value (for more info see tcdust)
Function : container for specified variable. This pod refers to the
three methods below level, window size and word size. These are simple
containers which dont do more than hold and return an given value
nothing is defined
Returntype: int
Exceptions:
Example :
=cut
# Getter/setter for the dust word size (tcdust -k option).
sub word_size {
    my ($self, @value) = @_;
    if (@value) {
        $self->{'word_size'} = $value[0];
    }
    return $self->{'word_size'};
}
#utility methods
=head2 run_analysis
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Dust
Arg [2] : string, program name
Function : constructs a commandline and runs the program passed
in, the generic method in Runnable isnt used as Dust doesnt
fit this module
Returntype: none
Exceptions: throws if run failed because system doesnt
return 0
Example :
=cut
# Build the tcdust command line and run it, masking the query file into the
# results file.
#
# Arg [1]   : optional program path; defaults to $self->program
# Throws    : if the program is not executable, or the command exits non-zero
sub run_analysis{
  my ($self, $program) = @_;
  if(!$program){
    $program = $self->program;
  }
  throw($program." is not executable Dust::run_analysis ")
    unless($program && -x $program);
  # Bug fix: use the validated $program. Previously the command was rebuilt
  # from $self->program, silently ignoring an explicitly passed-in program.
  my $command = $program;
  $command .= " -l ".$self->level if($self->level);
  $command .= " -k ".$self->word_size if($self->word_size);
  $command .= " -w ".$self->window_size if($self->window_size);
  $command .= " -x ".$self->queryfile." > ".$self->resultsfile;
  print "Running analysis ".$command."\n";
  system($command) == 0 or throw("FAILED to run ".$command);
}
=head2 parse_results
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Dust
Arg [2] : string, filename
Function : open and parse the results file into repeat
features
Returntype: none
Exceptions: throws on failure to open or close output file
Example :
=cut
# Parse the tcdust output file into RepeatFeature objects and store them via
# $self->output(). Each "start..end" range (0-based in tcdust output, hence
# the increments) becomes one 'dust' simple-repeat feature on the query slice.
#
# Arg [1]   : optional results file path; defaults to $self->resultsfile
# Throws    : on failure to open or close the results file
sub parse_results{
  my ($self, $results) = @_;
  if(!$results){
    $results = $self->resultsfile;
  }
  my $ff = $self->feature_factory;
  my @output;
  # Use a lexical filehandle and three-arg open: the old two-arg bareword
  # open(DUST, $results) would interpret leading <, > or | in the path as an
  # open mode.
  open(my $dust_fh, '<', $results) or throw("FAILED to open ".$results);
 LINE:while(<$dust_fh>){
    chomp;
    next LINE if(/^>/);    # skip FASTA-style header lines
    if (/(\d+)\.\.(\d+)/) {
      my ($start, $end) = ($1, $2);
      # convert from 0-based to 1-based coordinates
      $start++;
      $end++;
      my $rc = $ff->create_repeat_consensus('dust', 'dust', 'simple', 'N');
      my $rf = $ff->create_repeat_feature($start, $end, 0, 0, $start,
                                          $end, $rc, $self->query->name,
                                          $self->query);
      push(@output, $rf);
    }
  }
  $self->output(\@output);
  close($dust_fh) or throw("FAILED to close ".$results);
}
| james-monkeyshines/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Runnable/Dust.pm | Perl | apache-2.0 | 6,216 |
#!/usr/bin/env perl
#-------------------------------------------------------------------------------
# NAME: genome_features.pl
# PURPOSE: web script to output pseq aligned to a genome
# USAGE: genome_features.pl?genasm_id=<genasm_id>;[(chr=<chr>;gstart=<gstart>;gstop=<gstop>)||(pseq_id=<pseq_id>)]
#-------------------------------------------------------------------------------
use strict;
use warnings;
use FindBin;
use lib "$FindBin::RealBin/../perl5", "$FindBin::RealBin/../perl5-ext";
use Unison::WWW;
use Unison::WWW::Page;
use Unison;
use Unison::Exceptions;
use Unison::Utilities::genome_features;
# Main CGI flow: validate parameters, render the genome-features panel to a
# temporary PNG, build an HTML imagemap for it, and emit the page.
# NOTE(review): 'new Unison::WWW::Page' is indirect-object syntax; the
# conventional form would be Unison::WWW::Page->new().
my $p = new Unison::WWW::Page;
my $u = $p->{unison};
my $v = $p->Vars();

# verify parameters
# Required: genasm_id and params_id, plus either (chr, gstart, gstop) or a
# pseq_id (see usage()).
if (
    !(
        defined $v->{genasm_id}
        && (
            (
                defined $v->{chr} && defined $v->{gstart} && defined $v->{gstop}
            )
            || ( defined $v->{pseq_id} )
        )
        && ( defined $v->{params_id} )
    )
  )
{
    $p->die(&usage);
}

# merge defaults and options (CGI parameters override module defaults)
my %opts = ( %Unison::Utilities::genome_features::opts, %$v );

# get tempfiles for the genome-feature png and imagemap
my ( $png_fh, $png_fn, $png_urn ) = $p->tempfile( SUFFIX => '.png' );
my $imagemap = '';

try {
    my $panel =
      Unison::Utilities::genome_features::genome_features_panel( $u, %opts );

    # write the png to the temp file
    $png_fh->print( $panel->gd()->png() );
    $png_fh->close();

    # assemble the imagemap as a string; one clickable AREA per Unison
    # sequence track drawn on the panel.
    foreach my $box ( $panel->boxes() ) {
        my ( $feature, $x1, $y1, $x2, $y2 ) = @$box;
        my $fstart = $feature->start;    # should be unique
        my $fname  = $feature->name;     # should be unique
        next if not defined $fname;
        if ( my ($pseq_id) = $fname =~ m/^Unison:(\d+)/ ) {
            my $text = $u->best_annotation($pseq_id) || '?';
            # NOTE(review): TOOLTIP is not a standard <area> attribute;
            # browsers show tooltips from 'title' — confirm intended markup.
            $imagemap .=
              qq(<AREA SHAPE="RECT" COORDS="$x1,$y1,$x2,$y2" TOOLTIP="$text" HREF="pseq_summary.pl?pseq_id=$pseq_id">\n);
        }
    }
}
catch Unison::Exception with {
    $p->die(shift);
};

print $p->render(
    "Genome Map",
    "<center><img src=\"$png_urn\" usemap=\"#GENOME_MAP\"></center>",
    "<MAP NAME=\"GENOME_MAP\">\n",
    $imagemap, "</MAP>\n"
);
#-------------------------------------------------------------------------------
# NAME: usage
# PURPOSE: return usage string
#-------------------------------------------------------------------------------
# Return the usage string shown when required CGI parameters are missing.
# Fixed to match the parameter check (and the file header): the placeholder
# read "gensam_id" instead of "genasm_id", and the (chr;gstart;gstop) /
# (pseq_id) alternatives had unbalanced grouping.
sub usage {
    return (
        "USAGE: genome_features.pl ? genasm_id=<genasm_id> ; params_id=<params_id> ; "
          . "[(chr=<chr> ; gstart=<gstart> ; gstop=<gstop>) "
          . "|| (pseq_id=<pseq_id>)]" );
}
| unison/unison | www/genome_features.pl | Perl | apache-2.0 | 2,677 |
package OpenXPKI::Server::API2::Plugin::Profile::get_cert_profiles;
use OpenXPKI::Server::API2::EasyPlugin;
=head1 NAME
OpenXPKI::Server::API2::Plugin::Profile::get_cert_profiles
=head1 COMMANDS
=cut
# Project modules
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Exception;
=head2 get_cert_profiles
Return a I<HashRef> with all UI profiles. The key is the ID of the profile, the
value is a I<HashRef> with additional data (currently only a label).
B<Parameters>
=over
=item * C<showall> I<Bool> - show also non-UI profiles. Default: FALSE
=back
B<Changes compared to API v1:> Parameter C<NOHIDE> was renamed to C<showall>
=cut
# API command implementation: build a HashRef of certificate profiles keyed
# by profile id, each value holding { value, label, description }. Unless
# 'showall' is set, a profile is listed only when at least one of its styles
# has a 'ui' configuration entry.
command "get_cert_profiles" => {
    showall => { isa => 'Bool', default => 0, },
} => sub {
    my ($self, $params) = @_;

    my $config = CTX('config');

    my $profiles = {};
    my @profile_names = $config->get_keys('profile');
    for my $profile (@profile_names) {
        # skip the reserved pseudo-profiles
        next if ($profile =~ /^(template|default|sample)$/);
        # fall back to the id when no label is configured
        my $label = $config->get([ 'profile', $profile, 'label' ]) || $profile;
        my $desc = $config->get([ 'profile', $profile, 'description' ]) || '';
        my $do_list = 1;
        # only list profiles where at least one style has a config entry "ui"
        if (not $params->showall) {
            ##! 32: "Evaluate UI for $profile"
            $do_list = 0;
            my @style_names = $config->get_keys([ 'profile', $profile, 'style' ]);
            for my $style (@style_names) {
                if ($config->exists([ 'profile', $profile, 'style', $style, 'ui' ])) {
                    ##! 32: 'Found ui style ' . $style
                    $do_list = 1;
                    last;
                }
            }
            ##! 32: 'No UI styles found'
        }
        $profiles->{$profile} = { value => $profile, label => $label, description => $desc } if ($do_list);
    }
    ##! 16: 'Profiles ' .Dumper $profiles
    return $profiles;
};
__PACKAGE__->meta->make_immutable;
| oliwel/openxpki | core/server/OpenXPKI/Server/API2/Plugin/Profile/get_cert_profiles.pm | Perl | apache-2.0 | 1,985 |
package Tapper::Installer::Precondition::Package;
use strict;
use warnings;
use 5.010;
use Tapper::Installer::Precondition::Exec;
use File::Basename;
use Moose;
extends 'Tapper::Installer::Precondition';
=head1 NAME
Tapper::Installer::Precondition::Package - Install a package to a given location
=head1 SYNOPSIS
use Tapper::Installer::Precondition::Package;
=head1 FUNCTIONS
=head2 install
This function encapsulates installing one single package. At the moment, .tar,
.tar.gz, .tar.bz2, rpm and deb are recognised.
Recognised options for package preconditions are:
* filename - absolute or relative path of the package file (relativ to package_dir in config)
* target_directory - directory where to unpack package
*
@param hash reference - contains all information about the package
@return success - 0
@return error - error string
=cut
# Install a single package precondition below paths.base_dir.
#
# The package is located either through an nfs:// URL ({url}) or through a
# file name ({filename}) that is absolute or relative to paths.package_dir.
# Supported formats: tar, tar.gz, tar.bz2, deb and rpm.
#
# Fixes: the file name was read/written via the invalid code-deref
# "$package->(unknown)" instead of the documented {filename} key; the result
# of the recursive install in the NFS branch was silently discarded; the
# error message typo "Procol" is corrected; deprecated given/when replaced.
#
# @param hash reference - package description ({url} or {filename}, optional
#                         {target_directory} relative to paths.base_dir)
#
# @return success - 0
# @return error - error string
sub install
{
        my ($self, $package) = @_;

        if ($package->{url}) {
                my ($proto, $fullpath) = $package->{url} =~ m|^(\w+)://(.+)$|;
                if (not defined $proto) {
                        return ("Invalid package url '$package->{url}'");
                }
                if ($proto ne 'nfs') {
                        return ("Protocol '$proto' is not supported");
                }

                # Mount the NFS share, install the contained file as a local
                # package, then unmount again.
                my $nfs_dir='/mnt/nfs';
                $self->makedir($nfs_dir);
                my $path     = dirname $fullpath;
                my $filename = basename $fullpath;
                my ($error, $retval) = $self->log_and_exec("mount $path $nfs_dir");
                return ("Can't mount nfs share $path to $nfs_dir: $retval") if $error;

                delete $package->{url};
                $package->{filename} = "$nfs_dir/$filename";
                # Keep the recursive result so installation errors are no
                # longer silently dropped; unmount in any case.
                my $result = $self->install($package);
                ($error, $retval) = $self->log_and_exec("umount $nfs_dir");
                return $result || 0;
        }

        my $filename = $package->{filename};
        $self->log->debug("installing $filename");

        my $basedir = $self->cfg->{paths}{base_dir};
        # install into subdir when requested
        if ($package->{target_directory}) {
                $basedir .= $package->{target_directory};
                $self->makedir($basedir) if not -d $basedir;
        }

        my $package_dir = $self->cfg->{paths}{package_dir};
        my $pkg = $filename;
        $pkg = "$package_dir/$filename" unless $filename =~ m(^/);

        my ($error, $type) = $self->get_file_type("$pkg");
        return("Can't get file type of $filename: $type") if $error;
        $self->log->debug("type is $type");

        my $output;
        if ($type eq 'gzip') {
                ($error, $output) = $self->log_and_exec("tar --no-same-owner -C $basedir -xzf $pkg");
                return("can't unpack package $filename: $output\n") if $error;
        }
        elsif ($type eq 'tar') {
                ($error, $output) = $self->log_and_exec("tar --no-same-owner -C $basedir -xf $pkg");
                return("can't unpack package $filename: $output\n") if $error;
        }
        elsif ($type eq 'bz2') {
                ($error, $output) = $self->log_and_exec("tar --no-same-owner -C $basedir -xjf $pkg");
                return("can't unpack package $filename: $output\n") if $error;
        }
        elsif ($type eq 'deb') {
                system("cp $pkg $basedir/");
                $pkg = basename $pkg;
                my $exec = Tapper::Installer::Precondition::Exec->new($self->cfg);
                return $exec->install({command => "dpkg -i $pkg"});
        }
        elsif ($type eq 'rpm') {
                system("cp $pkg $basedir/");
                $pkg = basename $pkg;
                my $exec = Tapper::Installer::Precondition::Exec->new($self->cfg);
                # use -U to overwrite possibly existing older package
                return $exec->install({command => "rpm -U $pkg"});
        }
        else {
                $self->log->warn(qq($pkg is of unrecognised file type "$type"));
                return(qq($pkg is of unrecognised file type "$type"));
        }

        return(0);
}
1;
| tapper/Tapper-Installer | lib/Tapper/Installer/Precondition/Package.pm | Perl | bsd-2-clause | 4,455 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.