code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
:- use_module(library(clpfd)).
:- use_module(library(lists)).
?- ensure_loaded('hanjie_examples.pl').
?- ensure_loaded('hanjie_generation.pl').
%FOR EXAMPLES OF HANJIE PUZZLES, LOAD FILE "hanjie_examples.pl"
%predicate for solving hanjie puzzles
%initializes a grid (list of lists) of uninstantiated variables with the appropriate size for the specified column and row hints.
%a hint must be a list of lists where every sublist contains the size and sequence of gray blocks in that line
%e.g.: [[1,1],[1]] means the first line must have two blocks of gray squares with length one and the second line must only have one
%the grid is then flattened to restrict the domain of the variables to [0,1].
%additional constraints are added with the predicate hanjie_constraints/3
%afterwards, the labeling/2 predicate is used and the resulting grid is drawn on screen
%IMPORTANT: EVERY ROW/COLUMN MUST HAVE A HINT, EVEN IF IT IS 0, OTHERWISE THE SIZE OF THE GRID WOULD HAVE TO BE INDICATED MANUALLY
% hanjie(+ColHints, +RowHints)
% Solves a hanjie (nonogram) puzzle. ColHints/RowHints are lists of lists of
% block lengths, one sublist per column/row; the grid dimensions are taken
% from the lengths of the hint lists, so every line needs an entry (use a
% 0 hint for blank lines, as noted above).
hanjie(ColHints,RowHints):-
length(ColHints,GridWidth),
length(RowHints,GridHeight),
% grid of 0/1 finite-domain variables (0 = blank square, 1 = gray square)
make_grid(Solution,GridWidth,GridHeight),
flatten(Solution,Vars),
domain(Vars,0,1),
% commit to the grid built above before posting the line constraints
!,
hanjie_constraints(ColHints,RowHints,Solution),
% redundant constraints: row-hint and column-hint totals must agree and
% must equal the number of gray cells; this strengthens propagation
flatten(ColHints, TotalColHints),flatten(RowHints, TotalRowHints),
sum(TotalColHints, #=, TotalValue), sum(TotalRowHints, #=, TotalValue),
sum(Vars, #=, TotalValue),
% first-fail labeling, then render the solved grid on screen
labeling([ff],Vars),
draw_grid(Solution,ColHints,RowHints).
%this predicate allows the puzzle solution to be stored in a file given by the specified path
% hanjie_solve_to_file(+ColHints, +RowHints, +Filepath)
% Same pipeline as hanjie/2, but the solved grid is appended to the file at
% Filepath via draw_grid/4 instead of being printed on screen.
% NOTE(review): the stream is opened before the constraints are posted; if
% any goal between open/3 and close/1 fails, close/1 is never reached and
% the stream leaks -- confirm whether callers rely on failure here.
hanjie_solve_to_file(ColHints,RowHints, Filepath):-
open(Filepath,append,S1),
length(ColHints,GridWidth),
length(RowHints,GridHeight),
make_grid(Solution,GridWidth,GridHeight),
flatten(Solution,Vars),
domain(Vars,0,1),
% commit to the constructed grid before posting constraints
!,
hanjie_constraints(ColHints,RowHints,Solution),
% redundant total-sum constraints, as in hanjie/2
flatten(ColHints, TotalColHints),flatten(RowHints, TotalRowHints),
sum(TotalColHints, #=, TotalValue), sum(TotalRowHints, #=, TotalValue),
sum(Vars, #=, TotalValue),
labeling([ff],Vars),
draw_grid(S1,Solution,ColHints,RowHints),
close(S1).
%this predicate will generate a random hanjie puzzle and solve it by calling hanjie_generate and hanjie
% hanjie_generate_and_solve(+NumCols, +NumRows)
% Generates a random puzzle of the given dimensions (generate_hanjie/4,
% loaded from hanjie_generation.pl) and solves it with hanjie/2. The cut
% commits to the first generated puzzle: a failure in hanjie/2 is not
% retried with a different generated puzzle.
hanjie_generate_and_solve(NumCols,NumRows):-
generate_hanjie(NumCols,NumRows,ColHints,RowHints),
!,
hanjie(ColHints,RowHints).
%does the same as hanjie/2 but measures statistics for calculating the solution instead of drawing the grid
% hanjie_stats(+ColHints, +RowHints)
% Same solving pipeline as hanjie/2, but instead of drawing the grid it
% measures and reports CPU/wall time for (a) posting the constraints and
% (b) labeling, plus the clpfd solver statistics (fd_statistics/0).
hanjie_stats(ColHints,RowHints):-
length(ColHints,GridWidth),
length(RowHints,GridHeight),
make_grid(Solution,GridWidth,GridHeight),
flatten(Solution,Vars),
domain(Vars,0,1),
!,
% fd_statistics/0 also resets its counters, so dump the stale numbers first
print('dumping previous fd stats:'),nl,
fd_statistics,
statistics(walltime,[W0|_]),
statistics(runtime,[T0|_]),
hanjie_constraints(ColHints,RowHints,Solution),
flatten(ColHints, TotalColHints),flatten(RowHints, TotalRowHints),
sum(TotalColHints, #=, TotalValue), sum(TotalRowHints, #=, TotalValue),
sum(Vars, #=, TotalValue),
% timestamps around labeling only; constraint posting was timed above
statistics(walltime,[W1|_]),
statistics(runtime,[T1|_]),
labeling([ff],Vars),
statistics(walltime,[W2|_]),
statistics(runtime,[T2|_]),
T is T1-T0,
W is W1-W0,
Tl is T2-T1,
Wl is W2-W1,
% push any solver output off screen before printing the report
nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,nl,
% ~3d prints an integer in milliseconds as seconds with 3 decimals
format('creating constraints took CPU ~3d sec.~n', [T]),
format('creating constraints took a total of ~3d sec.~n', [W]),
format('labeling took CPU ~3d sec.~n', [Tl]),
format('labeling took a total of ~3d sec.~n', [Wl]),
nl,
fd_statistics.
%creates the constraints for each row of the grid using restrict_rows/2 and the row hints.
%uses the predicate transpose/2 (lists library) to obtain a list of the columns and creates the constraints for
%them as well, using restrict_rows/2 and the column hints
% hanjie_constraints(+ColHints, +RowHints, +Board)
% Posts the line constraints on the whole board: first on its rows with the
% row hints, then on the transposed board (i.e. its columns) with the
% column hints, via restrict_rows/2 in both cases.
hanjie_constraints(ColHints, RowHints, Board) :-
    restrict_rows(Board, RowHints),
    transpose(Board, Cols),
    restrict_rows(Cols, ColHints).
%restricts the domain of each element of the grid by ensuring the sum of all elements is equal to the sum of the hints
%(0 corresponds to blank squares and 1 corresponds to gray squares of the hanjie puzzle)
%to ensure the sequences of colored squares are maintained, this predicate creates an automaton for each row
% restrict_rows(+Rows, ?Hints)
% Posts, for every row, the sum constraint linking the row cells to its
% hints and an automaton/3 constraint enforcing the block sequence.
% A completely unbound hint list is also supported: it is given the maximum
% possible length (a line of length N fits at most floor((N+1)/2) blocks)
% and each hint the domain 0..N via checkHints/2.
restrict_rows([],[]).
restrict_rows([R|Rs],[H|Hs]):-
length(R,MaxSum),
(%This 'or' allows lists to be completely uninstantiated
var(H),
% maximum number of blocks in a line of MaxSum cells
HintLength is floor((MaxSum+1)/2),
length(H,HintLength),
checkHints(H,MaxSum)
;
nonvar(H),
checkHints(H,MaxSum)
),
% gray cells in the row = total of the row's hints, bounded by row length
RowSum #=< MaxSum,
sum(H,#=,RowSum),
sum(R,#=,RowSum),
% DFA/NFA over the row accepting exactly the hinted block sequence
create_transitions(H, Arcs, start, FinalState,1),
append(R, [0], RowWithExtraZero), %a zero is added to simplify the automaton (every gray block must be followed by at least one blank square, even the last one)
automaton(RowWithExtraZero, [source(start), sink(FinalState)], [arc(start,0,start) | Arcs]),
restrict_rows(Rs,Hs).
%checks if the hints are variables. If so, the domain is assigned to the variable
% checkHints(+Hints, +MaxSum)
% Walks the hint list: an unbound hint is given the finite domain
% 0..MaxSum, an already-bound hint must be an integer; anything else
% (e.g. a float or atom) makes the predicate fail.
checkHints([], _) :- !.
checkHints([Hint|Rest], MaxSum) :-
    (   integer(Hint)
    ->  true
    ;   var(Hint),
        domain([Hint], 0, MaxSum)
    ),
    checkHints(Rest, MaxSum).
%this predicate is used to generate the transitions (arcs) between each state of the automaton
%it goes through every value of the Hint list, decrementing them until they reach 0 and creating mandatory transitions
%to ensure continuous gray blocks. When the Hint reaches 0 it creates transitions to ensure at least one blank block after the gray one
%(to allow for 1 or more white squares after each gray block, the automaton will be an NFA)
%if the first square of a block has a hint with value 0 (FirstSquare = 1), the CurState is set as NextState (hint is ignored)
% create_transitions(+Hints, -Arcs, +CurState, -FinalState, +FirstSquare)
% Builds the arc list of the row automaton from a hint list. Each positive
% hint is decremented while emitting mandatory 'gray' (1) transitions, so
% blocks are contiguous; when a hint reaches 0, 'blank' (0) transitions are
% emitted that require at least one blank square after the block (the
% self-loop on 0 makes the automaton an NFA allowing more blanks).
% FirstSquare = 1 marks the start of a block: a hint of value 0 there is
% simply ignored (the state does not advance).
% FIX: the FirstSquare = 1 branch previously closed the Transitions list
% with [arc(CurState,0,CurState)], discarding the arcs built for the
% remaining hints; the tail Ts is now threaded through so those arcs are
% kept and hint lists such as [0,N] remain solvable.
create_transitions([], [], FinalState, FinalState, _).
create_transitions([Hint|Hs], Transitions, CurState, FinalState, FirstSquare) :-
    (   Hint #= 0, %finished current 'gray' blocks segment
        % 'gray' blocks must be followed by AT LEAST ONE 'blank' square
        % (an extra 'blank' was appended to the row for blocks ending at
        % the grid's border)
        (   FirstSquare =:= 0,
            Transitions = [arc(CurState,0,CurState), arc(CurState,0,NextState) | Ts],
            create_transitions(Hs, Ts, NextState, FinalState, 1)
        ;   FirstSquare =:= 1,
            % leading 0 hint: stay in CurState, but keep the arcs for the
            % remaining hints (Ts) in the transition list
            Transitions = [arc(CurState,0,CurState) | Ts],
            create_transitions(Hs, Ts, CurState, FinalState, 1)
        )
    ;   % not finished with the gray block yet: one more 'gray' square
        Hint #> 0,
        Transitions = [arc(CurState,1,NextState) | Ts],
        NewHint #= Hint - 1,
        create_transitions([NewHint|Hs], Ts, NextState, FinalState, 0)
    ).
%------------------------------------------------------UTILITIES-------------------------------------------------------
%flattens a list of lists into a single list
% flatten(+ListOfLists, -Flat)
% Flattens exactly one level of nesting: every element of the first
% argument must be a (possibly empty) proper list. Unlike deep flatten
% implementations this does not recurse into deeper nesting, and it is
% safe on rows whose elements are unbound FD variables.
flatten([],[]):-!.
flatten([[]|Gs],Fg):-
!,
flatten(Gs,Fg).
flatten([[G1|G1s]|Gs],[G1|Fg]):-
flatten([G1s|Gs],Fg).
%this predicate is used to create a grid (list of lists) of uninstantiated variables with the specified dimensions
% make_grid(-Grid, +Width, +Height)
% Builds a grid of Height rows, each row being a fresh list of Width
% unbound variables.
make_grid(Grid, Width, Height) :-
    length(Grid, Height),
    make_rows(Grid, Width).
% make_rows(+Rows, +Width)
% Constrains every row of the grid to have exactly Width cells.
make_rows([], _) :- !.
make_rows([Row|Rest], Width) :-
    length(Row, Width),
    make_rows(Rest, Width).
%this predicate is used to determine the size of the largest sublist in a list of lists
% largest_sub_list(+ListOfLists, -N)
% N is the length of the longest sublist (0 for the empty list).
largest_sub_list([], 0) :- !.
largest_sub_list([Sub|Subs], N) :-
    largest_sub_list(Subs, LongestRest),
    length(Sub, Len),
    N is max(Len, LongestRest),
    !.
%--------------------------------------------------END UTILITIES-------------------------------------------------------
%-----------------------------------------------DISPLAY PREDICATES-----------------------------------------------------
%predicates for drawing the hanjie's puzzle grid
% sign(?CellValue, ?Glyph)
% Maps a solved cell value to its display character:
% 0 (blank square) -> ' ', 1 (gray square) -> 'X'.
sign(0,' ').
sign(1,'X').
% draw_grid(+Grid, +ColHints, +RowHints)
% Renders the solved grid on standard output: column hints on top, row
% hints on the left margin, cells as ' '/'X' separated by '|' borders and
% '-'/'+' ruler lines. HSpacing is the width in characters reserved for
% the row-hint margin: two characters per hint of the longest row-hint
% list.
draw_grid([B|Bs],ColHints,RowHints):-
largest_sub_list(RowHints,HChars),
HSpacing is HChars*2,
nl,nl,
HSpacingForCols is HSpacing+1,
draw_column_hints(ColHints,HSpacingForCols),
length(B,N),
% top border, indented past the row-hint margin
putChars(' ',HSpacing),
print('|'),print_separator(N,'-'),
pg([B|Bs],RowHints,HSpacing),
% bottom border
putChars(' ',HSpacing),
print('|'),print_separator(N,'-'),nl.
% pg(+Rows, +RowHints, +HintMargin)
% Prints the grid body row by row: each row is preceded by its hints in a
% margin of HintMargin characters, and followed by a '+' separator line --
% except the last row, handled by the first clause without a separator.
pg([B],[RH],HLength) :-
draw_row_hints(RH,HLength),
print_line(B), !.
pg([B|Bs],[RH|RHs],HLength) :-
draw_row_hints(RH,HLength),
print_line(B),
length(B,N),
putChars(' ',HLength),
print('|'),
print_separator(N,'+'),
pg(Bs,RHs,HLength).
% print_line(+Row)
% Writes one board row to standard output: a '|' before every cell, the
% cell's glyph (' ' or 'X', via sign/2), and a closing '|' plus newline.
print_line([]) :-
    print('|'), nl.
print_line([Cell|Cells]) :-
    print('|'),
    sign(Cell, Glyph),
    print(Glyph),
    print_line(Cells).
% print_separator(+Count, +MidChar)
% Prints a horizontal ruler for Count cells: '-' for each cell with
% MidChar between cells, terminated by '-|' and a newline.
print_separator(1, _) :-
    print('-|'), nl.
print_separator(Count, MidChar) :-
    Count > 1,
    print('-'),
    print(MidChar),
    Remaining is Count - 1,
    print_separator(Remaining, MidChar).
% putChars(+Char, +N)
% Prints Char N times on standard output; fails for negative N.
putChars(_, N) :-
    N =:= 0, !.
putChars(Char, N) :-
    N > 0,
    print(Char),
    Remaining is N - 1,
    putChars(Char, Remaining).
% draw_column_hints(+ColHints, +HSpacing)
% Prints the column hints above the grid, bottom-aligned. VSpacing is the
% number of hint lines needed: the length of the longest column-hint list.
draw_column_hints(ColHints,HSpacing):-
largest_sub_list(ColHints,VSpacing),
dch(ColHints,HSpacing,VSpacing).
% dch(+ColHints, +HSpacing, +VSpacing)
% Emits one line of column hints per remaining vertical position, each
% indented by HSpacing characters to clear the row-hint margin.
dch(_,_,0):-!.
dch(ColHints,HSpacing,VSpacing):-
VSpacing > 0,
putChars(' ',HSpacing),
draw_elements_at_vpos(ColHints, VSpacing),nl,
NewVSpacing is VSpacing-1,
dch(ColHints,HSpacing,NewVSpacing).
% draw_elements_at_vpos(+ColHints, +VSpacing)
% Prints, for every column, the hint belonging on hint line VSpacing
% (counted upward from the grid), so shorter hint lists are padded with
% blanks at the top. Hints of 10 or more are shown as '#' to keep each
% column exactly two characters wide.
draw_elements_at_vpos([],_):-!.
draw_elements_at_vpos([CH|CHs],VSpacing):-
length(CH,NumHints),
(NumHints < VSpacing,!,
% this column has no hint at this height: pad with a blank
print(' ')
;
ElementPos is NumHints-VSpacing,
nth0(ElementPos,CH,Value),
(Value < 10,!,
print(Value);
print('#')),
print(' ')),
draw_elements_at_vpos(CHs,VSpacing).
% draw_row_hints(+Hints, +HSize)
% Prints one row's hints right-aligned inside a margin of HSize characters
% (two characters per hint slot): leading slots are skipped until the
% remaining width matches the number of hints, then each hint is printed
% followed by a space; hints >= 10 are abbreviated as '#'.
% NOTE(review): each skipped slot is two characters wide but only a
% single-space atom appears to be printed for it here -- confirm the atom
% is two spaces in the original source (whitespace may have been collapsed
% in this copy).
draw_row_hints([],_):-!.
draw_row_hints([R|Rs],HSize):-
HSize>0,
length([R|Rs],L),
(HSize > L*2,!,
NewHSize is HSize-2,
print(' '),
draw_row_hints([R|Rs],NewHSize)
;
NewHSize is HSize-2,
(R<10,!,
print(R),print(' ');
print('# ')),
draw_row_hints(Rs,NewHSize)).
%-------------------------------------------END DISPLAY PREDICATES-----------------------------------------------------
%-----------------------------------------------FILE I/O PREDICATES-----------------------------------------------------
%predicates for drawing the hanjie's puzzle grid in a file
% draw_grid(+Stream, +Grid, +ColHints, +RowHints)
% Stream variant of draw_grid/3: renders the solved grid to an open output
% stream instead of standard output; the layout logic mirrors draw_grid/3.
% NOTE(review): newlines are written as the quoted atom '\n'; this relies
% on the Prolog system interpreting backslash escapes in quoted atoms.
draw_grid(Stream,[B|Bs],ColHints,RowHints):-
largest_sub_list(RowHints,HChars),
HSpacing is HChars*2,
write(Stream, '\n\n'),
HSpacingForCols is HSpacing+1,
draw_column_hints(Stream, ColHints,HSpacingForCols),
length(B,N),
% top border, indented past the row-hint margin
putChars(Stream,' ',HSpacing),
write(Stream,'|'),print_separator(Stream,N,'-'),
pg(Stream,[B|Bs],RowHints,HSpacing),
% bottom border
putChars(Stream,' ',HSpacing),
write(Stream,'|'),print_separator(Stream,N,'-'),write(Stream,'\n').
% pg(+Stream, +Rows, +RowHints, +HintMargin)
% Stream variant of pg/3: prints the grid body row by row, each row
% preceded by its hints and followed (except the last) by a '+' separator.
pg(Stream,[B],[RH],HLength) :-
draw_row_hints(Stream,RH,HLength),
print_line(Stream,B), !.
pg(Stream,[B|Bs],[RH|RHs],HLength) :-
draw_row_hints(Stream,RH,HLength),
print_line(Stream,B),
length(B,N),
putChars(Stream,' ',HLength),
write(Stream,'|'),
print_separator(Stream,N,'+'),
pg(Stream,Bs,RHs,HLength).
% print_line(+Stream, +Row)
% Stream variant of print_line/1: '|' before every cell, then the cell's
% glyph (sign/2), closed with '|' and a newline.
print_line(Stream,[]) :-
write(Stream,'|\n').
print_line(Stream,[L|Ls]):-
sign(L,Symbol),
write(Stream,'|'),
write(Stream,Symbol),
print_line(Stream,Ls).
% print_separator(+Stream, +Count, +MidChar)
% Stream variant of print_separator/2: horizontal ruler for Count cells.
print_separator(Stream,1,_):-write(Stream,'-|'), write(Stream,'\n').
print_separator(Stream,N,MidChar):-
N > 1,
N1 is N-1,
write(Stream,'-'),write(Stream,MidChar),
print_separator(Stream,N1,MidChar).
% putChars(+Stream, +Char, +N)
% Stream variant of putChars/2: writes Char N times to Stream.
putChars(_,_,0):-!.
putChars(Stream,Char,N):-
N > 0,
N1 is N-1,
write(Stream,Char),
putChars(Stream,Char,N1).
% draw_column_hints(+Stream, +ColHints, +HSpacing)
% Stream variant of draw_column_hints/2: prints the column hints above the
% grid, bottom-aligned over as many lines as the longest hint list needs.
draw_column_hints(Stream,ColHints,HSpacing):-
largest_sub_list(ColHints,VSpacing),
dch(Stream,ColHints,HSpacing,VSpacing).
% dch(+Stream, +ColHints, +HSpacing, +VSpacing)
% One line of column hints per remaining vertical position, indented by
% HSpacing characters to clear the row-hint margin.
dch(_,_,_,0):-!.
dch(Stream,ColHints,HSpacing,VSpacing):-
VSpacing > 0,
putChars(Stream,' ',HSpacing),
draw_elements_at_vpos(Stream,ColHints, VSpacing),write(Stream,'\n'),
NewVSpacing is VSpacing-1,
dch(Stream,ColHints,HSpacing,NewVSpacing).
% draw_elements_at_vpos(+Stream, +ColHints, +VSpacing)
% Stream variant of draw_elements_at_vpos/2: for each column, the hint at
% this height or a blank; hints >= 10 are abbreviated as '#'.
draw_elements_at_vpos(_,[],_):-!.
draw_elements_at_vpos(Stream,[CH|CHs],VSpacing):-
length(CH,NumHints),
(NumHints < VSpacing,!,
write(Stream,' ')
;
ElementPos is NumHints-VSpacing,
nth0(ElementPos,CH,Value),
(Value < 10,!,
write(Stream,Value);
write(Stream,'#')),
write(Stream,' ')),
draw_elements_at_vpos(Stream,CHs,VSpacing).
% draw_row_hints(+Stream, +Hints, +HSize)
% Stream variant of draw_row_hints/2: prints one row's hints right-aligned
% in a margin of HSize characters (two per hint slot); hints >= 10 are
% abbreviated as '#'.
draw_row_hints(_,[],_):-!.
draw_row_hints(Stream,[R|Rs],HSize):-
HSize>0,
length([R|Rs],L),
(HSize > L*2,!,
NewHSize is HSize-2,
write(Stream,' '),
draw_row_hints(Stream,[R|Rs],NewHSize)
;
NewHSize is HSize-2,
(R<10,!,
write(Stream,R),write(Stream,' ');
write(Stream,'# ')),
draw_row_hints(Stream,Rs,NewHSize)).
%-------------------------------------------END DISPLAY PREDICATES----------------------------------------------------- | luiscleto/feup-plog-hanjie-solver | src/hanjie.pl | Perl | mit | 12,175 |
package Noosphere;
use strict;
# show license information for the site
#
# Render the site's license page: expand the license.html template and
# wrap it in the standard "Legalese" box with page padding.
sub getLicense {
	my $body = Template->new("license.html")->expand();
	return paddingTable( clearBox( "Legalese", $body ) );
}
# get the "about" (history, background) page.
#
# Render the "about" (history/background) page from the about.html
# template, titled "The <project name> Story".
sub getAbout {
	my $title = 'The ' . getConfig('projname') . ' Story';
	my $body  = Template->new('about.html')->expand();
	return paddingTable( clearBox( $title, $body ) );
}
# get the feedback info page
#
# Render the feedback information page from the feedback.html template.
sub getFeedback {
	my $body = Template->new('feedback.html')->expand();
	return paddingTable( clearBox( 'Feedback', $body ) );
}
1;
| holtzermann17/Noosphere | lib/Noosphere/Docs.pm | Perl | mit | 514 |
/* Part of INCLP(R)
Author: Leslie De Koninck
E-mail: Leslie.DeKoninck@cs.kuleuven.be
WWW: http://www.swi-prolog.org
Copyright (c) 2006-2011, K.U. Leuven
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(inclpr_inversion,
[
all_occurrences/1,
invert/3
]).
:- use_module(library(lists),
[
reverse/2
]).
% Module for creating the inverse of functions with one occurrence per variable
% and for counting the number of variable occurrences.
% invert(Function,Variable,Inverse)
%
% Creates the inverse of function <Function> with respect to variable
% <Variable> and stores it into <Inverse>. This means that a constraint
% <Function> = 0 is transformed into an equivalent constraint
% <Variable> = <Inverse>.
% invert(+Term, ?Var, -Inverse)
% Requires that Var occurs exactly once in Term: the attribute
% inclpr_occurrence_count must be one(ReversePath), as set by
% all_occurrences/1; the predicate fails for variables occurring more
% than once (attribute 'more').
invert(Term,Var,Inverse) :-
get_attr(Var,inclpr_occurrence_count,one(ReversePath)),
reverse(ReversePath,Path),
% start from the degenerate interval i(-0.0,0.0) representing 0, since
% the constraint being inverted is <Term> = 0
invert(Term,Path,i(-0.0,0.0),Inverse).
% invert(Function,Path,InverseIn,InverseOut)
%
% Converts the constraint <Function> = <InverseIn> into the equivalent
% constraint <Variable> = <InverseOut> where <Variable> is located by following
% the path given by <Path>.
% A path is a list of 1's and 2's and represents where in the tree formed by
% binary operators the variable is located. For example if <Path> is the list
% [1,2] and <Function> is (A+B)*(C+D) then first the subfunction A+B is chosen
% because the first element of path is 1 and then B is chosen because the
% second element of path is 2. Paths are built while counting the number of
% occurrences for each variable.
% invert(+Term, +Path, +InverseIn, -InverseOut)
% Follows Path (a list of argument positions 1/2) down the operator tree
% of Term, algebraically moving the sibling subterm to the inverse side at
% each step, until the variable itself is reached.
invert(Term,Path,InverseIn,InverseOut) :-
( var(Term)
% reached the variable: the accumulated inverse is the result
-> InverseOut = InverseIn
; functor(Term,Op,Arity),
( Arity =:= 2
-> Path = [H|T],
arg(1,Term,L),
arg(2,Term,R),
( H = 1
% variable is in the left argument: invert Op from the left
-> invert_binary_left(Op,InverseIn,R,InverseTemp),
invert(L,T,InverseTemp,InverseOut)
; invert_binary_right(Op,L,InverseIn,InverseTemp),
invert(R,T,InverseTemp,InverseOut)
)
% unary operator: negate the accumulated inverse and descend
% (assumes the only unary operator is - ; TODO confirm)
; arg(1,Term,X),
invert(X,Path,-InverseIn,InverseOut)
)
).
% invert_binary_left(Operator,InverseIn,Right,InverseOut)
%
% Converts the constraint <Function> = <InverseIn> where <Function> is composed
% of binary operator <Operator> applied to arguments <Left> and <Right> into an
% equivalent constraint <Left> = <InverseOut>.
% Table of left-argument inverses: from X op R = L derive X = <Inverse>.
% For exponentiation, X ** R = L gives X = root(L,R), the R-th root of L.
invert_binary_left(+,L,R,L-R).
invert_binary_left(-,L,R,L+R).
invert_binary_left(*,L,R,L/R).
invert_binary_left(/,L,R,L*R).
invert_binary_left(**,L,R,root(L,R)).
% invert_binary_right(Operator,Left,InverseIn,InverseOut)
%
% Converts the constraint <Function> = <InverseIn> where <Function> is composed
% of binary operator <Operator> applied to arguments <Left> and <Right> into an
% equivalent constraint <Right> = <InverseOut>.
% Table of right-argument inverses: from L op X = R derive X = <Inverse>.
invert_binary_right(+,L,R,R-L).
invert_binary_right(-,L,R,L-R).
invert_binary_right(*,L,R,R/L).
invert_binary_right(/,L,R,L/R).
% NOTE(review): there is no clause for ** here -- inverting through the
% exponent would require a logarithm -- so invert/4 fails when the single
% variable occurrence sits in the exponent of ** . Confirm this is the
% intended behavior.
% all_occurrences(Function)
%
% Counts all occurrences of all variables in <Function> and stores them in
% the attribute inclpr_occurrence_count of each variable. The attribute either
% is the term `one(ReversePath)' with <ReversePath> the reverse of a path (see
% invert/4) or the term `more' for variables occurring more than once.
% Expects that all functors are either unary or binary
all_occurrences(Term) :- all_occurrences(Term,[]).
% all_occurrences(Function,ReversePath)
%
% The same as all_occurrences/1 but with a given initial reverse path in
% <ReversePath>.
% all_occurrences(+Term, +ReversePath)
% Same as all_occurrences/1 but with a given initial reverse path:
% a first occurrence of a variable records one(ReversePath); any further
% occurrence overwrites the attribute with 'more'.
all_occurrences(Term,ReversePath) :-
( var(Term)
-> ( get_attr(Term,inclpr_occurrence_count,_)
% second or later occurrence: the variable cannot be inverted
-> put_attr(Term,inclpr_occurrence_count,more)
; put_attr(Term,inclpr_occurrence_count,one(ReversePath))
)
% numbers contain no variables: nothing to record
; number(Term)
-> true
; functor(Term,_,Arity),
( Arity =:= 2
% binary operator: extend the path with the argument position
-> arg(1,Term,Arg1),
arg(2,Term,Arg2),
all_occurrences(Arg1,[1|ReversePath]),
all_occurrences(Arg2,[2|ReversePath])
% unary operator: single argument, path is not extended
; arg(1,Term,Arg),
all_occurrences(Arg,ReversePath)
)
).
package Transposome::Annotation;
use 5.010;
use Moose;
use List::Util qw(sum max);
use File::Path qw(make_path);
use Storable qw(thaw);
use POSIX qw(strftime);
#use Log::Any qw($log);
use Path::Class::File;
use File::Basename;
use File::Spec;
use Try::Tiny;
use Transposome::Log;
use namespace::autoclean;
#use Data::Dump::Color;
with 'Transposome::Annotation::Methods',
'Transposome::Role::File',
'Transposome::Role::Util';
=head1 NAME
Transposome::Annotation - Annotate clusters for repeat types.
=head1 VERSION
Version 0.12.1
=cut
our $VERSION = '0.12.1';
$VERSION = eval $VERSION;
=head1 SYNOPSIS
use Transposome::Annotation;
my $cluster_file = '/path/to/cluster_file.cls';
my $seqct = 'total_seqs_in_analysis'; # Integer
my $cls_tot = 'total_reads_clustered'; # Integer
my $annotation = Transposome::Annotation->new( database => 'repeat_db.fas',
dir => 'outdir',
file => 'report.txt' );
my $annotation_results =
$annotation->annotate_clusters({ cluster_directory => $cls_dir_path,
singletons_file => $singletons_file_path,
total_sequence_num => $seqct,
total_cluster_num => $cls_tot });
$annotation->clusters_annotation_to_summary( $annotation_results );
=cut
# Path to the repeat database (FASTA) used for annotation; coerced to a
# Path::Class::File object and turned into a BLAST database at run time.
has 'database' => (
is => 'ro',
isa => 'Path::Class::File',
required => 1,
coerce => 1,
);
# BLAST e-value cutoff used for both cluster and singleton searches.
has 'evalue' => (
is => 'ro',
isa => 'Num',
default => 10,
);
# Optional report file path; coerced to a Path::Class::File object.
has 'report' => (
is => 'ro',
isa => 'Path::Class::File',
required => 0,
coerce => 1,
);
# Absolute path to the blastn executable, discovered from PATH in BUILD.
has 'blastn_exec' => (
is => 'rw',
isa => 'Str',
reader => 'get_blastn_exec',
writer => 'set_blastn_exec',
predicate => 'has_blastn_exec',
);
# Absolute path to the makeblastdb executable, discovered from PATH in BUILD.
has 'makeblastdb_exec' => (
is => 'rw',
isa => 'Str',
reader => 'get_makeblastdb_exec',
writer => 'set_makeblastdb_exec',
predicate => 'has_makeblastdb_exec',
);
# Debugging flag (off by default).
has 'debug' => (
is => 'ro',
isa => 'Bool',
predicate => 'has_debug',
lazy => 1,
default => 0,
);
# Whether log messages are echoed to the screen as well as the log file.
has 'log_to_screen' => (
is => 'ro',
isa => 'Bool',
predicate => 'has_log_to_screen',
lazy => 1,
default => 1,
);
# Moose BUILD hook: locate the 'blastn' and 'makeblastdb' executables on
# the PATH and store their absolute paths in the corresponding attributes.
# Both programs must live in the same directory for a hit (BLAST+ installs
# them together). Exits the process with status 1 if either is missing.
sub BUILD {
    my $self = shift;

    # PATH entries are split on ':' (Unix) or ';' (Windows).
    my @path = split /:|;/, $ENV{PATH};
    for my $p (@path) {
        my $bl = File::Spec->catfile($p, 'blastn');
        my $mb = File::Spec->catfile($p, 'makeblastdb');
        if (-e $bl && -x $bl && -e $mb && -x $mb) {
            $self->set_blastn_exec($bl);
            $self->set_makeblastdb_exec($mb);
            # Fix: stop at the first matching directory so PATH precedence
            # is honored (previously the LAST match silently won).
            last;
        }
    }

    try {
        die unless $self->has_makeblastdb_exec;
    }
    catch {
        #$log->error("Unable to find makeblastdb. Check your PATH to see that it is installed. Exiting.");
        say STDERR "\n[ERROR]: Unable to find makeblastdb. Check your PATH to see that it is installed. Exiting.\n";
        exit(1);
    };

    try {
        die unless $self->has_blastn_exec;
    }
    catch {
        #$log->error("Unable to find blastn. Check your PATH to see that it is installed. Exiting.");
        say STDERR "\n[ERROR]: Unable to find blastn. Check your PATH to see that it is installed. Exiting.\n";
        exit(1);
    };

    return;
}
=head1 METHODS
=head2 annotate_clusters
Title : annotation_clusters
Usage : $annotation->annotate_clusters();
Function: Runs the annotation pipeline within Transposome.
Returns : A Perl hash containing the cluster annotation results.
The following is an example data structure returned by
the annotate_clusters method:
{ annotation_report => $anno_rp_path,
annotation_summary => $anno_sum_rep_path,
singletons_report => $singles_rp_path,
total_sequence_num => $total_readct,
repeat_fraction => $rep_frac,
cluster_blast_reports => $blasts,
cluster_superfamilies => $superfams }
A description of the hash values returned:
Return_type
annotation_report - path to the cluster annotation file Scalar
annotation_summary - path to the cluster annotation summary Scalar
file
singletons_file - path to the singletons annotation file Scalar
total_sequence_num - the total number of reads clusters Scalar
repeat_fraction - the repeat fraction of the genome Scalar
cluster_blast_reports - the individual cluster blast reports ArrayRef
cluster_suparfamilies - the top superfamily hit for each ArraryRef
cluster
Args : A Perl hash containing data for annotation.
The following is an example data structure taken by
the annotate_clusters method:
{ cluster_directory => $cls_dir_path,
singletons_file => $singletons_file_path,
total_sequence_num => $seqct,
total_cluster_num => $cls_tot }
A description of the hash values taken:
Arg_type
cluster_directory - the directory of cluster FASTA files Scalar
singletons_file - the FASTA file of singleton sequences Scalar
total_sequence_num - the number of sequences that went into Scalar
the clustering (returned from store_seq()
from Transposome::SeqStore),
total_cluster_num - the total number of clusters (also Scalar
returned from make_clusters() from
Transposome::Cluster).
=cut
# Main annotation driver (see POD above for the full contract): BLASTs each
# cluster FASTA file and the singletons against the repeat database, maps
# top hits to repeat types, writes the per-cluster annotation report, and
# returns a hashref of paths, counts and fractions.
sub annotate_clusters {
my $self = shift;
my ($cluster_data) = @_;
#my $config = $self->config;
my $cls_with_merges_dir = $cluster_data->{cluster_directory};
my $singletons_file_path = $cluster_data->{singletons_file};
my $seqct = $cluster_data->{total_sequence_num};
my $clsct = $cluster_data->{total_cluster_num};
# both counts are required: they drive the repeat-fraction arithmetic below
unless (defined $seqct) {
die "\n[ERROR]: 'total_sequence_num' value is not set. Exiting.";
}
unless (defined $clsct) {
die "\n[ERROR]: 'total_cluster_num' value is not set. Exiting.";
}
# set paths for annotate_clusters() method
my $database = $self->database->absolute;
my $db_path = $self->make_blastdb($database);
my $out_dir = $self->dir->relative;
my $blastn = $self->get_blastn_exec;
# cluster report path
my $report = $self->file->relative;
my ($rpname, $rppath, $rpsuffix) = fileparse($report, qr/\.[^.]*/);
my $rp_path = Path::Class::File->new($out_dir, $rpname.$rpsuffix);
# set paths for annotation files
my $anno_rep = $rpname."_annotations.tsv";
my $anno_summary_rep = $rpname."_annotations_summary.tsv";
my $anno_rp_path = Path::Class::File->new($out_dir, $anno_rep);
my $anno_sum_rep_path = Path::Class::File->new($out_dir, $anno_summary_rep);
# results and variables controlling method behavior
my $thread_range = sprintf("%.0f", $self->threads * $self->cpus);
my $total_readct = 0;
my $evalue = $self->evalue;
my $rep_frac = $clsct / $seqct;
my $single_tot = $seqct - $clsct;
my $single_frac = 1 - $rep_frac;
# log results
my $log_obj = Transposome::Log->new( config => $self->config, log_to_screen => $self->log_to_screen );
my $log = $log_obj->get_transposome_logger;
my $st = POSIX::strftime('%d-%m-%Y %H:%M:%S', localtime);
$log->info("Transposome::Annotation::annotate_clusters started at: $st.");
# repeat map comes back frozen (Storable); thaw into a plain hash
my ($repeatmap, $type_map) = $self->map_repeat_types($database);
my %repeats = %{ thaw($repeatmap) };
#dd $type_map;
## get input files
opendir my $dir, $cls_with_merges_dir
or die "\n[ERROR]: Could not open directory: $cls_with_merges_dir. Exiting.\n";
my @clus_fas_files = grep /^CL.*fa.*$|^G.*fa.*$/, readdir $dir;
closedir $dir;
if (@clus_fas_files < 1) {
$log->error("Could not find any fasta files in $cls_with_merges_dir. ".
"This can result from using too few sequences. ".
"Please report this error if the problem persists. Exiting.");
exit(1);
}
## set path to output dir
my $annodir = $cls_with_merges_dir."_annotations";
my $out_path = File::Spec->rel2abs($annodir);
make_path($annodir, {verbose => 0, mode => 0711,});
# data structures for holding mapping results
my %all_cluster_annotations; # container for annotations; used for creating summary
my %top_hit_superfam; # container for top_hit -> family -> superfamily mapping; used for creating summary
## annotate singletons, then add total to results
my $singleton_annotations
= $self->_annotate_singletons({
repeat_map => \%repeats,
type_map => $type_map,
singletons_file => $singletons_file_path,
annotation_report => $rpname,
total_singleton_num => $single_tot,
blast_evalue => $evalue,
threads => $thread_range,
blastdb => $db_path,
output_directory => $out_dir,
blast_exe => $blastn });
# NOTE(review): $singleton_hits is read from the 'singletons_file' key,
# which elsewhere holds a file path rather than a hit count -- confirm
# against _annotate_singletons' return value.
my $singleton_hits = $singleton_annotations->{singletons_file};
my $singleton_rep_frac = $singleton_annotations->{singletons_rep_frac};
my $singles_rp_path = $singleton_annotations->{singletons_report};
my $blasts = $singleton_annotations->{blast_reports};
my $superfams = $singleton_annotations->{superfamily_hits};
my $cluster_annotations = $singleton_annotations->{cluster_annotations};
my $top_hit_superfam = $singleton_annotations->{top_hit_superfamily};
my $cluster_annot = $singleton_annotations->{top_hit_cluster_annot};
# scale the singleton repeat fraction by the unclustered share of reads
my $true_singleton_rep_frac = $single_frac * $singleton_rep_frac;
my $total_rep_frac = $true_singleton_rep_frac + $rep_frac;
# BLAST each cluster file and map its top hit to a repeat annotation
for my $file (@clus_fas_files) {
next if $file =~ /singletons/;
my $query = File::Spec->catfile($cls_with_merges_dir, $file);
my ($fname, $fpath, $fsuffix) = fileparse($query, qr/\.[^.]*/);
my $blast_res = $fname;
# cluster file names encode "<cluster>_<readcount>"
my ($filebase, $readct) = split /\_/, $fname, 2;
$total_readct += $readct;
$blast_res =~ s/\.[^.]+$//;
$blast_res .= "_blast_$evalue.tsv";
my $blast_file_path = Path::Class::File->new($out_path, $blast_res);
my $blast_out = $self->search_clusters({ blast_exe => $blastn,
query_file => $query,
evalue => $evalue,
blastdb => $db_path,
thread_range => $thread_range });
my ($hit_ct, $top_hit, $top_hit_frac, $blhits)
= $self->_parse_blast_to_top_hit($blast_out, $blast_file_path);
# clusters with no BLAST hit are skipped entirely
next unless defined $top_hit && defined $hit_ct;
push @$blasts, $blhits unless !%$blhits;
($top_hit_superfam, $cluster_annot)
= $self->_blast_to_annotation({ filebase => $filebase,
top_hit => $$top_hit,
top_hit_frac => $$top_hit_frac,
readct => $readct,
repeat_type => $type_map->{$$top_hit},
repeat_map => \%repeats });
push @$superfams, $top_hit_superfam
unless not defined $top_hit_superfam or !%$top_hit_superfam;
push @$cluster_annotations, $cluster_annot
unless not defined $cluster_annot or !%$cluster_annot;
}
# merge the per-cluster hashes into the summary containers
# NOTE(review): %all_cluster_annotations appears to be keyed by read
# count below; two clusters with equal read counts would collide --
# confirm the keys produced by _blast_to_annotation.
@all_cluster_annotations{keys %$_} = values %$_ for @$cluster_annotations;
@top_hit_superfam{keys %$_} = values %$_ for @$superfams;
# write the per-cluster annotation report, largest clusters first
open my $out, '>', $anno_rp_path
or die "\n[ERROR]: Could not open file: $anno_rp_path\n";
say $out join "\t", "Cluster", "Read_count", "Type", "Order",
"Superfamily", "Family","Top_hit","Top_hit_genome_fraction";
for my $readct (reverse sort { $a <=> $b } keys %all_cluster_annotations) {
my @annots = $self->mk_vec($all_cluster_annotations{$readct});
my $cluster = shift @annots;
say $out join "\t", $cluster, $readct, join "\t", @annots;
}
close $out;
# remove the temporary BLAST database files
unlink glob("$db_path*");
# log results
my $ft = POSIX::strftime('%d-%m-%Y %H:%M:%S', localtime);
$log->info("Transposome::Annotation::annotate_clusters completed at: $ft.");
$log->info("Results - Total sequences: $seqct");
$log->info("Results - Total sequences clustered: $clsct");
$log->info("Results - Total sequences unclustered: $single_tot");
$log->info("Results - Repeat fraction from clusters: $rep_frac");
$log->info("Results - Singleton repeat fraction: $singleton_rep_frac");
$log->info("Results - Total repeat fraction (theoretical): $total_rep_frac");
return ({
annotation_report => $anno_rp_path,
annotation_summary => $anno_sum_rep_path,
singletons_report => $singles_rp_path,
total_sequence_num => $seqct,
total_annotated_num => $total_readct,
repeat_fraction => $rep_frac,
cluster_blast_reports => $blasts,
cluster_superfamilies => \%top_hit_superfam });
}
=head2 _annotate_singletons
Title : _annotation_singletons
Usage : This is a private method, do not use it directly.
Function: Runs the annotation for singleton sequences within Transposome.
Return_type
Returns : In order, 1) path to the singletons annotation file, Scalar
4) the repeat fraction of the singletons, Scalar
Arg_type
Args : In order, 1) singletons file generated by make_clusters() Scalar
from Transposome::Cluster
=cut
sub _annotate_singletons {
# Private method: BLAST the singleton (unclustered) reads against the repeat
# database, tally the hits, and annotate them the same way clusters are.
# Takes one hashref of settings/paths; returns a results hashref, or an
# empty list when no top hit could be determined (callers must check).
my $self = shift;
my ($annotation_data) = @_;
# unpack the settings hashref supplied by the caller
my $repeats = $annotation_data->{repeat_map};
my $singletons_file_path = $annotation_data->{singletons_file};
my $rpname = $annotation_data->{annotation_report};
my $single_tot = $annotation_data->{total_singleton_num};
my $evalue = $annotation_data->{blast_evalue};
my $thread_range = $annotation_data->{threads};
my $db_path = $annotation_data->{blastdb};
my $out_dir = $annotation_data->{output_directory};
my $blastn = $annotation_data->{blast_exe};
my $type_map = $annotation_data->{type_map};
# accumulators for the superfamily / annotation results collected below
my $top_hit_superfam = {};
my $hit_superfam = {};
my $cluster_annot = {};
my $top_hit_cluster_annot = {};
my @blasts;
my @superfams;
my @cluster_annotations;
# set paths for annotation files
my $singles_rep = $rpname."_singletons_annotations.tsv";
my $singles_rp_path = Path::Class::File->new($out_dir, $singles_rep);
my $singles_rep_sum = $rpname."_singletons_annotations_summary.tsv";
my $singles_rp_sum_path = Path::Class::File->new($out_dir, $singles_rep_sum);
# run the BLAST search of the singletons against the repeat database;
# the tabular report is written to $singles_rp_path
my $exit_code = $self->search_singletons({ blast_exe => $blastn,
query_file => $singletons_file_path,
evalue => $evalue,
blastdb => $db_path,
thread_range => $thread_range,
blast_output_file => $singles_rp_path});
my ($singleton_hits, $singleton_rep_frac) = (0, 0);
my (%blasthits, @blct_out);
# count hits per subject, but only if the report file is non-empty
if (-s $singles_rp_path) {
open my $singles_fh, '<', $singles_rp_path
or die "\n[ERROR]: Could not open file: $singles_rp_path\n";
while (<$singles_fh>) {
chomp;
$singleton_hits++;
my @f = split;
# field 2 of the tabular BLAST output is the subject (hit) ID
$blasthits{$f[1]}++;
}
close $singles_fh;
}
# reformat the tallies as "count<TAB>hit" lines for _parse_blast_to_top_hit
for my $hittype (keys %blasthits) {
push @blct_out, $blasthits{$hittype}."\t".$hittype."\n";
}
# fraction of all singleton reads that had at least one BLAST hit
if ($singleton_hits > 0) {
$singleton_rep_frac = $singleton_hits / $single_tot;
}
push @blasts, \%blasthits;
## mapping singleton blast hits to repeat types
my ($hit_ct, $top_hit, $top_hit_perc, $blhits)
= $self->_parse_blast_to_top_hit(\@blct_out, $singles_rp_sum_path);
# bail out (empty list) when there were no parsable hits
return unless defined $top_hit && defined $hit_ct;
# annotate the overall top hit ($top_hit / $top_hit_perc are scalar refs)
($top_hit_superfam, $top_hit_cluster_annot)
= $self->_blast_to_annotation({ filebase => 'singletons',
top_hit => $$top_hit,
top_hit_frac => $$top_hit_perc,
readct => $singleton_hits,
repeat_map => $repeats,
repeat_type => $type_map->{$$top_hit} });
# annotate every individual hit as well, each with its own fraction
for my $hit (keys %blasthits) {
my $hit_perc = sprintf("%.12f", $blasthits{$hit} / $single_tot);
($hit_superfam, $cluster_annot)
= $self->_blast_to_annotation({ filebase => 'singletons',
top_hit => $hit,
top_hit_frac => $hit_perc,
readct => $singleton_hits,
repeat_map => $repeats,
repeat_type => $type_map->{$hit} });
# keep only defined, non-empty annotation hashes
push @superfams, $hit_superfam
unless not defined $hit_superfam or !%$hit_superfam;
push @cluster_annotations, $cluster_annot
unless not defined $cluster_annot or !%$cluster_annot;
}
push @superfams, $top_hit_superfam
unless not defined $top_hit_superfam or !%$top_hit_superfam;
push @cluster_annotations, $top_hit_cluster_annot
unless not defined $top_hit_cluster_annot or !%$top_hit_cluster_annot;
# NOTE(review): the 'singletons_file' key actually carries the hit count,
# not a file path -- confirm against callers before renaming.
return ({ singletons_file => $singleton_hits,
singletons_rep_frac => $singleton_rep_frac,
singletons_report => $singles_rp_path,
blast_reports => \@blasts,
superfamily_hits => \@superfams,
cluster_annotations => \@cluster_annotations,
top_hit_superfamily => $top_hit_superfam,
top_hit_cluster_annot => $top_hit_cluster_annot });
}
=head2 _parse_blast_to_top_hit
Title : _parse_blast_to_top_hit
Usage : This is a private method, do not use it directly.
Function: Calculates the top blast hit for each cluster.
Return_type
Returns : In order, 1) the total hit count ScalarRef
2) the top blast hit ScalarRef
3) the top blast hit percentage ScalarRef
4) a hash of all the hits and their counts HashRef
Arg_type
Args : In order, 1) the blast hits for the cluster ArrayRef
2) the blast output file Scalar
=cut
sub _parse_blast_to_top_hit {
# Private method: tally "count<TAB>hit" lines, write a per-hit summary file
# sorted by decreasing count, and return (in order) refs to the hit count,
# the top hit, and the top-hit percentage, plus the full hit=>count hash.
# Returns four undefs (and removes the output file) when there are no hits.
my $self = shift;
my ($blast_out, $blast_file_path) = @_;
my %blhits;
my $hit_ct = 0;
for my $hit (@$blast_out) {
chomp $hit;
my ($ct, $hittype) = split /\t/, $hit;
next unless defined $ct;
# strip any trailing '#family' qualifier from the hit name
if ($hittype =~ /\#\w+\#?/) {
$hittype =~ s/\#.*//;
}
# NOTE(review): '=' means that if two raw names reduce to the same key
# after stripping, only the last count is kept -- confirm that '+=' was
# not intended here.
$blhits{$hittype} = $ct;
$hit_ct++;
}
# total count across all hit types ('sum' is imported from List::Util)
my $sum = sum values %blhits;
if ($hit_ct > 0) {
open my $out, '>', $blast_file_path or die "\n[ERROR]: Could not open file: $blast_file_path\n";
# highest-count hit; order among equal counts is unspecified
my $top_hit = (reverse sort { $blhits{$a} <=> $blhits{$b} } keys %blhits)[0];
my $top_hit_perc = sprintf("%.2f", $blhits{$top_hit} / $sum);
keys %blhits; #reset iterator
for my $hits (reverse sort { $blhits{$a} <=> $blhits{$b} } keys %blhits) {
my $hit_perc = sprintf("%.2f", $blhits{$hits} / $sum);
say $out join "\t", $hits, $blhits{$hits}, $hit_perc;
}
close $out;
return (\$hit_ct, \$top_hit, \$top_hit_perc, \%blhits);
}
else { ## if (!%blhits) {
# nothing to report; do not leave an empty summary file behind
unlink $blast_file_path;
return (undef, undef, undef, undef);
}
}
=head2 _blast_to_annotation
Title : _blast_to_annotation
Usage : This is a private method, do not use it directly.
Function: This method takes the blast hits and uses a key of repeat
types to determine the taxonomic lineage for each repeat.
Return_type
Returns : In order, 1) the repeat annotation for each HashRef
top hit (per cluster)
2) a hash containing all hits and counts per HashRef
superfamily
Arg_type
Args : A single hashref with the keys: HashRef
repeat_map (taxonomic relationships for all repeat types),
filebase (name of the cluster file being annotated),
readct (total number of reads with a blast hit),
top_hit (the top blast hit),
top_hit_frac (the top blast hit percentage),
repeat_type (repeat type of the top hit)
=cut
sub _blast_to_annotation {
# Private method: map the top BLAST hit onto the repeat taxonomy.
# Takes one hashref ($anno_data) containing at least 'repeat_map' (taxonomy
# for all repeat types) and 'repeat_type' (the type of the top hit), sets
# $anno_data->{class} according to the type, and delegates to
# map_hit_family(). Returns ($top_hit_superfam, $cluster_annot) hashrefs;
# both are empty when repeat_type is undefined or unknown.
#
# Fix/cleanup vs the original: the duplicated if/elsif chain (every branch
# called map_hit_family identically), the unused $unk_fam variable, and a
# void-context 'keys %$repeats;' statement were removed. Behavior is
# unchanged: at most one repeat_map key can equal repeat_type, so the old
# loop performed exactly this single lookup.
my $self = shift;
my ($anno_data) = @_;
my $repeats = $anno_data->{repeat_map};
my $top_hit_superfam = {};
my $cluster_annot = {};
my $type = $anno_data->{repeat_type};
if (defined $type && exists $repeats->{$type}) {
    # classify: regex branches capture the matched subtype, the known
    # top-level types use the type name itself, anything else is 'unknown'
    if ($type =~ /(pseudogene|integrated_virus|autonomous_replication_sequence)/) {
        $anno_data->{'class'} = $1;
    }
    elsif ($type =~ /(satellite)/i) {
        $anno_data->{'class'} = $1;
    }
    elsif ($type =~ /^(?:ltr_retrotransposon|non-ltr_retrotransposon|endogenous_retrovirus|dna_transposon)$/) {
        $anno_data->{'class'} = $type;
    }
    else {
        $anno_data->{'class'} = 'unknown';
    }
    ($top_hit_superfam, $cluster_annot)
        = $self->map_hit_family($repeats->{$type}, $anno_data);
}
return ($top_hit_superfam, $cluster_annot);
}
=head1 AUTHOR
S. Evan Staton, C<< <evan at evanstaton.com> >>
=head1 BUGS
Please report any bugs or feature requests through the project site at
L<https://github.com/sestaton/Transposome/issues>. I will be notified,
and there will be a record of the issue. Alternatively, I can also be
reached at the email address listed above to resolve any questions.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Transposome::Annotation
=head1 LICENSE AND COPYRIGHT
Copyright (C) 2013-2017 S. Evan Staton
This program is distributed under the MIT (X11) License, which should be distributed with the package.
If not, it can be found here: L<http://www.opensource.org/licenses/mit-license.php>
=cut
__PACKAGE__->meta->make_immutable;
1;
| sestaton/Transposome | lib/Transposome/Annotation.pm | Perl | mit | 24,507 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::checkmyws::mode::status;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use centreon::plugins::http;
use JSON;
my $thresholds = {
ws => [
['^0$', 'OK'],
['^1$', 'WARNING'],
['^2$', 'CRITICAL'],
['.*', 'UNKNOWN'],
],
};
sub new {
# Constructor: register this mode's command-line options and create the
# shared HTTP client used by run().
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
# NOTE(review): check_options() validates 'warning'/'critical' thresholds,
# but those options are not registered here -- confirm they are provided
# by the base class or add them to this list.
$options{options}->add_options(arguments => {
"hostname:s" => { name => 'hostname', default => 'api.checkmy.ws'},
"port:s" => { name => 'port', },
"proto:s" => { name => 'proto', default => "https" },
"urlpath:s" => { name => 'url_path', default => "/api/status" },
"uid:s" => { name => 'uid' },
"timeout:s" => { name => 'timeout' },
"threshold-overload:s@" => { name => 'threshold_overload' },
});
$self->{http} = centreon::plugins::http->new(%options);
return $self;
}
sub check_options {
# Validate options after parsing: thresholds, the mandatory uid, and the
# --threshold-overload rules; finally bake the uid into the request URL.
my ($self, %options) = @_;
$self->SUPER::init(%options);
if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{option_results}->{warning})) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{option_results}->{warning} . "'.");
$self->{output}->option_exit();
}
if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{option_results}->{critical})) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{option_results}->{critical} . "'.");
$self->{output}->option_exit();
}
if ((!defined($self->{option_results}->{uid}))) {
$self->{output}->add_option_msg(short_msg => "You need to set uid option");
$self->{output}->option_exit();
}
# parse each --threshold-overload 'STATUS,REGEXP' into per-section rules
# consumed later by get_severity()
$self->{overload_th} = {};
foreach my $val (@{$self->{option_results}->{threshold_overload}}) {
if ($val !~ /^(.*?),(.*)$/) {
$self->{output}->add_option_msg(short_msg => "Wrong threshold-overload option '" . $val . "'.");
$self->{output}->option_exit();
}
# the only section used by this mode is 'ws'
my ($section, $status, $filter) = ('ws', $1, $2);
if ($self->{output}->is_litteral_status(status => $status) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong threshold-overload status '" . $val . "'.");
$self->{output}->option_exit();
}
$self->{overload_th}->{$section} = [] if (!defined($self->{overload_th}->{$section}));
push @{$self->{overload_th}->{$section}}, {filter => $filter, status => $status};
}
# the API endpoint becomes /api/status/<uid>
$self->{option_results}->{url_path} = $self->{option_results}->{url_path}."/".$self->{option_results}->{uid};
$self->{http}->set_options(%{$self->{option_results}});
}
sub run {
# Fetch the status JSON for the configured uid, map the state to a plugin
# severity, and emit response-time performance data when available.
my ($self, %options) = @_;
my $jsoncontent = $self->{http}->request();
my $json = JSON->new;
my $webcontent;
eval {
$webcontent = $json->decode($jsoncontent);
};
if ($@) {
$self->{output}->add_option_msg(short_msg => "Cannot decode json response");
$self->{output}->option_exit();
}
# negative states are special non-check conditions reported by the API
my %map_output = (
-3 => 'Disable',
-2 => 'Not scheduled',
-1 => 'Pending...',
);
my $state = $webcontent->{state};
# for regular states, fall back to the API's own textual status string
my $output = defined($map_output{$state}) ? $map_output{$state} : $webcontent->{state_code_str};
my $exit = $self->get_severity(section => 'ws', value => $state);
$self->{output}->output_add(severity => $exit,
short_msg => $output);
# per-location HTTP response times, if the API returned any
if (defined($webcontent->{lastvalues}->{httptime})) {
my $perfdata = $webcontent->{lastvalues}->{httptime};
my $mean_time = 0;
foreach my $location (keys %$perfdata) {
$mean_time += $perfdata->{$location};
$self->{output}->perfdata_add(label => $location, unit => 'ms',
value => $perfdata->{$location},
min => 0
);
}
# average response time across all locations (guarded against division
# by zero when the hash is empty)
$self->{output}->perfdata_add(label => 'mean_time', unit => 'ms',
value => $mean_time / scalar(keys %$perfdata),
min => 0
) if (scalar(keys %$perfdata) > 0);
# YSlow page metrics are optional extras in the API response
$self->{output}->perfdata_add(label => 'yslow_page_load_time', unit => 'ms',
value => $webcontent->{metas}->{yslow_page_load_time},
min => 0
) if (defined($webcontent->{metas}->{yslow_page_load_time}));
$self->{output}->perfdata_add(label => 'yslow_score',
value => $webcontent->{metas}->{yslow_score},
min => 0, max => 100
) if (defined($webcontent->{metas}->{yslow_score}));
}
$self->{output}->display();
$self->{output}->exit();
}
sub get_severity {
    # Translate a raw state value into a plugin severity string.
    # User-supplied --threshold-overload rules take precedence over the
    # built-in %$thresholds table; the first matching rule wins.
    my ($self, %options) = @_;
    my ($section, $value) = @options{qw(section value)};

    if (defined $self->{overload_th}->{$section}) {
        for my $rule (@{$self->{overload_th}->{$section}}) {
            return $rule->{status} if $value =~ /$rule->{filter}/i;
        }
    }

    # Default table; its catch-all '.*' entry normally matches last.
    for my $pair (@{$thresholds->{$section}}) {
        return $pair->[1] if $value =~ /$pair->[0]/i;
    }

    return 'UNKNOWN';    # no rule matched at all
}
1;
__END__
=head1 MODE
Check website status
=over 8
=item B<--hostname>
Checkmyws api host (Default: 'api.checkmy.ws')
=item B<--port>
Port used by checkmyws
=item B<--proto>
Specify https if needed (Default: 'https')
=item B<--urlpath>
Set path to get checkmyws information (Default: '/api/status')
=item B<--timeout>
Timeout for the HTTP request (Default: 5)
=item B<--uid>
ID for checkmyws API
=item B<--threshold-overload>
Set to overload default threshold values (syntax: status,regexp)
It used before default thresholds (order stays).
Example: --threshold-overload='CRITICAL,^(?!(0)$)'
=back
=cut
| centreon/centreon-plugins | apps/checkmyws/mode/status.pm | Perl | apache-2.0 | 6,851 |
package Venn::ResultClass::HashRefInflatorFloatConvert;
=head1 NAME
Slightly modified to convert results from scientific notation into floats.
DBIx::Class::ResultClass::HashRefInflator - Get raw hashrefs from a resultset
=head1 SYNOPSIS
use DBIx::Class::ResultClass::HashRefInflator;
my $rs = $schema->resultset('CD');
$rs->result_class('DBIx::Class::ResultClass::HashRefInflator');
while (my $hashref = $rs->next) {
...
}
OR as an attribute:
my $rs = $schema->resultset('CD')->search({}, {
result_class => 'DBIx::Class::ResultClass::HashRefInflator',
});
while (my $hashref = $rs->next) {
...
}
=head1 DESCRIPTION
DBIx::Class is faster than older ORMs like Class::DBI but it still isn't
designed primarily for speed. Sometimes you need to quickly retrieve the data
from a massive resultset, while skipping the creation of fancy result objects.
Specifying this class as a B<result_class> for a resultset will change C<< $rs->next >>
to return a plain data hash-ref (or a list of such hash-refs if B<< $rs->all >> is used).
There are two ways of applying this class to a resultset:
=over
=item *
Specify B<< $rs->result_class >> on a specific resultset to affect only that
resultset (and any chained off of it); or
=item *
Specify B<< __PACKAGE__->result_class >> on your source object to force all
uses of that result source to be inflated to hash-refs - this approach is not
recommended.
=back
Note: this module was mostly copied from DBIx::Class::ResultClass::HashRefInflator.
=head1 AUTHOR
Venn Engineering
Josh Arenberg, Norbert Csongradi, Ryan Kupfer, Hai-Long Nguyen
=head1 LICENSE
Copyright 2013,2014,2015 Morgan Stanley
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
use strict;
use warnings;
use 5.010;
use Scalar::Util qw / reftype looks_like_number /;
#use Data::Types qw/ is_float is_int /;
no if $] >= 5.018, warnings => q{experimental::smartmatch};
# this class is designed for speed, sorry about the formatting
## no critic
##############
# NOTE
#
# Generally people use this to gain as much speed as possible. If a new &mk_hash is
# implemented, it should be benchmarked using the maint/benchmark_hashrefinflator.pl
# script (in addition to passing all tests of course :)
# This coderef is a simple recursive function
# Arguments: ($me, $prefetch, $is_root) from inflate_result() below
# It builds a plain hashref from the row data ($_[0]) and, recursively, from
# each prefetched relation ($_[1]), running the row values through walkhash()
# so numeric values are normalized to fixed-point strings.
# Declared first, then assigned, so the sub can call itself by name.
my $mk_hash;
$mk_hash = sub {
my $hash = {
# the main hash could be an undef if we are processing a skipped-over join
$_[0] ? %{walkhash($_[0])} : (),
# the second arg is a hash of arrays for each prefetched relation
# (an inner ARRAY ref marks a has-many relation, otherwise a single row)
map { $_ => (
ref $_[1]->{$_}[0] eq 'ARRAY'
? [ map { $mk_hash->( @$_ ) || () } @{$_[1]->{$_}} ]
: $mk_hash->( @{$_[1]->{$_}} )
) } ($_[1] ? keys %{$_[1]} : ())
};
# non-root empty hashes collapse to undef so callers can filter them out
($_[2] || keys %$hash) ? $hash : undef;
};
=head1 METHODS
=head2 inflate_result
Inflates the result and prefetched data into a hash-ref (invoked by L<DBIx::Class::ResultSet>)
# HRI->inflate_result ($resultsource_instance, $main_data_hashref, $prefetch_data_hashref)
=cut
sub inflate_result {
# Entry point invoked by DBIx::Class::ResultSet; arguments are
# ($class, $source, $row_data, $prefetch) -- only the data args are used.
return $mk_hash->($_[2], $_[3], 'is_root');
}
=head2 walkhash(\%in, \%out)
Walk a hash
param \%entry | \@entry | $entry : (HashRef|ArrayRef|Scalar) Incoming item to walk
return : (HashRef|ArrayRef|Scalar) Float converted of same type
=cut
sub walkhash {
    # Recursively walk a hashref/arrayref/scalar and render every numeric
    # scalar (including scientific notation) as a fixed-point string with two
    # decimals; non-numeric values are returned untouched.
    #
    # Fix: the original used given/when, which depends on the deprecated
    # experimental smartmatch feature (removed in recent Perls); this is a
    # behavior-identical if/elsif chain.
    #
    # param \%entry | \@entry | $entry : (HashRef|ArrayRef|Scalar) item to walk
    # return : (HashRef|ArrayRef|Scalar) float-converted value of the same type
    my ($entry) = @_;

    # reftype() is undef for non-references; treat those as plain scalars
    my $type = reftype($entry) // "SCALAR";

    if ($type eq 'HASH') {
        # convert every value in place and return the same hashref
        for my $key (keys %$entry) {
            $entry->{$key} = walkhash($entry->{$key});
        }
    }
    elsif ($type eq 'ARRAY') {
        # build a fresh arrayref of converted elements (as the original did)
        $entry = [ map { walkhash($_) } @$entry ];
    }
    elsif ($type eq 'SCALAR') {
        return looks_like_number($entry) ? sprintf("%.2f", $entry) : $entry;
    }
    return $entry;
}
=head1 CAVEATS
=over
=item *
This will not work for relationships that have been prefetched. Consider the
following:
my $artist = $artitsts_rs->search({}, {prefetch => 'cds' })->first;
my $cds = $artist->cds;
$cds->result_class('DBIx::Class::ResultClass::HashRefInflator');
my $first = $cds->first;
B<$first> will B<not> be a hashref, it will be a normal CD row since
HashRefInflator only affects resultsets at inflation time, and prefetch causes
relations to be inflated when the master B<$artist> row is inflated.
=item *
Column value inflation, e.g., using modules like
L<DBIx::Class::InflateColumn::DateTime>, is not performed.
The returned hash contains the raw database values.
=back
=cut
1;
| Morgan-Stanley/venn-core | lib/Venn/ResultClass/HashRefInflatorFloatConvert.pm | Perl | apache-2.0 | 5,360 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::mq::ibmmq::mqi::mode::listqueues;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
sub new {
# Constructor: delegate to the generic mode base class; this mode adds no
# command-line options of its own.
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$options{options}->add_options(arguments => {});
return $self;
}
sub check_options {
# No mode-specific option validation; just run the base-class init.
my ($self, %options) = @_;
$self->SUPER::init(%options);
}
sub manage_selection {
    # Query the MQI backend for the status of every queue (requesting only
    # the queue name) and index the results by name for run()/disco_show().
    my ($self, %options) = @_;

    my $statuses = $options{custom}->execute_command(
        command => 'InquireQueueStatus',
        attrs   => { QStatusAttrs => ['QName'] }
    );

    $self->{queue} = {};
    $self->{queue}->{ $_->{QName} } = { name => $_->{QName} } for @$statuses;
}
sub run {
# Human-readable listing: one "[name = ...]" long-output line per queue,
# sorted by queue name.
my ($self, %options) = @_;
$self->manage_selection(%options);
foreach (sort keys %{$self->{queue}}) {
$self->{output}->output_add(long_msg =>
sprintf(
'[name = %s]',
$self->{queue}->{$_}->{name}
)
);
}
$self->{output}->output_add(
severity => 'OK',
short_msg => 'List queues:'
);
# force long output so the list is printed even without --verbose
$self->{output}->display(nolabel => 1, force_ignore_perfdata => 1, force_long_output => 1);
$self->{output}->exit();
}
sub disco_format {
# Declare the attributes exposed by service discovery (XML element names).
my ($self, %options) = @_;
$self->{output}->add_disco_format(elements => ['name']);
}
sub disco_show {
    # Populate $self->{queue}, then emit one discovery entry per queue.
    my ($self, %options) = @_;

    $self->manage_selection(%options);
    $self->{output}->add_disco_entry(%$_) for values %{$self->{queue}};
}
1;
__END__
=head1 MODE
List queues.
=over 8
=back
=cut
| centreon/centreon-plugins | apps/mq/ibmmq/mqi/mode/listqueues.pm | Perl | apache-2.0 | 2,423 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::AssetSetService::MutateAssetSetResult;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
sub new {
  # Construct a MutateAssetSetResult, keeping only the fields this result
  # type defines.
  my ($class, $args) = @_;

  my $self = {
    assetSet     => $args->{assetSet},
    resourceName => $args->{resourceName}};

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/AssetSetService/MutateAssetSetResult.pm | Perl | apache-2.0 | 1,095 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::Runnable::Snap -
=head1 SYNOPSIS
my $runnable = Bio::EnsEMBL::Analysis::Runnable::Snap->new(
-query => $slice,
-program => 'snap',
);
$runnable->run;
my @predictions = @{$runnable->output};
=head1 DESCRIPTION
Wrapper to run the genefinder gene predictor and then parse the results
into prediction transcripts
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Runnable::Snap;
use strict;
use warnings;
use Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio;
use Bio::SeqIO;
use Bio::Seq;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Analysis::Runnable::BaseAbInitio);
sub new {
# Build the runnable: default the executable name to 'snap' and require an
# HMM matrix file. The original query slice is stashed via unaltered_slice()
# and replaced with a bare Bio::Seq (id only, no description) because snap's
# output parser does not work properly with a description in the header.
my ($class,@args) = @_;
my $self = $class->SUPER::new(@args);
######################
#SETTING THE DEFAULTS#
######################
$self->program('snap') if(!$self->program);
######################
throw("Must defined a matrix file like ".
"/usr/local/ensembl/Zoe/HMM/worm with the -matrix option ")
if(!$self->matrix);
$self->unaltered_slice($self->query);
my $new_seq = Bio::Seq->new(
-seq => $self->query->seq,
-id => $self->query->name,
);
$self->query($new_seq);
return $self;
}
=head2 run_analysis
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Snap
Arg [2] : string, program name
Function : create and open a commandline for the program genefinder
Returntype: none
Exceptions: throws if the program in not executable or the system
command fails to execute
Example :
=cut
sub run_analysis{
    # Run the snap gene predictor, redirecting its output to the results
    # file. Arg 2 (optional) overrides the configured program path.
    # Throws if the program is not executable or the system command fails.
    my ($self, $program) = @_;
    if(!$program){
        $program = $self->program;
    }
    throw($program." is not executable Snap::run_analysis ")
        unless($program && -x $program);
    # Fix: build the command from the validated $program; previously it was
    # always built from $self->program, silently ignoring the caller-supplied
    # program argument that had just been checked above.
    # Command shape: snap <matrix> <query.fa> > <results>
    my $command = $program." ".$self->matrix." ".$self->queryfile.
        " > ".$self->resultsfile;
    print STDERR "Running analysis ".$command."\n";
    system($command) == 0 or throw("FAILED to run ".$command);
}
=head2 parse_results
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Snap
Arg [2] : string, resultsfile name
Function : parse the results file into prediction exons then
collate them into prediction transcripts and calculate their
phases
Returntype: none
Exceptions: throws if cant open or close results file or the parsing
doesnt work
Example :
=cut
sub parse_results{
    # Parse snap's tabular output into prediction exons, group them by
    # transcript id, and collate them into prediction transcripts.
    # Arg 2 (optional) overrides the results file written by run_analysis().
    # Throws on open/close failure or malformed output lines.
    my ($self, $results) = @_;
    if(!$results){
        $results = $self->resultsfile;
    }
    # Restore the original slice so exons are created on it (new() swapped
    # in a bare Bio::Seq for the analysis run).
    $self->query($self->unaltered_slice);
    my $ff = $self->feature_factory;
    my $exon_count;
    my $current_trans;
    # Modernized: lexical filehandle + three-arg open (was bareword OUT).
    open(my $fh, '<', $results) or throw("FAILED to open ".$results.
        "Snap:parse_results");
    while(<$fh>){
        /^\>/ and next;    # skip ">sequence" header lines
        # NOTE(review): columns assumed to follow snap's ZFF-style output
        # (label, begin, end, strand, score, ..., frame?, ..., group) --
        # confirm against the snap documentation.
        my @element = split;
        throw("Unable to parse Snap output ".@element." in output ".
            "array expecting 9") if(@element != 9);
        # a new group label (column 9) starts a new transcript
        if (!$current_trans ||
            $current_trans ne $element[8]) {
            $exon_count = 0;
            $current_trans = $element[8];
        }
        $exon_count++;
        my $name = $current_trans.".".$exon_count;
        my $start = $element[1];
        my $end = $element[2];
        my $score = $element[4];
        # Fix: the error message interpolated $element[6] although the strand
        # actually checked (and used below) is $element[3].
        throw("strand wrongly formated $element[3] not + or -")
            unless ($element[3] eq '+' || $element[3] eq '-');
        my $strand = 1;
        $strand = -1 if($element[3] eq '-');
        # convert column 6 into an Ensembl start phase (0, 1 or 2)
        my $start_phase = (3 - $element[5]) % 3;
        my $exon = $ff->create_prediction_exon($start,
                                               $end,
                                               $strand,
                                               $score,
                                               0,
                                               $start_phase,
                                               $name,
                                               $self->query,
                                               $self->analysis);
        # group exons by transcript id; create_transcripts() collates them
        $self->exon_groups($current_trans, $exon);
    }
    $self->create_transcripts;
    close($fh) or throw("FAILED to close ".$results.
        "Snap:parse_results");
}
=head2 unaltered_slice
Arg [1] : Bio::EnsEMBL::Analysis::Runnable::Snap
Arg [2] : Bio::EnsEMBL::Slice
Function : holder for the given slice as the one passed
to Snap has to have desc removed otherwise parser doesnt
work properly
Returntype: Bio::EnsEMBL::Slice
Exceptions:
Example :
=cut
sub unaltered_slice{
    # Get/set accessor for the original query slice. new() stores the slice
    # here (description intact) before replacing the query with a bare
    # Bio::Seq; parse_results() restores it from here.
    my ($self, $slice) = @_;

    $self->{'unaltered_slice'} = $slice if ($slice);
    return $self->{'unaltered_slice'};
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Runnable/Snap.pm | Perl | apache-2.0 | 5,614 |
#------------------------------------------------------------------------------
# File: QuickTime.pm
#
# Description: Read QuickTime and MP4 meta information
#
# Revisions: 10/04/2005 - P. Harvey Created
# 12/19/2005 - P. Harvey Added MP4 support
# 09/22/2006 - P. Harvey Added M4A support
# 07/27/2010 - P. Harvey Updated to 2010-05-03 QuickTime spec
#
# References:
#
# 1) http://developer.apple.com/mac/library/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html
# 2) http://search.cpan.org/dist/MP4-Info-1.04/
# 3) http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
# 4) http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
# 5) ISO 14496-12 (http://read.pudn.com/downloads64/ebook/226547/ISO_base_media_file_format.pdf)
# 6) ISO 14496-16 (http://www.iec-normen.de/previewpdf/info_isoiec14496-16%7Bed2.0%7Den.pdf)
# 7) http://atomicparsley.sourceforge.net/mpeg-4files.html
# 8) http://wiki.multimedia.cx/index.php?title=QuickTime_container
# 9) http://www.adobe.com/devnet/xmp/pdfs/XMPSpecificationPart3.pdf (Oct 2008)
# 10) http://code.google.com/p/mp4v2/wiki/iTunesMetadata
# 11) http://www.canieti.com.mx/assets/files/1011/IEC_100_1384_DC.pdf
# 12) QuickTime file format specification 2010-05-03
# 13) http://www.adobe.com/devnet/flv/pdf/video_file_format_spec_v10.pdf
# 14) http://standards.iso.org/ittf/PubliclyAvailableStandards/c051533_ISO_IEC_14496-12_2008.zip
# 15) http://getid3.sourceforge.net/source/module.audio-video.quicktime.phps
# 16) http://qtra.apple.com/atoms.html
# 17) http://www.etsi.org/deliver/etsi_ts/126200_126299/126244/10.01.00_60/ts_126244v100100p.pdf
# 18) https://github.com/appsec-labs/iNalyzer/blob/master/scinfo.m
# 19) http://nah6.com/~itsme/cvs-xdadevtools/iphone/tools/decodesinf.pl
# 20) https://developer.apple.com/legacy/library/documentation/quicktime/reference/QT7-1_Update_Reference/QT7-1_Update_Reference.pdf
# 21) Francois Bonzon private communication
# 22) https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/Metadata/Metadata.html
# 23) http://atomicparsley.sourceforge.net/mpeg-4files.html
# 24) https://github.com/sergiomb2/libmp4v2/wiki/iTunesMetadata
#------------------------------------------------------------------------------
package Image::ExifTool::QuickTime;
use strict;
use vars qw($VERSION $AUTOLOAD %stringEncoding);
use Image::ExifTool qw(:DataAccess :Utils);
use Image::ExifTool::Exif;
use Image::ExifTool::GPS;
$VERSION = '2.36';
# Forward declarations for subs defined later in this file (or, where
# marked, in QuickTimeStream.pl).
sub ProcessMOV($$;$);
sub ProcessKeys($$$);
sub ProcessMetaKeys($$$);
sub ProcessMetaData($$$);
sub ProcessEncodingParams($$$);
sub ProcessSampleDesc($$$);
sub ProcessHybrid($$$);
sub ProcessRights($$$);
# ++vvvvvvvvvvvv++ (in QuickTimeStream.pl)
sub Process_mebx($$$);
sub Process_3gf($$$);
sub Process_gps0($$$);
sub Process_gsen($$$);
sub ProcessTTAD($$$);
sub ProcessNMEA($$$);
sub SaveMetaKeys($$$);
# ++^^^^^^^^^^^^++
sub ParseItemLocation($$);
sub ParseContentDescribes($$);
sub ParseItemInfoEntry($$);
sub ParseItemPropAssoc($$);
sub FixWrongFormat($);
sub GetMatrixStructure($$);
sub ConvertISO6709($);
sub ConvInvISO6709($);
sub ConvertChapterList($);
sub PrintChapter($);
sub PrintGPSCoordinates($);
sub PrintInvGPSCoordinates($);
sub UnpackLang($;$);
sub WriteKeys($$$);
sub WriteQuickTime($$$);
sub WriteMOV($$);
sub GetLangInfo($$);
sub CheckQTValue($$$);
# MIME types for all entries in the ftypLookup with file extensions
# (defaults to 'video/mp4' if not found in this lookup)
# keys are ExifTool file-type identifiers derived from the ftyp atom
my %mimeLookup = (
'3G2' => 'video/3gpp2',
'3GP' => 'video/3gpp',
AAX => 'audio/vnd.audible.aax',
DVB => 'video/vnd.dvb.file',
F4A => 'audio/mp4',
F4B => 'audio/mp4',
JP2 => 'image/jp2',
JPM => 'image/jpm',
JPX => 'image/jpx',
M4A => 'audio/mp4',
M4B => 'audio/mp4',
M4P => 'audio/mp4',
M4V => 'video/x-m4v',
MOV => 'video/quicktime',
MQV => 'video/quicktime',
HEIC => 'image/heic',
HEVC => 'image/heic-sequence',
HEIF => 'image/heif',
CRX => 'video/x-canon-crx', # (will get overridden)
);
# look up file type from ftyp atom type, with MIME type in comment if known
# (ref http://www.ftyps.com/)
my %ftypLookup = (
'3g2a' => '3GPP2 Media (.3G2) compliant with 3GPP2 C.S0050-0 V1.0', # video/3gpp2
'3g2b' => '3GPP2 Media (.3G2) compliant with 3GPP2 C.S0050-A V1.0.0', # video/3gpp2
'3g2c' => '3GPP2 Media (.3G2) compliant with 3GPP2 C.S0050-B v1.0', # video/3gpp2
'3ge6' => '3GPP (.3GP) Release 6 MBMS Extended Presentations', # video/3gpp
'3ge7' => '3GPP (.3GP) Release 7 MBMS Extended Presentations', # video/3gpp
'3gg6' => '3GPP Release 6 General Profile', # video/3gpp
'3gp1' => '3GPP Media (.3GP) Release 1 (probably non-existent)', # video/3gpp
'3gp2' => '3GPP Media (.3GP) Release 2 (probably non-existent)', # video/3gpp
'3gp3' => '3GPP Media (.3GP) Release 3 (probably non-existent)', # video/3gpp
'3gp4' => '3GPP Media (.3GP) Release 4', # video/3gpp
'3gp5' => '3GPP Media (.3GP) Release 5', # video/3gpp
'3gp6' => '3GPP Media (.3GP) Release 6 Basic Profile', # video/3gpp
'3gp6' => '3GPP Media (.3GP) Release 6 Progressive Download', # video/3gpp
'3gp6' => '3GPP Media (.3GP) Release 6 Streaming Servers', # video/3gpp
'3gs7' => '3GPP Media (.3GP) Release 7 Streaming Servers', # video/3gpp
'aax ' => 'Audible Enhanced Audiobook (.AAX)', #PH
'avc1' => 'MP4 Base w/ AVC ext [ISO 14496-12:2005]', # video/mp4
'CAEP' => 'Canon Digital Camera',
'caqv' => 'Casio Digital Camera',
'CDes' => 'Convergent Design',
'da0a' => 'DMB MAF w/ MPEG Layer II aud, MOT slides, DLS, JPG/PNG/MNG images',
'da0b' => 'DMB MAF, extending DA0A, with 3GPP timed text, DID, TVA, REL, IPMP',
'da1a' => 'DMB MAF audio with ER-BSAC audio, JPG/PNG/MNG images',
'da1b' => 'DMB MAF, extending da1a, with 3GPP timed text, DID, TVA, REL, IPMP',
'da2a' => 'DMB MAF aud w/ HE-AAC v2 aud, MOT slides, DLS, JPG/PNG/MNG images',
'da2b' => 'DMB MAF, extending da2a, with 3GPP timed text, DID, TVA, REL, IPMP',
'da3a' => 'DMB MAF aud with HE-AAC aud, JPG/PNG/MNG images',
'da3b' => 'DMB MAF, extending da3a w/ BIFS, 3GPP timed text, DID, TVA, REL, IPMP',
'dmb1' => 'DMB MAF supporting all the components defined in the specification',
'dmpf' => 'Digital Media Project', # various
'drc1' => 'Dirac (wavelet compression), encapsulated in ISO base media (MP4)',
'dv1a' => 'DMB MAF vid w/ AVC vid, ER-BSAC aud, BIFS, JPG/PNG/MNG images, TS',
'dv1b' => 'DMB MAF, extending dv1a, with 3GPP timed text, DID, TVA, REL, IPMP',
'dv2a' => 'DMB MAF vid w/ AVC vid, HE-AAC v2 aud, BIFS, JPG/PNG/MNG images, TS',
'dv2b' => 'DMB MAF, extending dv2a, with 3GPP timed text, DID, TVA, REL, IPMP',
'dv3a' => 'DMB MAF vid w/ AVC vid, HE-AAC aud, BIFS, JPG/PNG/MNG images, TS',
'dv3b' => 'DMB MAF, extending dv3a, with 3GPP timed text, DID, TVA, REL, IPMP',
'dvr1' => 'DVB (.DVB) over RTP', # video/vnd.dvb.file
'dvt1' => 'DVB (.DVB) over MPEG-2 Transport Stream', # video/vnd.dvb.file
'F4A ' => 'Audio for Adobe Flash Player 9+ (.F4A)', # audio/mp4
'F4B ' => 'Audio Book for Adobe Flash Player 9+ (.F4B)', # audio/mp4
'F4P ' => 'Protected Video for Adobe Flash Player 9+ (.F4P)', # video/mp4
'F4V ' => 'Video for Adobe Flash Player 9+ (.F4V)', # video/mp4
'isc2' => 'ISMACryp 2.0 Encrypted File', # ?/enc-isoff-generic
'iso2' => 'MP4 Base Media v2 [ISO 14496-12:2005]', # video/mp4
'isom' => 'MP4 Base Media v1 [IS0 14496-12:2003]', # video/mp4
'JP2 ' => 'JPEG 2000 Image (.JP2) [ISO 15444-1 ?]', # image/jp2
'JP20' => 'Unknown, from GPAC samples (prob non-existent)',
'jpm ' => 'JPEG 2000 Compound Image (.JPM) [ISO 15444-6]', # image/jpm
'jpx ' => 'JPEG 2000 with extensions (.JPX) [ISO 15444-2]', # image/jpx
'KDDI' => '3GPP2 EZmovie for KDDI 3G cellphones', # video/3gpp2
#LCAG => (found in CompatibleBrands of Leica MOV videos)
'M4A ' => 'Apple iTunes AAC-LC (.M4A) Audio', # audio/x-m4a
'M4B ' => 'Apple iTunes AAC-LC (.M4B) Audio Book', # audio/mp4
'M4P ' => 'Apple iTunes AAC-LC (.M4P) AES Protected Audio', # audio/mp4
'M4V ' => 'Apple iTunes Video (.M4V) Video', # video/x-m4v
'M4VH' => 'Apple TV (.M4V)', # video/x-m4v
'M4VP' => 'Apple iPhone (.M4V)', # video/x-m4v
'mj2s' => 'Motion JPEG 2000 [ISO 15444-3] Simple Profile', # video/mj2
'mjp2' => 'Motion JPEG 2000 [ISO 15444-3] General Profile', # video/mj2
'mmp4' => 'MPEG-4/3GPP Mobile Profile (.MP4/3GP) (for NTT)', # video/mp4
'mp21' => 'MPEG-21 [ISO/IEC 21000-9]', # various
'mp41' => 'MP4 v1 [ISO 14496-1:ch13]', # video/mp4
'mp42' => 'MP4 v2 [ISO 14496-14]', # video/mp4
'mp71' => 'MP4 w/ MPEG-7 Metadata [per ISO 14496-12]', # various
'MPPI' => 'Photo Player, MAF [ISO/IEC 23000-3]', # various
'mqt ' => 'Sony / Mobile QuickTime (.MQV) US Patent 7,477,830 (Sony Corp)', # video/quicktime
'MSNV' => 'MPEG-4 (.MP4) for SonyPSP', # audio/mp4
'NDAS' => 'MP4 v2 [ISO 14496-14] Nero Digital AAC Audio', # audio/mp4
'NDSC' => 'MPEG-4 (.MP4) Nero Cinema Profile', # video/mp4
'NDSH' => 'MPEG-4 (.MP4) Nero HDTV Profile', # video/mp4
'NDSM' => 'MPEG-4 (.MP4) Nero Mobile Profile', # video/mp4
'NDSP' => 'MPEG-4 (.MP4) Nero Portable Profile', # video/mp4
'NDSS' => 'MPEG-4 (.MP4) Nero Standard Profile', # video/mp4
'NDXC' => 'H.264/MPEG-4 AVC (.MP4) Nero Cinema Profile', # video/mp4
'NDXH' => 'H.264/MPEG-4 AVC (.MP4) Nero HDTV Profile', # video/mp4
'NDXM' => 'H.264/MPEG-4 AVC (.MP4) Nero Mobile Profile', # video/mp4
'NDXP' => 'H.264/MPEG-4 AVC (.MP4) Nero Portable Profile', # video/mp4
'NDXS' => 'H.264/MPEG-4 AVC (.MP4) Nero Standard Profile', # video/mp4
'odcf' => 'OMA DCF DRM Format 2.0 (OMA-TS-DRM-DCF-V2_0-20060303-A)', # various
'opf2' => 'OMA PDCF DRM Format 2.1 (OMA-TS-DRM-DCF-V2_1-20070724-C)',
'opx2' => 'OMA PDCF DRM + XBS extensions (OMA-TS-DRM_XBS-V1_0-20070529-C)',
'pana' => 'Panasonic Digital Camera',
'qt ' => 'Apple QuickTime (.MOV/QT)', # video/quicktime
'ROSS' => 'Ross Video',
'sdv ' => 'SD Memory Card Video', # various?
'ssc1' => 'Samsung stereoscopic, single stream',
'ssc2' => 'Samsung stereoscopic, dual stream',
'XAVC' => 'Sony XAVC', #PH
'heic' => 'High Efficiency Image Format HEVC still image (.HEIC)', # image/heic
'hevc' => 'High Efficiency Image Format HEVC sequence (.HEICS)', # image/heic-sequence
'mif1' => 'High Efficiency Image Format still image (.HEIF)', # image/heif
'msf1' => 'High Efficiency Image Format sequence (.HEIFS)', # image/heif-sequence
'crx ' => 'Canon Raw (.CRX)', #PH (CR3 or CRM; use Canon CompressorVersion to decide)
);
# information for time/date-based tags (time zero is Jan 1, 1904)
# Shared tag-information fragment merged (via %timeInfo) into date/time tag
# definitions.  Stored values are seconds since the QuickTime epoch
# (1904-01-01 UTC); these conversions shift to/from the Unix epoch.
my %timeInfo = (
Notes => 'converted from UTC to local time if the QuickTimeUTC option is set',
# It is not uncommon for brain-dead software to use the wrong time zero,
# so assume a time zero of Jan 1, 1970 if the date is before this
RawConv => q{
# offset = days from 1904-01-01 to 1970-01-01 (66 years incl. 17 leap days)
my $offset = (66 * 365 + 17) * 24 * 3600;
return $val - $offset if $val >= $offset or $$self{OPTIONS}{QuickTimeUTC};
# value predates 1970 with a 1904 epoch -- assume the writer used a Unix
# epoch by mistake and keep the value as-is (warn only when reading)
if ($val and not $$self{IsWriting}) {
$self->WarnOnce('Patched incorrect time zero for QuickTime date/time tag',1);
}
return $val;
},
Shift => 'Time',
Writable => 1,
Permanent => 1,
DelValue => 0,
# Note: This value will be in UTC if generated by a system that is aware of the time zone
ValueConv => 'ConvertUnixTime($val, $self->Options("QuickTimeUTC"))',
ValueConvInv => 'GetUnixTime($val, $self->Options("QuickTimeUTC")) + (66 * 365 + 17) * 24 * 3600',
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val)',
# (can't put Groups here because they aren't constant!)
);
# shared tag-information fragment for duration tags: values are expressed in
# media time units, so divide by the movie TimeScale (when known) to get
# seconds, and render them human-readably for printing
my %durationInfo = (
    PrintConv => '$$self{TimeScale} ? ConvertDuration($val) : $val',
    ValueConv => '$$self{TimeScale} ? $val / $$self{TimeScale} : $val',
);
# shared tag-information fragment for unknown atoms: if the payload is pure
# printable ASCII (optionally null-padded) extract it as text, otherwise
# return a reference to the raw binary data
my %unknownInfo = (
    ValueConv => '$val =~ /^([\x20-\x7e]*)\0*$/ ? $1 : \$val',
    Unknown   => 1,
);
# shared tag-information fragment for the language-tagged text boxes found
# in the 'udta' atom of 3gp videos
my %langText = (
    IText => 6,
    Avoid => 1,
    Notes => 'used in 3gp videos',
);
# lookup for 4-character Vendor ID codes, ordered alphabetically (ref PH)
my %vendorID = (
    ' KD '=> 'Kodak', # (FZ201)
    'AR.D'=> 'Parrot AR.Drone',
    appl  => 'Apple',
    FFMP  => 'FFmpeg',
    fe20  => 'Olympus (fe20)', # (FE200)
    'GIC '=> 'General Imaging Co.',
    kdak  => 'Kodak',
    KMPI  => 'Konica-Minolta',
    leic  => 'Leica',
    mino  => 'Minolta',
    NIKO  => 'Nikon',
    niko  => 'Nikon',
    olym  => 'Olympus',
    pana  => 'Panasonic',
    pent  => 'Pentax',
    pr01  => 'Olympus (pr01)', # (FE100,FE110,FE115)
    sany  => 'Sanyo',
    'SMI '=> 'Sorenson Media Inc.',
    ZORA  => 'Zoran Corporation',
);
# QuickTime data atom encodings for string types (ref 12)
# NOTE(review): declared with 'my' to match the other lexical helper hashes
# in this file -- without a declaration this assignment would not compile
# under 'use strict' (or would silently create a package global).  If an
# earlier 'our'/'my' declaration exists outside this chunk, this simply
# re-scopes it locally from here on.
my %stringEncoding = (
    1 => 'UTF8',
    2 => 'UTF16',
    3 => 'ShiftJIS',
    4 => 'UTF8',
    5 => 'UTF16',
);
# lookup for QuickTime graphics transfer mode values
my %graphicsMode = (
    # (ref http://homepage.mac.com/vanhoek/MovieGuts%20docs/64.html)
    # src* / notSrc* source transfer modes
    0x00 => 'srcCopy',       0x01 => 'srcOr',     0x02 => 'srcXor',    0x03 => 'srcBic',
    0x04 => 'notSrcCopy',    0x05 => 'notSrcOr',  0x06 => 'notSrcXor', 0x07 => 'notSrcBic',
    # pat* / notPat* pattern transfer modes
    0x08 => 'patCopy',       0x09 => 'patOr',     0x0a => 'patXor',    0x0b => 'patBic',
    0x0c => 'notPatCopy',    0x0d => 'notPatOr',  0x0e => 'notPatXor', 0x0f => 'notPatBic',
    # arithmetic and highlight modes
    0x20 => 'blend',         0x21 => 'addPin',    0x22 => 'addOver',   0x23 => 'subPin',
    0x24 => 'transparent',   0x25 => 'addMax',    0x26 => 'subOver',   0x27 => 'addMin',
    0x31 => 'grayishTextOr', 0x32 => 'hilite',    0x40 => 'ditherCopy',
    # the following ref ISO/IEC 15444-3
    0x100 => 'Alpha',
    0x101 => 'White Alpha',
    0x102 => 'Pre-multiplied Black Alpha',
    0x110 => 'Component Alpha',
);
# lookup for audio channel label values, in ascending numerical order
my %channelLabel = (
    0   => 'Unused',
    1   => 'Left',
    2   => 'Right',
    3   => 'Center',
    4   => 'LFEScreen',
    5   => 'LeftSurround',
    6   => 'RightSurround',
    7   => 'LeftCenter',
    8   => 'RightCenter',
    9   => 'CenterSurround',
    10  => 'LeftSurroundDirect',
    11  => 'RightSurroundDirect',
    12  => 'TopCenterSurround',
    13  => 'VerticalHeightLeft',
    14  => 'VerticalHeightCenter',
    15  => 'VerticalHeightRight',
    16  => 'TopBackLeft',
    17  => 'TopBackCenter',
    18  => 'TopBackRight',
    33  => 'RearSurroundLeft',
    34  => 'RearSurroundRight',
    35  => 'LeftWide',
    36  => 'RightWide',
    37  => 'LFE2',
    38  => 'LeftTotal',
    39  => 'RightTotal',
    40  => 'HearingImpaired',
    41  => 'Narration',
    42  => 'Mono',
    43  => 'DialogCentricMix',
    44  => 'CenterSurroundDirect',
    45  => 'Haptic',
    100 => 'UseCoordinates',
    200 => 'Ambisonic_W',
    201 => 'Ambisonic_X',
    202 => 'Ambisonic_Y',
    203 => 'Ambisonic_Z',
    204 => 'MS_Mid',
    205 => 'MS_Side',
    206 => 'XY_X',
    207 => 'XY_Y',
    301 => 'HeadphonesLeft',
    302 => 'HeadphonesRight',
    304 => 'ClickTrack',
    305 => 'ForeignLanguage',
    400 => 'Discrete',
    # discrete channels 0-15, plus the maximum discrete channel number
    0x10000 => 'Discrete_0',
    0x10001 => 'Discrete_1',
    0x10002 => 'Discrete_2',
    0x10003 => 'Discrete_3',
    0x10004 => 'Discrete_4',
    0x10005 => 'Discrete_5',
    0x10006 => 'Discrete_6',
    0x10007 => 'Discrete_7',
    0x10008 => 'Discrete_8',
    0x10009 => 'Discrete_9',
    0x1000a => 'Discrete_10',
    0x1000b => 'Discrete_11',
    0x1000c => 'Discrete_12',
    0x1000d => 'Discrete_13',
    0x1000e => 'Discrete_14',
    0x1000f => 'Discrete_15',
    0x1ffff => 'Discrete_65535',
    0xFFFFFFFF => 'Unknown',
);
# properties which don't get inherited from the parent item:
# - ispe: size of parent may be different
# - hvcC: (likely redundant)
my %dontInherit = map { $_ => 1 } qw(ispe hvcC);
# tag IDs that may legitimately occur more than once, and directories that
# may contain duplicate tags (used only to avoid warnings when Validate-ing)
my %dupTagOK = map { $_ => 1 }
    qw(mdat trak free infe sgpd dimg CCDT sbgp csgm uuid cdsc maxr ----);
my %dupDirOK = map { $_ => 1 } qw(ipco ----);
# the usual atoms required to decode timed metadata with the ExtractEmbedded
# option -- each of these sample-table atoms lives in the 'stbl' container
my %eeStd = map { $_ => 'stbl' } qw(stco co64 stsz stz2 stsc stts);
# boxes and their containers for the various handler types that we want to save
# when the ExtractEmbedded is enabled (currently only the 'gps ' container name is
# used, but others have been checked against all available sample files and may be
# useful in the future if the names are used for different boxes on other locations)
my %eeBox = (
    # (note: vide is only processed if specific atoms exist in the VideoSampleDesc)
    vide => {
        %eeStd,
        JPEG => 'stsd',
        # avcC => 'stsd', # (uncomment to parse H264 stream)
    },
    text => { %eeStd },
    meta => { %eeStd },
    sbtl => { %eeStd },
    data => { %eeStd },
    camm => { %eeStd }, # (Insta360)
    # no handler -- 'gps ' found in the top-level 'moov' box
    ''   => { 'gps ' => 'moov' },
);
# QuickTime atoms
# Main tag table for the top-level atoms of a QuickTime/MP4-family file.
# Entries whose values are array references (free, skip, uuid) are
# conditional lists: the first entry whose Condition matches the atom
# contents is the one used.
%Image::ExifTool::QuickTime::Main = (
PROCESS_PROC => \&ProcessMOV,
WRITE_PROC => \&WriteQuickTime, # (only needs to be defined for directories to process when writing)
GROUPS => { 2 => 'Video' },
meta => { # 'meta' is found here in my Sony ILCE-7S MP4 sample - PH
Name => 'Meta',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::Meta',
Start => 4, # skip 4-byte version number header
},
},
meco => { #ISO14496-12:2015
Name => 'OtherMeta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::OtherMeta' },
},
free => [
{
Name => 'KodakFree',
# (found in Kodak M5370 MP4 videos)
Condition => '$$valPt =~ /^\0\0\0.Seri/s',
SubDirectory => { TagTable => 'Image::ExifTool::Kodak::Free' },
},{
Name => 'Pittasoft',
# (Pittasoft Blackview dashcam MP4 videos)
Condition => '$$valPt =~ /^\0\0..(cprt|sttm|ptnm|ptrh|thum|gps |3gf )/s',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Pittasoft' },
},{
# fallback: any other 'free' atom is kept as unknown binary data
Unknown => 1,
Binary => 1,
},
# (also Samsung WB750 uncompressed thumbnail data starting with "SDIC\0")
],
# fre1 - 4 bytes: "june" (Kodak PixPro SP360)
frea => {
Name => 'Kodak_frea',
SubDirectory => { TagTable => 'Image::ExifTool::Kodak::frea' },
},
skip => [
{
Name => 'CanonSkip',
Condition => '$$valPt =~ /^\0.{3}(CNDB|CNCV|CNMN|CNFV|CNTH|CNDM)/s',
SubDirectory => { TagTable => 'Image::ExifTool::Canon::Skip' },
},
{
Name => 'PreviewImage', # (found in DuDuBell M1 dashcam MOV files)
Groups => { 2 => 'Preview' },
Condition => '$$valPt =~ /^.{12}\xff\xd8\xff/',
Binary => 1,
RawConv => q{
# JPEG length is stored at offset 8; payload starts at offset 12
my $len = Get32u(\$val, 8);
return undef unless length($val) >= $len + 12;
return substr($val, 12, $len);
},
},
{ Name => 'Skip', Unknown => 1, Binary => 1 },
],
wide => { Unknown => 1, Binary => 1 },
ftyp => { #MP4
Name => 'FileType',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::FileType' },
},
pnot => {
Name => 'Preview',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Preview' },
},
PICT => {
Name => 'PreviewPICT',
Groups => { 2 => 'Preview' },
Binary => 1,
},
pict => { #8
Name => 'PreviewPICT',
Groups => { 2 => 'Preview' },
Binary => 1,
},
# (note that moov is present for an HEIF sequence)
moov => {
Name => 'Movie',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Movie' },
},
moof => {
Name => 'MovieFragment',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::MovieFragment' },
},
# mfra - movie fragment random access: contains tfra (track fragment random access), and
# mfro (movie fragment random access offset) (ref 5)
mdat => { Name => 'MovieData', Unknown => 1, Binary => 1 },
'mdat-size' => {
Name => 'MovieDataSize',
Notes => q{
not a real tag ID, this tag represents the size of the 'mdat' data in bytes
and is used in the AvgBitrate calculation
},
},
'mdat-offset' => 'MovieDataOffset',
junk => { Unknown => 1, Binary => 1 }, #8
uuid => [
{ #9 (MP4 files)
Name => 'XMP',
# *** this is where ExifTool writes XMP in MP4 videos (as per XMP spec) ***
Condition => '$$valPt=~/^\xbe\x7a\xcf\xcb\x97\xa9\x42\xe8\x9c\x71\x99\x94\x91\xe3\xaf\xac/',
WriteGroup => 'XMP', # (write main XMP tags here)
SubDirectory => {
TagTable => 'Image::ExifTool::XMP::Main',
Start => 16,
},
},
{ #11 (MP4 files)
Name => 'UUID-PROF',
Condition => '$$valPt=~/^PROF!\xd2\x4f\xce\xbb\x88\x69\x5c\xfa\xc9\xc7\x40/',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::Profile',
Start => 24, # uid(16) + version(1) + flags(3) + count(4)
},
},
{ #PH (Flip MP4 files)
Name => 'UUID-Flip',
Condition => '$$valPt=~/^\x4a\xb0\x3b\x0f\x61\x8d\x40\x75\x82\xb2\xd9\xfa\xce\xd3\x5f\xf5/',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::Flip',
Start => 16,
},
},
# "\x98\x7f\xa3\xdf\x2a\x85\x43\xc0\x8f\x8f\xd9\x7c\x47\x1e\x8e\xea" - unknown data in Flip videos
{ #PH (Canon CR3)
Name => 'UUID-Canon2',
WriteLast => 1, # MUST come after mdat or DPP will drop mdat when writing!
Condition => '$$valPt=~/^\x21\x0f\x16\x87\x91\x49\x11\xe4\x81\x11\x00\x24\x21\x31\xfc\xe4/',
SubDirectory => {
TagTable => 'Image::ExifTool::Canon::uuid2',
Start => 16,
},
},
{ #PH (Canon CR3)
Name => 'PreviewImage',
Condition => '$$valPt=~/^\xea\xf4\x2b\x5e\x1c\x98\x4b\x88\xb9\xfb\xb7\xdc\x40\x6e\x4d\x16/',
Groups => { 2 => 'Preview' },
# 0x00 - undef[16]: UUID
# 0x10 - int32u[2]: "0 1" (version and/or item count?)
# 0x18 - int32u: PRVW atom size
# 0x20 - int32u: 'PRVW'
# 0x30 - int32u: 0
# 0x34 - int16u: 1
# 0x36 - int16u: image width
# 0x38 - int16u: image height
# 0x3a - int16u: 1
# 0x3c - int32u: preview length
RawConv => '$val = substr($val, 0x30); $self->ValidateImage(\$val, $tag)',
},
{ #8
Name => 'UUID-Unknown',
%unknownInfo,
},
],
_htc => {
Name => 'HTCInfo',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::HTCInfo' },
},
udta => {
Name => 'UserData',
SubDirectory => { TagTable => 'Image::ExifTool::FLIR::UserData' },
},
thum => { #PH
Name => 'ThumbnailImage',
Groups => { 2 => 'Preview' },
Binary => 1,
},
ardt => { #PH
Name => 'ARDroneFile',
ValueConv => 'length($val) > 4 ? substr($val,4) : $val', # remove length
},
prrt => { #PH
Name => 'ARDroneTelemetry',
Notes => q{
telemetry information for each video frame: status1, status2, time, pitch,
roll, yaw, speed, altitude
},
ValueConv => q{
my $size = length $val;
return \$val if $size < 12 or not $$self{OPTIONS}{Binary};
# record length is stored at offset 2; records start at offset 12
my $len = Get16u(\$val, 2);
my $str = '';
SetByteOrder('II');
my $pos = 12;
while ($pos + $len <= $size) {
my $s1 = Get16u(\$val, $pos);
# s2: 7=take-off?, 3=moving, 4=hovering, 9=landing?, 2=landed
my $s2 = Get16u(\$val, $pos + 2);
$str .= "$s1 $s2";
my $num = int(($len-4)/4);
my ($i, $v);
for ($i=0; $i<$num; ++$i) {
my $pt = $pos + 4 + $i * 4;
if ($i > 0 && $i < 4) {
$v = GetFloat(\$val, $pt); # pitch/roll/yaw
} else {
$v = Get32u(\$val, $pt);
# convert time to sec, and speed(NC)/altitude to metres
$v /= 1000 if $i <= 5;
}
$str .= " $v";
}
$str .= "\n";
$pos += $len;
}
SetByteOrder('MM');
return \$str;
},
},
udat => { #PH (GPS NMEA-format log written by Datakam Player software)
Name => 'GPSLog',
Binary => 1, # (actually ASCII, but very lengthy)
},
# meta - proprietary XML information written by some Flip cameras - PH
# beam - 16 bytes found in an iPhone video
IDIT => { #PH (written by DuDuBell M1, VSYS M6L dashcams)
Name => 'DateTimeOriginal',
Description => 'Date/Time Original',
Groups => { 2 => 'Time' },
Format => 'string', # (removes trailing "\0")
Shift => 'Time',
Writable => 1,
Permanent => 1,
DelValue => '0000-00-00T00:00:00+0000',
# stored date uses '-' separators; normalize to EXIF-style ':' on read
ValueConv => '$val=~tr/-/:/; $val',
ValueConvInv => '$val=~s/(\d+):(\d+):/$1-$2-/; $val',
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val)',
},
gps0 => { #PH (DuDuBell M1, VSYS M6L)
Name => 'GPSTrack',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::Stream',
ProcessProc => \&Process_gps0,
},
},
gsen => { #PH (DuDuBell M1, VSYS M6L)
Name => 'GSensor',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::Stream',
ProcessProc => \&Process_gsen,
},
},
# gpsa - seen hex "01 20 00 00" (DuDuBell M1, VSYS M6L)
# gsea - 20 bytes hex "05 00's..." (DuDuBell M1) "05 08 02 01 ..." (VSYS M6L)
);
# MPEG-4 'ftyp' atom
# (ref http://developer.apple.com/mac/library/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html)
# Binary-data table: integer keys are int32u word indices into the atom.
%Image::ExifTool::QuickTime::FileType = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'int32u',
0 => {
Name => 'MajorBrand',
Format => 'undef[4]',
PrintConv => \%ftypLookup,
},
1 => {
Name => 'MinorVersion',
Format => 'undef[4]',
ValueConv => 'sprintf("%x.%x.%x", unpack("nCC", $val))',
},
2 => {
Name => 'CompatibleBrands',
# all remaining bytes of the atom (4 bytes per brand)
Format => 'undef[$size-8]',
# ignore any entry with a null, and return others as a list
ValueConv => 'my @a=($val=~/.{4}/sg); @a=grep(!/\0/,@a); \@a',
},
);
# proprietary HTC atom (HTC One MP4 video)
# Tag table for the '_htc' atom referenced from the Main table.
%Image::ExifTool::QuickTime::HTCInfo = (
PROCESS_PROC => \&ProcessMOV,
GROUPS => { 2 => 'Video' },
NOTES => 'Tags written by some HTC camera phones.',
slmt => {
Name => 'Unknown_slmt',
Unknown => 1,
Format => 'int32u', # (observed values: 4)
},
);
# atoms used in QTIF files
# Tag table for the top-level atoms of a QuickTime Image File.
%Image::ExifTool::QuickTime::ImageFile = (
PROCESS_PROC => \&ProcessMOV,
GROUPS => { 2 => 'Image' },
NOTES => 'Tags used in QTIF QuickTime Image Files.',
idsc => {
Name => 'ImageDescription',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::ImageDesc' },
},
idat => {
Name => 'ImageData',
Binary => 1,
},
iicc => {
Name => 'ICC_Profile',
SubDirectory => { TagTable => 'Image::ExifTool::ICC_Profile::Main' },
},
);
# image description data block
# Hybrid table: integer keys are int16u word indices into the binary data,
# 4-character keys are child atoms that follow the fixed-format portion.
%Image::ExifTool::QuickTime::ImageDesc = (
PROCESS_PROC => \&ProcessHybrid,
VARS => { ID_LABEL => 'ID/Index' },
GROUPS => { 2 => 'Image' },
FORMAT => 'int16u',
2 => {
Name => 'CompressorID',
Format => 'string[4]',
# not very useful since this isn't a complete list and name is given below
# # ref http://developer.apple.com/mac/library/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
# PrintConv => {
# cvid => 'Cinepak',
# jpeg => 'JPEG',
# 'smc '=> 'Graphics',
# 'rle '=> 'Animation',
# rpza => 'Apple Video',
# kpcd => 'Kodak Photo CD',
# 'png '=> 'Portable Network Graphics',
# mjpa => 'Motion-JPEG (format A)',
# mjpb => 'Motion-JPEG (format B)',
# SVQ1 => 'Sorenson video, version 1',
# SVQ3 => 'Sorenson video, version 3',
# mp4v => 'MPEG-4 video',
# 'dvc '=> 'NTSC DV-25 video',
# dvcp => 'PAL DV-25 video',
# 'gif '=> 'Compuserve Graphics Interchange Format',
# h263 => 'H.263 video',
# tiff => 'Tagged Image File Format',
# 'raw '=> 'Uncompressed RGB',
# '2vuY'=> "Uncompressed Y'CbCr, 3x8-bit 4:2:2 (2vuY)",
# 'yuv2'=> "Uncompressed Y'CbCr, 3x8-bit 4:2:2 (yuv2)",
# v308 => "Uncompressed Y'CbCr, 8-bit 4:4:4",
# v408 => "Uncompressed Y'CbCr, 8-bit 4:4:4:4",
# v216 => "Uncompressed Y'CbCr, 10, 12, 14, or 16-bit 4:2:2",
# v410 => "Uncompressed Y'CbCr, 10-bit 4:4:4",
# v210 => "Uncompressed Y'CbCr, 10-bit 4:2:2",
# hvc1 => 'HEVC', #PH
# },
},
10 => {
Name => 'VendorID',
Format => 'string[4]',
RawConv => 'length $val ? $val : undef',
PrintConv => \%vendorID,
SeparateTable => 'VendorID',
},
# 14 - ("Quality" in QuickTime docs) ??
16 => 'SourceImageWidth',
17 => 'SourceImageHeight',
18 => { Name => 'XResolution', Format => 'fixed32u' },
20 => { Name => 'YResolution', Format => 'fixed32u' },
# 24 => 'FrameCount', # always 1 (what good is this?)
25 => {
Name => 'CompressorName',
Format => 'string[32]',
# (sometimes this is a Pascal string, and sometimes it is a C string)
RawConv => q{
# leading byte <= 0x1f is a Pascal length byte -- strip it and trim
$val=substr($val,1,ord($1)) if $val=~/^([\0-\x1f])/ and ord($1)<length($val);
length $val ? $val : undef;
},
},
41 => 'BitDepth',
#
# Observed offsets for child atoms of various CompressorID types:
#
# CompressorID Offset Child atoms
# ----------- ------ ----------------
# avc1 86 avcC, btrt, colr, pasp, fiel, clap, svcC
# mp4v 86 esds, pasp
# s263 86 d263
#
btrt => {
Name => 'BitrateInfo',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Bitrate' },
},
# Reference for fiel, colr, pasp, clap:
# https://developer.apple.com/library/mac/technotes/tn2162/_index.html#//apple_ref/doc/uid/DTS40013070-CH1-TNTAG9
fiel => {
Name => 'VideoFieldOrder',
ValueConv => 'join(" ", unpack("C*",$val))',
PrintConv => [{
1 => 'Progressive',
2 => '2:1 Interlaced',
}],
},
colr => {
Name => 'ColorRepresentation',
ValueConv => 'join(" ", substr($val,0,4), unpack("x4n*",$val))',
},
pasp => {
Name => 'PixelAspectRatio',
ValueConv => 'join(":", unpack("N*",$val))',
},
clap => {
Name => 'CleanAperture',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::CleanAperture' },
},
avcC => {
# (see http://thompsonng.blogspot.ca/2010/11/mp4-file-format-part-2.html)
Name => 'AVCConfiguration',
Unknown => 1,
Binary => 1,
},
JPEG => { # (found in CR3 images; used as a flag to identify JpgFromRaw 'vide' stream)
Name => 'JPEGInfo',
# (4 bytes all zero)
Unknown => 1,
Binary => 1,
},
# hvcC - HEVC configuration
# svcC - 7 bytes: 00 00 00 00 ff e0 00
# esds - elementary stream descriptor
# d263
gama => { Name => 'Gamma', Format => 'fixed32u' },
# mjqt - default quantization table for MJPEG
# mjht - default Huffman table for MJPEG
# csgm ? (seen in hevc video)
CMP1 => { # Canon CR3
Name => 'CMP1',
SubDirectory => { TagTable => 'Image::ExifTool::Canon::CMP1' },
},
CDI1 => { # Canon CR3
Name => 'CDI1',
SubDirectory => {
TagTable => 'Image::ExifTool::Canon::CDI1',
Start => 4,
},
},
# JPEG - 4 bytes all 0 (Canon CR3)
# free - (Canon CR3)
#
# spherical video v2 stuff (untested)
#
st3d => {
Name => 'Stereoscopic3D',
Format => 'int8u',
ValueConv => '$val =~ s/.* //; $val', # (remove leading version/flags bytes?)
PrintConv => {
0 => 'Monoscopic',
1 => 'Stereoscopic Top-Bottom',
2 => 'Stereoscopic Left-Right',
3 => 'Stereoscopic Stereo-Custom', # (provisional in spec as of 2017-10-10)
},
},
sv3d => {
Name => 'SphericalVideo',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::sv3d' },
},
);
# 'sv3d' atom information (ref https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md)
# Container for spherical-video metadata within the video sample description.
%Image::ExifTool::QuickTime::sv3d = (
PROCESS_PROC => \&ProcessMOV,
GROUPS => { 2 => 'Video' },
NOTES => q{
Tags defined by the Spherical Video V2 specification. See
L<https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md>
for the specification.
},
svhd => {
Name => 'MetadataSource',
Format => 'undef',
ValueConv => '$val=~tr/\0//d; $val', # (remove version/flags? and terminator?)
},
proj => {
Name => 'Projection',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::proj' },
},
);
# 'proj' atom information (ref https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md)
# Projection container: holds the header plus one projection-type sub-box.
%Image::ExifTool::QuickTime::proj = (
PROCESS_PROC => \&ProcessMOV,
GROUPS => { 2 => 'Video' },
prhd => {
Name => 'ProjectionHeader',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::prhd' },
},
cbmp => {
Name => 'CubemapProj',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::cbmp' },
},
equi => {
Name => 'EquirectangularProj',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::equi' },
},
# mshp - MeshProjection (P.I.T.A. to decode, for not much reward, see ref)
);
# 'prhd' atom information (ref https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md)
# Binary-data table: integer keys are fixed32s word indices into the atom.
%Image::ExifTool::QuickTime::prhd = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'fixed32s',
# 0 - version (high 8 bits) / flags (low 24 bits)
1 => 'PoseYawDegrees',
2 => 'PosePitchDegrees',
3 => 'PoseRollDegrees',
);
# 'cbmp' atom information (ref https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md)
# Binary-data table: integer keys are int32u word indices into the atom.
%Image::ExifTool::QuickTime::cbmp = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'int32u',
# 0 - version (high 8 bits) / flags (low 24 bits)
1 => 'Layout',
2 => 'Padding',
);
# 'equi' atom information (ref https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md)
# Binary-data table: values are 0.32 fixed-point fractions of the frame,
# so divide by 2^32 (4294967296) to get the bound as a fraction.
%Image::ExifTool::QuickTime::equi = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'int32u', # (actually 0.32 fixed point)
# 0 - version (high 8 bits) / flags (low 24 bits)
1 => { Name => 'ProjectionBoundsTop', ValueConv => '$val / 4294967296' },
2 => { Name => 'ProjectionBoundsBottom',ValueConv => '$val / 4294967296' },
3 => { Name => 'ProjectionBoundsLeft', ValueConv => '$val / 4294967296' },
4 => { Name => 'ProjectionBoundsRight', ValueConv => '$val / 4294967296' },
);
# 'btrt' atom information (ref http://lists.freedesktop.org/archives/gstreamer-commits/2011-October/054459.html)
# Binary-data table: integer keys are int32u word indices into the atom.
%Image::ExifTool::QuickTime::Bitrate = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'int32u',
PRIORITY => 0, # often filled with zeros
0 => 'BufferSize',
1 => 'MaxBitrate',
2 => 'AverageBitrate',
);
# 'clap' atom information (ref https://developer.apple.com/library/mac/technotes/tn2162/_index.html#//apple_ref/doc/uid/DTS40013070-CH1-TNTAG9)
# Binary-data table: integer keys are rational64u indices into the atom.
%Image::ExifTool::QuickTime::CleanAperture = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'rational64u',
0 => 'CleanApertureWidth',
1 => 'CleanApertureHeight',
2 => 'CleanApertureOffsetX',
3 => 'CleanApertureOffsetY',
);
# preview data block
# Binary-data table for the 'pnot' atom; integer keys are int16u word
# indices (writable, so WRITE/CHECK procs are defined).
%Image::ExifTool::QuickTime::Preview = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
GROUPS => { 2 => 'Image' },
FORMAT => 'int16u',
0 => {
Name => 'PreviewDate',
Format => 'int32u',
Groups => { 2 => 'Time' },
%timeInfo,
},
2 => 'PreviewVersion',
3 => {
Name => 'PreviewAtomType',
Format => 'string[4]',
},
5 => 'PreviewAtomIndex',
);
# movie atoms
# Tag table for the children of the top-level 'moov' atom.
%Image::ExifTool::QuickTime::Movie = (
PROCESS_PROC => \&ProcessMOV,
WRITE_PROC => \&WriteQuickTime,
GROUPS => { 2 => 'Video' },
mvhd => {
Name => 'MovieHeader',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::MovieHeader' },
},
trak => {
Name => 'Track',
CanCreate => 0, # don't create this atom
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Track' },
},
udta => {
Name => 'UserData',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::UserData' },
},
meta => { # 'meta' is found here in my EX-F1 MOV sample - PH
Name => 'Meta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Meta' },
},
iods => {
Name => 'InitialObjectDescriptor',
Flags => ['Binary','Unknown'],
},
# conditional list: first entry whose Condition matches is used
uuid => [
{ #11 (MP4 files) (also found in QuickTime::Track)
Name => 'UUID-USMT',
Condition => '$$valPt=~/^USMT!\xd2\x4f\xce\xbb\x88\x69\x5c\xfa\xc9\xc7\x40/',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::UserMedia',
Start => 16,
},
},
{ #PH (Canon SX280)
Name => 'UUID-Canon',
Condition => '$$valPt=~/^\x85\xc0\xb6\x87\x82\x0f\x11\xe0\x81\x11\xf4\xce\x46\x2b\x6a\x48/',
SubDirectory => {
TagTable => 'Image::ExifTool::Canon::uuid',
Start => 16,
},
},
{
Name => 'UUID-Unknown',
%unknownInfo,
},
],
cmov => {
Name => 'CompressedMovie',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::CMovie' },
},
htka => { # (written by HTC One M8 in slow-motion 1280x720 video - PH)
Name => 'HTCTrack',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Track' },
},
'gps ' => { # GPS data written by Novatek cameras (parsed in QuickTimeStream.pl)
Name => 'GPSDataList',
Unknown => 1,
Binary => 1,
},
meco => { #ISO14496-12:2015
Name => 'OtherMeta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::OtherMeta' },
},
# prfl - Profile (ref 12)
# clip - clipping --> contains crgn (clip region) (ref 12)
# mvex - movie extends --> contains mehd (movie extends header), trex (track extends) (ref 14)
# ICAT - 4 bytes: "6350" (Nikon CoolPix S6900), "6500" (Panasonic FT7)
);
# (ref CFFMediaFormat-2_1.pdf)
# Tag table for the children of a 'moof' (movie fragment) atom.
%Image::ExifTool::QuickTime::MovieFragment = (
PROCESS_PROC => \&ProcessMOV,
WRITE_PROC => \&WriteQuickTime,
GROUPS => { 2 => 'Video' },
# mfhd - movie fragment header
traf => {
Name => 'TrackFragment',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TrackFragment' },
},
meta => { #ISO14496-12:2015
Name => 'Meta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Meta' },
},
);
# (ref CFFMediaFormat-2_1.pdf)
# Tag table for the children of a 'traf' (track fragment) atom.
%Image::ExifTool::QuickTime::TrackFragment = (
PROCESS_PROC => \&ProcessMOV,
WRITE_PROC => \&WriteQuickTime,
GROUPS => { 2 => 'Video' },
meta => { #ISO14496-12:2015
Name => 'Meta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Meta' },
},
# tfhd - track fragment header
# edts - edits --> contains elst (edit list) (ref PH)
# tfdt - track fragment base media decode time
# trik - trick play box
# trun - track fragment run box
# avcn - AVC NAL unit storage box
# secn - sample encryption box
# saio - sample auxiliary information offsets box
# sbgp - sample to group box
# sgpd - sample group description box
# sdtp - independent and disposable samples (ref 5)
# subs - sub-sample information (ref 5)
);
# movie header data block
# Binary-data table for 'mvhd'; integer keys are int32u word indices.
# Several fields widen from int32u to int64u when MovieHeaderVersion is 1
# (handled by the Hook entries, which also grow the variable offset).
%Image::ExifTool::QuickTime::MovieHeader = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'int32u',
DATAMEMBER => [ 0, 1, 2, 3, 4 ],
0 => {
Name => 'MovieHeaderVersion',
Format => 'int8u',
RawConv => '$$self{MovieHeaderVersion} = $val',
},
1 => {
Name => 'CreateDate',
Groups => { 2 => 'Time' },
%timeInfo,
# this is int64u if MovieHeaderVersion == 1 (ref 13)
Hook => '$$self{MovieHeaderVersion} and $format = "int64u", $varSize += 4',
},
2 => {
Name => 'ModifyDate',
Groups => { 2 => 'Time' },
%timeInfo,
# this is int64u if MovieHeaderVersion == 1 (ref 13)
Hook => '$$self{MovieHeaderVersion} and $format = "int64u", $varSize += 4',
},
3 => {
Name => 'TimeScale',
# saved as a data member for use by %durationInfo conversions
RawConv => '$$self{TimeScale} = $val',
},
4 => {
Name => 'Duration',
%durationInfo,
# this is int64u if MovieHeaderVersion == 1 (ref 13)
Hook => '$$self{MovieHeaderVersion} and $format = "int64u", $varSize += 4',
},
5 => {
Name => 'PreferredRate',
ValueConv => '$val / 0x10000',
},
6 => {
Name => 'PreferredVolume',
Format => 'int16u',
ValueConv => '$val / 256',
PrintConv => 'sprintf("%.2f%%", $val * 100)',
},
9 => {
Name => 'MatrixStructure',
Format => 'fixed32s[9]',
# (the right column is fixed 2.30 instead of 16.16)
ValueConv => q{
my @a = split ' ',$val;
$_ /= 0x4000 foreach @a[2,5,8];
return "@a";
},
},
18 => { Name => 'PreviewTime', %durationInfo },
19 => { Name => 'PreviewDuration', %durationInfo },
20 => { Name => 'PosterTime', %durationInfo },
21 => { Name => 'SelectionTime', %durationInfo },
22 => { Name => 'SelectionDuration',%durationInfo },
23 => { Name => 'CurrentTime', %durationInfo },
24 => 'NextTrackID',
);
# track atoms
# Tag table for the children of a 'trak' atom.
%Image::ExifTool::QuickTime::Track = (
PROCESS_PROC => \&ProcessMOV,
WRITE_PROC => \&WriteQuickTime,
GROUPS => { 1 => 'Track#', 2 => 'Video' },
tkhd => {
Name => 'TrackHeader',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TrackHeader' },
},
udta => {
Name => 'UserData',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::UserData' },
},
mdia => { #MP4
Name => 'Media',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Media' },
},
meta => { #PH (MOV)
Name => 'Meta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Meta' },
},
tref => {
Name => 'TrackRef',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TrackRef' },
},
tapt => {
Name => 'TrackAperture',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TrackAperture' },
},
# conditional list: first entry whose Condition matches is used
uuid => [
{ #11 (MP4 files) (also found in QuickTime::Movie)
Name => 'UUID-USMT',
Condition => '$$valPt=~/^USMT!\xd2\x4f\xce\xbb\x88\x69\x5c\xfa\xc9\xc7\x40/',
SubDirectory => {
TagTable => 'Image::ExifTool::QuickTime::UserMedia',
Start => 16,
},
},
{ #https://github.com/google/spatial-media/blob/master/docs/spherical-video-rfc.md
Name => 'SphericalVideoXML',
Condition => '$$valPt=~/^\xff\xcc\x82\x63\xf8\x55\x4a\x93\x88\x14\x58\x7a\x02\x52\x1f\xdd/',
WriteGroup => 'GSpherical', # write only GSpherical XMP tags here
HandlerType => 'vide', # only write in video tracks
SubDirectory => {
TagTable => 'Image::ExifTool::XMP::Main',
Start => 16,
WriteProc => 'Image::ExifTool::XMP::WriteGSpherical',
},
},
{
Name => 'UUID-Unknown',
%unknownInfo,
},
],
meco => { #ISO14492-12:2015 pg 83
Name => 'OtherMeta',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::OtherMeta' },
},
# edts - edits --> contains elst (edit list)
# clip - clipping --> contains crgn (clip region)
# matt - track matt --> contains kmat (compressed matt)
# load - track loading settings
# imap - track input map --> contains ' in' --> contains ' ty', obid
# prfl - Profile (ref 12)
);
# track header data block
# Processed with ProcessBinaryData: tag IDs are indices into the binary
# block (sized by the default FORMAT unless an entry overrides it).
# DATAMEMBER lists the indices whose raw values must be saved as data
# members because later entries (the version-1 Hooks) depend on them.
%Image::ExifTool::QuickTime::TrackHeader = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
    CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
    GROUPS => { 1 => 'Track#', 2 => 'Video' },
    FORMAT => 'int32u',
    DATAMEMBER => [ 0, 1, 2, 5, 7 ],
    0 => {
        Name => 'TrackHeaderVersion',
        Format => 'int8u',
        Priority => 0,
        # saved so the Hooks below can widen date/duration fields for version 1
        RawConv => '$$self{TrackHeaderVersion} = $val',
    },
    1 => {
        Name => 'TrackCreateDate',
        Priority => 0,
        Groups => { 2 => 'Time' },
        %timeInfo,
        # this is int64u if TrackHeaderVersion == 1 (ref 13)
        Hook => '$$self{TrackHeaderVersion} and $format = "int64u", $varSize += 4',
    },
    2 => {
        Name => 'TrackModifyDate',
        Priority => 0,
        Groups => { 2 => 'Time' },
        %timeInfo,
        # this is int64u if TrackHeaderVersion == 1 (ref 13)
        Hook => '$$self{TrackHeaderVersion} and $format = "int64u", $varSize += 4',
    },
    3 => {
        Name => 'TrackID',
        Priority => 0,
    },
    5 => {
        Name => 'TrackDuration',
        Priority => 0,
        %durationInfo,
        # this is int64u if TrackHeaderVersion == 1 (ref 13)
        Hook => '$$self{TrackHeaderVersion} and $format = "int64u", $varSize += 4',
    },
    7 => { # (used only for writing MatrixStructure)
        Name => 'ImageSizeLookahead',
        Hidden => 1,
        Format => 'int32u[14]',
        # value is saved, but RawConv returns undef so no tag is extracted
        RawConv => '$$self{ImageSizeLookahead} = $val; undef',
    },
    8 => {
        Name => 'TrackLayer',
        Format => 'int16u',
        Priority => 0,
    },
    9 => {
        Name => 'TrackVolume',
        Format => 'int16u',
        Priority => 0,
        # stored as 8.8 fixed point; printed as a percentage
        ValueConv => '$val / 256',
        PrintConv => 'sprintf("%.2f%%", $val * 100)',
    },
    10 => {
        Name => 'MatrixStructure',
        Format => 'fixed32s[9]',
        Notes => 'writable for the video track via the Composite Rotation tag',
        Writable => 1,
        Permanent => 1,
        # only set rotation if image size is non-zero
        RawConvInv => \&GetMatrixStructure,
        # (the right column is fixed 2.30 instead of 16.16)
        ValueConv => q{
            my @a = split ' ',$val;
            $_ /= 0x4000 foreach @a[2,5,8];
            return "@a";
        },
        ValueConvInv => q{
            my @a = split ' ',$val;
            $_ *= 0x4000 foreach @a[2,5,8];
            return "@a";
        },
    },
    19 => {
        Name => 'ImageWidth',
        Priority => 0,
        RawConv => \&FixWrongFormat,
    },
    20 => {
        Name => 'ImageHeight',
        Priority => 0,
        RawConv => \&FixWrongFormat,
    },
);
# user data atoms
# Tag table for atoms found inside a 'udta' container (movie- or track-level
# user data).  Default format is a simple string; entries with SubDirectory
# descend into nested tables, and the conditional lists (TAGS, thmb, PXMN)
# pick the first entry whose Condition matches the value data.
%Image::ExifTool::QuickTime::UserData = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    CHECK_PROC => \&CheckQTValue,
    GROUPS => { 1 => 'UserData', 2 => 'Video' },
    WRITABLE => 1,
    PREFERRED => 1, # (preferred over Keys tags when writing)
    FORMAT => 'string',
    WRITE_GROUP => 'UserData',
    LANG_INFO => \&GetLangInfo,
    NOTES => q{
        Tag ID's beginning with the copyright symbol (hex 0xa9) are multi-language
        text.  Alternate language tags are accessed by adding a dash followed by a
        3-character ISO 639-2 language code to the tag name.  ExifTool will extract
        any multi-language user data tags found, even if they aren't in this table.
        Note when creating new tags,
        L<ItemList|Image::ExifTool::TagNames/QuickTime ItemList Tags> tags are
        preferred over these, so to create the tag when a same-named ItemList tag
        exists, either "UserData" must be specified (eg. C<-UserData:Artist=Monet>
        on the command line), or the PREFERRED level must be changed via
        L<the config file|../config.html#PREF>.
    },
    # ---- standard multi-language (\xa9-prefixed) text tags ----
    "\xa9cpy" => { Name => 'Copyright', Groups => { 2 => 'Author' } },
    "\xa9day" => {
        Name => 'ContentCreateDate',
        Groups => { 2 => 'Time' },
        Shift => 'Time',
        # handle values in the form "2010-02-12T13:27:14-0800" (written by Apple iPhone)
        ValueConv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::ConvertXMPDate($val);
            $val =~ s/([-+]\d{2})(\d{2})$/$1:$2/; # add colon to timezone if necessary
            return $val;
        },
        ValueConvInv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::FormatXMPDate($val);
            $val =~ s/([-+]\d{2}):(\d{2})$/$1$2/; # remove time zone colon
            return $val;
        },
        PrintConv => '$self->ConvertDateTime($val)',
        PrintConvInv => '$self->InverseDateTime($val)',
    },
    "\xa9ART" => 'Artist', #PH (iTunes 8.0.2)
    "\xa9alb" => 'Album', #PH (iTunes 8.0.2)
    "\xa9arg" => 'Arranger', #12
    "\xa9ark" => 'ArrangerKeywords', #12
    "\xa9cmt" => 'Comment', #PH (iTunes 8.0.2)
    "\xa9cok" => 'ComposerKeywords', #12
    "\xa9com" => 'Composer', #12
    "\xa9dir" => 'Director', #12
    "\xa9ed1" => 'Edit1',
    "\xa9ed2" => 'Edit2',
    "\xa9ed3" => 'Edit3',
    "\xa9ed4" => 'Edit4',
    "\xa9ed5" => 'Edit5',
    "\xa9ed6" => 'Edit6',
    "\xa9ed7" => 'Edit7',
    "\xa9ed8" => 'Edit8',
    "\xa9ed9" => 'Edit9',
    "\xa9fmt" => 'Format',
    "\xa9gen" => 'Genre', #PH (iTunes 8.0.2)
    "\xa9grp" => 'Grouping', #PH (NC)
    "\xa9inf" => 'Information',
    "\xa9isr" => 'ISRCCode', #12
    "\xa9lab" => 'RecordLabelName', #12
    "\xa9lal" => 'RecordLabelURL', #12
    "\xa9lyr" => 'Lyrics', #PH (NC)
    "\xa9mak" => 'Make', #12
    "\xa9mal" => 'MakerURL', #12
    "\xa9mod" => 'Model', #PH
    "\xa9nam" => 'Title', #12
    "\xa9pdk" => 'ProducerKeywords', #12
    "\xa9phg" => 'RecordingCopyright', #12
    "\xa9prd" => 'Producer',
    "\xa9prf" => 'Performers',
    "\xa9prk" => 'PerformerKeywords', #12
    "\xa9prl" => 'PerformerURL',
    "\xa9req" => 'Requirements',
    "\xa9snk" => 'SubtitleKeywords', #12
    "\xa9snm" => 'Subtitle', #12
    "\xa9src" => 'SourceCredits', #12
    "\xa9swf" => 'SongWriter', #12
    "\xa9swk" => 'SongWriterKeywords', #12
    "\xa9swr" => 'SoftwareVersion', #12
    "\xa9too" => 'Encoder', #PH (NC)
    "\xa9trk" => 'Track', #PH (NC)
    "\xa9wrt" => { Name => 'Composer', Avoid => 1 }, # ("\xa9com" is preferred in UserData)
    "\xa9xyz" => { #PH (iPhone 3GS)
        Name => 'GPSCoordinates',
        Groups => { 2 => 'Location' },
        ValueConv => \&ConvertISO6709,
        ValueConvInv => \&ConvInvISO6709,
        PrintConv => \&PrintGPSCoordinates,
        PrintConvInv => \&PrintInvGPSCoordinates,
    },
    # \xa9 tags written by DJI Phantom 3: (ref PH)
    "\xa9xsp" => 'SpeedX', #PH (guess)
    "\xa9ysp" => 'SpeedY', #PH (guess)
    "\xa9zsp" => 'SpeedZ', #PH (guess)
    "\xa9fpt" => 'Pitch', #PH
    "\xa9fyw" => 'Yaw', #PH
    "\xa9frl" => 'Roll', #PH
    "\xa9gpt" => 'CameraPitch', #PH
    "\xa9gyw" => 'CameraYaw', #PH
    "\xa9grl" => 'CameraRoll', #PH
    "\xa9enc" => 'EncoderID', #PH (forum9271)
    # and the following entries don't have the proper 4-byte header for \xa9 tags:
    "\xa9dji" => { Name => 'UserData_dji', Format => 'undef', Binary => 1, Unknown => 1, Hidden => 1 },
    "\xa9res" => { Name => 'UserData_res', Format => 'undef', Binary => 1, Unknown => 1, Hidden => 1 },
    "\xa9uid" => { Name => 'UserData_uid', Format => 'undef', Binary => 1, Unknown => 1, Hidden => 1 },
    "\xa9mdl" => {
        Name => 'Model',
        Notes => 'non-standard-format DJI tag',
        Format => 'string',
        Avoid => 1,
    },
    # end DJI tags
    name => 'Name',
    WLOC => {
        Name => 'WindowLocation',
        Format => 'int16u',
    },
    LOOP => {
        Name => 'LoopStyle',
        Format => 'int32u',
        PrintConv => {
            1 => 'Normal',
            2 => 'Palindromic',
        },
    },
    SelO => {
        Name => 'PlaySelection',
        Format => 'int8u',
    },
    AllF => {
        Name => 'PlayAllFrames',
        Format => 'int8u',
    },
    meta => {
        Name => 'Meta',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::Meta',
            Start => 4, # must skip 4-byte version number header
        },
    },
    'ptv ' => {
        Name => 'PrintToVideo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Video' },
    },
    hnti => {
        Name => 'HintInfo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::HintInfo' },
    },
    hinf => {
        Name => 'HintTrackInfo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::HintTrackInfo' },
    },
    hinv => 'HintVersion', #PH (guess)
    XMP_ => { #PH (Adobe CS3 Bridge)
        Name => 'XMP',
        WriteGroup => 'XMP', # (write main tags here)
        # *** this is where ExifTool writes XMP in MOV videos (as per XMP spec) ***
        SubDirectory => { TagTable => 'Image::ExifTool::XMP::Main' },
    },
    # the following are 3gp tags, references:
    # http://atomicparsley.sourceforge.net
    # http://www.3gpp.org/ftp/tsg_sa/WG4_CODEC/TSGS4_25/Docs/
    # (note that all %langText tags are Avoid => 1)
    cprt => { Name => 'Copyright', %langText, Groups => { 2 => 'Author' } },
    auth => { Name => 'Author', %langText, Groups => { 2 => 'Author' } },
    titl => { Name => 'Title', %langText },
    dscp => { Name => 'Description',%langText },
    perf => { Name => 'Performer', %langText },
    gnre => { Name => 'Genre', %langText },
    albm => { Name => 'Album', %langText },
    coll => { Name => 'CollectionName', %langText }, #17
    rtng => {
        Name => 'Rating',
        # (4-byte flags, 4-char entity, 4-char criteria, 2-byte lang, string)
        RawConv => q{
            return '<err>' unless length $val >= 14;
            my $str = 'Entity=' . substr($val,4,4) . ' Criteria=' . substr($val,8,4);
            $str =~ tr/\0-\x1f\x7f-\xff//d; # remove unprintable characters
            my $lang = Image::ExifTool::QuickTime::UnpackLang(Get16u(\$val, 12));
            $lang = $lang ? "($lang) " : '';
            $val = substr($val, 14);
            # a BOM of 0xfeff indicates UCS-2 encoded text
            $val = $self->Decode($val, 'UCS2') if $val =~ /^\xfe\xff/;
            return $lang . $str . ' ' . $val;
        },
    },
    clsf => {
        Name => 'Classification',
        # (4-byte flags, 4-char entity, 2-byte index, 2-byte lang, string)
        RawConv => q{
            return '<err>' unless length $val >= 12;
            my $str = 'Entity=' . substr($val,4,4) . ' Index=' . Get16u(\$val,8);
            $str =~ tr/\0-\x1f\x7f-\xff//d; # remove unprintable characters
            my $lang = Image::ExifTool::QuickTime::UnpackLang(Get16u(\$val, 10));
            $lang = $lang ? "($lang) " : '';
            $val = substr($val, 12);
            $val = $self->Decode($val, 'UCS2') if $val =~ /^\xfe\xff/;
            return $lang . $str . ' ' . $val;
        },
    },
    kywd => {
        Name => 'Keywords',
        # (4 byte flags, 2-byte lang, 1-byte count, count x pascal strings)
        RawConv => q{
            return '<err>' unless length $val >= 7;
            my $lang = Image::ExifTool::QuickTime::UnpackLang(Get16u(\$val, 4));
            $lang = $lang ? "($lang) " : '';
            my $num = Get8u(\$val, 6);
            my ($i, @vals);
            my $pos = 7;
            # each keyword is a Pascal string (1-byte length + text)
            for ($i=0; $i<$num; ++$i) {
                last if $pos >= length $val;
                my $len = Get8u(\$val, $pos++);
                last if $pos + $len > length $val;
                my $v = substr($val, $pos, $len);
                $v = $self->Decode($v, 'UCS2') if $v =~ /^\xfe\xff/;
                push @vals, $v;
                $pos += $len;
            }
            my $sep = $self->Options('ListSep');
            return $lang . join($sep, @vals);
        },
    },
    loci => {
        Name => 'LocationInformation',
        Groups => { 2 => 'Location' },
        # (4-byte flags, 2-byte lang, location string, 1-byte role, 4-byte fixed longitude,
        # 4-byte fixed latitude, 4-byte fixed altitude, body string, notes string)
        RawConv => q{
            return '<err>' unless length $val >= 6;
            my $lang = Image::ExifTool::QuickTime::UnpackLang(Get16u(\$val, 4));
            $lang = $lang ? "($lang) " : '';
            $val = substr($val, 6);
            my $str;
            # the location name is either a UCS-2 or null-terminated byte string
            if ($val =~ /^\xfe\xff/) {
                $val =~ s/^(\xfe\xff(.{2})*?)\0\0//s or return '<err>';
                $str = $self->Decode($1, 'UCS2');
            } else {
                $val =~ s/^(.*?)\0//s or return '<err>';
                $str = $1;
            }
            $str = '(none)' unless length $str;
            return '<err>' if length $val < 13;
            my $role = Get8u(\$val, 0);
            my $lon = GetFixed32s(\$val, 1);
            my $lat = GetFixed32s(\$val, 5);
            my $alt = GetFixed32s(\$val, 9);
            my $roleStr = {0=>'shooting',1=>'real',2=>'fictional',3=>'reserved'}->{$role};
            $str .= ' Role=' . ($roleStr || "unknown($role)");
            $str .= sprintf(' Lat=%.5f Lon=%.5f Alt=%.2f', $lat, $lon, $alt);
            $val = substr($val, 13);
            if ($val =~ s/^(\xfe\xff(.{2})*?)\0\0//s) {
                $str .= ' Body=' . $self->Decode($1, 'UCS2');
            } elsif ($val =~ s/^(.*?)\0//s) {
                $str .= " Body=$1";
            }
            if ($val =~ s/^(\xfe\xff(.{2})*?)\0\0//s) {
                $str .= ' Notes=' . $self->Decode($1, 'UCS2');
            } elsif ($val =~ s/^(.*?)\0//s) {
                $str .= " Notes=$1";
            }
            return $lang . $str;
        },
    },
    yrrc => {
        Name => 'Year',
        Groups => { 2 => 'Time' },
        RawConv => 'length($val) >= 6 ? Get16u(\$val,4) : "<err>"',
    },
    urat => { #17
        Name => 'UserRating',
        RawConv => q{
            return '<err>' unless length $val >= 8;
            return Get8u(\$val, 7);
        },
    },
    # tsel - TrackSelection (ref 17)
    # Apple tags (ref 16)
    angl => { Name => 'CameraAngle', Format => 'string' }, # (NC)
    clfn => { Name => 'ClipFileName', Format => 'string' }, # (NC)
    clid => { Name => 'ClipID', Format => 'string' }, # (NC)
    cmid => { Name => 'CameraID', Format => 'string' }, # (NC)
    cmnm => { # (NC)
        Name => 'Model',
        Description => 'Camera Model Name',
        Avoid => 1,
        Format => 'string', # (necessary to remove the trailing NULL)
    },
    date => { # (NC)
        Name => 'DateTimeOriginal',
        Description => 'Date/Time Original',
        Groups => { 2 => 'Time' },
        Shift => 'Time',
        ValueConv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::ConvertXMPDate($val);
            $val =~ s/([-+]\d{2})(\d{2})$/$1:$2/; # add colon to timezone if necessary
            return $val;
        },
        ValueConvInv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::FormatXMPDate($val);
            $val =~ s/([-+]\d{2}):(\d{2})$/$1$2/; # remove time zone colon
            return $val;
        },
        PrintConv => '$self->ConvertDateTime($val)',
        PrintConvInv => '$self->InverseDateTime($val)',
    },
    manu => { # (SX280)
        Name => 'Make',
        Avoid => 1,
        # (with Canon there are 6 unknown bytes before the model: "\0\0\0\0\x15\xc7")
        RawConv => '$val=~s/^\0{4}..//s; $val=~s/\0.*//; $val',
    },
    modl => { # (Samsung GT-S8530, Canon SX280)
        Name => 'Model',
        Description => 'Camera Model Name',
        Avoid => 1,
        # (with Canon there are 6 unknown bytes before the model: "\0\0\0\0\x15\xc7")
        RawConv => '$val=~s/^\0{4}..//s; $val=~s/\0.*//; $val',
    },
    reel => { Name => 'ReelName', Format => 'string' }, # (NC)
    scen => { Name => 'Scene', Format => 'string' }, # (NC)
    shot => { Name => 'ShotName', Format => 'string' }, # (NC)
    slno => { Name => 'SerialNumber', Format => 'string' }, # (NC)
    apmd => { Name => 'ApertureMode', Format => 'undef' }, #20
    kgtt => { #http://lists.ffmpeg.org/pipermail/ffmpeg-devel-irc/2012-June/000707.html
        # 'TrackType' will expand to 'Track#Type' when found inside a track
        Name => 'TrackType',
        # set flag to process this as international text
        # even though the tag ID doesn't start with 0xa9
        IText => 4, # IText with 4-byte header
    },
    chpl => { # (Nero chapter list)
        Name => 'ChapterList',
        ValueConv => \&ConvertChapterList,
        PrintConv => \&PrintChapter,
    },
    # ndrm - 7 bytes (0 0 0 1 0 0 0) Nero Digital Rights Management? (PH)
    # other non-Apple tags (ref 16)
    # hpix - HipixRichPicture (ref 16, HIPIX)
    # strk - sub-track information (ref 16, ISO)
    #
    # Manufacturer-specific metadata
    #
    TAGS => [ #PH
        # these tags were initially discovered in a Pentax movie,
        # but similar information is found in videos from other manufacturers
        {
            Name => 'FujiFilmTags',
            Condition => '$$valPt =~ /^FUJIFILM DIGITAL CAMERA\0/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::FujiFilm::MOV',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'KodakTags',
            Condition => '$$valPt =~ /^EASTMAN KODAK COMPANY/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Kodak::MOV',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'KonicaMinoltaTags',
            Condition => '$$valPt =~ /^KONICA MINOLTA DIGITAL CAMERA/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Minolta::MOV1',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'MinoltaTags',
            Condition => '$$valPt =~ /^MINOLTA DIGITAL CAMERA/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Minolta::MOV2',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'NikonTags',
            Condition => '$$valPt =~ /^NIKON DIGITAL CAMERA\0/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Nikon::MOV',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'OlympusTags1',
            Condition => '$$valPt =~ /^OLYMPUS DIGITAL CAMERA\0.{9}\x01\0/s',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Olympus::MOV1',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'OlympusTags2',
            Condition => '$$valPt =~ /^OLYMPUS DIGITAL CAMERA(?!\0.{21}\x0a\0{3})/s',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Olympus::MOV2',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'OlympusTags3',
            Condition => '$$valPt =~ /^OLYMPUS DIGITAL CAMERA\0/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Olympus::MP4',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'OlympusTags4',
            Condition => '$$valPt =~ /^.{16}OLYM\0/s',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Olympus::MOV3',
                Start => 12,
            },
        },
        {
            Name => 'PentaxTags',
            Condition => '$$valPt =~ /^PENTAX DIGITAL CAMERA\0/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Pentax::MOV',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'SamsungTags',
            Condition => '$$valPt =~ /^SAMSUNG DIGITAL CAMERA\0/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Samsung::MP4',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'SanyoMOV',
            Condition => q{
                $$valPt =~ /^SANYO DIGITAL CAMERA\0/ and
                $self->{VALUE}->{FileType} eq "MOV"
            },
            SubDirectory => {
                TagTable => 'Image::ExifTool::Sanyo::MOV',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'SanyoMP4',
            Condition => q{
                $$valPt =~ /^SANYO DIGITAL CAMERA\0/ and
                $self->{VALUE}->{FileType} eq "MP4"
            },
            SubDirectory => {
                TagTable => 'Image::ExifTool::Sanyo::MP4',
                ByteOrder => 'LittleEndian',
            },
        },
        {
            Name => 'UnknownTags',
            Unknown => 1,
            Binary => 1
        },
    ],
    # ---- Canon ----
    CNCV => { Name => 'CompressorVersion', Format => 'string' }, #PH (5D Mark II)
    CNMN => {
        Name => 'Model', #PH (EOS 550D)
        Description => 'Camera Model Name',
        Avoid => 1,
        Format => 'string', # (necessary to remove the trailing NULL)
    },
    CNFV => { Name => 'FirmwareVersion', Format => 'string' }, #PH (EOS 550D)
    CNTH => { #PH (PowerShot S95)
        Name => 'CanonCNTH',
        SubDirectory => { TagTable => 'Image::ExifTool::Canon::CNTH' },
    },
    CNOP => { #PH (7DmkII)
        Name => 'CanonCNOP',
        SubDirectory => { TagTable => 'Image::ExifTool::Canon::CNOP' },
    },
    # CNDB - 2112 bytes (550D)
    # CNDM - 4 bytes - 0xff,0xd8,0xff,0xd9 (S95)
    # CNDG - 10232 bytes, mostly zeros (N100)
    # ---- Casio ----
    QVMI => { #PH
        Name => 'CasioQVMI',
        # Casio stores standard EXIF-format information in MOV videos (eg. EX-S880)
        SubDirectory => {
            TagTable => 'Image::ExifTool::Exif::Main',
            ProcessProc => \&Image::ExifTool::Exif::ProcessExif, # (because ProcessMOV is default)
            DirName => 'IFD0',
            Multi => 0, # (no NextIFD pointer)
            Start => 10,
            ByteOrder => 'BigEndian',
        },
    },
    # ---- FujiFilm ----
    FFMV => { #PH (FinePix HS20EXR)
        Name => 'FujiFilmFFMV',
        SubDirectory => { TagTable => 'Image::ExifTool::FujiFilm::FFMV' },
    },
    MVTG => { #PH (FinePix HS20EXR)
        Name => 'FujiFilmMVTG',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Exif::Main',
            ProcessProc => \&Image::ExifTool::Exif::ProcessExif, # (because ProcessMOV is default)
            DirName => 'IFD0',
            Start => 16,
            Base => '$start',
            ByteOrder => 'LittleEndian',
        },
    },
    # ---- GoPro ---- (ref PH)
    GoPr => 'GoProType', # (Hero3+)
    FIRM => { Name => 'FirmwareVersion', Avoid => 1 }, # (Hero4)
    LENS => 'LensSerialNumber', # (Hero4)
    CAME => { # (Hero4)
        Name => 'SerialNumberHash',
        Description => 'Camera Serial Number Hash',
        ValueConv => 'unpack("H*",$val)',
        ValueConvInv => 'pack("H*",$val)',
    },
    # SETT? 12 bytes (Hero4)
    # MUID? 32 bytes (Hero4, starts with serial number hash)
    # HMMT? 404 bytes (Hero4, all zero)
    # BCID? 26 bytes (Hero5, all zero)
    # GUMI? 16 bytes (Hero5)
    "FOV\0" => 'FieldOfView', #forum8938 (Hero2) seen: "Wide"
    GPMF => {
        Name => 'GoProGPMF',
        SubDirectory => { TagTable => 'Image::ExifTool::GoPro::GPMF' },
    },
    # free (all zero)
    "\xa9TSC" => 'StartTimeScale', # (Hero6)
    "\xa9TSZ" => 'StartTimeSampleSize', # (Hero6)
    "\xa9TIM" => 'StartTimecode', #PH (NC)
    # --- HTC ----
    htcb => {
        Name => 'HTCBinary',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::HTCBinary' },
    },
    # ---- Kodak ----
    DcMD => {
        Name => 'KodakDcMD',
        SubDirectory => { TagTable => 'Image::ExifTool::Kodak::DcMD' },
    },
    SNum => { Name => 'SerialNumber', Avoid => 1, Groups => { 2 => 'Camera' } },
    ptch => { Name => 'Pitch', Format => 'rational64s', Avoid => 1 }, # Units??
    _yaw => { Name => 'Yaw', Format => 'rational64s', Avoid => 1 }, # Units??
    roll => { Name => 'Roll', Format => 'rational64s', Avoid => 1 }, # Units??
    _cx_ => { Name => 'CX', Format => 'rational64s', Unknown => 1 },
    _cy_ => { Name => 'CY', Format => 'rational64s', Unknown => 1 },
    rads => { Name => 'Rads', Format => 'rational64s', Unknown => 1 },
    lvlm => { Name => 'LevelMeter', Format => 'rational64s', Unknown => 1 }, # (guess)
    Lvlm => { Name => 'LevelMeter', Format => 'rational64s', Unknown => 1 }, # (guess)
    pose => { Name => 'pose', SubDirectory => { TagTable => 'Image::ExifTool::Kodak::pose' } },
    # AMBA => Ambarella AVC atom (unknown data written by Kodak Playsport video cam)
    # tmlp - 1 byte: 0 (PixPro SP360/4KVR360)
    # pivi - 72 bytes (PixPro SP360)
    # pive - 12 bytes (PixPro SP360)
    # loop - 4 bytes: 0 0 0 0 (PixPro 4KVR360)
    # m cm - 2 bytes: 0 0 (PixPro 4KVR360)
    # m ev - 2 bytes: 0 0 (PixPro SP360/4KVR360) (exposure comp?)
    # m vr - 2 bytes: 0 1 (PixPro 4KVR360) (virtual reality?)
    # m wb - 4 bytes: 0 0 0 0 (PixPro SP360/4KVR360) (white balance?)
    # mclr - 4 bytes: 0 0 0 0 (PixPro SP360/4KVR360)
    # mmtr - 4 bytes: 0,6 0 0 0 (PixPro SP360/4KVR360)
    # mflr - 4 bytes: 0 0 0 0 (PixPro SP360)
    # lvlm - 24 bytes (PixPro SP360)
    # Lvlm - 24 bytes (PixPro 4KVR360)
    # ufdm - 4 bytes: 0 0 0 1 (PixPro SP360)
    # mtdt - 1 byte: 0 (PixPro SP360/4KVR360)
    # gdta - 75240 bytes (PixPro SP360)
    # EIS1 - 4 bytes: 03 07 00 00 (PixPro 4KVR360)
    # EIS2 - 4 bytes: 04 97 00 00 (PixPro 4KVR360)
    # ---- LG ----
    adzc => { Name => 'Unknown_adzc', Unknown => 1, Hidden => 1, %langText }, # "false\0/","true\0/"
    adze => { Name => 'Unknown_adze', Unknown => 1, Hidden => 1, %langText }, # "false\0/"
    adzm => { Name => 'Unknown_adzm', Unknown => 1, Hidden => 1, %langText }, # "\x0e\x04/","\x10\x06"
    # ---- Microsoft ----
    Xtra => { #PH (microsoft)
        Name => 'MicrosoftXtra',
        SubDirectory => { TagTable => 'Image::ExifTool::Microsoft::Xtra' },
    },
    # ---- Minolta ----
    MMA0 => { #PH (DiMage 7Hi)
        Name => 'MinoltaMMA0',
        SubDirectory => { TagTable => 'Image::ExifTool::Minolta::MMA' },
    },
    MMA1 => { #PH (Dimage A2)
        Name => 'MinoltaMMA1',
        SubDirectory => { TagTable => 'Image::ExifTool::Minolta::MMA' },
    },
    # ---- Nikon ----
    NCDT => { #PH
        Name => 'NikonNCDT',
        SubDirectory => { TagTable => 'Image::ExifTool::Nikon::NCDT' },
    },
    # ---- Olympus ----
    scrn => { #PH (TG-810)
        Name => 'OlympusPreview',
        Condition => '$$valPt =~ /^.{4}\xff\xd8\xff\xdb/s',
        SubDirectory => { TagTable => 'Image::ExifTool::Olympus::scrn' },
    },
    # ---- Panasonic/Leica ----
    PANA => { #PH
        Name => 'PanasonicPANA',
        SubDirectory => { TagTable => 'Image::ExifTool::Panasonic::PANA' },
    },
    LEIC => { #PH
        Name => 'LeicaLEIC',
        SubDirectory => { TagTable => 'Image::ExifTool::Panasonic::PANA' },
    },
    # ---- Pentax ----
    thmb => [ # (apparently defined by 3gpp, ref 16)
        { #PH (Pentax Q)
            Name => 'MakerNotePentax5a',
            Condition => '$$valPt =~ /^PENTAX \0II/',
            SubDirectory => {
                TagTable => 'Image::ExifTool::Pentax::Main',
                ProcessProc => \&Image::ExifTool::Exif::ProcessExif, # (because ProcessMOV is default)
                Start => 10,
                Base => '$start - 10',
                ByteOrder => 'LittleEndian',
            },
        },{ #PH (TG-810)
            Name => 'OlympusThumbnail',
            Condition => '$$valPt =~ /^.{4}\xff\xd8\xff\xdb/s',
            SubDirectory => { TagTable => 'Image::ExifTool::Olympus::thmb' },
        },{ #17 (format is in bytes 3-7)
            Name => 'ThumbnailImage',
            Condition => '$$valPt =~ /^.{8}\xff\xd8\xff\xdb/s',
            Groups => { 2 => 'Preview' },
            RawConv => 'substr($val, 8)',
            Binary => 1,
        },{ #17 (format is in bytes 3-7)
            Name => 'ThumbnailPNG',
            Condition => '$$valPt =~ /^.{8}\x89PNG\r\n\x1a\n/s',
            Groups => { 2 => 'Preview' },
            RawConv => 'substr($val, 8)',
            Binary => 1,
        },{
            Name => 'UnknownThumbnail',
            Groups => { 2 => 'Preview' },
            Binary => 1,
        },
    ],
    PENT => { #PH
        Name => 'PentaxPENT',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Pentax::PENT',
            ByteOrder => 'LittleEndian',
        },
    },
    PXTH => { #PH (Pentax K-01)
        Name => 'PentaxPreview',
        SubDirectory => { TagTable => 'Image::ExifTool::Pentax::PXTH' },
    },
    PXMN => [{ #PH (Pentax K-01)
        Name => 'MakerNotePentax5b',
        Condition => '$$valPt =~ /^PENTAX \0MM/',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Pentax::Main',
            ProcessProc => \&Image::ExifTool::Exif::ProcessExif, # (because ProcessMOV is default)
            Start => 10,
            Base => '$start - 10',
            ByteOrder => 'BigEndian',
        },
    },{ #PH (Pentax 645Z)
        Name => 'MakerNotePentax5c',
        Condition => '$$valPt =~ /^PENTAX \0II/',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Pentax::Main',
            ProcessProc => \&Image::ExifTool::Exif::ProcessExif, # (because ProcessMOV is default)
            Start => 10,
            Base => '$start - 10',
            ByteOrder => 'LittleEndian',
        },
    },{
        Name => 'MakerNotePentaxUnknown',
        Binary => 1,
    }],
    # ---- Ricoh ----
    RTHU => { #PH (GR)
        Name => 'PreviewImage',
        Groups => { 2 => 'Preview' },
        RawConv => '$self->ValidateImage(\$val, $tag)',
    },
    RMKN => { #PH (GR)
        Name => 'RicohRMKN',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Exif::Main',
            ProcessProc => \&Image::ExifTool::ProcessTIFF, # (because ProcessMOV is default)
        },
    },
    # ---- Samsung ----
    vndr => 'Vendor', #PH (Samsung PL70)
    SDLN => 'PlayMode', #PH (NC, Samsung ST80 "SEQ_PLAY")
    INFO => {
        Name => 'SamsungINFO',
        SubDirectory => { TagTable => 'Image::ExifTool::Samsung::INFO' },
    },
    '@sec' => { #PH (Samsung WB30F)
        Name => 'SamsungSec',
        SubDirectory => { TagTable => 'Image::ExifTool::Samsung::sec' },
    },
    'smta' => { #PH (Samsung SM-C101)
        Name => 'SamsungSmta',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Samsung::smta',
            Start => 4,
        },
    },
    cver => 'CodeVersion', #PH (guess, Samsung MV900F)
    # ducp - 4 bytes all zero (Samsung ST96,WB750), 52 bytes all zero (Samsung WB30F)
    # edli - 52 bytes all zero (Samsung WB30F)
    # @etc - 4 bytes all zero (Samsung WB30F)
    # saut - 4 bytes all zero (Samsung SM-N900T)
    # smrd - string "TRUEBLUE" (Samsung SM-C101)
    # ---- TomTom Bandit Action Cam ----
    TTMD => {
        Name => 'TomTomMetaData',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TomTom' },
    },
    # ---- Unknown ----
    # CDET - 128 bytes (unknown origin)
    # mtyp - 4 bytes all zero (some drone video)
    # kgrf - 8 bytes all zero ? (in udta inside trak atom)
    # kgcg - 128 bytes 0's and 1's
    # kgsi - 4 bytes "00 00 00 80"
    # FIEL - 18 bytes "FIEL\0\x01\0\0\0..."
    #
    # other 3rd-party tags
    # (ref http://code.google.com/p/mp4parser/source/browse/trunk/isoparser/src/main/resources/isoparser-default.properties?r=814)
    #
    ccid => 'ContentID',
    icnu => 'IconURI',
    infu => 'InfoURL',
    cdis => 'ContentDistributorID',
    albr => { Name => 'AlbumArtist', Groups => { 2 => 'Author' } },
    cvru => 'CoverURI',
    lrcu => 'LyricsURI',
    tags => { # found in Audible .m4b audio books (ref PH)
        Name => 'Audible_tags',
        SubDirectory => { TagTable => 'Image::ExifTool::Audible::tags' },
    },
);
# Unknown information stored in HTC One (M8) videos - PH
# Five int32u entries, all currently unidentified (hence TAG_PREFIX naming).
%Image::ExifTool::QuickTime::HTCBinary = (
    GROUPS => { 0 => 'MakerNotes', 1 => 'HTC', 2 => 'Video' },
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    TAG_PREFIX => 'HTCBinary',
    FIRST_ENTRY => 0,
    FORMAT => 'int32u',
    # observed values by index:
    #   0 => 1
    #   1 => 0
    #   2 => 0
    #   3 => FileSize minus 12 (why?)
    #   4 => 12
);
# TomTom Bandit Action Cam metadata (ref PH)
%Image::ExifTool::QuickTime::TomTom = (
    PROCESS_PROC => \&ProcessMOV,
    GROUPS => { 2 => 'Video' },
    NOTES => 'Tags found in TomTom Bandit Action Cam MP4 videos.',
    # TTAD data is handed to the dedicated ProcessTTAD parser
    # (NOTE(review): presumably timed sensor/GPS data -- confirm in ProcessTTAD)
    TTAD => {
        Name => 'TomTomAD',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::Stream',
            ProcessProc => \&Image::ExifTool::QuickTime::ProcessTTAD,
        },
    },
    TTHL => { Name => 'TomTomHL', Binary => 1, Unknown => 1 }, # (mostly zeros)
    # (TTID values are different for each video)
    # skip 4 bytes, then present the remainder as a hex string
    TTID => { Name => 'TomTomID', ValueConv => 'unpack("x4H*",$val)' },
    TTVI => { Name => 'TomTomVI', Format => 'int32u', Unknown => 1 }, # seen: "0 1 61 508 508"
    # TTVD seen: "normal 720p 60fps 60fps 16/9 wide 1x"
    # (extract the printable-ASCII runs and join them with spaces)
    TTVD => { Name => 'TomTomVD', ValueConv => 'my @a = ($val =~ /[\x20-\x7f]+/g); "@a"' },
);
# User-specific media data atoms (ref 11)
# Container table: the only known sub-atom is MTDT, which holds the
# actual metadata entries.
%Image::ExifTool::QuickTime::UserMedia = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&ProcessMOV,
    MTDT => {
        Name => 'MetaData',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::MetaData' },
    },
);
# User-specific media data atoms (ref 11)
# MTDT metadata entries, keyed by integer entry type and processed by
# ProcessMetaData.  The string-valued conversions below are eval'd at
# runtime by ExifTool with $val (and $self) in scope.
%Image::ExifTool::QuickTime::MetaData = (
    PROCESS_PROC => \&ProcessMetaData,
    GROUPS => { 2 => 'Video' },
    TAG_PREFIX => 'MetaData', # (name used for entry types not listed below)
    0x01 => 'Title',
    0x03 => {
        Name => 'ProductionDate',
        Groups => { 2 => 'Time' },
        Shift => 'Time',
        Writable => 1,
        Permanent => 1,
        DelValue => '0000/00/00 00:00:00',
        # translate from format "YYYY/mm/dd HH:MM:SS"
        ValueConv => '$val=~tr{/}{:}; $val',
        ValueConvInv => '$val=~s[^(\d{4}):(\d{2}):][$1/$2/]; $val',
        PrintConv => '$self->ConvertDateTime($val)',
        PrintConvInv => '$self->InverseDateTime($val)',
    },
    0x04 => 'Software',
    0x05 => 'Product',
    0x0a => {
        Name => 'TrackProperty',
        # value is 4-byte flags + two 2-byte words, printed as three numbers
        RawConv => 'my @a=unpack("Nnn",$val); "@a"',
        PrintConv => [
            { 0 => 'No presentation', BITMASK => { 0 => 'Main track' } },
            { 0 => 'No attributes', BITMASK => { 15 => 'Read only' } },
            '"Priority $val"',
        ],
    },
    0x0b => {
        Name => 'TimeZone',
        Groups => { 2 => 'Time' },
        Writable => 1,
        Permanent => 1,
        DelValue => 0,
        # stored as a signed 16-bit offset from UTC in minutes
        RawConv => 'Get16s(\$val,0)',
        RawConvInv => 'Set16s($val)',
        PrintConv => 'TimeZoneString($val)',
        # convert "+HH:MM" (or "+HHMM") back to signed minutes
        # (bug fix: this eval'd string previously ended the "return undef"
        # statement with a stray apostrophe instead of a semicolon, which
        # made the inverse conversion fail to compile at runtime)
        PrintConvInv => q{
            return undef unless $val =~ /^([-+])(\d{1,2}):?(\d{2})$/;
            my $tzmin = $2 * 60 + $3;
            $tzmin = -$tzmin if $1 eq '-';
            return $tzmin;
        },
    },
    0x0c => {
        Name => 'ModifyDate',
        Groups => { 2 => 'Time' },
        Shift => 'Time',
        Writable => 1,
        Permanent => 1,
        DelValue => '0000/00/00 00:00:00',
        # translate from format "YYYY/mm/dd HH:MM:SS"
        ValueConv => '$val=~tr{/}{:}; $val',
        ValueConvInv => '$val=~s[^(\d{4}):(\d{2}):][$1/$2/]; $val',
        PrintConv => '$self->ConvertDateTime($val)',
        PrintConvInv => '$self->InverseDateTime($val)',
    },
);
# compressed movie atoms (ref http://wiki.multimedia.cx/index.php?title=QuickTime_container#cmov)
%Image::ExifTool::QuickTime::CMovie = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&ProcessMOV,
    # the compressed movie data itself ('cmvd') is not extracted here
    dcom => 'Compression',
);
# Profile atoms (ref 11)
# Sub-atoms of the 'prfl' container: file/audio/video profile summaries,
# plus maker information that Olympus also stores here.
%Image::ExifTool::QuickTime::Profile = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&ProcessMOV,
    FPRF => { Name => 'FileGlobalProfile', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::FileProf' } },
    APRF => { Name => 'AudioProfile',      SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::AudioProf' } },
    VPRF => { Name => 'VideoProfile',      SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::VideoProf' } },
    OLYM => { #PH
        Name => 'OlympusOLYM',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Olympus::OLYM',
            ByteOrder => 'BigEndian',
        },
    },
);
# FPRF atom information (ref 11)
# Binary block of int32u values; entry 2 is reserved and not decoded.
%Image::ExifTool::QuickTime::FileProf = (
    GROUPS => { 2 => 'Video' },
    FORMAT => 'int32u',
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    0 => { Name => 'FileProfileVersion', Unknown => 1 }, # unknown = uninteresting
    1 => {
        Name => 'FileFunctionFlags',
        PrintConv => {
            BITMASK => {
                28 => 'Fragmented',
                29 => 'Additional tracks',
                30 => 'Edited', # (main AV track is edited)
            },
        },
    },
);
# APRF atom information (ref 11)
# Binary block of int32u values describing the audio track profile.
%Image::ExifTool::QuickTime::AudioProf = (
    GROUPS => { 2 => 'Audio' },
    FORMAT => 'int32u',
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    0 => { Name => 'AudioProfileVersion', Unknown => 1 },
    1 => 'AudioTrackID',
    2 => { Name => 'AudioCodec', Format => 'undef[4]' }, # (4-char codec ID)
    3 => {
        Name => 'AudioCodecInfo',
        Unknown => 1,
        PrintConv => 'sprintf("0x%.4x", $val)',
    },
    4 => {
        Name => 'AudioAttributes',
        PrintConv => {
            BITMASK => {
                0 => 'Encrypted',
                1 => 'Variable bitrate',
                2 => 'Dual mono',
            },
        },
    },
    # bitrates are stored in kbps
    5 => { Name => 'AudioAvgBitrate', ValueConv => '$val * 1000', PrintConv => 'ConvertBitrate($val)' },
    6 => { Name => 'AudioMaxBitrate', ValueConv => '$val * 1000', PrintConv => 'ConvertBitrate($val)' },
    7 => 'AudioSampleRate',
    8 => 'AudioChannels',
);
# VPRF atom information (ref 11)
# Binary block of int32u values describing the video track profile.
%Image::ExifTool::QuickTime::VideoProf = (
    GROUPS => { 2 => 'Video' },
    FORMAT => 'int32u',
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    0 => { Name => 'VideoProfileVersion', Unknown => 1 },
    1 => 'VideoTrackID',
    2 => { Name => 'VideoCodec', Format => 'undef[4]' }, # (4-char codec ID)
    3 => {
        Name => 'VideoCodecInfo',
        Unknown => 1,
        PrintConv => 'sprintf("0x%.4x", $val)',
    },
    4 => {
        Name => 'VideoAttributes',
        PrintConv => {
            BITMASK => {
                0 => 'Encrypted',
                1 => 'Variable bitrate',
                2 => 'Variable frame rate',
                3 => 'Interlaced',
            },
        },
    },
    # bitrates are stored in kbps
    5 => { Name => 'VideoAvgBitrate', ValueConv => '$val * 1000', PrintConv => 'ConvertBitrate($val)' },
    6 => { Name => 'VideoMaxBitrate', ValueConv => '$val * 1000', PrintConv => 'ConvertBitrate($val)' },
    # frame rates are fixed-point; round to 3 decimal places for display
    7 => {
        Name => 'VideoAvgFrameRate',
        Format => 'fixed32u',
        PrintConv => 'int($val * 1000 + 0.5) / 1000',
    },
    8 => {
        Name => 'VideoMaxFrameRate',
        Format => 'fixed32u',
        PrintConv => 'int($val * 1000 + 0.5) / 1000',
    },
    # two int16u values, joined with 'x' (size) or ':' (aspect) for display
    9 => {
        Name => 'VideoSize',
        Format => 'int16u[2]',
        PrintConv => '$val=~tr/ /x/; $val',
    },
    10 => {
        Name => 'PixelAspectRatio',
        Format => 'int16u[2]',
        PrintConv => '$val=~tr/ /:/; $val',
    },
);
# meta atoms
# container table for the 'meta' box; sub-atoms are dispatched by their
# 4-character IDs to the SubDirectory tables or parse hooks below
%Image::ExifTool::QuickTime::Meta = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 1 => 'Meta', 2 => 'Video' },
    ilst => {
        Name => 'ItemList',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::ItemList',
            HasData => 1, # process atoms as containers with 'data' elements
        },
    },
    # MP4 tags (ref 5)
    hdlr => {
        Name => 'Handler',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Handler' },
    },
    dinf => {
        Name => 'DataInfo', # (don't change this name -- used to recognize directory when writing)
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::DataInfo' },
    },
    ipmc => {
        Name => 'IPMPControl',
        Flags => ['Binary','Unknown'],
    },
    iloc => {
        Name => 'ItemLocation',
        # same routine used for both reading and the write-time hook
        RawConv => \&ParseItemLocation,
        WriteHook => \&ParseItemLocation,
        Notes => 'parsed, but not extracted as a tag',
    },
    ipro => {
        Name => 'ItemProtection',
        Flags => ['Binary','Unknown'],
    },
    # two variants selected by version byte: v0 uses a 2-byte entry count,
    # later versions use a 4-byte count (hence the different Start offsets)
    iinf => [{
        Name => 'ItemInformation',
        Condition => '$$valPt =~ /^\0/', # (check for version 0)
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::ItemInfo',
            Start => 6, # (4-byte version/flags + 2-byte count)
        },
    },{
        Name => 'ItemInformation',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::ItemInfo',
            Start => 8, # (4-byte version/flags + 4-byte count)
        },
    }],
    'xml ' => {
        Name => 'XML',
        Flags => [ 'Binary', 'Protected' ],
        SubDirectory => {
            TagTable => 'Image::ExifTool::XMP::XML',
            IgnoreProp => { NonRealTimeMeta => 1 }, # ignore container for Sony 'nrtm'
        },
    },
    'keys' => {
        Name => 'Keys',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Keys' },
    },
    bxml => {
        Name => 'BinaryXML',
        Flags => ['Binary','Unknown'],
    },
    # primary item ID: version 0 stores a 16-bit ID ("x4n"), later
    # versions a 32-bit ID ("x4N"); both save it in $$self{PrimaryItem}
    pitm => [{
        Name => 'PrimaryItemReference',
        Condition => '$$valPt =~ /^\0/', # (version 0?)
        RawConv => '$$self{PrimaryItem} = unpack("x4n",$val)',
        WriteHook => sub { my ($val,$et) = @_; $$et{PrimaryItem} = unpack("x4n",$val); },
    },{
        Name => 'PrimaryItemReference',
        RawConv => '$$self{PrimaryItem} = unpack("x4N",$val)',
        WriteHook => sub { my ($val,$et) = @_; $$et{PrimaryItem} = unpack("x4N",$val); },
    }],
    free => { #PH
        Name => 'Free',
        Flags => ['Binary','Unknown'],
    },
    iprp => {
        Name => 'ItemProperties',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::ItemProp' },
    },
    iref => {
        Name => 'ItemReference',
        # the version is needed to parse some of the item references
        # (note: this Condition is used for its side effect -- it stashes
        # the version byte in $$self{ItemRefVersion} and always succeeds)
        Condition => '$$self{ItemRefVersion} = ord($$valPt); 1',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::ItemRef',
            Start => 4, # skip 4-byte version/flags
        },
    },
    # idat
);
# additional metadata container (ref ISO14496-12:2015)
# the 'meco' box: holds extra 'meta' boxes plus their 'mere' relations
%Image::ExifTool::QuickTime::OtherMeta = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 2 => 'Video' },
    mere => {
        Name => 'MetaRelation',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::MetaRelation' },
    },
    meta => {
        Name => 'Meta',
        # nested metadata handled by the main Meta table above
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Meta' },
    },
);
# metabox relation (ref ISO14496-12:2015)
%Image::ExifTool::QuickTime::MetaRelation = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Video' },
FORMAT => 'int32u',
# 0 => 'MetaRelationVersion',
# 1 => 'FirstMetaboxHandlerType',
# 2 => 'FirstMetaboxHandlerType',
# 3 => { Name => 'MetaboxRelation', Format => 'int8u' },
);
# item properties container ('iprp' box of HEIF/ISO-BMFF files)
%Image::ExifTool::QuickTime::ItemProp = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 2 => 'Image' },
    ipco => {
        Name => 'ItemPropertyContainer',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::ItemPropCont' },
    },
    ipma => {
        Name => 'ItemPropertyAssociation',
        # same routine used for both reading and the write-time hook
        RawConv => \&ParseItemPropAssoc,
        WriteHook => \&ParseItemPropAssoc,
        Notes => 'parsed, but not extracted as a tag',
    },
);
# item property container ('ipco' box): per-item image properties
%Image::ExifTool::QuickTime::ItemPropCont = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    PERMANENT => 1, # (can't be deleted)
    GROUPS => { 2 => 'Image' },
    VARS => { START_INDEX => 1 }, # show verbose indices starting at 1
    # colour information: extract an ICC profile when present,
    # otherwise hide the unrecognized variant
    colr => [{
        Name => 'ICC_Profile',
        Condition => '$$valPt =~ /^(prof|rICC)/',
        SubDirectory => {
            TagTable => 'Image::ExifTool::ICC_Profile::Main',
            Start => 4, # skip 4-byte colour type
        },
    },{
        Name => 'Unknown_colr',
        Flags => ['Binary','Unknown','Hidden'],
    }],
    irot => {
        Name => 'Rotation',
        Format => 'int8u',
        Writable => 'int8u',
        # stored value is the rotation in units of 90 degrees
        ValueConv => '$val * 90',
        ValueConvInv => 'int($val / 90 + 0.5)',
    },
    ispe => {
        Name => 'ImageSpatialExtent',
        Condition => '$$valPt =~ /^\0{4}/', # (version/flags == 0/0)
        RawConv => q{
            my @dim = unpack("x4N*", $val);
            return undef if @dim < 2;
            # also set the standard width/height tags, but only for the
            # main document (not for embedded sub-documents)
            unless ($$self{DOC_NUM}) {
                $self->FoundTag(ImageWidth => $dim[0]);
                $self->FoundTag(ImageHeight => $dim[1]);
            }
            return join ' ', @dim;
        },
        PrintConv => '$val =~ tr/ /x/; $val', # "W H" -> "WxH"
    },
    pixi => {
        Name => 'ImagePixelDepth',
        Condition => '$$valPt =~ /^\0{4}./s', # (version/flags == 0/0 and count)
        # one int8u bit depth per channel, after version/flags and count
        RawConv => 'join " ", unpack("x5C*", $val)',
    },
    auxC => {
        Name => 'AuxiliaryImageType',
        Format => 'undef',
        # skip version/flags, then keep the null-terminated type URN
        RawConv => '$val = substr($val, 4); $val =~ s/\0.*//s; $val',
    },
    pasp => {
        Name => 'PixelAspectRatio',
        Format => 'int32u',
        Writable => 'int32u',
    },
    rloc => {
        Name => 'RelativeLocation',
        Format => 'int32u',
        RawConv => '$val =~ s/^\S+\s+//; $val', # remove version/flags
    },
    clap => {
        Name => 'CleanAperture',
        Format => 'rational64u',
        Notes => '4 numbers: width, height, left and top',
    },
    hvcC => {
        Name => 'HEVCConfiguration',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::HEVCConfig' },
    },
);
# HEVC configuration (ref https://github.com/MPEGGroup/isobmff/blob/master/IsoLib/libisomediafile/src/HEVCConfigAtom.c)
# decoded as binary data; integer keys are byte offsets, and fractional
# keys (eg. 1.1, 1.2) are additional bit fields extracted from the same
# byte offset using different Masks
%Image::ExifTool::QuickTime::HEVCConfig = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Video' },
    FIRST_ENTRY => 0,
    0 => 'HEVCConfigurationVersion',
    # byte 1 packs three fields: profile space (2 bits), tier flag (1 bit)
    # and profile IDC (5 bits)
    1 => {
        Name => 'GeneralProfileSpace',
        Mask => 0xc0,
        BitShift => 6,
        PrintConv => { 0 => 'Conforming' },
    },
    1.1 => {
        Name => 'GeneralTierFlag',
        Mask => 0x20,
        BitShift => 5,
        PrintConv => {
            0 => 'Main Tier',
            1 => 'High Tier',
        },
    },
    1.2 => {
        Name => 'GeneralProfileIDC',
        Mask => 0x1f,
        PrintConv => {
            0 => 'No Profile',
            1 => 'Main Profile',
            2 => 'Main 10 Profile',
            3 => 'Main Still Picture Profile',
        },
    },
    2 => {
        Name => 'GenProfileCompatibilityFlags',
        Format => 'int32u',
        # bit order is reversed relative to the stream definition
        PrintConv => { BITMASK => {
            31 => 'No Profile',         # (bit 0 in stream)
            30 => 'Main',               # (bit 1 in stream)
            29 => 'Main 10',            # (bit 2 in stream)
            28 => 'Main Still Picture', # (bit 3 in stream)
        }},
    },
    6 => {
        Name => 'ConstraintIndicatorFlags',
        Format => 'int8u[6]',
    },
    12 => {
        Name => 'GeneralLevelIDC',
        # level number is the IDC divided by 30
        PrintConv => 'sprintf("%d (level %.1f)", $val, $val/30)',
    },
    13 => {
        Name => 'MinSpatialSegmentationIDC',
        Format => 'int16u',
        Mask => 0x0fff, # upper 4 bits are reserved
    },
    15 => {
        Name => 'ParallelismType',
        Mask => 0x03,
    },
    16 => {
        Name => 'ChromaFormat',
        Mask => 0x03,
        PrintConv => {
            0 => 'Monochrome',
            1 => '4:2:0',
            2 => '4:2:2',
            3 => '4:4:4',
        },
    },
    17 => {
        Name => 'BitDepthLuma',
        Mask => 0x07,
        ValueConv => '$val + 8', # stored as (depth - 8)
    },
    18 => {
        Name => 'BitDepthChroma',
        Mask => 0x07,
        ValueConv => '$val + 8', # stored as (depth - 8)
    },
    19 => {
        Name => 'AverageFrameRate',
        Format => 'int16u',
        ValueConv => '$val / 256', # stored in units of 1/256 fps
    },
    # byte 21 packs: constant-frame-rate code (2 bits), number of temporal
    # layers (3 bits), temporal-ID-nested flag (1 bit), NAL length size (2 bits)
    21 => {
        Name => 'ConstantFrameRate',
        Mask => 0xc0,
        BitShift => 6,
        PrintConv => {
            0 => 'Unknown',
            1 => 'Constant Frame Rate',
            2 => 'Each Temporal Layer is Constant Frame Rate',
        },
    },
    21.1 => {
        Name => 'NumTemporalLayers',
        Mask => 0x38,
        BitShift => 3,
    },
    21.2 => {
        Name => 'TemporalIDNested',
        Mask => 0x04,
        BitShift => 2,
        PrintConv => { 0 => 'No', 1 => 'Yes' },
    },
    #21.3 => {
    #    Name => 'NALUnitLengthSize',
    #    Mask => 0x03,
    #    ValueConv => '$val + 1',
    #    PrintConv => { 1 => '8-bit', 2 => '16-bit', 4 => '32-bit' },
    #},
    #22 => 'NumberOfNALUnitArrays',
    # (don't decode the NAL unit arrays)
);
# item reference box ('iref'): relationships between items
%Image::ExifTool::QuickTime::ItemRef = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 2 => 'Image' },
    # (Note: ExifTool's ItemRefVersion may be used to test the iref version number)
    # dimg - DerivedImage
    # thmb - Thumbnail
    # auxl - AuxiliaryImage
    cdsc => {
        Name => 'ContentDescribes',
        Notes => 'parsed, but not extracted as a tag',
        # same routine used for both reading and the write-time hook
        RawConv => \&ParseContentDescribes,
        WriteHook => \&ParseContentDescribes,
    },
);
# item information box ('iinf'): one 'infe' entry per stored item
%Image::ExifTool::QuickTime::ItemInfo = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 2 => 'Image' },
    # avc1 - AVC image
    # hvc1 - HEVC image
    # lhv1 - L-HEVC image
    # infe - ItemInformationEntry
    # infe types: avc1,hvc1,lhv1,Exif,xml1,iovl(overlay image),grid,mime,hvt1(tile image)
    infe => {
        Name => 'ItemInfoEntry',
        # same routine used for both reading and the write-time hook
        RawConv => \&ParseItemInfoEntry,
        WriteHook => \&ParseItemInfoEntry,
        Notes => 'parsed, but not extracted as a tag',
    },
);
# track reference atoms
# references from one track to others, keyed by reference type
%Image::ExifTool::QuickTime::TrackRef = (
    PROCESS_PROC => \&ProcessMOV,
    GROUPS => { 1 => 'Track#', 2 => 'Video' },
    chap => { Name => 'ChapterListTrackID', Format => 'int32u' },
    tmcd => { Name => 'TimeCode', Format => 'int32u' },
    mpod => { #PH (FLIR MP4)
        Name => 'ElementaryStreamTrack',
        Format => 'int32u',
        ValueConv => '$val =~ s/^1 //; $val', # (why 2 numbers? -- ignore the first if "1")
    },
    # also: sync, scpt, ssrc, iTunesInfo
    cdsc => {
        Name => 'ContentDescribes',
        Format => 'int32u',
        PrintConv => '"Track $val"',
    },
    # cdep (Structural Dependency QT tag?)
);
# track aperture mode dimensions atoms
# (ref https://developer.apple.com/library/mac/#documentation/QuickTime/QTFF/QTFFChap2/qtff2.html)
# each atom holds a flags word followed by a fixed-point width/height pair
%Image::ExifTool::QuickTime::TrackAperture = (
    PROCESS_PROC => \&ProcessMOV,
    GROUPS => { 1 => 'Track#', 2 => 'Video' },
    clef => {
        Name => 'CleanApertureDimensions',
        Format => 'fixed32u',
        Count => 3,
        ValueConv => '$val =~ s/^.*? //; $val', # remove flags word
        PrintConv => '$val =~ tr/ /x/; $val',   # "W H" -> "WxH"
    },
    prof => {
        Name => 'ProductionApertureDimensions',
        Format => 'fixed32u',
        Count => 3,
        ValueConv => '$val =~ s/^.*? //; $val', # remove flags word
        PrintConv => '$val =~ tr/ /x/; $val',   # "W H" -> "WxH"
    },
    enof => {
        Name => 'EncodedPixelsDimensions',
        Format => 'fixed32u',
        Count => 3,
        ValueConv => '$val =~ s/^.*? //; $val', # remove flags word
        PrintConv => '$val =~ tr/ /x/; $val',   # "W H" -> "WxH"
    },
);
# item list atoms
# -> these atoms are unique, and contain one or more 'data' atoms
%Image::ExifTool::QuickTime::ItemList = (
PROCESS_PROC => \&ProcessMOV,
WRITE_PROC => \&WriteQuickTime,
CHECK_PROC => \&CheckQTValue,
WRITABLE => 1,
PREFERRED => 2, # (preferred over UserData and Keys tags when writing)
FORMAT => 'string',
GROUPS => { 1 => 'ItemList', 2 => 'Audio' },
WRITE_GROUP => 'ItemList',
LANG_INFO => \&GetLangInfo,
NOTES => q{
This is the preferred location for creating new QuickTime tags. Tags in
this table support alternate languages which are accessed by adding a
3-character ISO 639-2 language code and an optional ISO 3166-1 alpha 2
country code to the tag name (eg. "ItemList:Title-fra" or
"ItemList::Title-fra-FR"). When creating a new Meta box to contain the
ItemList directory, by default ExifTool does not specify a
L<Handler|Image::ExifTool::TagNames/QuickTime Handler Tags>, but the
API L<QuickTimeHandler|../ExifTool.html#QuickTimeHandler> option may be used to include an 'mdir' Handler box.
},
# in this table, binary 1 and 2-byte "data"-type tags are interpreted as
# int8u and int16u. Multi-byte binary "data" tags are extracted as binary data.
# (Note that the Preferred property is set to 0 for some tags to prevent them
# from being created when a same-named tag already exists in the table)
"\xa9ART" => 'Artist',
"\xa9alb" => 'Album',
"\xa9aut" => { Name => 'Author', Avoid => 1, Groups => { 2 => 'Author' } }, #forum10091 ('auth' is preferred)
"\xa9cmt" => 'Comment',
"\xa9com" => { Name => 'Composer', Avoid => 1, }, # ("\xa9wrt" is preferred in ItemList)
"\xa9day" => {
Name => 'ContentCreateDate',
Groups => { 2 => 'Time' },
Shift => 'Time',
# handle values in the form "2010-02-12T13:27:14-0800"
ValueConv => q{
require Image::ExifTool::XMP;
$val = Image::ExifTool::XMP::ConvertXMPDate($val);
$val =~ s/([-+]\d{2})(\d{2})$/$1:$2/; # add colon to timezone if necessary
return $val;
},
ValueConvInv => q{
require Image::ExifTool::XMP;
$val = Image::ExifTool::XMP::FormatXMPDate($val);
$val =~ s/([-+]\d{2}):(\d{2})$/$1$2/; # remove time zone colon
return $val;
},
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val)',
},
"\xa9des" => 'Description', #4
"\xa9enc" => 'EncodedBy', #10
"\xa9gen" => 'Genre',
"\xa9grp" => 'Grouping',
"\xa9lyr" => 'Lyrics',
"\xa9nam" => 'Title',
# "\xa9st3" ? #10
"\xa9too" => 'Encoder',
"\xa9trk" => 'Track',
"\xa9wrt" => 'Composer',
'----' => {
Name => 'iTunesInfo',
SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::iTunesInfo' },
},
aART => { Name => 'AlbumArtist', Groups => { 2 => 'Author' } },
covr => { Name => 'CoverArt', Groups => { 2 => 'Preview' } },
cpil => { #10
Name => 'Compilation',
Format => 'int8u', #23
PrintConv => { 0 => 'No', 1 => 'Yes' },
},
disk => {
Name => 'DiskNumber',
Format => 'undef', # (necessary to prevent decoding as string!)
ValueConv => 'length($val) >= 6 ? join(" of ",unpack("x2nn",$val)) : \$val',
ValueConvInv => 'my @a = split / of /, $val; @a==2 ? pack("n3",0,@a) : undef',
},
pgap => { #10
Name => 'PlayGap',
Format => 'int8u', #23
PrintConv => {
0 => 'Insert Gap',
1 => 'No Gap',
},
},
tmpo => {
Name => 'BeatsPerMinute',
Format => 'int16u', # marked as boolean but really int16u in my sample
},
trkn => {
Name => 'TrackNumber',
Format => 'undef', # (necessary to prevent decoding as string!)
ValueConv => 'length($val) >= 6 ? join(" of ",unpack("x2nn",$val)) : \$val',
ValueConvInv => 'my @a = split / of /, $val; @a==2 ? pack("n3",0,@a) : undef',
},
#
# Note: it is possible that the tags below are not being decoded properly
# because I don't have samples to verify many of these - PH
#
akID => { #10
Name => 'AppleStoreAccountType',
Format => 'int8u', #24
PrintConv => {
0 => 'iTunes',
1 => 'AOL',
},
},
albm => { Name => 'Album', Avoid => 1 }, #(ffmpeg source)
apID => 'AppleStoreAccount',
atID => { #10 (or TV series)
Name => 'AlbumTitleID',
Format => 'int32u',
},
auth => { Name => 'Author', Groups => { 2 => 'Author' } },
catg => 'Category', #7
cnID => { #10
Name => 'AppleStoreCatalogID',
Format => 'int32u',
},
cprt => { Name => 'Copyright', Groups => { 2 => 'Author' } },
dscp => { Name => 'Description', Avoid => 1 },
desc => { Name => 'Description', Avoid => 1 }, #7
gnre => { #10
Name => 'Genre',
Avoid => 1,
# (Note: not written as int16u if numerical, although it should be)
PrintConv => q{
return $val unless $val =~ /^\d+$/;
require Image::ExifTool::ID3;
Image::ExifTool::ID3::PrintGenre($val - 1); # note the "- 1"
},
PrintConvInv => q{
require Image::ExifTool::ID3;
my $id = Image::ExifTool::ID3::GetGenreID($val);
return defined $id ? $id : $val;
},
},
egid => 'EpisodeGlobalUniqueID', #7
geID => { #10
Name => 'GenreID',
Format => 'int32u',
SeparateTable => 1,
PrintConv => { #21/PH (based on https://affiliate.itunes.apple.com/resources/documentation/genre-mapping/)
2 => 'Music|Blues',
3 => 'Music|Comedy',
4 => "Music|Children's Music",
5 => 'Music|Classical',
6 => 'Music|Country',
7 => 'Music|Electronic',
8 => 'Music|Holiday',
9 => 'Music|Classical|Opera',
10 => 'Music|Singer/Songwriter',
11 => 'Music|Jazz',
12 => 'Music|Latino',
13 => 'Music|New Age',
14 => 'Music|Pop',
15 => 'Music|R&B/Soul',
16 => 'Music|Soundtrack',
17 => 'Music|Dance',
18 => 'Music|Hip-Hop/Rap',
19 => 'Music|World',
20 => 'Music|Alternative',
21 => 'Music|Rock',
22 => 'Music|Christian & Gospel',
23 => 'Music|Vocal',
24 => 'Music|Reggae',
25 => 'Music|Easy Listening',
26 => 'Podcasts',
27 => 'Music|J-Pop',
28 => 'Music|Enka',
29 => 'Music|Anime',
30 => 'Music|Kayokyoku',
31 => 'Music Videos',
32 => 'TV Shows',
33 => 'Movies',
34 => 'Music',
35 => 'iPod Games',
36 => 'App Store',
37 => 'Tones',
38 => 'Books',
39 => 'Mac App Store',
40 => 'Textbooks',
50 => 'Music|Fitness & Workout',
51 => 'Music|Pop|K-Pop',
52 => 'Music|Karaoke',
53 => 'Music|Instrumental',
74 => 'Audiobooks|News',
75 => 'Audiobooks|Programs & Performances',
1001 => 'Music|Alternative|College Rock',
1002 => 'Music|Alternative|Goth Rock',
1003 => 'Music|Alternative|Grunge',
1004 => 'Music|Alternative|Indie Rock',
1005 => 'Music|Alternative|New Wave',
1006 => 'Music|Alternative|Punk',
1007 => 'Music|Blues|Chicago Blues',
1009 => 'Music|Blues|Classic Blues',
1010 => 'Music|Blues|Contemporary Blues',
1011 => 'Music|Blues|Country Blues',
1012 => 'Music|Blues|Delta Blues',
1013 => 'Music|Blues|Electric Blues',
1014 => "Music|Children's Music|Lullabies",
1015 => "Music|Children's Music|Sing-Along",
1016 => "Music|Children's Music|Stories",
1017 => 'Music|Classical|Avant-Garde',
1018 => 'Music|Classical|Baroque Era',
1019 => 'Music|Classical|Chamber Music',
1020 => 'Music|Classical|Chant',
1021 => 'Music|Classical|Choral',
1022 => 'Music|Classical|Classical Crossover',
1023 => 'Music|Classical|Early Music',
1024 => 'Music|Classical|Impressionist',
1025 => 'Music|Classical|Medieval Era',
1026 => 'Music|Classical|Minimalism',
1027 => 'Music|Classical|Modern Era',
1028 => 'Music|Classical|Opera',
1029 => 'Music|Classical|Orchestral',
1030 => 'Music|Classical|Renaissance',
1031 => 'Music|Classical|Romantic Era',
1032 => 'Music|Classical|Wedding Music',
1033 => 'Music|Country|Alternative Country',
1034 => 'Music|Country|Americana',
1035 => 'Music|Country|Bluegrass',
1036 => 'Music|Country|Contemporary Bluegrass',
1037 => 'Music|Country|Contemporary Country',
1038 => 'Music|Country|Country Gospel',
1039 => 'Music|Country|Honky Tonk',
1040 => 'Music|Country|Outlaw Country',
1041 => 'Music|Country|Traditional Bluegrass',
1042 => 'Music|Country|Traditional Country',
1043 => 'Music|Country|Urban Cowboy',
1044 => 'Music|Dance|Breakbeat',
1045 => 'Music|Dance|Exercise',
1046 => 'Music|Dance|Garage',
1047 => 'Music|Dance|Hardcore',
1048 => 'Music|Dance|House',
1049 => "Music|Dance|Jungle/Drum'n'bass",
1050 => 'Music|Dance|Techno',
1051 => 'Music|Dance|Trance',
1052 => 'Music|Jazz|Big Band',
1053 => 'Music|Jazz|Bop',
1054 => 'Music|Easy Listening|Lounge',
1055 => 'Music|Easy Listening|Swing',
1056 => 'Music|Electronic|Ambient',
1057 => 'Music|Electronic|Downtempo',
1058 => 'Music|Electronic|Electronica',
1060 => 'Music|Electronic|IDM/Experimental',
1061 => 'Music|Electronic|Industrial',
1062 => 'Music|Singer/Songwriter|Alternative Folk',
1063 => 'Music|Singer/Songwriter|Contemporary Folk',
1064 => 'Music|Singer/Songwriter|Contemporary Singer/Songwriter',
1065 => 'Music|Singer/Songwriter|Folk-Rock',
1066 => 'Music|Singer/Songwriter|New Acoustic',
1067 => 'Music|Singer/Songwriter|Traditional Folk',
1068 => 'Music|Hip-Hop/Rap|Alternative Rap',
1069 => 'Music|Hip-Hop/Rap|Dirty South',
1070 => 'Music|Hip-Hop/Rap|East Coast Rap',
1071 => 'Music|Hip-Hop/Rap|Gangsta Rap',
1072 => 'Music|Hip-Hop/Rap|Hardcore Rap',
1073 => 'Music|Hip-Hop/Rap|Hip-Hop',
1074 => 'Music|Hip-Hop/Rap|Latin Rap',
1075 => 'Music|Hip-Hop/Rap|Old School Rap',
1076 => 'Music|Hip-Hop/Rap|Rap',
1077 => 'Music|Hip-Hop/Rap|Underground Rap',
1078 => 'Music|Hip-Hop/Rap|West Coast Rap',
1079 => 'Music|Holiday|Chanukah',
1080 => 'Music|Holiday|Christmas',
1081 => "Music|Holiday|Christmas: Children's",
1082 => 'Music|Holiday|Christmas: Classic',
1083 => 'Music|Holiday|Christmas: Classical',
1084 => 'Music|Holiday|Christmas: Jazz',
1085 => 'Music|Holiday|Christmas: Modern',
1086 => 'Music|Holiday|Christmas: Pop',
1087 => 'Music|Holiday|Christmas: R&B',
1088 => 'Music|Holiday|Christmas: Religious',
1089 => 'Music|Holiday|Christmas: Rock',
1090 => 'Music|Holiday|Easter',
1091 => 'Music|Holiday|Halloween',
1092 => 'Music|Holiday|Holiday: Other',
1093 => 'Music|Holiday|Thanksgiving',
1094 => 'Music|Christian & Gospel|CCM',
1095 => 'Music|Christian & Gospel|Christian Metal',
1096 => 'Music|Christian & Gospel|Christian Pop',
1097 => 'Music|Christian & Gospel|Christian Rap',
1098 => 'Music|Christian & Gospel|Christian Rock',
1099 => 'Music|Christian & Gospel|Classic Christian',
1100 => 'Music|Christian & Gospel|Contemporary Gospel',
1101 => 'Music|Christian & Gospel|Gospel',
1103 => 'Music|Christian & Gospel|Praise & Worship',
1104 => 'Music|Christian & Gospel|Southern Gospel',
1105 => 'Music|Christian & Gospel|Traditional Gospel',
1106 => 'Music|Jazz|Avant-Garde Jazz',
1107 => 'Music|Jazz|Contemporary Jazz',
1108 => 'Music|Jazz|Crossover Jazz',
1109 => 'Music|Jazz|Dixieland',
1110 => 'Music|Jazz|Fusion',
1111 => 'Music|Jazz|Latin Jazz',
1112 => 'Music|Jazz|Mainstream Jazz',
1113 => 'Music|Jazz|Ragtime',
1114 => 'Music|Jazz|Smooth Jazz',
1115 => 'Music|Latino|Latin Jazz',
1116 => 'Music|Latino|Contemporary Latin',
1117 => 'Music|Latino|Pop Latino',
1118 => 'Music|Latino|Raices', # (Raíces)
1119 => 'Music|Latino|Urbano latino',
1120 => 'Music|Latino|Baladas y Boleros',
1121 => 'Music|Latino|Rock y Alternativo',
1122 => 'Music|Brazilian',
1123 => 'Music|Latino|Musica Mexicana', # (Música Mexicana)
1124 => 'Music|Latino|Musica tropical', # (Música tropical)
1125 => 'Music|New Age|Environmental',
1126 => 'Music|New Age|Healing',
1127 => 'Music|New Age|Meditation',
1128 => 'Music|New Age|Nature',
1129 => 'Music|New Age|Relaxation',
1130 => 'Music|New Age|Travel',
1131 => 'Music|Pop|Adult Contemporary',
1132 => 'Music|Pop|Britpop',
1133 => 'Music|Pop|Pop/Rock',
1134 => 'Music|Pop|Soft Rock',
1135 => 'Music|Pop|Teen Pop',
1136 => 'Music|R&B/Soul|Contemporary R&B',
1137 => 'Music|R&B/Soul|Disco',
1138 => 'Music|R&B/Soul|Doo Wop',
1139 => 'Music|R&B/Soul|Funk',
1140 => 'Music|R&B/Soul|Motown',
1141 => 'Music|R&B/Soul|Neo-Soul',
1142 => 'Music|R&B/Soul|Quiet Storm',
1143 => 'Music|R&B/Soul|Soul',
1144 => 'Music|Rock|Adult Alternative',
1145 => 'Music|Rock|American Trad Rock',
1146 => 'Music|Rock|Arena Rock',
1147 => 'Music|Rock|Blues-Rock',
1148 => 'Music|Rock|British Invasion',
1149 => 'Music|Rock|Death Metal/Black Metal',
1150 => 'Music|Rock|Glam Rock',
1151 => 'Music|Rock|Hair Metal',
1152 => 'Music|Rock|Hard Rock',
1153 => 'Music|Rock|Metal',
1154 => 'Music|Rock|Jam Bands',
1155 => 'Music|Rock|Prog-Rock/Art Rock',
1156 => 'Music|Rock|Psychedelic',
1157 => 'Music|Rock|Rock & Roll',
1158 => 'Music|Rock|Rockabilly',
1159 => 'Music|Rock|Roots Rock',
1160 => 'Music|Rock|Singer/Songwriter',
1161 => 'Music|Rock|Southern Rock',
1162 => 'Music|Rock|Surf',
1163 => 'Music|Rock|Tex-Mex',
1165 => 'Music|Soundtrack|Foreign Cinema',
1166 => 'Music|Soundtrack|Musicals',
1167 => 'Music|Comedy|Novelty',
1168 => 'Music|Soundtrack|Original Score',
1169 => 'Music|Soundtrack|Soundtrack',
1171 => 'Music|Comedy|Standup Comedy',
1172 => 'Music|Soundtrack|TV Soundtrack',
1173 => 'Music|Vocal|Standards',
1174 => 'Music|Vocal|Traditional Pop',
1175 => 'Music|Jazz|Vocal Jazz',
1176 => 'Music|Vocal|Vocal Pop',
1177 => 'Music|World|Afro-Beat',
1178 => 'Music|World|Afro-Pop',
1179 => 'Music|World|Cajun',
1180 => 'Music|World|Celtic',
1181 => 'Music|World|Celtic Folk',
1182 => 'Music|World|Contemporary Celtic',
1183 => 'Music|Reggae|Modern Dancehall',
1184 => 'Music|World|Drinking Songs',
1185 => 'Music|Indian|Indian Pop',
1186 => 'Music|World|Japanese Pop',
1187 => 'Music|World|Klezmer',
1188 => 'Music|World|Polka',
1189 => 'Music|World|Traditional Celtic',
1190 => 'Music|World|Worldbeat',
1191 => 'Music|World|Zydeco',
1192 => 'Music|Reggae|Roots Reggae',
1193 => 'Music|Reggae|Dub',
1194 => 'Music|Reggae|Ska',
1195 => 'Music|World|Caribbean',
1196 => 'Music|World|South America',
1197 => 'Music|Arabic',
1198 => 'Music|World|North America',
1199 => 'Music|World|Hawaii',
1200 => 'Music|World|Australia',
1201 => 'Music|World|Japan',
1202 => 'Music|World|France',
1203 => 'Music|World|Africa',
1204 => 'Music|World|Asia',
1205 => 'Music|World|Europe',
1206 => 'Music|World|South Africa',
1207 => 'Music|Jazz|Hard Bop',
1208 => 'Music|Jazz|Trad Jazz',
1209 => 'Music|Jazz|Cool Jazz',
1210 => 'Music|Blues|Acoustic Blues',
1211 => 'Music|Classical|High Classical',
1220 => 'Music|Brazilian|Axe', # (Axé)
1221 => 'Music|Brazilian|Bossa Nova',
1222 => 'Music|Brazilian|Choro',
1223 => 'Music|Brazilian|Forro', # (Forró)
1224 => 'Music|Brazilian|Frevo',
1225 => 'Music|Brazilian|MPB',
1226 => 'Music|Brazilian|Pagode',
1227 => 'Music|Brazilian|Samba',
1228 => 'Music|Brazilian|Sertanejo',
1229 => 'Music|Brazilian|Baile Funk',
1230 => 'Music|Alternative|Chinese Alt',
1231 => 'Music|Alternative|Korean Indie',
1232 => 'Music|Chinese',
1233 => 'Music|Chinese|Chinese Classical',
1234 => 'Music|Chinese|Chinese Flute',
1235 => 'Music|Chinese|Chinese Opera',
1236 => 'Music|Chinese|Chinese Orchestral',
1237 => 'Music|Chinese|Chinese Regional Folk',
1238 => 'Music|Chinese|Chinese Strings',
1239 => 'Music|Chinese|Taiwanese Folk',
1240 => 'Music|Chinese|Tibetan Native Music',
1241 => 'Music|Hip-Hop/Rap|Chinese Hip-Hop',
1242 => 'Music|Hip-Hop/Rap|Korean Hip-Hop',
1243 => 'Music|Korean',
1244 => 'Music|Korean|Korean Classical',
1245 => 'Music|Korean|Korean Trad Song',
1246 => 'Music|Korean|Korean Trad Instrumental',
1247 => 'Music|Korean|Korean Trad Theater',
1248 => 'Music|Rock|Chinese Rock',
1249 => 'Music|Rock|Korean Rock',
1250 => 'Music|Pop|C-Pop',
1251 => 'Music|Pop|Cantopop/HK-Pop',
1252 => 'Music|Pop|Korean Folk-Pop',
1253 => 'Music|Pop|Mandopop',
1254 => 'Music|Pop|Tai-Pop',
1255 => 'Music|Pop|Malaysian Pop',
1256 => 'Music|Pop|Pinoy Pop',
1257 => 'Music|Pop|Original Pilipino Music',
1258 => 'Music|Pop|Manilla Sound',
1259 => 'Music|Pop|Indo Pop',
1260 => 'Music|Pop|Thai Pop',
1261 => 'Music|Vocal|Trot',
1262 => 'Music|Indian',
1263 => 'Music|Indian|Bollywood',
1264 => 'Music|Indian|Regional Indian|Tamil',
1265 => 'Music|Indian|Regional Indian|Telugu',
1266 => 'Music|Indian|Regional Indian',
1267 => 'Music|Indian|Devotional & Spiritual',
1268 => 'Music|Indian|Sufi',
1269 => 'Music|Indian|Indian Classical',
1270 => 'Music|World|Russian Chanson',
1271 => 'Music|World|Dini',
1272 => 'Music|World|Halk',
1273 => 'Music|World|Sanat',
1274 => 'Music|World|Dangdut',
1275 => 'Music|World|Indonesian Religious',
1276 => 'Music|World|Calypso',
1277 => 'Music|World|Soca',
1278 => 'Music|Indian|Ghazals',
1279 => 'Music|Indian|Indian Folk',
1280 => 'Music|World|Arabesque',
1281 => 'Music|World|Afrikaans',
1282 => 'Music|World|Farsi',
1283 => 'Music|World|Israeli',
1284 => 'Music|Arabic|Khaleeji',
1285 => 'Music|Arabic|North African',
1286 => 'Music|Arabic|Arabic Pop',
1287 => 'Music|Arabic|Islamic',
1288 => 'Music|Soundtrack|Sound Effects',
1289 => 'Music|Folk',
1290 => 'Music|Orchestral',
1291 => 'Music|Marching',
1293 => 'Music|Pop|Oldies',
1294 => 'Music|Country|Thai Country',
1295 => 'Music|World|Flamenco',
1296 => 'Music|World|Tango',
1297 => 'Music|World|Fado',
1298 => 'Music|World|Iberia',
1299 => 'Music|World|Russian',
1300 => 'Music|World|Turkish',
1301 => 'Podcasts|Arts',
1302 => 'Podcasts|Society & Culture|Personal Journals',
1303 => 'Podcasts|Comedy',
1304 => 'Podcasts|Education',
1305 => 'Podcasts|Kids & Family',
1306 => 'Podcasts|Arts|Food',
1307 => 'Podcasts|Health',
1309 => 'Podcasts|TV & Film',
1310 => 'Podcasts|Music',
1311 => 'Podcasts|News & Politics',
1314 => 'Podcasts|Religion & Spirituality',
1315 => 'Podcasts|Science & Medicine',
1316 => 'Podcasts|Sports & Recreation',
1318 => 'Podcasts|Technology',
1320 => 'Podcasts|Society & Culture|Places & Travel',
1321 => 'Podcasts|Business',
1323 => 'Podcasts|Games & Hobbies',
1324 => 'Podcasts|Society & Culture',
1325 => 'Podcasts|Government & Organizations',
1337 => 'Music Videos|Classical|Piano',
1401 => 'Podcasts|Arts|Literature',
1402 => 'Podcasts|Arts|Design',
1404 => 'Podcasts|Games & Hobbies|Video Games',
1405 => 'Podcasts|Arts|Performing Arts',
1406 => 'Podcasts|Arts|Visual Arts',
1410 => 'Podcasts|Business|Careers',
1412 => 'Podcasts|Business|Investing',
1413 => 'Podcasts|Business|Management & Marketing',
1415 => 'Podcasts|Education|K-12',
1416 => 'Podcasts|Education|Higher Education',
1417 => 'Podcasts|Health|Fitness & Nutrition',
1420 => 'Podcasts|Health|Self-Help',
1421 => 'Podcasts|Health|Sexuality',
1438 => 'Podcasts|Religion & Spirituality|Buddhism',
1439 => 'Podcasts|Religion & Spirituality|Christianity',
1440 => 'Podcasts|Religion & Spirituality|Islam',
1441 => 'Podcasts|Religion & Spirituality|Judaism',
1443 => 'Podcasts|Society & Culture|Philosophy',
1444 => 'Podcasts|Religion & Spirituality|Spirituality',
1446 => 'Podcasts|Technology|Gadgets',
1448 => 'Podcasts|Technology|Tech News',
1450 => 'Podcasts|Technology|Podcasting',
1454 => 'Podcasts|Games & Hobbies|Automotive',
1455 => 'Podcasts|Games & Hobbies|Aviation',
1456 => 'Podcasts|Sports & Recreation|Outdoor',
1459 => 'Podcasts|Arts|Fashion & Beauty',
1460 => 'Podcasts|Games & Hobbies|Hobbies',
1461 => 'Podcasts|Games & Hobbies|Other Games',
1462 => 'Podcasts|Society & Culture|History',
1463 => 'Podcasts|Religion & Spirituality|Hinduism',
1464 => 'Podcasts|Religion & Spirituality|Other',
1465 => 'Podcasts|Sports & Recreation|Professional',
1466 => 'Podcasts|Sports & Recreation|College & High School',
1467 => 'Podcasts|Sports & Recreation|Amateur',
1468 => 'Podcasts|Education|Educational Technology',
1469 => 'Podcasts|Education|Language Courses',
1470 => 'Podcasts|Education|Training',
1471 => 'Podcasts|Business|Business News',
1472 => 'Podcasts|Business|Shopping',
1473 => 'Podcasts|Government & Organizations|National',
1474 => 'Podcasts|Government & Organizations|Regional',
1475 => 'Podcasts|Government & Organizations|Local',
1476 => 'Podcasts|Government & Organizations|Non-Profit',
1477 => 'Podcasts|Science & Medicine|Natural Sciences',
1478 => 'Podcasts|Science & Medicine|Medicine',
1479 => 'Podcasts|Science & Medicine|Social Sciences',
1480 => 'Podcasts|Technology|Software How-To',
1481 => 'Podcasts|Health|Alternative Health',
1602 => 'Music Videos|Blues',
1603 => 'Music Videos|Comedy',
1604 => "Music Videos|Children's Music",
1605 => 'Music Videos|Classical',
1606 => 'Music Videos|Country',
1607 => 'Music Videos|Electronic',
1608 => 'Music Videos|Holiday',
1609 => 'Music Videos|Classical|Opera',
1610 => 'Music Videos|Singer/Songwriter',
1611 => 'Music Videos|Jazz',
1612 => 'Music Videos|Latin',
1613 => 'Music Videos|New Age',
1614 => 'Music Videos|Pop',
1615 => 'Music Videos|R&B/Soul',
1616 => 'Music Videos|Soundtrack',
1617 => 'Music Videos|Dance',
1618 => 'Music Videos|Hip-Hop/Rap',
1619 => 'Music Videos|World',
1620 => 'Music Videos|Alternative',
1621 => 'Music Videos|Rock',
1622 => 'Music Videos|Christian & Gospel',
1623 => 'Music Videos|Vocal',
1624 => 'Music Videos|Reggae',
1625 => 'Music Videos|Easy Listening',
1626 => 'Music Videos|Podcasts',
1627 => 'Music Videos|J-Pop',
1628 => 'Music Videos|Enka',
1629 => 'Music Videos|Anime',
1630 => 'Music Videos|Kayokyoku',
1631 => 'Music Videos|Disney',
1632 => 'Music Videos|French Pop',
1633 => 'Music Videos|German Pop',
1634 => 'Music Videos|German Folk',
1635 => 'Music Videos|Alternative|Chinese Alt',
1636 => 'Music Videos|Alternative|Korean Indie',
1637 => 'Music Videos|Chinese',
1638 => 'Music Videos|Chinese|Chinese Classical',
1639 => 'Music Videos|Chinese|Chinese Flute',
1640 => 'Music Videos|Chinese|Chinese Opera',
1641 => 'Music Videos|Chinese|Chinese Orchestral',
1642 => 'Music Videos|Chinese|Chinese Regional Folk',
1643 => 'Music Videos|Chinese|Chinese Strings',
1644 => 'Music Videos|Chinese|Taiwanese Folk',
1645 => 'Music Videos|Chinese|Tibetan Native Music',
1646 => 'Music Videos|Hip-Hop/Rap|Chinese Hip-Hop',
1647 => 'Music Videos|Hip-Hop/Rap|Korean Hip-Hop',
1648 => 'Music Videos|Korean',
1649 => 'Music Videos|Korean|Korean Classical',
1650 => 'Music Videos|Korean|Korean Trad Song',
1651 => 'Music Videos|Korean|Korean Trad Instrumental',
1652 => 'Music Videos|Korean|Korean Trad Theater',
1653 => 'Music Videos|Rock|Chinese Rock',
1654 => 'Music Videos|Rock|Korean Rock',
1655 => 'Music Videos|Pop|C-Pop',
1656 => 'Music Videos|Pop|Cantopop/HK-Pop',
1657 => 'Music Videos|Pop|Korean Folk-Pop',
1658 => 'Music Videos|Pop|Mandopop',
1659 => 'Music Videos|Pop|Tai-Pop',
1660 => 'Music Videos|Pop|Malaysian Pop',
1661 => 'Music Videos|Pop|Pinoy Pop',
1662 => 'Music Videos|Pop|Original Pilipino Music',
1663 => 'Music Videos|Pop|Manilla Sound',
1664 => 'Music Videos|Pop|Indo Pop',
1665 => 'Music Videos|Pop|Thai Pop',
1666 => 'Music Videos|Vocal|Trot',
1671 => 'Music Videos|Brazilian',
1672 => 'Music Videos|Brazilian|Axe', # (Axé)
1673 => 'Music Videos|Brazilian|Baile Funk',
1674 => 'Music Videos|Brazilian|Bossa Nova',
1675 => 'Music Videos|Brazilian|Choro',
1676 => 'Music Videos|Brazilian|Forro',
1677 => 'Music Videos|Brazilian|Frevo',
1678 => 'Music Videos|Brazilian|MPB',
1679 => 'Music Videos|Brazilian|Pagode',
1680 => 'Music Videos|Brazilian|Samba',
1681 => 'Music Videos|Brazilian|Sertanejo',
1682 => 'Music Videos|Classical|High Classical',
1683 => 'Music Videos|Fitness & Workout',
1684 => 'Music Videos|Instrumental',
1685 => 'Music Videos|Jazz|Big Band',
1686 => 'Music Videos|Pop|K-Pop',
1687 => 'Music Videos|Karaoke',
1688 => 'Music Videos|Rock|Heavy Metal',
1689 => 'Music Videos|Spoken Word',
1690 => 'Music Videos|Indian',
1691 => 'Music Videos|Indian|Bollywood',
1692 => 'Music Videos|Indian|Regional Indian|Tamil',
1693 => 'Music Videos|Indian|Regional Indian|Telugu',
1694 => 'Music Videos|Indian|Regional Indian',
1695 => 'Music Videos|Indian|Devotional & Spiritual',
1696 => 'Music Videos|Indian|Sufi',
1697 => 'Music Videos|Indian|Indian Classical',
1698 => 'Music Videos|World|Russian Chanson',
1699 => 'Music Videos|World|Dini',
1700 => 'Music Videos|World|Halk',
1701 => 'Music Videos|World|Sanat',
1702 => 'Music Videos|World|Dangdut',
1703 => 'Music Videos|World|Indonesian Religious',
1704 => 'Music Videos|Indian|Indian Pop',
1705 => 'Music Videos|World|Calypso',
1706 => 'Music Videos|World|Soca',
1707 => 'Music Videos|Indian|Ghazals',
1708 => 'Music Videos|Indian|Indian Folk',
1709 => 'Music Videos|World|Arabesque',
1710 => 'Music Videos|World|Afrikaans',
1711 => 'Music Videos|World|Farsi',
1712 => 'Music Videos|World|Israeli',
1713 => 'Music Videos|Arabic',
1714 => 'Music Videos|Arabic|Khaleeji',
1715 => 'Music Videos|Arabic|North African',
1716 => 'Music Videos|Arabic|Arabic Pop',
1717 => 'Music Videos|Arabic|Islamic',
1718 => 'Music Videos|Soundtrack|Sound Effects',
1719 => 'Music Videos|Folk',
1720 => 'Music Videos|Orchestral',
1721 => 'Music Videos|Marching',
1723 => 'Music Videos|Pop|Oldies',
1724 => 'Music Videos|Country|Thai Country',
1725 => 'Music Videos|World|Flamenco',
1726 => 'Music Videos|World|Tango',
1727 => 'Music Videos|World|Fado',
1728 => 'Music Videos|World|Iberia',
1729 => 'Music Videos|World|Russian',
1730 => 'Music Videos|World|Turkish',
1731 => 'Music Videos|Alternative|College Rock',
1732 => 'Music Videos|Alternative|Goth Rock',
1733 => 'Music Videos|Alternative|Grunge',
1734 => 'Music Videos|Alternative|Indie Rock',
1735 => 'Music Videos|Alternative|New Wave',
1736 => 'Music Videos|Alternative|Punk',
1737 => 'Music Videos|Blues|Acoustic Blues',
1738 => 'Music Videos|Blues|Chicago Blues',
1739 => 'Music Videos|Blues|Classic Blues',
1740 => 'Music Videos|Blues|Contemporary Blues',
1741 => 'Music Videos|Blues|Country Blues',
1742 => 'Music Videos|Blues|Delta Blues',
1743 => 'Music Videos|Blues|Electric Blues',
1744 => "Music Videos|Children's Music|Lullabies",
1745 => "Music Videos|Children's Music|Sing-Along",
1746 => "Music Videos|Children's Music|Stories",
1747 => 'Music Videos|Christian & Gospel|CCM',
1748 => 'Music Videos|Christian & Gospel|Christian Metal',
1749 => 'Music Videos|Christian & Gospel|Christian Pop',
1750 => 'Music Videos|Christian & Gospel|Christian Rap',
1751 => 'Music Videos|Christian & Gospel|Christian Rock',
1752 => 'Music Videos|Christian & Gospel|Classic Christian',
1753 => 'Music Videos|Christian & Gospel|Contemporary Gospel',
1754 => 'Music Videos|Christian & Gospel|Gospel',
1755 => 'Music Videos|Christian & Gospel|Praise & Worship',
1756 => 'Music Videos|Christian & Gospel|Southern Gospel',
1757 => 'Music Videos|Christian & Gospel|Traditional Gospel',
1758 => 'Music Videos|Classical|Avant-Garde',
1759 => 'Music Videos|Classical|Baroque Era',
1760 => 'Music Videos|Classical|Chamber Music',
1761 => 'Music Videos|Classical|Chant',
1762 => 'Music Videos|Classical|Choral',
1763 => 'Music Videos|Classical|Classical Crossover',
1764 => 'Music Videos|Classical|Early Music',
1765 => 'Music Videos|Classical|Impressionist',
1766 => 'Music Videos|Classical|Medieval Era',
1767 => 'Music Videos|Classical|Minimalism',
1768 => 'Music Videos|Classical|Modern Era',
1769 => 'Music Videos|Classical|Orchestral',
1770 => 'Music Videos|Classical|Renaissance',
1771 => 'Music Videos|Classical|Romantic Era',
1772 => 'Music Videos|Classical|Wedding Music',
1773 => 'Music Videos|Comedy|Novelty',
1774 => 'Music Videos|Comedy|Standup Comedy',
1775 => 'Music Videos|Country|Alternative Country',
1776 => 'Music Videos|Country|Americana',
1777 => 'Music Videos|Country|Bluegrass',
1778 => 'Music Videos|Country|Contemporary Bluegrass',
1779 => 'Music Videos|Country|Contemporary Country',
1780 => 'Music Videos|Country|Country Gospel',
1781 => 'Music Videos|Country|Honky Tonk',
1782 => 'Music Videos|Country|Outlaw Country',
1783 => 'Music Videos|Country|Traditional Bluegrass',
1784 => 'Music Videos|Country|Traditional Country',
1785 => 'Music Videos|Country|Urban Cowboy',
1786 => 'Music Videos|Dance|Breakbeat',
1787 => 'Music Videos|Dance|Exercise',
1788 => 'Music Videos|Dance|Garage',
1789 => 'Music Videos|Dance|Hardcore',
1790 => 'Music Videos|Dance|House',
1791 => "Music Videos|Dance|Jungle/Drum'n'bass",
1792 => 'Music Videos|Dance|Techno',
1793 => 'Music Videos|Dance|Trance',
1794 => 'Music Videos|Easy Listening|Lounge',
1795 => 'Music Videos|Easy Listening|Swing',
1796 => 'Music Videos|Electronic|Ambient',
1797 => 'Music Videos|Electronic|Downtempo',
1798 => 'Music Videos|Electronic|Electronica',
1799 => 'Music Videos|Electronic|IDM/Experimental',
1800 => 'Music Videos|Electronic|Industrial',
1801 => 'Music Videos|Hip-Hop/Rap|Alternative Rap',
1802 => 'Music Videos|Hip-Hop/Rap|Dirty South',
1803 => 'Music Videos|Hip-Hop/Rap|East Coast Rap',
1804 => 'Music Videos|Hip-Hop/Rap|Gangsta Rap',
1805 => 'Music Videos|Hip-Hop/Rap|Hardcore Rap',
1806 => 'Music Videos|Hip-Hop/Rap|Hip-Hop',
1807 => 'Music Videos|Hip-Hop/Rap|Latin Rap',
1808 => 'Music Videos|Hip-Hop/Rap|Old School Rap',
1809 => 'Music Videos|Hip-Hop/Rap|Rap',
1810 => 'Music Videos|Hip-Hop/Rap|Underground Rap',
1811 => 'Music Videos|Hip-Hop/Rap|West Coast Rap',
1812 => 'Music Videos|Holiday|Chanukah',
1813 => 'Music Videos|Holiday|Christmas',
1814 => "Music Videos|Holiday|Christmas: Children's",
1815 => 'Music Videos|Holiday|Christmas: Classic',
1816 => 'Music Videos|Holiday|Christmas: Classical',
1817 => 'Music Videos|Holiday|Christmas: Jazz',
1818 => 'Music Videos|Holiday|Christmas: Modern',
1819 => 'Music Videos|Holiday|Christmas: Pop',
1820 => 'Music Videos|Holiday|Christmas: R&B',
1821 => 'Music Videos|Holiday|Christmas: Religious',
1822 => 'Music Videos|Holiday|Christmas: Rock',
1823 => 'Music Videos|Holiday|Easter',
1824 => 'Music Videos|Holiday|Halloween',
1825 => 'Music Videos|Holiday|Thanksgiving',
1826 => 'Music Videos|Jazz|Avant-Garde Jazz',
1828 => 'Music Videos|Jazz|Bop',
1829 => 'Music Videos|Jazz|Contemporary Jazz',
1830 => 'Music Videos|Jazz|Cool Jazz',
1831 => 'Music Videos|Jazz|Crossover Jazz',
1832 => 'Music Videos|Jazz|Dixieland',
1833 => 'Music Videos|Jazz|Fusion',
1834 => 'Music Videos|Jazz|Hard Bop',
1835 => 'Music Videos|Jazz|Latin Jazz',
1836 => 'Music Videos|Jazz|Mainstream Jazz',
1837 => 'Music Videos|Jazz|Ragtime',
1838 => 'Music Videos|Jazz|Smooth Jazz',
1839 => 'Music Videos|Jazz|Trad Jazz',
1840 => 'Music Videos|Latin|Alternative & Rock in Spanish',
1841 => 'Music Videos|Latin|Baladas y Boleros',
1842 => 'Music Videos|Latin|Contemporary Latin',
1843 => 'Music Videos|Latin|Latin Jazz',
1844 => 'Music Videos|Latin|Latin Urban',
1845 => 'Music Videos|Latin|Pop in Spanish',
1846 => 'Music Videos|Latin|Raices',
1847 => 'Music Videos|Latin|Musica Mexicana', # (Música Mexicana)
1848 => 'Music Videos|Latin|Salsa y Tropical',
1849 => 'Music Videos|New Age|Healing',
1850 => 'Music Videos|New Age|Meditation',
1851 => 'Music Videos|New Age|Nature',
1852 => 'Music Videos|New Age|Relaxation',
1853 => 'Music Videos|New Age|Travel',
1854 => 'Music Videos|Pop|Adult Contemporary',
1855 => 'Music Videos|Pop|Britpop',
1856 => 'Music Videos|Pop|Pop/Rock',
1857 => 'Music Videos|Pop|Soft Rock',
1858 => 'Music Videos|Pop|Teen Pop',
1859 => 'Music Videos|R&B/Soul|Contemporary R&B',
1860 => 'Music Videos|R&B/Soul|Disco',
1861 => 'Music Videos|R&B/Soul|Doo Wop',
1862 => 'Music Videos|R&B/Soul|Funk',
1863 => 'Music Videos|R&B/Soul|Motown',
1864 => 'Music Videos|R&B/Soul|Neo-Soul',
1865 => 'Music Videos|R&B/Soul|Soul',
1866 => 'Music Videos|Reggae|Modern Dancehall',
1867 => 'Music Videos|Reggae|Dub',
1868 => 'Music Videos|Reggae|Roots Reggae',
1869 => 'Music Videos|Reggae|Ska',
1870 => 'Music Videos|Rock|Adult Alternative',
1871 => 'Music Videos|Rock|American Trad Rock',
1872 => 'Music Videos|Rock|Arena Rock',
1873 => 'Music Videos|Rock|Blues-Rock',
1874 => 'Music Videos|Rock|British Invasion',
1875 => 'Music Videos|Rock|Death Metal/Black Metal',
1876 => 'Music Videos|Rock|Glam Rock',
1877 => 'Music Videos|Rock|Hair Metal',
1878 => 'Music Videos|Rock|Hard Rock',
1879 => 'Music Videos|Rock|Jam Bands',
1880 => 'Music Videos|Rock|Prog-Rock/Art Rock',
1881 => 'Music Videos|Rock|Psychedelic',
1882 => 'Music Videos|Rock|Rock & Roll',
1883 => 'Music Videos|Rock|Rockabilly',
1884 => 'Music Videos|Rock|Roots Rock',
1885 => 'Music Videos|Rock|Singer/Songwriter',
1886 => 'Music Videos|Rock|Southern Rock',
1887 => 'Music Videos|Rock|Surf',
1888 => 'Music Videos|Rock|Tex-Mex',
1889 => 'Music Videos|Singer/Songwriter|Alternative Folk',
1890 => 'Music Videos|Singer/Songwriter|Contemporary Folk',
1891 => 'Music Videos|Singer/Songwriter|Contemporary Singer/Songwriter',
1892 => 'Music Videos|Singer/Songwriter|Folk-Rock',
1893 => 'Music Videos|Singer/Songwriter|New Acoustic',
1894 => 'Music Videos|Singer/Songwriter|Traditional Folk',
1895 => 'Music Videos|Soundtrack|Foreign Cinema',
1896 => 'Music Videos|Soundtrack|Musicals',
1897 => 'Music Videos|Soundtrack|Original Score',
1898 => 'Music Videos|Soundtrack|Soundtrack',
1899 => 'Music Videos|Soundtrack|TV Soundtrack',
1900 => 'Music Videos|Vocal|Standards',
1901 => 'Music Videos|Vocal|Traditional Pop',
1902 => 'Music Videos|Jazz|Vocal Jazz',
1903 => 'Music Videos|Vocal|Vocal Pop',
1904 => 'Music Videos|World|Africa',
1905 => 'Music Videos|World|Afro-Beat',
1906 => 'Music Videos|World|Afro-Pop',
1907 => 'Music Videos|World|Asia',
1908 => 'Music Videos|World|Australia',
1909 => 'Music Videos|World|Cajun',
1910 => 'Music Videos|World|Caribbean',
1911 => 'Music Videos|World|Celtic',
1912 => 'Music Videos|World|Celtic Folk',
1913 => 'Music Videos|World|Contemporary Celtic',
1914 => 'Music Videos|World|Europe',
1915 => 'Music Videos|World|France',
1916 => 'Music Videos|World|Hawaii',
1917 => 'Music Videos|World|Japan',
1918 => 'Music Videos|World|Klezmer',
1919 => 'Music Videos|World|North America',
1920 => 'Music Videos|World|Polka',
1921 => 'Music Videos|World|South Africa',
1922 => 'Music Videos|World|South America',
1923 => 'Music Videos|World|Traditional Celtic',
1924 => 'Music Videos|World|Worldbeat',
1925 => 'Music Videos|World|Zydeco',
1926 => 'Music Videos|Christian & Gospel',
1928 => 'Music Videos|Classical|Art Song',
1929 => 'Music Videos|Classical|Brass & Woodwinds',
1930 => 'Music Videos|Classical|Solo Instrumental',
1931 => 'Music Videos|Classical|Contemporary Era',
1932 => 'Music Videos|Classical|Oratorio',
1933 => 'Music Videos|Classical|Cantata',
1934 => 'Music Videos|Classical|Electronic',
1935 => 'Music Videos|Classical|Sacred',
1936 => 'Music Videos|Classical|Guitar',
1938 => 'Music Videos|Classical|Violin',
1939 => 'Music Videos|Classical|Cello',
1940 => 'Music Videos|Classical|Percussion',
1941 => 'Music Videos|Electronic|Dubstep',
1942 => 'Music Videos|Electronic|Bass',
1943 => 'Music Videos|Hip-Hop/Rap|UK Hip-Hop',
1944 => 'Music Videos|Reggae|Lovers Rock',
1945 => 'Music Videos|Alternative|EMO',
1946 => 'Music Videos|Alternative|Pop Punk',
1947 => 'Music Videos|Alternative|Indie Pop',
1948 => 'Music Videos|New Age|Yoga',
1949 => 'Music Videos|Pop|Tribute',
1950 => 'Music Videos|Pop|Shows',
1951 => 'Music Videos|Cuban',
1952 => 'Music Videos|Cuban|Mambo',
1953 => 'Music Videos|Cuban|Chachacha',
1954 => 'Music Videos|Cuban|Guajira',
1955 => 'Music Videos|Cuban|Son',
1956 => 'Music Videos|Cuban|Bolero',
1957 => 'Music Videos|Cuban|Guaracha',
1958 => 'Music Videos|Cuban|Timba',
1959 => 'Music Videos|Soundtrack|Video Game',
1960 => 'Music Videos|Indian|Regional Indian|Punjabi|Punjabi Pop',
1961 => 'Music Videos|Indian|Regional Indian|Bengali|Rabindra Sangeet',
1962 => 'Music Videos|Indian|Regional Indian|Malayalam',
1963 => 'Music Videos|Indian|Regional Indian|Kannada',
1964 => 'Music Videos|Indian|Regional Indian|Marathi',
1965 => 'Music Videos|Indian|Regional Indian|Gujarati',
1966 => 'Music Videos|Indian|Regional Indian|Assamese',
1967 => 'Music Videos|Indian|Regional Indian|Bhojpuri',
1968 => 'Music Videos|Indian|Regional Indian|Haryanvi',
1969 => 'Music Videos|Indian|Regional Indian|Odia',
1970 => 'Music Videos|Indian|Regional Indian|Rajasthani',
1971 => 'Music Videos|Indian|Regional Indian|Urdu',
1972 => 'Music Videos|Indian|Regional Indian|Punjabi',
1973 => 'Music Videos|Indian|Regional Indian|Bengali',
1974 => 'Music Videos|Indian|Indian Classical|Carnatic Classical',
1975 => 'Music Videos|Indian|Indian Classical|Hindustani Classical',
4000 => 'TV Shows|Comedy',
4001 => 'TV Shows|Drama',
4002 => 'TV Shows|Animation',
4003 => 'TV Shows|Action & Adventure',
4004 => 'TV Shows|Classic',
4005 => 'TV Shows|Kids',
4006 => 'TV Shows|Nonfiction',
4007 => 'TV Shows|Reality TV',
4008 => 'TV Shows|Sci-Fi & Fantasy',
4009 => 'TV Shows|Sports',
4010 => 'TV Shows|Teens',
4011 => 'TV Shows|Latino TV',
4401 => 'Movies|Action & Adventure',
4402 => 'Movies|Anime',
4403 => 'Movies|Classics',
4404 => 'Movies|Comedy',
4405 => 'Movies|Documentary',
4406 => 'Movies|Drama',
4407 => 'Movies|Foreign',
4408 => 'Movies|Horror',
4409 => 'Movies|Independent',
4410 => 'Movies|Kids & Family',
4411 => 'Movies|Musicals',
4412 => 'Movies|Romance',
4413 => 'Movies|Sci-Fi & Fantasy',
4414 => 'Movies|Short Films',
4415 => 'Movies|Special Interest',
4416 => 'Movies|Thriller',
4417 => 'Movies|Sports',
4418 => 'Movies|Western',
4419 => 'Movies|Urban',
4420 => 'Movies|Holiday',
4421 => 'Movies|Made for TV',
4422 => 'Movies|Concert Films',
4423 => 'Movies|Music Documentaries',
4424 => 'Movies|Music Feature Films',
4425 => 'Movies|Japanese Cinema',
4426 => 'Movies|Jidaigeki',
4427 => 'Movies|Tokusatsu',
4428 => 'Movies|Korean Cinema',
4429 => 'Movies|Russian',
4430 => 'Movies|Turkish',
4431 => 'Movies|Bollywood',
4432 => 'Movies|Regional Indian',
4433 => 'Movies|Middle Eastern',
4434 => 'Movies|African',
6000 => 'App Store|Business',
6001 => 'App Store|Weather',
6002 => 'App Store|Utilities',
6003 => 'App Store|Travel',
6004 => 'App Store|Sports',
6005 => 'App Store|Social Networking',
6006 => 'App Store|Reference',
6007 => 'App Store|Productivity',
6008 => 'App Store|Photo & Video',
6009 => 'App Store|News',
6010 => 'App Store|Navigation',
6011 => 'App Store|Music',
6012 => 'App Store|Lifestyle',
6013 => 'App Store|Health & Fitness',
6014 => 'App Store|Games',
6015 => 'App Store|Finance',
6016 => 'App Store|Entertainment',
6017 => 'App Store|Education',
6018 => 'App Store|Books',
6020 => 'App Store|Medical',
6021 => 'App Store|Magazines & Newspapers',
6022 => 'App Store|Catalogs',
6023 => 'App Store|Food & Drink',
6024 => 'App Store|Shopping',
6025 => 'App Store|Stickers',
7001 => 'App Store|Games|Action',
7002 => 'App Store|Games|Adventure',
7003 => 'App Store|Games|Arcade',
7004 => 'App Store|Games|Board',
7005 => 'App Store|Games|Card',
7006 => 'App Store|Games|Casino',
7007 => 'App Store|Games|Dice',
7008 => 'App Store|Games|Educational',
7009 => 'App Store|Games|Family',
7011 => 'App Store|Games|Music',
7012 => 'App Store|Games|Puzzle',
7013 => 'App Store|Games|Racing',
7014 => 'App Store|Games|Role Playing',
7015 => 'App Store|Games|Simulation',
7016 => 'App Store|Games|Sports',
7017 => 'App Store|Games|Strategy',
7018 => 'App Store|Games|Trivia',
7019 => 'App Store|Games|Word',
8001 => 'Tones|Ringtones|Alternative',
8002 => 'Tones|Ringtones|Blues',
8003 => "Tones|Ringtones|Children's Music",
8004 => 'Tones|Ringtones|Classical',
8005 => 'Tones|Ringtones|Comedy',
8006 => 'Tones|Ringtones|Country',
8007 => 'Tones|Ringtones|Dance',
8008 => 'Tones|Ringtones|Electronic',
8009 => 'Tones|Ringtones|Enka',
8010 => 'Tones|Ringtones|French Pop',
8011 => 'Tones|Ringtones|German Folk',
8012 => 'Tones|Ringtones|German Pop',
8013 => 'Tones|Ringtones|Hip-Hop/Rap',
8014 => 'Tones|Ringtones|Holiday',
8015 => 'Tones|Ringtones|Inspirational',
8016 => 'Tones|Ringtones|J-Pop',
8017 => 'Tones|Ringtones|Jazz',
8018 => 'Tones|Ringtones|Kayokyoku',
8019 => 'Tones|Ringtones|Latin',
8020 => 'Tones|Ringtones|New Age',
8021 => 'Tones|Ringtones|Classical|Opera',
8022 => 'Tones|Ringtones|Pop',
8023 => 'Tones|Ringtones|R&B/Soul',
8024 => 'Tones|Ringtones|Reggae',
8025 => 'Tones|Ringtones|Rock',
8026 => 'Tones|Ringtones|Singer/Songwriter',
8027 => 'Tones|Ringtones|Soundtrack',
8028 => 'Tones|Ringtones|Spoken Word',
8029 => 'Tones|Ringtones|Vocal',
8030 => 'Tones|Ringtones|World',
8050 => 'Tones|Alert Tones|Sound Effects',
8051 => 'Tones|Alert Tones|Dialogue',
8052 => 'Tones|Alert Tones|Music',
8053 => 'Tones|Ringtones',
8054 => 'Tones|Alert Tones',
8055 => 'Tones|Ringtones|Alternative|Chinese Alt',
8056 => 'Tones|Ringtones|Alternative|College Rock',
8057 => 'Tones|Ringtones|Alternative|Goth Rock',
8058 => 'Tones|Ringtones|Alternative|Grunge',
8059 => 'Tones|Ringtones|Alternative|Indie Rock',
8060 => 'Tones|Ringtones|Alternative|Korean Indie',
8061 => 'Tones|Ringtones|Alternative|New Wave',
8062 => 'Tones|Ringtones|Alternative|Punk',
8063 => 'Tones|Ringtones|Anime',
8064 => 'Tones|Ringtones|Arabic',
8065 => 'Tones|Ringtones|Arabic|Arabic Pop',
8066 => 'Tones|Ringtones|Arabic|Islamic',
8067 => 'Tones|Ringtones|Arabic|Khaleeji',
8068 => 'Tones|Ringtones|Arabic|North African',
8069 => 'Tones|Ringtones|Blues|Acoustic Blues',
8070 => 'Tones|Ringtones|Blues|Chicago Blues',
8071 => 'Tones|Ringtones|Blues|Classic Blues',
8072 => 'Tones|Ringtones|Blues|Contemporary Blues',
8073 => 'Tones|Ringtones|Blues|Country Blues',
8074 => 'Tones|Ringtones|Blues|Delta Blues',
8075 => 'Tones|Ringtones|Blues|Electric Blues',
8076 => 'Tones|Ringtones|Brazilian',
8077 => 'Tones|Ringtones|Brazilian|Axe', # (Axé)
8078 => 'Tones|Ringtones|Brazilian|Baile Funk',
8079 => 'Tones|Ringtones|Brazilian|Bossa Nova',
8080 => 'Tones|Ringtones|Brazilian|Choro',
8081 => 'Tones|Ringtones|Brazilian|Forro', # (Forró)
8082 => 'Tones|Ringtones|Brazilian|Frevo',
8083 => 'Tones|Ringtones|Brazilian|MPB',
8084 => 'Tones|Ringtones|Brazilian|Pagode',
8085 => 'Tones|Ringtones|Brazilian|Samba',
8086 => 'Tones|Ringtones|Brazilian|Sertanejo',
8087 => "Tones|Ringtones|Children's Music|Lullabies",
8088 => "Tones|Ringtones|Children's Music|Sing-Along",
8089 => "Tones|Ringtones|Children's Music|Stories",
8090 => 'Tones|Ringtones|Chinese',
8091 => 'Tones|Ringtones|Chinese|Chinese Classical',
8092 => 'Tones|Ringtones|Chinese|Chinese Flute',
8093 => 'Tones|Ringtones|Chinese|Chinese Opera',
8094 => 'Tones|Ringtones|Chinese|Chinese Orchestral',
8095 => 'Tones|Ringtones|Chinese|Chinese Regional Folk',
8096 => 'Tones|Ringtones|Chinese|Chinese Strings',
8097 => 'Tones|Ringtones|Chinese|Taiwanese Folk',
8098 => 'Tones|Ringtones|Chinese|Tibetan Native Music',
8099 => 'Tones|Ringtones|Christian & Gospel',
8100 => 'Tones|Ringtones|Christian & Gospel|CCM',
8101 => 'Tones|Ringtones|Christian & Gospel|Christian Metal',
8102 => 'Tones|Ringtones|Christian & Gospel|Christian Pop',
8103 => 'Tones|Ringtones|Christian & Gospel|Christian Rap',
8104 => 'Tones|Ringtones|Christian & Gospel|Christian Rock',
8105 => 'Tones|Ringtones|Christian & Gospel|Classic Christian',
8106 => 'Tones|Ringtones|Christian & Gospel|Contemporary Gospel',
8107 => 'Tones|Ringtones|Christian & Gospel|Gospel',
8108 => 'Tones|Ringtones|Christian & Gospel|Praise & Worship',
8109 => 'Tones|Ringtones|Christian & Gospel|Southern Gospel',
8110 => 'Tones|Ringtones|Christian & Gospel|Traditional Gospel',
8111 => 'Tones|Ringtones|Classical|Avant-Garde',
8112 => 'Tones|Ringtones|Classical|Baroque Era',
8113 => 'Tones|Ringtones|Classical|Chamber Music',
8114 => 'Tones|Ringtones|Classical|Chant',
8115 => 'Tones|Ringtones|Classical|Choral',
8116 => 'Tones|Ringtones|Classical|Classical Crossover',
8117 => 'Tones|Ringtones|Classical|Early Music',
8118 => 'Tones|Ringtones|Classical|High Classical',
8119 => 'Tones|Ringtones|Classical|Impressionist',
8120 => 'Tones|Ringtones|Classical|Medieval Era',
8121 => 'Tones|Ringtones|Classical|Minimalism',
8122 => 'Tones|Ringtones|Classical|Modern Era',
8123 => 'Tones|Ringtones|Classical|Orchestral',
8124 => 'Tones|Ringtones|Classical|Renaissance',
8125 => 'Tones|Ringtones|Classical|Romantic Era',
8126 => 'Tones|Ringtones|Classical|Wedding Music',
8127 => 'Tones|Ringtones|Comedy|Novelty',
8128 => 'Tones|Ringtones|Comedy|Standup Comedy',
8129 => 'Tones|Ringtones|Country|Alternative Country',
8130 => 'Tones|Ringtones|Country|Americana',
8131 => 'Tones|Ringtones|Country|Bluegrass',
8132 => 'Tones|Ringtones|Country|Contemporary Bluegrass',
8133 => 'Tones|Ringtones|Country|Contemporary Country',
8134 => 'Tones|Ringtones|Country|Country Gospel',
8135 => 'Tones|Ringtones|Country|Honky Tonk',
8136 => 'Tones|Ringtones|Country|Outlaw Country',
8137 => 'Tones|Ringtones|Country|Thai Country',
8138 => 'Tones|Ringtones|Country|Traditional Bluegrass',
8139 => 'Tones|Ringtones|Country|Traditional Country',
8140 => 'Tones|Ringtones|Country|Urban Cowboy',
8141 => 'Tones|Ringtones|Dance|Breakbeat',
8142 => 'Tones|Ringtones|Dance|Exercise',
8143 => 'Tones|Ringtones|Dance|Garage',
8144 => 'Tones|Ringtones|Dance|Hardcore',
8145 => 'Tones|Ringtones|Dance|House',
8146 => "Tones|Ringtones|Dance|Jungle/Drum'n'bass",
8147 => 'Tones|Ringtones|Dance|Techno',
8148 => 'Tones|Ringtones|Dance|Trance',
8149 => 'Tones|Ringtones|Disney',
8150 => 'Tones|Ringtones|Easy Listening',
8151 => 'Tones|Ringtones|Easy Listening|Lounge',
8152 => 'Tones|Ringtones|Easy Listening|Swing',
8153 => 'Tones|Ringtones|Electronic|Ambient',
8154 => 'Tones|Ringtones|Electronic|Downtempo',
8155 => 'Tones|Ringtones|Electronic|Electronica',
8156 => 'Tones|Ringtones|Electronic|IDM/Experimental',
8157 => 'Tones|Ringtones|Electronic|Industrial',
8158 => 'Tones|Ringtones|Fitness & Workout',
8159 => 'Tones|Ringtones|Folk',
8160 => 'Tones|Ringtones|Hip-Hop/Rap|Alternative Rap',
8161 => 'Tones|Ringtones|Hip-Hop/Rap|Chinese Hip-Hop',
8162 => 'Tones|Ringtones|Hip-Hop/Rap|Dirty South',
8163 => 'Tones|Ringtones|Hip-Hop/Rap|East Coast Rap',
8164 => 'Tones|Ringtones|Hip-Hop/Rap|Gangsta Rap',
8165 => 'Tones|Ringtones|Hip-Hop/Rap|Hardcore Rap',
8166 => 'Tones|Ringtones|Hip-Hop/Rap|Hip-Hop',
8167 => 'Tones|Ringtones|Hip-Hop/Rap|Korean Hip-Hop',
8168 => 'Tones|Ringtones|Hip-Hop/Rap|Latin Rap',
8169 => 'Tones|Ringtones|Hip-Hop/Rap|Old School Rap',
8170 => 'Tones|Ringtones|Hip-Hop/Rap|Rap',
8171 => 'Tones|Ringtones|Hip-Hop/Rap|Underground Rap',
8172 => 'Tones|Ringtones|Hip-Hop/Rap|West Coast Rap',
8173 => 'Tones|Ringtones|Holiday|Chanukah',
8174 => 'Tones|Ringtones|Holiday|Christmas',
8175 => "Tones|Ringtones|Holiday|Christmas: Children's",
8176 => 'Tones|Ringtones|Holiday|Christmas: Classic',
8177 => 'Tones|Ringtones|Holiday|Christmas: Classical',
8178 => 'Tones|Ringtones|Holiday|Christmas: Jazz',
8179 => 'Tones|Ringtones|Holiday|Christmas: Modern',
8180 => 'Tones|Ringtones|Holiday|Christmas: Pop',
8181 => 'Tones|Ringtones|Holiday|Christmas: R&B',
8182 => 'Tones|Ringtones|Holiday|Christmas: Religious',
8183 => 'Tones|Ringtones|Holiday|Christmas: Rock',
8184 => 'Tones|Ringtones|Holiday|Easter',
8185 => 'Tones|Ringtones|Holiday|Halloween',
8186 => 'Tones|Ringtones|Holiday|Thanksgiving',
8187 => 'Tones|Ringtones|Indian',
8188 => 'Tones|Ringtones|Indian|Bollywood',
8189 => 'Tones|Ringtones|Indian|Devotional & Spiritual',
8190 => 'Tones|Ringtones|Indian|Ghazals',
8191 => 'Tones|Ringtones|Indian|Indian Classical',
8192 => 'Tones|Ringtones|Indian|Indian Folk',
8193 => 'Tones|Ringtones|Indian|Indian Pop',
8194 => 'Tones|Ringtones|Indian|Regional Indian',
8195 => 'Tones|Ringtones|Indian|Sufi',
8196 => 'Tones|Ringtones|Indian|Regional Indian|Tamil',
8197 => 'Tones|Ringtones|Indian|Regional Indian|Telugu',
8198 => 'Tones|Ringtones|Instrumental',
8199 => 'Tones|Ringtones|Jazz|Avant-Garde Jazz',
8201 => 'Tones|Ringtones|Jazz|Big Band',
8202 => 'Tones|Ringtones|Jazz|Bop',
8203 => 'Tones|Ringtones|Jazz|Contemporary Jazz',
8204 => 'Tones|Ringtones|Jazz|Cool Jazz',
8205 => 'Tones|Ringtones|Jazz|Crossover Jazz',
8206 => 'Tones|Ringtones|Jazz|Dixieland',
8207 => 'Tones|Ringtones|Jazz|Fusion',
8208 => 'Tones|Ringtones|Jazz|Hard Bop',
8209 => 'Tones|Ringtones|Jazz|Latin Jazz',
8210 => 'Tones|Ringtones|Jazz|Mainstream Jazz',
8211 => 'Tones|Ringtones|Jazz|Ragtime',
8212 => 'Tones|Ringtones|Jazz|Smooth Jazz',
8213 => 'Tones|Ringtones|Jazz|Trad Jazz',
8214 => 'Tones|Ringtones|Pop|K-Pop',
8215 => 'Tones|Ringtones|Karaoke',
8216 => 'Tones|Ringtones|Korean',
8217 => 'Tones|Ringtones|Korean|Korean Classical',
8218 => 'Tones|Ringtones|Korean|Korean Trad Instrumental',
8219 => 'Tones|Ringtones|Korean|Korean Trad Song',
8220 => 'Tones|Ringtones|Korean|Korean Trad Theater',
8221 => 'Tones|Ringtones|Latin|Alternative & Rock in Spanish',
8222 => 'Tones|Ringtones|Latin|Baladas y Boleros',
8223 => 'Tones|Ringtones|Latin|Contemporary Latin',
8224 => 'Tones|Ringtones|Latin|Latin Jazz',
8225 => 'Tones|Ringtones|Latin|Latin Urban',
8226 => 'Tones|Ringtones|Latin|Pop in Spanish',
8227 => 'Tones|Ringtones|Latin|Raices',
8228 => 'Tones|Ringtones|Latin|Musica Mexicana', # (Música Mexicana)
8229 => 'Tones|Ringtones|Latin|Salsa y Tropical',
8230 => 'Tones|Ringtones|Marching Bands',
8231 => 'Tones|Ringtones|New Age|Healing',
8232 => 'Tones|Ringtones|New Age|Meditation',
8233 => 'Tones|Ringtones|New Age|Nature',
8234 => 'Tones|Ringtones|New Age|Relaxation',
8235 => 'Tones|Ringtones|New Age|Travel',
8236 => 'Tones|Ringtones|Orchestral',
8237 => 'Tones|Ringtones|Pop|Adult Contemporary',
8238 => 'Tones|Ringtones|Pop|Britpop',
8239 => 'Tones|Ringtones|Pop|C-Pop',
8240 => 'Tones|Ringtones|Pop|Cantopop/HK-Pop',
8241 => 'Tones|Ringtones|Pop|Indo Pop',
8242 => 'Tones|Ringtones|Pop|Korean Folk-Pop',
8243 => 'Tones|Ringtones|Pop|Malaysian Pop',
8244 => 'Tones|Ringtones|Pop|Mandopop',
8245 => 'Tones|Ringtones|Pop|Manilla Sound',
8246 => 'Tones|Ringtones|Pop|Oldies',
8247 => 'Tones|Ringtones|Pop|Original Pilipino Music',
8248 => 'Tones|Ringtones|Pop|Pinoy Pop',
8249 => 'Tones|Ringtones|Pop|Pop/Rock',
8250 => 'Tones|Ringtones|Pop|Soft Rock',
8251 => 'Tones|Ringtones|Pop|Tai-Pop',
8252 => 'Tones|Ringtones|Pop|Teen Pop',
8253 => 'Tones|Ringtones|Pop|Thai Pop',
8254 => 'Tones|Ringtones|R&B/Soul|Contemporary R&B',
8255 => 'Tones|Ringtones|R&B/Soul|Disco',
8256 => 'Tones|Ringtones|R&B/Soul|Doo Wop',
8257 => 'Tones|Ringtones|R&B/Soul|Funk',
8258 => 'Tones|Ringtones|R&B/Soul|Motown',
8259 => 'Tones|Ringtones|R&B/Soul|Neo-Soul',
8260 => 'Tones|Ringtones|R&B/Soul|Soul',
8261 => 'Tones|Ringtones|Reggae|Modern Dancehall',
8262 => 'Tones|Ringtones|Reggae|Dub',
8263 => 'Tones|Ringtones|Reggae|Roots Reggae',
8264 => 'Tones|Ringtones|Reggae|Ska',
8265 => 'Tones|Ringtones|Rock|Adult Alternative',
8266 => 'Tones|Ringtones|Rock|American Trad Rock',
8267 => 'Tones|Ringtones|Rock|Arena Rock',
8268 => 'Tones|Ringtones|Rock|Blues-Rock',
8269 => 'Tones|Ringtones|Rock|British Invasion',
8270 => 'Tones|Ringtones|Rock|Chinese Rock',
8271 => 'Tones|Ringtones|Rock|Death Metal/Black Metal',
8272 => 'Tones|Ringtones|Rock|Glam Rock',
8273 => 'Tones|Ringtones|Rock|Hair Metal',
8274 => 'Tones|Ringtones|Rock|Hard Rock',
8275 => 'Tones|Ringtones|Rock|Metal',
8276 => 'Tones|Ringtones|Rock|Jam Bands',
8277 => 'Tones|Ringtones|Rock|Korean Rock',
8278 => 'Tones|Ringtones|Rock|Prog-Rock/Art Rock',
8279 => 'Tones|Ringtones|Rock|Psychedelic',
8280 => 'Tones|Ringtones|Rock|Rock & Roll',
8281 => 'Tones|Ringtones|Rock|Rockabilly',
8282 => 'Tones|Ringtones|Rock|Roots Rock',
8283 => 'Tones|Ringtones|Rock|Singer/Songwriter',
8284 => 'Tones|Ringtones|Rock|Southern Rock',
8285 => 'Tones|Ringtones|Rock|Surf',
8286 => 'Tones|Ringtones|Rock|Tex-Mex',
8287 => 'Tones|Ringtones|Singer/Songwriter|Alternative Folk',
8288 => 'Tones|Ringtones|Singer/Songwriter|Contemporary Folk',
8289 => 'Tones|Ringtones|Singer/Songwriter|Contemporary Singer/Songwriter',
8290 => 'Tones|Ringtones|Singer/Songwriter|Folk-Rock',
8291 => 'Tones|Ringtones|Singer/Songwriter|New Acoustic',
8292 => 'Tones|Ringtones|Singer/Songwriter|Traditional Folk',
8293 => 'Tones|Ringtones|Soundtrack|Foreign Cinema',
8294 => 'Tones|Ringtones|Soundtrack|Musicals',
8295 => 'Tones|Ringtones|Soundtrack|Original Score',
8296 => 'Tones|Ringtones|Soundtrack|Sound Effects',
8297 => 'Tones|Ringtones|Soundtrack|Soundtrack',
8298 => 'Tones|Ringtones|Soundtrack|TV Soundtrack',
8299 => 'Tones|Ringtones|Vocal|Standards',
8300 => 'Tones|Ringtones|Vocal|Traditional Pop',
8301 => 'Tones|Ringtones|Vocal|Trot',
8302 => 'Tones|Ringtones|Jazz|Vocal Jazz',
8303 => 'Tones|Ringtones|Vocal|Vocal Pop',
8304 => 'Tones|Ringtones|World|Africa',
8305 => 'Tones|Ringtones|World|Afrikaans',
8306 => 'Tones|Ringtones|World|Afro-Beat',
8307 => 'Tones|Ringtones|World|Afro-Pop',
8308 => 'Tones|Ringtones|World|Arabesque',
8309 => 'Tones|Ringtones|World|Asia',
8310 => 'Tones|Ringtones|World|Australia',
8311 => 'Tones|Ringtones|World|Cajun',
8312 => 'Tones|Ringtones|World|Calypso',
8313 => 'Tones|Ringtones|World|Caribbean',
8314 => 'Tones|Ringtones|World|Celtic',
8315 => 'Tones|Ringtones|World|Celtic Folk',
8316 => 'Tones|Ringtones|World|Contemporary Celtic',
8317 => 'Tones|Ringtones|World|Dangdut',
8318 => 'Tones|Ringtones|World|Dini',
8319 => 'Tones|Ringtones|World|Europe',
8320 => 'Tones|Ringtones|World|Fado',
8321 => 'Tones|Ringtones|World|Farsi',
8322 => 'Tones|Ringtones|World|Flamenco',
8323 => 'Tones|Ringtones|World|France',
8324 => 'Tones|Ringtones|World|Halk',
8325 => 'Tones|Ringtones|World|Hawaii',
8326 => 'Tones|Ringtones|World|Iberia',
8327 => 'Tones|Ringtones|World|Indonesian Religious',
8328 => 'Tones|Ringtones|World|Israeli',
8329 => 'Tones|Ringtones|World|Japan',
8330 => 'Tones|Ringtones|World|Klezmer',
8331 => 'Tones|Ringtones|World|North America',
8332 => 'Tones|Ringtones|World|Polka',
8333 => 'Tones|Ringtones|World|Russian',
8334 => 'Tones|Ringtones|World|Russian Chanson',
8335 => 'Tones|Ringtones|World|Sanat',
8336 => 'Tones|Ringtones|World|Soca',
8337 => 'Tones|Ringtones|World|South Africa',
8338 => 'Tones|Ringtones|World|South America',
8339 => 'Tones|Ringtones|World|Tango',
8340 => 'Tones|Ringtones|World|Traditional Celtic',
8341 => 'Tones|Ringtones|World|Turkish',
8342 => 'Tones|Ringtones|World|Worldbeat',
8343 => 'Tones|Ringtones|World|Zydeco',
8345 => 'Tones|Ringtones|Classical|Art Song',
8346 => 'Tones|Ringtones|Classical|Brass & Woodwinds',
8347 => 'Tones|Ringtones|Classical|Solo Instrumental',
8348 => 'Tones|Ringtones|Classical|Contemporary Era',
8349 => 'Tones|Ringtones|Classical|Oratorio',
8350 => 'Tones|Ringtones|Classical|Cantata',
8351 => 'Tones|Ringtones|Classical|Electronic',
8352 => 'Tones|Ringtones|Classical|Sacred',
8353 => 'Tones|Ringtones|Classical|Guitar',
8354 => 'Tones|Ringtones|Classical|Piano',
8355 => 'Tones|Ringtones|Classical|Violin',
8356 => 'Tones|Ringtones|Classical|Cello',
8357 => 'Tones|Ringtones|Classical|Percussion',
8358 => 'Tones|Ringtones|Electronic|Dubstep',
8359 => 'Tones|Ringtones|Electronic|Bass',
8360 => 'Tones|Ringtones|Hip-Hop/Rap|UK Hip Hop',
8361 => 'Tones|Ringtones|Reggae|Lovers Rock',
8362 => 'Tones|Ringtones|Alternative|EMO',
8363 => 'Tones|Ringtones|Alternative|Pop Punk',
8364 => 'Tones|Ringtones|Alternative|Indie Pop',
8365 => 'Tones|Ringtones|New Age|Yoga',
8366 => 'Tones|Ringtones|Pop|Tribute',
8367 => 'Tones|Ringtones|Pop|Shows',
8368 => 'Tones|Ringtones|Cuban',
8369 => 'Tones|Ringtones|Cuban|Mambo',
8370 => 'Tones|Ringtones|Cuban|Chachacha',
8371 => 'Tones|Ringtones|Cuban|Guajira',
8372 => 'Tones|Ringtones|Cuban|Son',
8373 => 'Tones|Ringtones|Cuban|Bolero',
8374 => 'Tones|Ringtones|Cuban|Guaracha',
8375 => 'Tones|Ringtones|Cuban|Timba',
8376 => 'Tones|Ringtones|Soundtrack|Video Game',
8377 => 'Tones|Ringtones|Indian|Regional Indian|Punjabi|Punjabi Pop',
8378 => 'Tones|Ringtones|Indian|Regional Indian|Bengali|Rabindra Sangeet',
8379 => 'Tones|Ringtones|Indian|Regional Indian|Malayalam',
8380 => 'Tones|Ringtones|Indian|Regional Indian|Kannada',
8381 => 'Tones|Ringtones|Indian|Regional Indian|Marathi',
8382 => 'Tones|Ringtones|Indian|Regional Indian|Gujarati',
8383 => 'Tones|Ringtones|Indian|Regional Indian|Assamese',
8384 => 'Tones|Ringtones|Indian|Regional Indian|Bhojpuri',
8385 => 'Tones|Ringtones|Indian|Regional Indian|Haryanvi',
8386 => 'Tones|Ringtones|Indian|Regional Indian|Odia',
8387 => 'Tones|Ringtones|Indian|Regional Indian|Rajasthani',
8388 => 'Tones|Ringtones|Indian|Regional Indian|Urdu',
8389 => 'Tones|Ringtones|Indian|Regional Indian|Punjabi',
8390 => 'Tones|Ringtones|Indian|Regional Indian|Bengali',
8391 => 'Tones|Ringtones|Indian|Indian Classical|Carnatic Classical',
8392 => 'Tones|Ringtones|Indian|Indian Classical|Hindustani Classical',
9002 => 'Books|Nonfiction',
9003 => 'Books|Romance',
9004 => 'Books|Travel & Adventure',
9007 => 'Books|Arts & Entertainment',
9008 => 'Books|Biographies & Memoirs',
9009 => 'Books|Business & Personal Finance',
9010 => 'Books|Children & Teens',
9012 => 'Books|Humor',
9015 => 'Books|History',
9018 => 'Books|Religion & Spirituality',
9019 => 'Books|Science & Nature',
9020 => 'Books|Sci-Fi & Fantasy',
9024 => 'Books|Lifestyle & Home',
9025 => 'Books|Health, Mind & Body',
9026 => 'Books|Comics & Graphic Novels',
9027 => 'Books|Computers & Internet',
9028 => 'Books|Cookbooks, Food & Wine',
9029 => 'Books|Professional & Technical',
9030 => 'Books|Parenting',
9031 => 'Books|Fiction & Literature',
9032 => 'Books|Mysteries & Thrillers',
9033 => 'Books|Reference',
9034 => 'Books|Politics & Current Events',
9035 => 'Books|Sports & Outdoors',
10001 => 'Books|Lifestyle & Home|Antiques & Collectibles',
10002 => 'Books|Arts & Entertainment|Art & Architecture',
10003 => 'Books|Religion & Spirituality|Bibles',
10004 => 'Books|Health, Mind & Body|Spirituality',
10005 => 'Books|Business & Personal Finance|Industries & Professions',
10006 => 'Books|Business & Personal Finance|Marketing & Sales',
10007 => 'Books|Business & Personal Finance|Small Business & Entrepreneurship',
10008 => 'Books|Business & Personal Finance|Personal Finance',
10009 => 'Books|Business & Personal Finance|Reference',
10010 => 'Books|Business & Personal Finance|Careers',
10011 => 'Books|Business & Personal Finance|Economics',
10012 => 'Books|Business & Personal Finance|Investing',
10013 => 'Books|Business & Personal Finance|Finance',
10014 => 'Books|Business & Personal Finance|Management & Leadership',
10015 => 'Books|Comics & Graphic Novels|Graphic Novels',
10016 => 'Books|Comics & Graphic Novels|Manga',
10017 => 'Books|Computers & Internet|Computers',
10018 => 'Books|Computers & Internet|Databases',
10019 => 'Books|Computers & Internet|Digital Media',
10020 => 'Books|Computers & Internet|Internet',
10021 => 'Books|Computers & Internet|Network',
10022 => 'Books|Computers & Internet|Operating Systems',
10023 => 'Books|Computers & Internet|Programming',
10024 => 'Books|Computers & Internet|Software',
10025 => 'Books|Computers & Internet|System Administration',
10026 => 'Books|Cookbooks, Food & Wine|Beverages',
10027 => 'Books|Cookbooks, Food & Wine|Courses & Dishes',
10028 => 'Books|Cookbooks, Food & Wine|Special Diet',
10029 => 'Books|Cookbooks, Food & Wine|Special Occasions',
10030 => 'Books|Cookbooks, Food & Wine|Methods',
10031 => 'Books|Cookbooks, Food & Wine|Reference',
10032 => 'Books|Cookbooks, Food & Wine|Regional & Ethnic',
10033 => 'Books|Cookbooks, Food & Wine|Specific Ingredients',
10034 => 'Books|Lifestyle & Home|Crafts & Hobbies',
10035 => 'Books|Professional & Technical|Design',
10036 => 'Books|Arts & Entertainment|Theater',
10037 => 'Books|Professional & Technical|Education',
10038 => 'Books|Nonfiction|Family & Relationships',
10039 => 'Books|Fiction & Literature|Action & Adventure',
10040 => 'Books|Fiction & Literature|African American',
10041 => 'Books|Fiction & Literature|Religious',
10042 => 'Books|Fiction & Literature|Classics',
10043 => 'Books|Fiction & Literature|Erotica',
10044 => 'Books|Sci-Fi & Fantasy|Fantasy',
10045 => 'Books|Fiction & Literature|Gay',
10046 => 'Books|Fiction & Literature|Ghost',
10047 => 'Books|Fiction & Literature|Historical',
10048 => 'Books|Fiction & Literature|Horror',
10049 => 'Books|Fiction & Literature|Literary',
10050 => 'Books|Mysteries & Thrillers|Hard-Boiled',
10051 => 'Books|Mysteries & Thrillers|Historical',
10052 => 'Books|Mysteries & Thrillers|Police Procedural',
10053 => 'Books|Mysteries & Thrillers|Short Stories',
10054 => 'Books|Mysteries & Thrillers|British Detectives',
10055 => 'Books|Mysteries & Thrillers|Women Sleuths',
10056 => 'Books|Romance|Erotic Romance',
10057 => 'Books|Romance|Contemporary',
10058 => 'Books|Romance|Paranormal',
10059 => 'Books|Romance|Historical',
10060 => 'Books|Romance|Short Stories',
10061 => 'Books|Romance|Suspense',
10062 => 'Books|Romance|Western',
10063 => 'Books|Sci-Fi & Fantasy|Science Fiction',
10064 => 'Books|Sci-Fi & Fantasy|Science Fiction & Literature',
10065 => 'Books|Fiction & Literature|Short Stories',
10066 => 'Books|Reference|Foreign Languages',
10067 => 'Books|Arts & Entertainment|Games',
10068 => 'Books|Lifestyle & Home|Gardening',
10069 => 'Books|Health, Mind & Body|Health & Fitness',
10070 => 'Books|History|Africa',
10071 => 'Books|History|Americas',
10072 => 'Books|History|Ancient',
10073 => 'Books|History|Asia',
10074 => 'Books|History|Australia & Oceania',
10075 => 'Books|History|Europe',
10076 => 'Books|History|Latin America',
10077 => 'Books|History|Middle East',
10078 => 'Books|History|Military',
10079 => 'Books|History|United States',
10080 => 'Books|History|World',
10081 => "Books|Children & Teens|Children's Fiction",
10082 => "Books|Children & Teens|Children's Nonfiction",
10083 => 'Books|Professional & Technical|Law',
10084 => 'Books|Fiction & Literature|Literary Criticism',
10085 => 'Books|Science & Nature|Mathematics',
10086 => 'Books|Professional & Technical|Medical',
10087 => 'Books|Arts & Entertainment|Music',
10088 => 'Books|Science & Nature|Nature',
10089 => 'Books|Arts & Entertainment|Performing Arts',
10090 => 'Books|Lifestyle & Home|Pets',
10091 => 'Books|Nonfiction|Philosophy',
10092 => 'Books|Arts & Entertainment|Photography',
10093 => 'Books|Fiction & Literature|Poetry',
10094 => 'Books|Health, Mind & Body|Psychology',
10095 => 'Books|Reference|Almanacs & Yearbooks',
10096 => 'Books|Reference|Atlases & Maps',
10097 => 'Books|Reference|Catalogs & Directories',
10098 => 'Books|Reference|Consumer Guides',
10099 => 'Books|Reference|Dictionaries & Thesauruses',
10100 => 'Books|Reference|Encyclopedias',
10101 => 'Books|Reference|Etiquette',
10102 => 'Books|Reference|Quotations',
10103 => 'Books|Reference|Words & Language',
10104 => 'Books|Reference|Writing',
10105 => 'Books|Religion & Spirituality|Bible Studies',
10106 => 'Books|Religion & Spirituality|Buddhism',
10107 => 'Books|Religion & Spirituality|Christianity',
10108 => 'Books|Religion & Spirituality|Hinduism',
10109 => 'Books|Religion & Spirituality|Islam',
10110 => 'Books|Religion & Spirituality|Judaism',
10111 => 'Books|Science & Nature|Astronomy',
10112 => 'Books|Science & Nature|Chemistry',
10113 => 'Books|Science & Nature|Earth Sciences',
10114 => 'Books|Science & Nature|Essays',
10115 => 'Books|Science & Nature|History',
10116 => 'Books|Science & Nature|Life Sciences',
10117 => 'Books|Science & Nature|Physics',
10118 => 'Books|Science & Nature|Reference',
10119 => 'Books|Health, Mind & Body|Self-Improvement',
10120 => 'Books|Nonfiction|Social Science',
10121 => 'Books|Sports & Outdoors|Baseball',
10122 => 'Books|Sports & Outdoors|Basketball',
10123 => 'Books|Sports & Outdoors|Coaching',
10124 => 'Books|Sports & Outdoors|Extreme Sports',
10125 => 'Books|Sports & Outdoors|Football',
10126 => 'Books|Sports & Outdoors|Golf',
10127 => 'Books|Sports & Outdoors|Hockey',
10128 => 'Books|Sports & Outdoors|Mountaineering',
10129 => 'Books|Sports & Outdoors|Outdoors',
10130 => 'Books|Sports & Outdoors|Racket Sports',
10131 => 'Books|Sports & Outdoors|Reference',
10132 => 'Books|Sports & Outdoors|Soccer',
10133 => 'Books|Sports & Outdoors|Training',
10134 => 'Books|Sports & Outdoors|Water Sports',
10135 => 'Books|Sports & Outdoors|Winter Sports',
10136 => 'Books|Reference|Study Aids',
10137 => 'Books|Professional & Technical|Engineering',
10138 => 'Books|Nonfiction|Transportation',
10139 => 'Books|Travel & Adventure|Africa',
10140 => 'Books|Travel & Adventure|Asia',
10141 => 'Books|Travel & Adventure|Specialty Travel',
10142 => 'Books|Travel & Adventure|Canada',
10143 => 'Books|Travel & Adventure|Caribbean',
10144 => 'Books|Travel & Adventure|Latin America',
10145 => 'Books|Travel & Adventure|Essays & Memoirs',
10146 => 'Books|Travel & Adventure|Europe',
10147 => 'Books|Travel & Adventure|Middle East',
10148 => 'Books|Travel & Adventure|United States',
10149 => 'Books|Nonfiction|True Crime',
11001 => 'Books|Sci-Fi & Fantasy|Fantasy|Contemporary',
11002 => 'Books|Sci-Fi & Fantasy|Fantasy|Epic',
11003 => 'Books|Sci-Fi & Fantasy|Fantasy|Historical',
11004 => 'Books|Sci-Fi & Fantasy|Fantasy|Paranormal',
11005 => 'Books|Sci-Fi & Fantasy|Fantasy|Short Stories',
11006 => 'Books|Sci-Fi & Fantasy|Science Fiction & Literature|Adventure',
11007 => 'Books|Sci-Fi & Fantasy|Science Fiction & Literature|High Tech',
11008 => 'Books|Sci-Fi & Fantasy|Science Fiction & Literature|Short Stories',
11009 => 'Books|Professional & Technical|Education|Language Arts & Disciplines',
11010 => 'Books|Communications & Media',
11011 => 'Books|Communications & Media|Broadcasting',
11012 => 'Books|Communications & Media|Digital Media',
11013 => 'Books|Communications & Media|Journalism',
11014 => 'Books|Communications & Media|Photojournalism',
11015 => 'Books|Communications & Media|Print',
11016 => 'Books|Communications & Media|Speech',
11017 => 'Books|Communications & Media|Writing',
11018 => 'Books|Arts & Entertainment|Art & Architecture|Urban Planning',
11019 => 'Books|Arts & Entertainment|Dance',
11020 => 'Books|Arts & Entertainment|Fashion',
11021 => 'Books|Arts & Entertainment|Film',
11022 => 'Books|Arts & Entertainment|Interior Design',
11023 => 'Books|Arts & Entertainment|Media Arts',
11024 => 'Books|Arts & Entertainment|Radio',
11025 => 'Books|Arts & Entertainment|TV',
11026 => 'Books|Arts & Entertainment|Visual Arts',
11027 => 'Books|Biographies & Memoirs|Arts & Entertainment',
11028 => 'Books|Biographies & Memoirs|Business',
11029 => 'Books|Biographies & Memoirs|Culinary',
11030 => 'Books|Biographies & Memoirs|Gay & Lesbian',
11031 => 'Books|Biographies & Memoirs|Historical',
11032 => 'Books|Biographies & Memoirs|Literary',
11033 => 'Books|Biographies & Memoirs|Media & Journalism',
11034 => 'Books|Biographies & Memoirs|Military',
11035 => 'Books|Biographies & Memoirs|Politics',
11036 => 'Books|Biographies & Memoirs|Religious',
11037 => 'Books|Biographies & Memoirs|Science & Technology',
11038 => 'Books|Biographies & Memoirs|Sports',
11039 => 'Books|Biographies & Memoirs|Women',
11040 => 'Books|Romance|New Adult',
11042 => 'Books|Romance|Romantic Comedy',
11043 => 'Books|Romance|Gay & Lesbian',
11044 => 'Books|Fiction & Literature|Essays',
11045 => 'Books|Fiction & Literature|Anthologies',
11046 => 'Books|Fiction & Literature|Comparative Literature',
11047 => 'Books|Fiction & Literature|Drama',
11049 => 'Books|Fiction & Literature|Fairy Tales, Myths & Fables',
11050 => 'Books|Fiction & Literature|Family',
11051 => 'Books|Comics & Graphic Novels|Manga|School Drama',
11052 => 'Books|Comics & Graphic Novels|Manga|Human Drama',
11053 => 'Books|Comics & Graphic Novels|Manga|Family Drama',
11054 => 'Books|Sports & Outdoors|Boxing',
11055 => 'Books|Sports & Outdoors|Cricket',
11056 => 'Books|Sports & Outdoors|Cycling',
11057 => 'Books|Sports & Outdoors|Equestrian',
11058 => 'Books|Sports & Outdoors|Martial Arts & Self Defense',
11059 => 'Books|Sports & Outdoors|Motor Sports',
11060 => 'Books|Sports & Outdoors|Rugby',
11061 => 'Books|Sports & Outdoors|Running',
11062 => 'Books|Health, Mind & Body|Diet & Nutrition',
11063 => 'Books|Science & Nature|Agriculture',
11064 => 'Books|Science & Nature|Atmosphere',
11065 => 'Books|Science & Nature|Biology',
11066 => 'Books|Science & Nature|Ecology',
11067 => 'Books|Science & Nature|Environment',
11068 => 'Books|Science & Nature|Geography',
11069 => 'Books|Science & Nature|Geology',
11070 => 'Books|Nonfiction|Social Science|Anthropology',
11071 => 'Books|Nonfiction|Social Science|Archaeology',
11072 => 'Books|Nonfiction|Social Science|Civics',
11073 => 'Books|Nonfiction|Social Science|Government',
11074 => 'Books|Nonfiction|Social Science|Social Studies',
11075 => 'Books|Nonfiction|Social Science|Social Welfare',
11076 => 'Books|Nonfiction|Social Science|Society',
11077 => 'Books|Nonfiction|Philosophy|Aesthetics',
11078 => 'Books|Nonfiction|Philosophy|Epistemology',
11079 => 'Books|Nonfiction|Philosophy|Ethics',
11080 => 'Books|Nonfiction|Philosophy|Language',
11081 => 'Books|Nonfiction|Philosophy|Logic',
11082 => 'Books|Nonfiction|Philosophy|Metaphysics',
11083 => 'Books|Nonfiction|Philosophy|Political',
11084 => 'Books|Nonfiction|Philosophy|Religion',
11085 => 'Books|Reference|Manuals',
11086 => 'Books|Kids',
11087 => 'Books|Kids|Animals',
11088 => 'Books|Kids|Basic Concepts',
11089 => 'Books|Kids|Basic Concepts|Alphabet',
11090 => 'Books|Kids|Basic Concepts|Body',
11091 => 'Books|Kids|Basic Concepts|Colors',
11092 => 'Books|Kids|Basic Concepts|Counting & Numbers',
11093 => 'Books|Kids|Basic Concepts|Date & Time',
11094 => 'Books|Kids|Basic Concepts|General',
11095 => 'Books|Kids|Basic Concepts|Money',
11096 => 'Books|Kids|Basic Concepts|Opposites',
11097 => 'Books|Kids|Basic Concepts|Seasons',
11098 => 'Books|Kids|Basic Concepts|Senses & Sensation',
11099 => 'Books|Kids|Basic Concepts|Size & Shape',
11100 => 'Books|Kids|Basic Concepts|Sounds',
11101 => 'Books|Kids|Basic Concepts|Words',
11102 => 'Books|Kids|Biography',
11103 => 'Books|Kids|Careers & Occupations',
11104 => 'Books|Kids|Computers & Technology',
11105 => 'Books|Kids|Cooking & Food',
11106 => 'Books|Kids|Arts & Entertainment',
11107 => 'Books|Kids|Arts & Entertainment|Art',
11108 => 'Books|Kids|Arts & Entertainment|Crafts',
11109 => 'Books|Kids|Arts & Entertainment|Music',
11110 => 'Books|Kids|Arts & Entertainment|Performing Arts',
11111 => 'Books|Kids|Family',
11112 => 'Books|Kids|Fiction',
11113 => 'Books|Kids|Fiction|Action & Adventure',
11114 => 'Books|Kids|Fiction|Animals',
11115 => 'Books|Kids|Fiction|Classics',
11116 => 'Books|Kids|Fiction|Comics & Graphic Novels',
11117 => 'Books|Kids|Fiction|Culture, Places & People',
11118 => 'Books|Kids|Fiction|Family & Relationships',
11119 => 'Books|Kids|Fiction|Fantasy',
11120 => 'Books|Kids|Fiction|Fairy Tales, Myths & Fables',
11121 => 'Books|Kids|Fiction|Favorite Characters',
11122 => 'Books|Kids|Fiction|Historical',
11123 => 'Books|Kids|Fiction|Holidays & Celebrations',
11124 => 'Books|Kids|Fiction|Monsters & Ghosts',
11125 => 'Books|Kids|Fiction|Mysteries',
11126 => 'Books|Kids|Fiction|Nature',
11127 => 'Books|Kids|Fiction|Religion',
11128 => 'Books|Kids|Fiction|Sci-Fi',
11129 => 'Books|Kids|Fiction|Social Issues',
11130 => 'Books|Kids|Fiction|Sports & Recreation',
11131 => 'Books|Kids|Fiction|Transportation',
11132 => 'Books|Kids|Games & Activities',
11133 => 'Books|Kids|General Nonfiction',
11134 => 'Books|Kids|Health',
11135 => 'Books|Kids|History',
11136 => 'Books|Kids|Holidays & Celebrations',
11137 => 'Books|Kids|Holidays & Celebrations|Birthdays',
11138 => 'Books|Kids|Holidays & Celebrations|Christmas & Advent',
11139 => 'Books|Kids|Holidays & Celebrations|Easter & Lent',
11140 => 'Books|Kids|Holidays & Celebrations|General',
11141 => 'Books|Kids|Holidays & Celebrations|Halloween',
11142 => 'Books|Kids|Holidays & Celebrations|Hanukkah',
11143 => 'Books|Kids|Holidays & Celebrations|Other',
11144 => 'Books|Kids|Holidays & Celebrations|Passover',
11145 => 'Books|Kids|Holidays & Celebrations|Patriotic Holidays',
11146 => 'Books|Kids|Holidays & Celebrations|Ramadan',
11147 => 'Books|Kids|Holidays & Celebrations|Thanksgiving',
11148 => "Books|Kids|Holidays & Celebrations|Valentine's Day",
11149 => 'Books|Kids|Humor',
11150 => 'Books|Kids|Humor|Jokes & Riddles',
11151 => 'Books|Kids|Poetry',
11152 => 'Books|Kids|Learning to Read',
11153 => 'Books|Kids|Learning to Read|Chapter Books',
11154 => 'Books|Kids|Learning to Read|Early Readers',
11155 => 'Books|Kids|Learning to Read|Intermediate Readers',
11156 => 'Books|Kids|Nursery Rhymes',
11157 => 'Books|Kids|Government',
11158 => 'Books|Kids|Reference',
11159 => 'Books|Kids|Religion',
11160 => 'Books|Kids|Science & Nature',
11161 => 'Books|Kids|Social Issues',
11162 => 'Books|Kids|Social Studies',
11163 => 'Books|Kids|Sports & Recreation',
11164 => 'Books|Kids|Transportation',
11165 => 'Books|Young Adult',
11166 => 'Books|Young Adult|Animals',
11167 => 'Books|Young Adult|Biography',
11168 => 'Books|Young Adult|Careers & Occupations',
11169 => 'Books|Young Adult|Computers & Technology',
11170 => 'Books|Young Adult|Cooking & Food',
11171 => 'Books|Young Adult|Arts & Entertainment',
11172 => 'Books|Young Adult|Arts & Entertainment|Art',
11173 => 'Books|Young Adult|Arts & Entertainment|Crafts',
11174 => 'Books|Young Adult|Arts & Entertainment|Music',
11175 => 'Books|Young Adult|Arts & Entertainment|Performing Arts',
11176 => 'Books|Young Adult|Family',
11177 => 'Books|Young Adult|Fiction',
11178 => 'Books|Young Adult|Fiction|Action & Adventure',
11179 => 'Books|Young Adult|Fiction|Animals',
11180 => 'Books|Young Adult|Fiction|Classics',
11181 => 'Books|Young Adult|Fiction|Comics & Graphic Novels',
11182 => 'Books|Young Adult|Fiction|Culture, Places & People',
11183 => 'Books|Young Adult|Fiction|Dystopian',
11184 => 'Books|Young Adult|Fiction|Family & Relationships',
11185 => 'Books|Young Adult|Fiction|Fantasy',
11186 => 'Books|Young Adult|Fiction|Fairy Tales, Myths & Fables',
11187 => 'Books|Young Adult|Fiction|Favorite Characters',
11188 => 'Books|Young Adult|Fiction|Historical',
11189 => 'Books|Young Adult|Fiction|Holidays & Celebrations',
11190 => 'Books|Young Adult|Fiction|Horror, Monsters & Ghosts',
11191 => 'Books|Young Adult|Fiction|Crime & Mystery',
11192 => 'Books|Young Adult|Fiction|Nature',
11193 => 'Books|Young Adult|Fiction|Religion',
11194 => 'Books|Young Adult|Fiction|Romance',
11195 => 'Books|Young Adult|Fiction|Sci-Fi',
11196 => 'Books|Young Adult|Fiction|Coming of Age',
11197 => 'Books|Young Adult|Fiction|Sports & Recreation',
11198 => 'Books|Young Adult|Fiction|Transportation',
11199 => 'Books|Young Adult|Games & Activities',
11200 => 'Books|Young Adult|General Nonfiction',
11201 => 'Books|Young Adult|Health',
11202 => 'Books|Young Adult|History',
11203 => 'Books|Young Adult|Holidays & Celebrations',
11204 => 'Books|Young Adult|Holidays & Celebrations|Birthdays',
11205 => 'Books|Young Adult|Holidays & Celebrations|Christmas & Advent',
11206 => 'Books|Young Adult|Holidays & Celebrations|Easter & Lent',
11207 => 'Books|Young Adult|Holidays & Celebrations|General',
11208 => 'Books|Young Adult|Holidays & Celebrations|Halloween',
11209 => 'Books|Young Adult|Holidays & Celebrations|Hanukkah',
11210 => 'Books|Young Adult|Holidays & Celebrations|Other',
11211 => 'Books|Young Adult|Holidays & Celebrations|Passover',
11212 => 'Books|Young Adult|Holidays & Celebrations|Patriotic Holidays',
11213 => 'Books|Young Adult|Holidays & Celebrations|Ramadan',
11214 => 'Books|Young Adult|Holidays & Celebrations|Thanksgiving',
11215 => "Books|Young Adult|Holidays & Celebrations|Valentine's Day",
11216 => 'Books|Young Adult|Humor',
11217 => 'Books|Young Adult|Humor|Jokes & Riddles',
11218 => 'Books|Young Adult|Poetry',
11219 => 'Books|Young Adult|Politics & Government',
11220 => 'Books|Young Adult|Reference',
11221 => 'Books|Young Adult|Religion',
11222 => 'Books|Young Adult|Science & Nature',
11223 => 'Books|Young Adult|Coming of Age',
11224 => 'Books|Young Adult|Social Studies',
11225 => 'Books|Young Adult|Sports & Recreation',
11226 => 'Books|Young Adult|Transportation',
11227 => 'Books|Communications & Media',
11228 => 'Books|Military & Warfare',
11229 => 'Books|Romance|Inspirational',
11231 => 'Books|Romance|Holiday',
11232 => 'Books|Romance|Wholesome',
11233 => 'Books|Romance|Military',
11234 => 'Books|Arts & Entertainment|Art History',
11236 => 'Books|Arts & Entertainment|Design',
11243 => 'Books|Business & Personal Finance|Accounting',
11244 => 'Books|Business & Personal Finance|Hospitality',
11245 => 'Books|Business & Personal Finance|Real Estate',
11246 => 'Books|Humor|Jokes & Riddles',
11247 => 'Books|Religion & Spirituality|Comparative Religion',
11255 => 'Books|Cookbooks, Food & Wine|Culinary Arts',
11259 => 'Books|Mysteries & Thrillers|Cozy',
11260 => 'Books|Politics & Current Events|Current Events',
11261 => 'Books|Politics & Current Events|Foreign Policy & International Relations',
11262 => 'Books|Politics & Current Events|Local Government',
11263 => 'Books|Politics & Current Events|National Government',
11264 => 'Books|Politics & Current Events|Political Science',
11265 => 'Books|Politics & Current Events|Public Administration',
11266 => 'Books|Politics & Current Events|World Affairs',
11273 => 'Books|Nonfiction|Family & Relationships|Family & Childcare',
11274 => 'Books|Nonfiction|Family & Relationships|Love & Romance',
11275 => 'Books|Sci-Fi & Fantasy|Fantasy|Urban',
11276 => 'Books|Reference|Foreign Languages|Arabic',
11277 => 'Books|Reference|Foreign Languages|Bilingual Editions',
11278 => 'Books|Reference|Foreign Languages|African Languages',
11279 => 'Books|Reference|Foreign Languages|Ancient Languages',
11280 => 'Books|Reference|Foreign Languages|Chinese',
11281 => 'Books|Reference|Foreign Languages|English',
11282 => 'Books|Reference|Foreign Languages|French',
11283 => 'Books|Reference|Foreign Languages|German',
11284 => 'Books|Reference|Foreign Languages|Hebrew',
11285 => 'Books|Reference|Foreign Languages|Hindi',
11286 => 'Books|Reference|Foreign Languages|Italian',
11287 => 'Books|Reference|Foreign Languages|Japanese',
11288 => 'Books|Reference|Foreign Languages|Korean',
11289 => 'Books|Reference|Foreign Languages|Linguistics',
11290 => 'Books|Reference|Foreign Languages|Other Languages',
11291 => 'Books|Reference|Foreign Languages|Portuguese',
11292 => 'Books|Reference|Foreign Languages|Russian',
11293 => 'Books|Reference|Foreign Languages|Spanish',
11294 => 'Books|Reference|Foreign Languages|Speech Pathology',
11295 => 'Books|Science & Nature|Mathematics|Advanced Mathematics',
11296 => 'Books|Science & Nature|Mathematics|Algebra',
11297 => 'Books|Science & Nature|Mathematics|Arithmetic',
11298 => 'Books|Science & Nature|Mathematics|Calculus',
11299 => 'Books|Science & Nature|Mathematics|Geometry',
11300 => 'Books|Science & Nature|Mathematics|Statistics',
11301 => 'Books|Professional & Technical|Medical|Veterinary',
11302 => 'Books|Professional & Technical|Medical|Neuroscience',
11303 => 'Books|Professional & Technical|Medical|Immunology',
11304 => 'Books|Professional & Technical|Medical|Nursing',
11305 => 'Books|Professional & Technical|Medical|Pharmacology & Toxicology',
11306 => 'Books|Professional & Technical|Medical|Anatomy & Physiology',
11307 => 'Books|Professional & Technical|Medical|Dentistry',
11308 => 'Books|Professional & Technical|Medical|Emergency Medicine',
11309 => 'Books|Professional & Technical|Medical|Genetics',
11310 => 'Books|Professional & Technical|Medical|Psychiatry',
11311 => 'Books|Professional & Technical|Medical|Radiology',
11312 => 'Books|Professional & Technical|Medical|Alternative Medicine',
11317 => 'Books|Nonfiction|Philosophy|Political Philosophy',
11319 => 'Books|Nonfiction|Philosophy|Philosophy of Language',
11320 => 'Books|Nonfiction|Philosophy|Philosophy of Religion',
11327 => 'Books|Nonfiction|Social Science|Sociology',
11329 => 'Books|Professional & Technical|Engineering|Aeronautics',
11330 => 'Books|Professional & Technical|Engineering|Chemical & Petroleum Engineering',
11331 => 'Books|Professional & Technical|Engineering|Civil Engineering',
11332 => 'Books|Professional & Technical|Engineering|Computer Science',
11333 => 'Books|Professional & Technical|Engineering|Electrical Engineering',
11334 => 'Books|Professional & Technical|Engineering|Environmental Engineering',
11335 => 'Books|Professional & Technical|Engineering|Mechanical Engineering',
11336 => 'Books|Professional & Technical|Engineering|Power Resources',
11337 => 'Books|Comics & Graphic Novels|Manga|Boys',
11338 => 'Books|Comics & Graphic Novels|Manga|Men',
11339 => 'Books|Comics & Graphic Novels|Manga|Girls',
11340 => 'Books|Comics & Graphic Novels|Manga|Women',
11341 => 'Books|Comics & Graphic Novels|Manga|Other',
12001 => 'Mac App Store|Business',
12002 => 'Mac App Store|Developer Tools',
12003 => 'Mac App Store|Education',
12004 => 'Mac App Store|Entertainment',
12005 => 'Mac App Store|Finance',
12006 => 'Mac App Store|Games',
12007 => 'Mac App Store|Health & Fitness',
12008 => 'Mac App Store|Lifestyle',
12010 => 'Mac App Store|Medical',
12011 => 'Mac App Store|Music',
12012 => 'Mac App Store|News',
12013 => 'Mac App Store|Photography',
12014 => 'Mac App Store|Productivity',
12015 => 'Mac App Store|Reference',
12016 => 'Mac App Store|Social Networking',
12017 => 'Mac App Store|Sports',
12018 => 'Mac App Store|Travel',
12019 => 'Mac App Store|Utilities',
12020 => 'Mac App Store|Video',
12021 => 'Mac App Store|Weather',
12022 => 'Mac App Store|Graphics & Design',
12201 => 'Mac App Store|Games|Action',
12202 => 'Mac App Store|Games|Adventure',
12203 => 'Mac App Store|Games|Arcade',
12204 => 'Mac App Store|Games|Board',
12205 => 'Mac App Store|Games|Card',
12206 => 'Mac App Store|Games|Casino',
12207 => 'Mac App Store|Games|Dice',
12208 => 'Mac App Store|Games|Educational',
12209 => 'Mac App Store|Games|Family',
12210 => 'Mac App Store|Games|Kids',
12211 => 'Mac App Store|Games|Music',
12212 => 'Mac App Store|Games|Puzzle',
12213 => 'Mac App Store|Games|Racing',
12214 => 'Mac App Store|Games|Role Playing',
12215 => 'Mac App Store|Games|Simulation',
12216 => 'Mac App Store|Games|Sports',
12217 => 'Mac App Store|Games|Strategy',
12218 => 'Mac App Store|Games|Trivia',
12219 => 'Mac App Store|Games|Word',
13001 => 'App Store|Magazines & Newspapers|News & Politics',
13002 => 'App Store|Magazines & Newspapers|Fashion & Style',
13003 => 'App Store|Magazines & Newspapers|Home & Garden',
13004 => 'App Store|Magazines & Newspapers|Outdoors & Nature',
13005 => 'App Store|Magazines & Newspapers|Sports & Leisure',
13006 => 'App Store|Magazines & Newspapers|Automotive',
13007 => 'App Store|Magazines & Newspapers|Arts & Photography',
13008 => 'App Store|Magazines & Newspapers|Brides & Weddings',
13009 => 'App Store|Magazines & Newspapers|Business & Investing',
13010 => "App Store|Magazines & Newspapers|Children's Magazines",
13011 => 'App Store|Magazines & Newspapers|Computers & Internet',
13012 => 'App Store|Magazines & Newspapers|Cooking, Food & Drink',
13013 => 'App Store|Magazines & Newspapers|Crafts & Hobbies',
13014 => 'App Store|Magazines & Newspapers|Electronics & Audio',
13015 => 'App Store|Magazines & Newspapers|Entertainment',
13017 => 'App Store|Magazines & Newspapers|Health, Mind & Body',
13018 => 'App Store|Magazines & Newspapers|History',
13019 => 'App Store|Magazines & Newspapers|Literary Magazines & Journals',
13020 => "App Store|Magazines & Newspapers|Men's Interest",
13021 => 'App Store|Magazines & Newspapers|Movies & Music',
13023 => 'App Store|Magazines & Newspapers|Parenting & Family',
13024 => 'App Store|Magazines & Newspapers|Pets',
13025 => 'App Store|Magazines & Newspapers|Professional & Trade',
13026 => 'App Store|Magazines & Newspapers|Regional News',
13027 => 'App Store|Magazines & Newspapers|Science',
13028 => 'App Store|Magazines & Newspapers|Teens',
13029 => 'App Store|Magazines & Newspapers|Travel & Regional',
13030 => "App Store|Magazines & Newspapers|Women's Interest",
15000 => 'Textbooks|Arts & Entertainment',
15001 => 'Textbooks|Arts & Entertainment|Art & Architecture',
15002 => 'Textbooks|Arts & Entertainment|Art & Architecture|Urban Planning',
15003 => 'Textbooks|Arts & Entertainment|Art History',
15004 => 'Textbooks|Arts & Entertainment|Dance',
15005 => 'Textbooks|Arts & Entertainment|Design',
15006 => 'Textbooks|Arts & Entertainment|Fashion',
15007 => 'Textbooks|Arts & Entertainment|Film',
15008 => 'Textbooks|Arts & Entertainment|Games',
15009 => 'Textbooks|Arts & Entertainment|Interior Design',
15010 => 'Textbooks|Arts & Entertainment|Media Arts',
15011 => 'Textbooks|Arts & Entertainment|Music',
15012 => 'Textbooks|Arts & Entertainment|Performing Arts',
15013 => 'Textbooks|Arts & Entertainment|Photography',
15014 => 'Textbooks|Arts & Entertainment|Theater',
15015 => 'Textbooks|Arts & Entertainment|TV',
15016 => 'Textbooks|Arts & Entertainment|Visual Arts',
15017 => 'Textbooks|Biographies & Memoirs',
15018 => 'Textbooks|Business & Personal Finance',
15019 => 'Textbooks|Business & Personal Finance|Accounting',
15020 => 'Textbooks|Business & Personal Finance|Careers',
15021 => 'Textbooks|Business & Personal Finance|Economics',
15022 => 'Textbooks|Business & Personal Finance|Finance',
15023 => 'Textbooks|Business & Personal Finance|Hospitality',
15024 => 'Textbooks|Business & Personal Finance|Industries & Professions',
15025 => 'Textbooks|Business & Personal Finance|Investing',
15026 => 'Textbooks|Business & Personal Finance|Management & Leadership',
15027 => 'Textbooks|Business & Personal Finance|Marketing & Sales',
15028 => 'Textbooks|Business & Personal Finance|Personal Finance',
15029 => 'Textbooks|Business & Personal Finance|Real Estate',
15030 => 'Textbooks|Business & Personal Finance|Reference',
15031 => 'Textbooks|Business & Personal Finance|Small Business & Entrepreneurship',
15032 => 'Textbooks|Children & Teens',
15033 => 'Textbooks|Children & Teens|Fiction',
15034 => 'Textbooks|Children & Teens|Nonfiction',
15035 => 'Textbooks|Comics & Graphic Novels',
15036 => 'Textbooks|Comics & Graphic Novels|Graphic Novels',
15037 => 'Textbooks|Comics & Graphic Novels|Manga',
15038 => 'Textbooks|Communications & Media',
15039 => 'Textbooks|Communications & Media|Broadcasting',
15040 => 'Textbooks|Communications & Media|Digital Media',
15041 => 'Textbooks|Communications & Media|Journalism',
15042 => 'Textbooks|Communications & Media|Photojournalism',
15043 => 'Textbooks|Communications & Media|Print',
15044 => 'Textbooks|Communications & Media|Speech',
15045 => 'Textbooks|Communications & Media|Writing',
15046 => 'Textbooks|Computers & Internet',
15047 => 'Textbooks|Computers & Internet|Computers',
15048 => 'Textbooks|Computers & Internet|Databases',
15049 => 'Textbooks|Computers & Internet|Digital Media',
15050 => 'Textbooks|Computers & Internet|Internet',
15051 => 'Textbooks|Computers & Internet|Network',
15052 => 'Textbooks|Computers & Internet|Operating Systems',
15053 => 'Textbooks|Computers & Internet|Programming',
15054 => 'Textbooks|Computers & Internet|Software',
15055 => 'Textbooks|Computers & Internet|System Administration',
15056 => 'Textbooks|Cookbooks, Food & Wine',
15057 => 'Textbooks|Cookbooks, Food & Wine|Beverages',
15058 => 'Textbooks|Cookbooks, Food & Wine|Courses & Dishes',
15059 => 'Textbooks|Cookbooks, Food & Wine|Culinary Arts',
15060 => 'Textbooks|Cookbooks, Food & Wine|Methods',
15061 => 'Textbooks|Cookbooks, Food & Wine|Reference',
15062 => 'Textbooks|Cookbooks, Food & Wine|Regional & Ethnic',
15063 => 'Textbooks|Cookbooks, Food & Wine|Special Diet',
15064 => 'Textbooks|Cookbooks, Food & Wine|Special Occasions',
15065 => 'Textbooks|Cookbooks, Food & Wine|Specific Ingredients',
15066 => 'Textbooks|Engineering',
15067 => 'Textbooks|Engineering|Aeronautics',
15068 => 'Textbooks|Engineering|Chemical & Petroleum Engineering',
15069 => 'Textbooks|Engineering|Civil Engineering',
15070 => 'Textbooks|Engineering|Computer Science',
15071 => 'Textbooks|Engineering|Electrical Engineering',
15072 => 'Textbooks|Engineering|Environmental Engineering',
15073 => 'Textbooks|Engineering|Mechanical Engineering',
15074 => 'Textbooks|Engineering|Power Resources',
15075 => 'Textbooks|Fiction & Literature',
15076 => 'Textbooks|Fiction & Literature|Latino',
15077 => 'Textbooks|Fiction & Literature|Action & Adventure',
15078 => 'Textbooks|Fiction & Literature|African American',
15079 => 'Textbooks|Fiction & Literature|Anthologies',
15080 => 'Textbooks|Fiction & Literature|Classics',
15081 => 'Textbooks|Fiction & Literature|Comparative Literature',
15082 => 'Textbooks|Fiction & Literature|Erotica',
15083 => 'Textbooks|Fiction & Literature|Gay',
15084 => 'Textbooks|Fiction & Literature|Ghost',
15085 => 'Textbooks|Fiction & Literature|Historical',
15086 => 'Textbooks|Fiction & Literature|Horror',
15087 => 'Textbooks|Fiction & Literature|Literary',
15088 => 'Textbooks|Fiction & Literature|Literary Criticism',
15089 => 'Textbooks|Fiction & Literature|Poetry',
15090 => 'Textbooks|Fiction & Literature|Religious',
15091 => 'Textbooks|Fiction & Literature|Short Stories',
15092 => 'Textbooks|Health, Mind & Body',
15093 => 'Textbooks|Health, Mind & Body|Fitness',
15094 => 'Textbooks|Health, Mind & Body|Self-Improvement',
15095 => 'Textbooks|History',
15096 => 'Textbooks|History|Africa',
15097 => 'Textbooks|History|Americas',
15098 => 'Textbooks|History|Americas|Canada',
15099 => 'Textbooks|History|Americas|Latin America',
15100 => 'Textbooks|History|Americas|United States',
15101 => 'Textbooks|History|Ancient',
15102 => 'Textbooks|History|Asia',
15103 => 'Textbooks|History|Australia & Oceania',
15104 => 'Textbooks|History|Europe',
15105 => 'Textbooks|History|Middle East',
15106 => 'Textbooks|History|Military',
15107 => 'Textbooks|History|World',
15108 => 'Textbooks|Humor',
15109 => 'Textbooks|Language Studies',
15110 => 'Textbooks|Language Studies|African Languages',
15111 => 'Textbooks|Language Studies|Ancient Languages',
15112 => 'Textbooks|Language Studies|Arabic',
15113 => 'Textbooks|Language Studies|Bilingual Editions',
15114 => 'Textbooks|Language Studies|Chinese',
15115 => 'Textbooks|Language Studies|English',
15116 => 'Textbooks|Language Studies|French',
15117 => 'Textbooks|Language Studies|German',
15118 => 'Textbooks|Language Studies|Hebrew',
15119 => 'Textbooks|Language Studies|Hindi',
15120 => 'Textbooks|Language Studies|Indigenous Languages',
15121 => 'Textbooks|Language Studies|Italian',
15122 => 'Textbooks|Language Studies|Japanese',
15123 => 'Textbooks|Language Studies|Korean',
15124 => 'Textbooks|Language Studies|Linguistics',
15125 => 'Textbooks|Language Studies|Other Language',
15126 => 'Textbooks|Language Studies|Portuguese',
15127 => 'Textbooks|Language Studies|Russian',
15128 => 'Textbooks|Language Studies|Spanish',
15129 => 'Textbooks|Language Studies|Speech Pathology',
15130 => 'Textbooks|Lifestyle & Home',
15131 => 'Textbooks|Lifestyle & Home|Antiques & Collectibles',
15132 => 'Textbooks|Lifestyle & Home|Crafts & Hobbies',
15133 => 'Textbooks|Lifestyle & Home|Gardening',
15134 => 'Textbooks|Lifestyle & Home|Pets',
15135 => 'Textbooks|Mathematics',
15136 => 'Textbooks|Mathematics|Advanced Mathematics',
15137 => 'Textbooks|Mathematics|Algebra',
15138 => 'Textbooks|Mathematics|Arithmetic',
15139 => 'Textbooks|Mathematics|Calculus',
15140 => 'Textbooks|Mathematics|Geometry',
15141 => 'Textbooks|Mathematics|Statistics',
15142 => 'Textbooks|Medicine',
15143 => 'Textbooks|Medicine|Anatomy & Physiology',
15144 => 'Textbooks|Medicine|Dentistry',
15145 => 'Textbooks|Medicine|Emergency Medicine',
15146 => 'Textbooks|Medicine|Genetics',
15147 => 'Textbooks|Medicine|Immunology',
15148 => 'Textbooks|Medicine|Neuroscience',
15149 => 'Textbooks|Medicine|Nursing',
15150 => 'Textbooks|Medicine|Pharmacology & Toxicology',
15151 => 'Textbooks|Medicine|Psychiatry',
15152 => 'Textbooks|Medicine|Psychology',
15153 => 'Textbooks|Medicine|Radiology',
15154 => 'Textbooks|Medicine|Veterinary',
15155 => 'Textbooks|Mysteries & Thrillers',
15156 => 'Textbooks|Mysteries & Thrillers|British Detectives',
15157 => 'Textbooks|Mysteries & Thrillers|Hard-Boiled',
15158 => 'Textbooks|Mysteries & Thrillers|Historical',
15159 => 'Textbooks|Mysteries & Thrillers|Police Procedural',
15160 => 'Textbooks|Mysteries & Thrillers|Short Stories',
15161 => 'Textbooks|Mysteries & Thrillers|Women Sleuths',
15162 => 'Textbooks|Nonfiction',
15163 => 'Textbooks|Nonfiction|Family & Relationships',
15164 => 'Textbooks|Nonfiction|Transportation',
15165 => 'Textbooks|Nonfiction|True Crime',
15166 => 'Textbooks|Parenting',
15167 => 'Textbooks|Philosophy',
15168 => 'Textbooks|Philosophy|Aesthetics',
15169 => 'Textbooks|Philosophy|Epistemology',
15170 => 'Textbooks|Philosophy|Ethics',
15171 => 'Textbooks|Philosophy|Philosophy of Language',
15172 => 'Textbooks|Philosophy|Logic',
15173 => 'Textbooks|Philosophy|Metaphysics',
15174 => 'Textbooks|Philosophy|Political Philosophy',
15175 => 'Textbooks|Philosophy|Philosophy of Religion',
15176 => 'Textbooks|Politics & Current Events',
15177 => 'Textbooks|Politics & Current Events|Current Events',
15178 => 'Textbooks|Politics & Current Events|Foreign Policy & International Relations',
15179 => 'Textbooks|Politics & Current Events|Local Governments',
15180 => 'Textbooks|Politics & Current Events|National Governments',
15181 => 'Textbooks|Politics & Current Events|Political Science',
15182 => 'Textbooks|Politics & Current Events|Public Administration',
15183 => 'Textbooks|Politics & Current Events|World Affairs',
15184 => 'Textbooks|Professional & Technical',
15185 => 'Textbooks|Professional & Technical|Design',
15186 => 'Textbooks|Professional & Technical|Language Arts & Disciplines',
15187 => 'Textbooks|Professional & Technical|Engineering',
15188 => 'Textbooks|Professional & Technical|Law',
15189 => 'Textbooks|Professional & Technical|Medical',
15190 => 'Textbooks|Reference',
15191 => 'Textbooks|Reference|Almanacs & Yearbooks',
15192 => 'Textbooks|Reference|Atlases & Maps',
15193 => 'Textbooks|Reference|Catalogs & Directories',
15194 => 'Textbooks|Reference|Consumer Guides',
15195 => 'Textbooks|Reference|Dictionaries & Thesauruses',
15196 => 'Textbooks|Reference|Encyclopedias',
15197 => 'Textbooks|Reference|Etiquette',
15198 => 'Textbooks|Reference|Quotations',
15199 => 'Textbooks|Reference|Study Aids',
15200 => 'Textbooks|Reference|Words & Language',
15201 => 'Textbooks|Reference|Writing',
15202 => 'Textbooks|Religion & Spirituality',
15203 => 'Textbooks|Religion & Spirituality|Bible Studies',
15204 => 'Textbooks|Religion & Spirituality|Bibles',
15205 => 'Textbooks|Religion & Spirituality|Buddhism',
15206 => 'Textbooks|Religion & Spirituality|Christianity',
15207 => 'Textbooks|Religion & Spirituality|Comparative Religion',
15208 => 'Textbooks|Religion & Spirituality|Hinduism',
15209 => 'Textbooks|Religion & Spirituality|Islam',
15210 => 'Textbooks|Religion & Spirituality|Judaism',
15211 => 'Textbooks|Religion & Spirituality|Spirituality',
15212 => 'Textbooks|Romance',
15213 => 'Textbooks|Romance|Contemporary',
15214 => 'Textbooks|Romance|Erotic Romance',
15215 => 'Textbooks|Romance|Paranormal',
15216 => 'Textbooks|Romance|Historical',
15217 => 'Textbooks|Romance|Short Stories',
15218 => 'Textbooks|Romance|Suspense',
15219 => 'Textbooks|Romance|Western',
15220 => 'Textbooks|Sci-Fi & Fantasy',
15221 => 'Textbooks|Sci-Fi & Fantasy|Fantasy',
15222 => 'Textbooks|Sci-Fi & Fantasy|Fantasy|Contemporary',
15223 => 'Textbooks|Sci-Fi & Fantasy|Fantasy|Epic',
15224 => 'Textbooks|Sci-Fi & Fantasy|Fantasy|Historical',
15225 => 'Textbooks|Sci-Fi & Fantasy|Fantasy|Paranormal',
15226 => 'Textbooks|Sci-Fi & Fantasy|Fantasy|Short Stories',
15227 => 'Textbooks|Sci-Fi & Fantasy|Science Fiction',
15228 => 'Textbooks|Sci-Fi & Fantasy|Science Fiction & Literature',
15229 => 'Textbooks|Sci-Fi & Fantasy|Science Fiction & Literature|Adventure',
15230 => 'Textbooks|Sci-Fi & Fantasy|Science Fiction & Literature|High Tech',
15231 => 'Textbooks|Sci-Fi & Fantasy|Science Fiction & Literature|Short Stories',
15232 => 'Textbooks|Science & Nature',
15233 => 'Textbooks|Science & Nature|Agriculture',
15234 => 'Textbooks|Science & Nature|Astronomy',
15235 => 'Textbooks|Science & Nature|Atmosphere',
15236 => 'Textbooks|Science & Nature|Biology',
15237 => 'Textbooks|Science & Nature|Chemistry',
15238 => 'Textbooks|Science & Nature|Earth Sciences',
15239 => 'Textbooks|Science & Nature|Ecology',
15240 => 'Textbooks|Science & Nature|Environment',
15241 => 'Textbooks|Science & Nature|Essays',
15242 => 'Textbooks|Science & Nature|Geography',
15243 => 'Textbooks|Science & Nature|Geology',
15244 => 'Textbooks|Science & Nature|History',
15245 => 'Textbooks|Science & Nature|Life Sciences',
15246 => 'Textbooks|Science & Nature|Nature',
15247 => 'Textbooks|Science & Nature|Physics',
15248 => 'Textbooks|Science & Nature|Reference',
15249 => 'Textbooks|Social Science',
15250 => 'Textbooks|Social Science|Anthropology',
15251 => 'Textbooks|Social Science|Archaeology',
15252 => 'Textbooks|Social Science|Civics',
15253 => 'Textbooks|Social Science|Government',
15254 => 'Textbooks|Social Science|Social Studies',
15255 => 'Textbooks|Social Science|Social Welfare',
15256 => 'Textbooks|Social Science|Society',
15257 => 'Textbooks|Social Science|Society|African Studies',
15258 => 'Textbooks|Social Science|Society|American Studies',
15259 => 'Textbooks|Social Science|Society|Asia Pacific Studies',
15260 => 'Textbooks|Social Science|Society|Cross-Cultural Studies',
15261 => 'Textbooks|Social Science|Society|European Studies',
15262 => 'Textbooks|Social Science|Society|Immigration & Emigration',
15263 => 'Textbooks|Social Science|Society|Indigenous Studies',
15264 => 'Textbooks|Social Science|Society|Latin & Caribbean Studies',
15265 => 'Textbooks|Social Science|Society|Middle Eastern Studies',
15266 => 'Textbooks|Social Science|Society|Race & Ethnicity Studies',
15267 => 'Textbooks|Social Science|Society|Sexuality Studies',
15268 => "Textbooks|Social Science|Society|Women's Studies",
15269 => 'Textbooks|Social Science|Sociology',
15270 => 'Textbooks|Sports & Outdoors',
15271 => 'Textbooks|Sports & Outdoors|Baseball',
15272 => 'Textbooks|Sports & Outdoors|Basketball',
15273 => 'Textbooks|Sports & Outdoors|Coaching',
15274 => 'Textbooks|Sports & Outdoors|Equestrian',
15275 => 'Textbooks|Sports & Outdoors|Extreme Sports',
15276 => 'Textbooks|Sports & Outdoors|Football',
15277 => 'Textbooks|Sports & Outdoors|Golf',
15278 => 'Textbooks|Sports & Outdoors|Hockey',
15279 => 'Textbooks|Sports & Outdoors|Motor Sports',
15280 => 'Textbooks|Sports & Outdoors|Mountaineering',
15281 => 'Textbooks|Sports & Outdoors|Outdoors',
15282 => 'Textbooks|Sports & Outdoors|Racket Sports',
15283 => 'Textbooks|Sports & Outdoors|Reference',
15284 => 'Textbooks|Sports & Outdoors|Soccer',
15285 => 'Textbooks|Sports & Outdoors|Training',
15286 => 'Textbooks|Sports & Outdoors|Water Sports',
15287 => 'Textbooks|Sports & Outdoors|Winter Sports',
15288 => 'Textbooks|Teaching & Learning',
15289 => 'Textbooks|Teaching & Learning|Adult Education',
15290 => 'Textbooks|Teaching & Learning|Curriculum & Teaching',
15291 => 'Textbooks|Teaching & Learning|Educational Leadership',
15292 => 'Textbooks|Teaching & Learning|Educational Technology',
15293 => 'Textbooks|Teaching & Learning|Family & Childcare',
15294 => 'Textbooks|Teaching & Learning|Information & Library Science',
15295 => 'Textbooks|Teaching & Learning|Learning Resources',
15296 => 'Textbooks|Teaching & Learning|Psychology & Research',
15297 => 'Textbooks|Teaching & Learning|Special Education',
15298 => 'Textbooks|Travel & Adventure',
15299 => 'Textbooks|Travel & Adventure|Africa',
15300 => 'Textbooks|Travel & Adventure|Americas',
15301 => 'Textbooks|Travel & Adventure|Americas|Canada',
15302 => 'Textbooks|Travel & Adventure|Americas|Latin America',
15303 => 'Textbooks|Travel & Adventure|Americas|United States',
15304 => 'Textbooks|Travel & Adventure|Asia',
15305 => 'Textbooks|Travel & Adventure|Caribbean',
15306 => 'Textbooks|Travel & Adventure|Essays & Memoirs',
15307 => 'Textbooks|Travel & Adventure|Europe',
15308 => 'Textbooks|Travel & Adventure|Middle East',
15309 => 'Textbooks|Travel & Adventure|Oceania',
15310 => 'Textbooks|Travel & Adventure|Specialty Travel',
15311 => 'Textbooks|Comics & Graphic Novels|Comics',
15312 => 'Textbooks|Reference|Manuals',
16001 => 'App Store|Stickers|Emoji & Expressions',
16003 => 'App Store|Stickers|Animals & Nature',
16005 => 'App Store|Stickers|Art',
16006 => 'App Store|Stickers|Celebrations',
16007 => 'App Store|Stickers|Celebrities',
16008 => 'App Store|Stickers|Comics & Cartoons',
16009 => 'App Store|Stickers|Eating & Drinking',
16010 => 'App Store|Stickers|Gaming',
16014 => 'App Store|Stickers|Movies & TV',
16015 => 'App Store|Stickers|Music',
16017 => 'App Store|Stickers|People',
16019 => 'App Store|Stickers|Places & Objects',
16021 => 'App Store|Stickers|Sports & Activities',
16025 => 'App Store|Stickers|Kids & Family',
16026 => 'App Store|Stickers|Fashion',
100000 => 'Music|Christian & Gospel',
100001 => 'Music|Classical|Art Song',
100002 => 'Music|Classical|Brass & Woodwinds',
100003 => 'Music|Classical|Solo Instrumental',
100004 => 'Music|Classical|Contemporary Era',
100005 => 'Music|Classical|Oratorio',
100006 => 'Music|Classical|Cantata',
100007 => 'Music|Classical|Electronic',
100008 => 'Music|Classical|Sacred',
100009 => 'Music|Classical|Guitar',
100010 => 'Music|Classical|Piano',
100011 => 'Music|Classical|Violin',
100012 => 'Music|Classical|Cello',
100013 => 'Music|Classical|Percussion',
100014 => 'Music|Electronic|Dubstep',
100015 => 'Music|Electronic|Bass',
100016 => 'Music|Hip-Hop/Rap|UK Hip-Hop',
100017 => 'Music|Reggae|Lovers Rock',
100018 => 'Music|Alternative|EMO',
100019 => 'Music|Alternative|Pop Punk',
100020 => 'Music|Alternative|Indie Pop',
100021 => 'Music|New Age|Yoga',
100022 => 'Music|Pop|Tribute',
100023 => 'Music|Pop|Shows',
100024 => 'Music|Cuban',
100025 => 'Music|Cuban|Mambo',
100026 => 'Music|Cuban|Chachacha',
100027 => 'Music|Cuban|Guajira',
100028 => 'Music|Cuban|Son',
100029 => 'Music|Cuban|Bolero',
100030 => 'Music|Cuban|Guaracha',
100031 => 'Music|Cuban|Timba',
100032 => 'Music|Soundtrack|Video Game',
100033 => 'Music|Indian|Regional Indian|Punjabi|Punjabi Pop',
100034 => 'Music|Indian|Regional Indian|Bengali|Rabindra Sangeet',
100035 => 'Music|Indian|Regional Indian|Malayalam',
100036 => 'Music|Indian|Regional Indian|Kannada',
100037 => 'Music|Indian|Regional Indian|Marathi',
100038 => 'Music|Indian|Regional Indian|Gujarati',
100039 => 'Music|Indian|Regional Indian|Assamese',
100040 => 'Music|Indian|Regional Indian|Bhojpuri',
100041 => 'Music|Indian|Regional Indian|Haryanvi',
100042 => 'Music|Indian|Regional Indian|Odia',
100043 => 'Music|Indian|Regional Indian|Rajasthani',
100044 => 'Music|Indian|Regional Indian|Urdu',
100045 => 'Music|Indian|Regional Indian|Punjabi',
100046 => 'Music|Indian|Regional Indian|Bengali',
100047 => 'Music|Indian|Indian Classical|Carnatic Classical',
100048 => 'Music|Indian|Indian Classical|Hindustani Classical',
40000000 => 'iTunes U',
40000001 => 'iTunes U|Business & Economics',
40000002 => 'iTunes U|Business & Economics|Economics',
40000003 => 'iTunes U|Business & Economics|Finance',
40000004 => 'iTunes U|Business & Economics|Hospitality',
40000005 => 'iTunes U|Business & Economics|Management',
40000006 => 'iTunes U|Business & Economics|Marketing',
40000007 => 'iTunes U|Business & Economics|Personal Finance',
40000008 => 'iTunes U|Business & Economics|Real Estate',
40000009 => 'iTunes U|Engineering',
40000010 => 'iTunes U|Engineering|Chemical & Petroleum Engineering',
40000011 => 'iTunes U|Engineering|Civil Engineering',
40000012 => 'iTunes U|Engineering|Computer Science',
40000013 => 'iTunes U|Engineering|Electrical Engineering',
40000014 => 'iTunes U|Engineering|Environmental Engineering',
40000015 => 'iTunes U|Engineering|Mechanical Engineering',
40000016 => 'iTunes U|Music, Art, & Design',
40000017 => 'iTunes U|Music, Art, & Design|Architecture',
40000019 => 'iTunes U|Music, Art, & Design|Art History',
40000020 => 'iTunes U|Music, Art, & Design|Dance',
40000021 => 'iTunes U|Music, Art, & Design|Film',
40000022 => 'iTunes U|Music, Art, & Design|Design',
40000023 => 'iTunes U|Music, Art, & Design|Interior Design',
40000024 => 'iTunes U|Music, Art, & Design|Music',
40000025 => 'iTunes U|Music, Art, & Design|Theater',
40000026 => 'iTunes U|Health & Medicine',
40000027 => 'iTunes U|Health & Medicine|Anatomy & Physiology',
40000028 => 'iTunes U|Health & Medicine|Behavioral Science',
40000029 => 'iTunes U|Health & Medicine|Dentistry',
40000030 => 'iTunes U|Health & Medicine|Diet & Nutrition',
40000031 => 'iTunes U|Health & Medicine|Emergency Medicine',
40000032 => 'iTunes U|Health & Medicine|Genetics',
40000033 => 'iTunes U|Health & Medicine|Gerontology',
40000034 => 'iTunes U|Health & Medicine|Health & Exercise Science',
40000035 => 'iTunes U|Health & Medicine|Immunology',
40000036 => 'iTunes U|Health & Medicine|Neuroscience',
40000037 => 'iTunes U|Health & Medicine|Pharmacology & Toxicology',
40000038 => 'iTunes U|Health & Medicine|Psychiatry',
40000039 => 'iTunes U|Health & Medicine|Global Health',
40000040 => 'iTunes U|Health & Medicine|Radiology',
40000041 => 'iTunes U|History',
40000042 => 'iTunes U|History|Ancient History',
40000043 => 'iTunes U|History|Medieval History',
40000044 => 'iTunes U|History|Military History',
40000045 => 'iTunes U|History|Modern History',
40000046 => 'iTunes U|History|African History',
40000047 => 'iTunes U|History|Asia-Pacific History',
40000048 => 'iTunes U|History|European History',
40000049 => 'iTunes U|History|Middle Eastern History',
40000050 => 'iTunes U|History|North American History',
40000051 => 'iTunes U|History|South American History',
40000053 => 'iTunes U|Communications & Journalism',
40000054 => 'iTunes U|Philosophy',
40000055 => 'iTunes U|Religion & Spirituality',
40000056 => 'iTunes U|Languages',
40000057 => 'iTunes U|Languages|African Languages',
40000058 => 'iTunes U|Languages|Ancient Languages',
40000061 => 'iTunes U|Languages|English',
40000063 => 'iTunes U|Languages|French',
40000064 => 'iTunes U|Languages|German',
40000065 => 'iTunes U|Languages|Italian',
40000066 => 'iTunes U|Languages|Linguistics',
40000068 => 'iTunes U|Languages|Spanish',
40000069 => 'iTunes U|Languages|Speech Pathology',
40000070 => 'iTunes U|Writing & Literature',
40000071 => 'iTunes U|Writing & Literature|Anthologies',
40000072 => 'iTunes U|Writing & Literature|Biography',
40000073 => 'iTunes U|Writing & Literature|Classics',
40000074 => 'iTunes U|Writing & Literature|Literary Criticism',
40000075 => 'iTunes U|Writing & Literature|Fiction',
40000076 => 'iTunes U|Writing & Literature|Poetry',
40000077 => 'iTunes U|Mathematics',
40000078 => 'iTunes U|Mathematics|Advanced Mathematics',
40000079 => 'iTunes U|Mathematics|Algebra',
40000080 => 'iTunes U|Mathematics|Arithmetic',
40000081 => 'iTunes U|Mathematics|Calculus',
40000082 => 'iTunes U|Mathematics|Geometry',
40000083 => 'iTunes U|Mathematics|Statistics',
40000084 => 'iTunes U|Science',
40000085 => 'iTunes U|Science|Agricultural',
40000086 => 'iTunes U|Science|Astronomy',
40000087 => 'iTunes U|Science|Atmosphere',
40000088 => 'iTunes U|Science|Biology',
40000089 => 'iTunes U|Science|Chemistry',
40000090 => 'iTunes U|Science|Ecology',
40000091 => 'iTunes U|Science|Geography',
40000092 => 'iTunes U|Science|Geology',
40000093 => 'iTunes U|Science|Physics',
40000094 => 'iTunes U|Social Science',
40000095 => 'iTunes U|Law & Politics|Law',
40000096 => 'iTunes U|Law & Politics|Political Science',
40000097 => 'iTunes U|Law & Politics|Public Administration',
40000098 => 'iTunes U|Social Science|Psychology',
40000099 => 'iTunes U|Social Science|Social Welfare',
40000100 => 'iTunes U|Social Science|Sociology',
40000101 => 'iTunes U|Society',
40000103 => 'iTunes U|Society|Asia Pacific Studies',
40000104 => 'iTunes U|Society|European Studies',
40000105 => 'iTunes U|Society|Indigenous Studies',
40000106 => 'iTunes U|Society|Latin & Caribbean Studies',
40000107 => 'iTunes U|Society|Middle Eastern Studies',
40000108 => "iTunes U|Society|Women's Studies",
40000109 => 'iTunes U|Teaching & Learning',
40000110 => 'iTunes U|Teaching & Learning|Curriculum & Teaching',
40000111 => 'iTunes U|Teaching & Learning|Educational Leadership',
40000112 => 'iTunes U|Teaching & Learning|Family & Childcare',
40000113 => 'iTunes U|Teaching & Learning|Learning Resources',
40000114 => 'iTunes U|Teaching & Learning|Psychology & Research',
40000115 => 'iTunes U|Teaching & Learning|Special Education',
40000116 => 'iTunes U|Music, Art, & Design|Culinary Arts',
40000117 => 'iTunes U|Music, Art, & Design|Fashion',
40000118 => 'iTunes U|Music, Art, & Design|Media Arts',
40000119 => 'iTunes U|Music, Art, & Design|Photography',
40000120 => 'iTunes U|Music, Art, & Design|Visual Art',
40000121 => 'iTunes U|Business & Economics|Entrepreneurship',
40000122 => 'iTunes U|Communications & Journalism|Broadcasting',
40000123 => 'iTunes U|Communications & Journalism|Digital Media',
40000124 => 'iTunes U|Communications & Journalism|Journalism',
40000125 => 'iTunes U|Communications & Journalism|Photojournalism',
40000126 => 'iTunes U|Communications & Journalism|Print',
40000127 => 'iTunes U|Communications & Journalism|Speech',
40000128 => 'iTunes U|Communications & Journalism|Writing',
40000129 => 'iTunes U|Health & Medicine|Nursing',
40000130 => 'iTunes U|Languages|Arabic',
40000131 => 'iTunes U|Languages|Chinese',
40000132 => 'iTunes U|Languages|Hebrew',
40000133 => 'iTunes U|Languages|Hindi',
40000134 => 'iTunes U|Languages|Indigenous Languages',
40000135 => 'iTunes U|Languages|Japanese',
40000136 => 'iTunes U|Languages|Korean',
40000137 => 'iTunes U|Languages|Other Languages',
40000138 => 'iTunes U|Languages|Portuguese',
40000139 => 'iTunes U|Languages|Russian',
40000140 => 'iTunes U|Law & Politics',
40000141 => 'iTunes U|Law & Politics|Foreign Policy & International Relations',
40000142 => 'iTunes U|Law & Politics|Local Governments',
40000143 => 'iTunes U|Law & Politics|National Governments',
40000144 => 'iTunes U|Law & Politics|World Affairs',
40000145 => 'iTunes U|Writing & Literature|Comparative Literature',
40000146 => 'iTunes U|Philosophy|Aesthetics',
40000147 => 'iTunes U|Philosophy|Epistemology',
40000148 => 'iTunes U|Philosophy|Ethics',
40000149 => 'iTunes U|Philosophy|Metaphysics',
40000150 => 'iTunes U|Philosophy|Political Philosophy',
40000151 => 'iTunes U|Philosophy|Logic',
40000152 => 'iTunes U|Philosophy|Philosophy of Language',
40000153 => 'iTunes U|Philosophy|Philosophy of Religion',
40000154 => 'iTunes U|Social Science|Archaeology',
40000155 => 'iTunes U|Social Science|Anthropology',
40000156 => 'iTunes U|Religion & Spirituality|Buddhism',
40000157 => 'iTunes U|Religion & Spirituality|Christianity',
40000158 => 'iTunes U|Religion & Spirituality|Comparative Religion',
40000159 => 'iTunes U|Religion & Spirituality|Hinduism',
40000160 => 'iTunes U|Religion & Spirituality|Islam',
40000161 => 'iTunes U|Religion & Spirituality|Judaism',
40000162 => 'iTunes U|Religion & Spirituality|Other Religions',
40000163 => 'iTunes U|Religion & Spirituality|Spirituality',
40000164 => 'iTunes U|Science|Environment',
40000165 => 'iTunes U|Society|African Studies',
40000166 => 'iTunes U|Society|American Studies',
40000167 => 'iTunes U|Society|Cross-cultural Studies',
40000168 => 'iTunes U|Society|Immigration & Emigration',
40000169 => 'iTunes U|Society|Race & Ethnicity Studies',
40000170 => 'iTunes U|Society|Sexuality Studies',
40000171 => 'iTunes U|Teaching & Learning|Educational Technology',
40000172 => 'iTunes U|Teaching & Learning|Information/Library Science',
40000173 => 'iTunes U|Languages|Dutch',
40000174 => 'iTunes U|Languages|Luxembourgish',
40000175 => 'iTunes U|Languages|Swedish',
40000176 => 'iTunes U|Languages|Norwegian',
40000177 => 'iTunes U|Languages|Finnish',
40000178 => 'iTunes U|Languages|Danish',
40000179 => 'iTunes U|Languages|Polish',
40000180 => 'iTunes U|Languages|Turkish',
40000181 => 'iTunes U|Languages|Flemish',
50000024 => 'Audiobooks',
50000040 => 'Audiobooks|Fiction',
50000041 => 'Audiobooks|Arts & Entertainment',
50000042 => 'Audiobooks|Biographies & Memoirs',
50000043 => 'Audiobooks|Business & Personal Finance',
50000044 => 'Audiobooks|Kids & Young Adults',
50000045 => 'Audiobooks|Classics',
50000046 => 'Audiobooks|Comedy',
50000047 => 'Audiobooks|Drama & Poetry',
50000048 => 'Audiobooks|Speakers & Storytellers',
50000049 => 'Audiobooks|History',
50000050 => 'Audiobooks|Languages',
50000051 => 'Audiobooks|Mysteries & Thrillers',
50000052 => 'Audiobooks|Nonfiction',
50000053 => 'Audiobooks|Religion & Spirituality',
50000054 => 'Audiobooks|Science & Nature',
50000055 => 'Audiobooks|Sci Fi & Fantasy',
50000056 => 'Audiobooks|Self-Development',
50000057 => 'Audiobooks|Sports & Outdoors',
50000058 => 'Audiobooks|Technology',
50000059 => 'Audiobooks|Travel & Adventure',
50000061 => 'Music|Spoken Word',
50000063 => 'Music|Disney',
50000064 => 'Music|French Pop',
50000066 => 'Music|German Pop',
50000068 => 'Music|German Folk',
50000069 => 'Audiobooks|Romance',
50000070 => 'Audiobooks|Audiobooks Latino',
50000071 => 'Books|Comics & Graphic Novels|Manga|Action',
50000072 => 'Books|Comics & Graphic Novels|Manga|Comedy',
50000073 => 'Books|Comics & Graphic Novels|Manga|Erotica',
50000074 => 'Books|Comics & Graphic Novels|Manga|Fantasy',
50000075 => 'Books|Comics & Graphic Novels|Manga|Four Cell Manga',
50000076 => 'Books|Comics & Graphic Novels|Manga|Gay & Lesbian',
50000077 => 'Books|Comics & Graphic Novels|Manga|Hard-Boiled',
50000078 => 'Books|Comics & Graphic Novels|Manga|Heroes',
50000079 => 'Books|Comics & Graphic Novels|Manga|Historical Fiction',
50000080 => 'Books|Comics & Graphic Novels|Manga|Mecha',
50000081 => 'Books|Comics & Graphic Novels|Manga|Mystery',
50000082 => 'Books|Comics & Graphic Novels|Manga|Nonfiction',
50000083 => 'Books|Comics & Graphic Novels|Manga|Religious',
50000084 => 'Books|Comics & Graphic Novels|Manga|Romance',
50000085 => 'Books|Comics & Graphic Novels|Manga|Romantic Comedy',
50000086 => 'Books|Comics & Graphic Novels|Manga|Science Fiction',
50000087 => 'Books|Comics & Graphic Novels|Manga|Sports',
50000088 => 'Books|Fiction & Literature|Light Novels',
50000089 => 'Books|Comics & Graphic Novels|Manga|Horror',
50000090 => 'Books|Comics & Graphic Novels|Comics',
50000091 => 'Books|Romance|Multicultural',
50000092 => 'Audiobooks|Erotica',
},
},
# --- iTunes-style metadata atoms (4-character tag IDs) ---
# A plain string value maps the atom ID directly to an ExifTool tag name;
# a hash adds Format and/or PrintConv.  The trailing "#N" comments appear to
# be reference-source citations used throughout this module -- keep them.
grup => { Name => 'Grouping', Avoid => 1 }, #10
hdvd => { #10
Name => 'HDVideo',
Format => 'int8u', #24
PrintConv => { 0 => 'No', 1 => 'Yes' }, # boolean flag
},
keyw => 'Keyword', #7
ldes => 'LongDescription', #10
pcst => { #7
Name => 'Podcast',
Format => 'int8u', #23
PrintConv => { 0 => 'No', 1 => 'Yes' }, # boolean flag
},
perf => 'Performer',
plID => { #10 (or TV season)
Name => 'PlayListID',
Format => 'int8u', # actually int64u, but split it up
},
purd => 'PurchaseDate', #7
purl => 'PodcastURL', #7
rtng => { #10
Name => 'Rating',
Format => 'int8u', #23
# iTunes advisory-rating values
PrintConv => {
0 => 'none',
1 => 'Explicit',
2 => 'Clean',
4 => 'Explicit (old)',
},
},
sfID => { #10
Name => 'AppleStoreCountry',
Format => 'int32u',
SeparateTable => 1, # large lookup: render as a separate table in the docs
# Apple storefront (country) IDs; the trailing comments give the
# corresponding two-letter country codes.
PrintConv => { #21
143441 => 'United States', # US
143442 => 'France', # FR
143443 => 'Germany', # DE
143444 => 'United Kingdom', # GB
143445 => 'Austria', # AT
143446 => 'Belgium', # BE
143447 => 'Finland', # FI
143448 => 'Greece', # GR
143449 => 'Ireland', # IE
143450 => 'Italy', # IT
143451 => 'Luxembourg', # LU
143452 => 'Netherlands', # NL
143453 => 'Portugal', # PT
143454 => 'Spain', # ES
143455 => 'Canada', # CA
143456 => 'Sweden', # SE
143457 => 'Norway', # NO
143458 => 'Denmark', # DK
143459 => 'Switzerland', # CH
143460 => 'Australia', # AU
143461 => 'New Zealand', # NZ
143462 => 'Japan', # JP
143463 => 'Hong Kong', # HK
143464 => 'Singapore', # SG
143465 => 'China', # CN
143466 => 'Republic of Korea', # KR
143467 => 'India', # IN
143468 => 'Mexico', # MX
143469 => 'Russia', # RU
143470 => 'Taiwan', # TW
143471 => 'Vietnam', # VN
143472 => 'South Africa', # ZA
143473 => 'Malaysia', # MY
143474 => 'Philippines', # PH
143475 => 'Thailand', # TH
143476 => 'Indonesia', # ID
143477 => 'Pakistan', # PK
143478 => 'Poland', # PL
143479 => 'Saudi Arabia', # SA
143480 => 'Turkey', # TR
143481 => 'United Arab Emirates', # AE
143482 => 'Hungary', # HU
143483 => 'Chile', # CL
143484 => 'Nepal', # NP
143485 => 'Panama', # PA
143486 => 'Sri Lanka', # LK
143487 => 'Romania', # RO
143489 => 'Czech Republic', # CZ
143491 => 'Israel', # IL
143492 => 'Ukraine', # UA
143493 => 'Kuwait', # KW
143494 => 'Croatia', # HR
143495 => 'Costa Rica', # CR
143496 => 'Slovakia', # SK
143497 => 'Lebanon', # LB
143498 => 'Qatar', # QA
143499 => 'Slovenia', # SI
143501 => 'Colombia', # CO
143502 => 'Venezuela', # VE
143503 => 'Brazil', # BR
143504 => 'Guatemala', # GT
143505 => 'Argentina', # AR
143506 => 'El Salvador', # SV
143507 => 'Peru', # PE
143508 => 'Dominican Republic', # DO
143509 => 'Ecuador', # EC
143510 => 'Honduras', # HN
143511 => 'Jamaica', # JM
143512 => 'Nicaragua', # NI
143513 => 'Paraguay', # PY
143514 => 'Uruguay', # UY
143515 => 'Macau', # MO
143516 => 'Egypt', # EG
143517 => 'Kazakhstan', # KZ
143518 => 'Estonia', # EE
143519 => 'Latvia', # LV
143520 => 'Lithuania', # LT
143521 => 'Malta', # MT
143523 => 'Moldova', # MD
143524 => 'Armenia', # AM
143525 => 'Botswana', # BW
143526 => 'Bulgaria', # BG
143528 => 'Jordan', # JO
143529 => 'Kenya', # KE
143530 => 'Macedonia', # MK
143531 => 'Madagascar', # MG
143532 => 'Mali', # ML
143533 => 'Mauritius', # MU
143534 => 'Niger', # NE
143535 => 'Senegal', # SN
143536 => 'Tunisia', # TN
143537 => 'Uganda', # UG
143538 => 'Anguilla', # AI
143539 => 'Bahamas', # BS
143540 => 'Antigua and Barbuda', # AG
143541 => 'Barbados', # BB
143542 => 'Bermuda', # BM
143543 => 'British Virgin Islands', # VG
143544 => 'Cayman Islands', # KY
143545 => 'Dominica', # DM
143546 => 'Grenada', # GD
143547 => 'Montserrat', # MS
143548 => 'St. Kitts and Nevis', # KN
143549 => 'St. Lucia', # LC
143550 => 'St. Vincent and The Grenadines', # VC
143551 => 'Trinidad and Tobago', # TT
143552 => 'Turks and Caicos', # TC
143553 => 'Guyana', # GY
143554 => 'Suriname', # SR
143555 => 'Belize', # BZ
143556 => 'Bolivia', # BO
143557 => 'Cyprus', # CY
143558 => 'Iceland', # IS
143559 => 'Bahrain', # BH
143560 => 'Brunei Darussalam', # BN
143561 => 'Nigeria', # NG
143562 => 'Oman', # OM
143563 => 'Algeria', # DZ
143564 => 'Angola', # AO
143565 => 'Belarus', # BY
143566 => 'Uzbekistan', # UZ
143568 => 'Azerbaijan', # AZ
143571 => 'Yemen', # YE
143572 => 'Tanzania', # TZ
143573 => 'Ghana', # GH
143575 => 'Albania', # AL
143576 => 'Benin', # BJ
143577 => 'Bhutan', # BT
143578 => 'Burkina Faso', # BF
143579 => 'Cambodia', # KH
143580 => 'Cape Verde', # CV
143581 => 'Chad', # TD
143582 => 'Republic of the Congo', # CG
143583 => 'Fiji', # FJ
143584 => 'Gambia', # GM
143585 => 'Guinea-Bissau', # GW
143586 => 'Kyrgyzstan', # KG
143587 => "Lao People's Democratic Republic", # LA
143588 => 'Liberia', # LR
143589 => 'Malawi', # MW
143590 => 'Mauritania', # MR
143591 => 'Federated States of Micronesia', # FM
143592 => 'Mongolia', # MN
143593 => 'Mozambique', # MZ
143594 => 'Namibia', # NA
143595 => 'Palau', # PW
143597 => 'Papua New Guinea', # PG
143598 => 'Sao Tome and Principe', # ST (São Tomé and Príncipe)
143599 => 'Seychelles', # SC
143600 => 'Sierra Leone', # SL
143601 => 'Solomon Islands', # SB
143602 => 'Swaziland', # SZ
143603 => 'Tajikistan', # TJ
143604 => 'Turkmenistan', # TM
143605 => 'Zimbabwe', # ZW
},
},
# 'soXX' atoms carry alternate strings used by iTunes for sorting
soaa => 'SortAlbumArtist', #10
soal => 'SortAlbum', #10
soar => 'SortArtist', #10
soco => 'SortComposer', #10
sonm => 'SortName', #10
sosn => 'SortShow', #10
stik => { #10
Name => 'MediaType', # iTunes media kind of the file
Format => 'int8u', #23
PrintConvColumns => 2,
PrintConv => { #(http://weblog.xanga.com/gryphondwb/615474010/iphone-ringtones---what-did-itunes-741-really-do.html)
0 => 'Movie (old)', #forum9059 (was Movie)
1 => 'Normal (Music)',
2 => 'Audiobook',
5 => 'Whacked Bookmark',
6 => 'Music Video',
9 => 'Movie', #forum9059 (was Short Film)
10 => 'TV Show',
11 => 'Booklet',
14 => 'Ringtone',
21 => 'Podcast', #15
23 => 'iTunes U', #forum9059
},
},
rate => 'RatingPercent', #PH
titl => { Name => 'Title', Avoid => 1 },
# TV-show related atoms
tven => 'TVEpisodeID', #7
tves => { #7/10
Name => 'TVEpisode',
Format => 'int32u',
},
tvnn => 'TVNetworkName', #7
tvsh => 'TVShow', #10
tvsn => { #7/10
Name => 'TVSeason',
Format => 'int32u',
},
yrrc => 'Year', #(ffmpeg source)
itnu => { #PH (iTunes 10.5)
Name => 'iTunesU',
Format => 'int8s',
Description => 'iTunes U', # display name (space can't appear in the tag name)
PrintConv => { 0 => 'No', 1 => 'Yes' },
},
#https://github.com/communitymedia/mediautilities/blob/master/src/net/sourceforge/jaad/mp4/boxes/BoxTypes.java
gshh => { Name => 'GoogleHostHeader', Format => 'string' },
gspm => { Name => 'GooglePingMessage', Format => 'string' },
gspu => { Name => 'GooglePingURL', Format => 'string' },
gssd => { Name => 'GoogleSourceData', Format => 'string' },
gsst => { Name => 'GoogleStartTime', Format => 'string' },
gstd => {
Name => 'GoogleTrackDuration',
Format => 'string',
ValueConv => '$val / 1000', # stored value is in ms; report seconds
ValueConvInv => '$val * 1000',
PrintConv => 'ConvertDuration($val)',
# Inverse: parse a printed duration ("N s" or "[D days ]H:MM:SS") back to
# seconds.  The q{} body is eval'd by ExifTool when writing this tag.
# NOTE(review): the split pattern uses a capturing group, so split also
# returns the ":" / " days " delimiters into @a; the "@a == 4" days branch
# then cannot match and the accumulation mixes delimiter strings into the
# arithmetic.  Looks like this should be non-capturing /(?::| days )/ --
# verify against a round-trip write test before changing.
# NOTE(review): the leading '-' is stripped from $val only AFTER the split,
# so $a[0] may still carry the sign -- confirm negative durations round-trip.
PrintConvInv => q{
$val =~ s/ s$//;
my @a = split /(:| days )/, $val;
my $sign = ($val =~ s/^-//) ? -1 : 1;
$a[0] += shift(@a) * 24 if @a == 4;
$a[0] += shift(@a) * 60 while @a > 1;
return $a[0] * $sign;
},
},
# atoms observed in AAX audiobooks (ref PH)
"\xa9cpy" => { Name => 'Copyright', Avoid => 1, Groups => { 2 => 'Author' } },
"\xa9pub" => 'Publisher',
"\xa9nrt" => 'Narrator',
'@pti' => 'ParentTitle', # (guess -- same as "\xa9nam")
'@PST' => 'ParentShortTitle', # (guess -- same as "\xa9nam")
'@ppi' => 'ParentProductID', # (guess -- same as 'prID')
'@sti' => 'ShortTitle', # (guess -- same as "\xa9nam")
prID => 'ProductID',
rldt => { Name => 'ReleaseDate', Groups => { 2 => 'Time' }},
CDEK => { Name => 'Unknown_CDEK', Unknown => 1 }, # eg: "B004ZMTFEG" - used in URL's ("asin=")
CDET => { Name => 'Unknown_CDET', Unknown => 1 }, # eg: "ADBL"
VERS => 'ProductVersion',
GUID => 'GUID',
AACR => { Name => 'Unknown_AACR', Unknown => 1 }, # eg: "CR!1T1H1QH6WX7T714G2BMFX3E9MC4S"
# ausr - 30 bytes (User Alias?)
);
# tag decoded from timed face records
%Image::ExifTool::QuickTime::FaceInfo = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&ProcessMOV,
    # container atom wrapping the individual timed face records
    crec => {
        Name => 'FaceRec',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::FaceRec' },
    },
);
# tag decoded from timed face records
%Image::ExifTool::QuickTime::FaceRec = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&ProcessMOV,
    # each face item is a small 'mebx' timed-metadata directory whose
    # entries are decoded via the Keys table
    cits => {
        Name => 'FaceItem',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::Keys',
            ProcessProc => \&Process_mebx,
        },
    },
);
# item list keys (ref PH)
%Image::ExifTool::QuickTime::Keys = (
    PROCESS_PROC => \&ProcessKeys,
    WRITE_PROC => \&WriteKeys,
    CHECK_PROC => \&CheckQTValue,
    VARS => { LONG_TAGS => 3 },
    WRITABLE => 1,
    # (not PREFERRED when writing)
    GROUPS => { 1 => 'Keys' },
    WRITE_GROUP => 'Keys',
    LANG_INFO => \&GetLangInfo,
    FORMAT => 'string',
    NOTES => q{
        This directory contains a list of key names which are used to decode tags
        written by the "mdta" handler. Also in this table are a few tags found in
        timed metadata that are not yet writable by ExifTool. The prefix of
        "com.apple.quicktime." has been removed from the TagID's below. These tags
        support alternate languages in the same way as the
        L<ItemList|Image::ExifTool::TagNames/QuickTime ItemList Tags> tags. Note
        that by default,
        L<ItemList|Image::ExifTool::TagNames/QuickTime ItemList Tags> and
        L<UserData|Image::ExifTool::TagNames/QuickTime UserData Tags> tags are
        preferred when writing, so to create a tag when a same-named tag exists in
        either of these tables, either the "Keys" location must be specified (eg.
        C<-Keys:Author=Phil> on the command line), or the PREFERRED level must be
        changed via L<the config file|../config.html#PREF>.
    },
    version => 'Version',
    album => 'Album',
    artist => { },
    artwork => { },
    author => { Name => 'Author', Groups => { 2 => 'Author' } },
    comment => { },
    copyright => { Name => 'Copyright', Groups => { 2 => 'Author' } },
    creationdate=> {
        Name => 'CreationDate',
        Groups => { 2 => 'Time' },
        Shift => 'Time',
        ValueConv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::ConvertXMPDate($val,1);
            $val =~ s/([-+]\d{2})(\d{2})$/$1:$2/; # add colon to timezone if necessary
            return $val;
        },
        ValueConvInv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::FormatXMPDate($val);
            $val =~ s/([-+]\d{2}):(\d{2})$/$1$2/; # remove time zone colon
            return $val;
        },
        PrintConv => '$self->ConvertDateTime($val)',
        PrintConvInv => '$self->InverseDateTime($val)',
    },
    description => { },
    director => { },
    displayname => { Name => 'DisplayName' },
    title => { }, #22
    genre => { },
    information => { },
    keywords => { },
    producer => { }, #22
    make => { Name => 'Make', Groups => { 2 => 'Camera' } },
    model => { Name => 'Model', Groups => { 2 => 'Camera' } },
    publisher => { },
    software => { },
    year => { Groups => { 2 => 'Time' } },
    'camera.identifier' => 'CameraIdentifier', # (iPhone 4)
    'camera.framereadouttimeinmicroseconds' => { # (iPhone 4)
        Name => 'FrameReadoutTime',
        ValueConv => '$val * 1e-6',
        ValueConvInv => 'int($val * 1e6 + 0.5)',
        PrintConv => '$val * 1e6 . " microseconds"',
        PrintConvInv => '$val =~ s/ .*//; $val * 1e-6',
    },
    'location.ISO6709' => {
        Name => 'GPSCoordinates',
        Groups => { 2 => 'Location' },
        ValueConv => \&ConvertISO6709,
        ValueConvInv => \&ConvInvISO6709,
        PrintConv => \&PrintGPSCoordinates,
        PrintConvInv => \&PrintInvGPSCoordinates,
    },
    'location.name' => { Name => 'LocationName', Groups => { 2 => 'Location' } },
    'location.body' => { Name => 'LocationBody', Groups => { 2 => 'Location' } },
    'location.note' => { Name => 'LocationNote', Groups => { 2 => 'Location' } },
    'location.role' => {
        Name => 'LocationRole',
        Groups => { 2 => 'Location' },
        PrintConv => {
            0 => 'Shooting Location',
            1 => 'Real Location',
            2 => 'Fictional Location',
        },
    },
    'location.date' => {
        Name => 'LocationDate',
        Groups => { 2 => 'Time' },
        Shift => 'Time',
        ValueConv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::ConvertXMPDate($val);
            $val =~ s/([-+]\d{2})(\d{2})$/$1:$2/; # add colon to timezone if necessary
            return $val;
        },
        ValueConvInv => q{
            require Image::ExifTool::XMP;
            $val = Image::ExifTool::XMP::FormatXMPDate($val);
            $val =~ s/([-+]\d{2}):(\d{2})$/$1$2/; # remove time zone colon
            return $val;
        },
        PrintConv => '$self->ConvertDateTime($val)',
        PrintConvInv => '$self->InverseDateTime($val)',
    },
    'direction.facing' => { Name => 'CameraDirection', Groups => { 2 => 'Location' } },
    'direction.motion' => { Name => 'CameraMotion', Groups => { 2 => 'Location' } },
    # (a duplicate 'location.body' entry, identical to the one defined above, was removed here)
    'player.version' => 'PlayerVersion',
    'player.movie.visual.brightness'=> 'Brightness',
    'player.movie.visual.color' => 'Color',
    'player.movie.visual.tint' => 'Tint',
    'player.movie.visual.contrast' => 'Contrast',
    'player.movie.audio.gain' => 'AudioGain',
    'player.movie.audio.treble' => 'Trebel', # (sic -- established tag name, not changed)
    'player.movie.audio.bass' => 'Bass',
    'player.movie.audio.balance' => 'Balance',
    'player.movie.audio.pitchshift' => 'PitchShift',
    'player.movie.audio.mute' => {
        Name => 'Mute',
        Format => 'int8u',
        PrintConv => { 0 => 'Off', 1 => 'On' },
    },
    'rating.user' => 'UserRating', # (Canon ELPH 510 HS)
    'collection.user' => 'UserCollection', #22
    'Encoded_With' => 'EncodedWith',
    #
    # the following tags aren't in the com.apple.quicktime namespace:
    #
    'com.apple.photos.captureMode' => 'CaptureMode',
    'com.android.version' => 'AndroidVersion',
    #
    # also seen
    #
    # com.divergentmedia.clipwrap.model ('NEX-FS700EK')
    # com.divergentmedia.clipwrap.model1 ('49')
    # com.divergentmedia.clipwrap.model2 ('0')
    # com.divergentmedia.clipwrap.manufacturer ('Sony')
    # com.divergentmedia.clipwrap.originalDateTime ('2013/2/6 10:30:40+0200')
    #
    # seen in timed metadata (mebx), and added dynamically to the table
    # via SaveMetaKeys(). NOTE: these tags are not writable!
    #
    # (mdta)com.apple.quicktime.video-orientation (dtyp=66, int16s)
    'video-orientation' => { Name => 'VideoOrientation', Writable => 0 },
    # (mdta)com.apple.quicktime.live-photo-info (dtyp=com.apple.quicktime.com.apple.quicktime.live-photo-info)
    'live-photo-info' => {
        Name => 'LivePhotoInfo',
        Writable => 0,
        # not sure what these values mean, but unpack them anyway - PH
        # (ignore the fact that the "f" and "l" unpacks won't work on a big-endian machine)
        ValueConv => 'join " ",unpack "VfVVf6c4lCCcclf4Vvv", $val',
    },
    # (mdta)com.apple.quicktime.still-image-time (dtyp=65, int8s)
    'still-image-time' => { # (found in live photo)
        Name => 'StillImageTime',
        Writable => 0,
        Notes => q{
            this tag always has a value of -1; the time of the still image is obtained
            from the associated SampleTime
        },
    },
    # (mdta)com.apple.quicktime.detected-face (dtyp='com.apple.quicktime.detected-face')
    'detected-face' => {
        Name => 'FaceInfo',
        Writable => 0,
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::FaceInfo' },
    },
    # ---- detected-face fields ( ----
    # --> back here after a round trip through FaceInfo -> FaceRec -> FaceItem
    # (fiel)com.apple.quicktime.detected-face.bounds (dtyp=80, float[8])
    'detected-face.bounds' => {
        Name => 'DetectedFaceBounds',
        Writable => 0,
        # round to a reasonable number of decimal places
        PrintConv => 'my @a=split " ",$val;$_=int($_*1e6+.5)/1e6 foreach @a;join " ",@a',
        PrintConvInv => '$val',
    },
    # (fiel)com.apple.quicktime.detected-face.face-id (dtyp=77, int32u)
    'detected-face.face-id' => { Name => 'DetectedFaceID', Writable => 0 },
    # (fiel)com.apple.quicktime.detected-face.roll-angle (dtyp=23, float)
    'detected-face.roll-angle' => { Name => 'DetectedFaceRollAngle', Writable => 0 },
    # (fiel)com.apple.quicktime.detected-face.yaw-angle (dtyp=23, float)
    'detected-face.yaw-angle' => { Name => 'DetectedFaceYawAngle', Writable => 0 },
);
# iTunes info ('----') atoms
%Image::ExifTool::QuickTime::iTunesInfo = (
    PROCESS_PROC => \&ProcessMOV,
    GROUPS => { 2 => 'Audio' },
    NOTES => q{
        ExifTool will extract any iTunesInfo tags that exist, even if they are not
        defined in this table.
    },
    # 'mean'/'name'/'data' atoms form a triplet, but unfortunately
    # I haven't been able to find any documentation on this.
    # 'mean' is normally 'com.apple.iTunes'
    mean => {
        Name => 'Mean',
        # the 'Triplet' flag tells ProcessMOV() to generate
        # a single tag from the mean/name/data triplet
        Triplet => 1,
        Hidden => 1,
    },
    name => {
        Name => 'Name',
        Triplet => 1,
        Hidden => 1,
    },
    data => {
        Name => 'Data',
        Triplet => 1,
        Hidden => 1,
    },
    # the tag ID's below are composed from "mean/name",
    # but "mean/" is omitted if it is "com.apple.iTunes/":
    'iTunMOVI' => {
        Name => 'iTunMOVI',
        # value is a property list, decoded via the PLIST module
        SubDirectory => { TagTable => 'Image::ExifTool::PLIST::Main' },
    },
    'tool' => {
        Name => 'iTunTool',
        Description => 'iTunTool',
        Format => 'int32u',
        # displayed as an 8-digit hex version code
        PrintConv => 'sprintf("0x%.8x",$val)',
    },
    'iTunEXTC' => {
        Name => 'ContentRating',
        Notes => 'standard | rating | score | reasons',
        # eg. 'us-tv|TV-14|500|V', 'mpaa|PG-13|300|For violence and sexuality'
        # (see http://shadowofged.blogspot.ca/2008/06/itunes-content-ratings.html)
    },
    'iTunNORM' => {
        Name => 'VolumeNormalization',
        # strip leading zeroes from each hex word and any leading whitespace
        PrintConv => '$val=~s/ 0+(\w)/ $1/g; $val=~s/^\s+//; $val',
    },
    'iTunSMPB' => {
        Name => 'iTunSMPB',
        Description => 'iTunSMPB',
        # hex format, similar to iTunNORM, but 12 words instead of 10,
        # and 4th word is 16 hex digits (all others are 8)
        # (gives AAC encoder delay, ref http://code.google.com/p/l-smash/issues/detail?id=1)
        PrintConv => '$val=~s/ 0+(\w)/ $1/g; $val=~s/^\s+//; $val',
    },
    # (CDDB = Compact Disc DataBase)
    # iTunes_CDDB_1 = <CDDB1 disk ID>+<# tracks>+<logical block address for each track>...
    'iTunes_CDDB_1' => 'CDDB1Info',
    'iTunes_CDDB_TrackNumber' => 'CDDBTrackNumber',
    'Encoding Params' => {
        Name => 'EncodingParams',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::EncodingParams' },
    },
    # also heard about 'iTunPGAP', but I haven't seen a sample
    DISCNUMBER => 'DiscNumber', #PH
    TRACKNUMBER => 'TrackNumber', #PH
    popularimeter => 'Popularimeter', #PH
);
# iTunes audio encoding parameters
# ref https://developer.apple.com/library/mac/#documentation/MusicAudio/Reference/AudioCodecServicesRef/Reference/reference.html
%Image::ExifTool::QuickTime::EncodingParams = (
    PROCESS_PROC => \&ProcessEncodingParams,
    GROUPS => { 2 => 'Audio' },
    # tag IDs below are 4-character Audio Codec Services property codes
    # (ref URL in the comment preceding this table)
    # (I have commented out the ones that don't have integer values because they
    # probably don't appear, and definitely wouldn't work with current decoding - PH)
    # global codec properties
    #'lnam' => 'AudioCodecName',
    #'lmak' => 'AudioCodecManufacturer',
    #'lfor' => 'AudioCodecFormat',
    'vpk?' => 'AudioHasVariablePacketByteSizes',
    #'ifm#' => 'AudioSupportedInputFormats',
    #'ofm#' => 'AudioSupportedOutputFormats',
    #'aisr' => 'AudioAvailableInputSampleRates',
    #'aosr' => 'AudioAvailableOutputSampleRates',
    'abrt' => 'AudioAvailableBitRateRange',
    'mnip' => 'AudioMinimumNumberInputPackets',
    'mnop' => 'AudioMinimumNumberOutputPackets',
    'cmnc' => 'AudioAvailableNumberChannels',
    'lmrc' => 'AudioDoesSampleRateConversion',
    #'aicl' => 'AudioAvailableInputChannelLayoutTags',
    #'aocl' => 'AudioAvailableOutputChannelLayoutTags',
    #'if4o' => 'AudioInputFormatsForOutputFormat',
    #'of4i' => 'AudioOutputFormatsForInputFormat',
    #'acfi' => 'AudioFormatInfo',
    # instance codec properties
    'tbuf' => 'AudioInputBufferSize',
    'pakf' => 'AudioPacketFrameSize',
    'pakb' => 'AudioMaximumPacketByteSize',
    #'ifmt' => 'AudioCurrentInputFormat',
    #'ofmt' => 'AudioCurrentOutputFormat',
    #'kuki' => 'AudioMagicCookie',
    'ubuf' => 'AudioUsedInputBufferSize',
    'init' => 'AudioIsInitialized',
    'brat' => 'AudioCurrentTargetBitRate',
    #'cisr' => 'AudioCurrentInputSampleRate',
    #'cosr' => 'AudioCurrentOutputSampleRate',
    'srcq' => 'AudioQualitySetting',
    #'brta' => 'AudioApplicableBitRateRange',
    #'isra' => 'AudioApplicableInputSampleRates',
    #'osra' => 'AudioApplicableOutputSampleRates',
    'pad0' => 'AudioZeroFramesPadded',
    'prmm' => 'AudioCodecPrimeMethod',
    #'prim' => 'AudioCodecPrimeInfo',
    #'icl ' => 'AudioInputChannelLayout',
    #'ocl ' => 'AudioOutputChannelLayout',
    #'acs ' => 'AudioCodecSettings',
    #'acfl' => 'AudioCodecFormatList',
    'acbf' => 'AudioBitRateControlMode',
    'vbrq' => 'AudioVBRQuality',
    'mdel' => 'AudioMinimumDelayMode',
    # deprecated
    'pakd' => 'AudioRequiresPacketDescription',
    #'brt#' => 'AudioAvailableBitRates',
    'acef' => 'AudioExtendFrequencies',
    'ursr' => 'AudioUseRecommendedSampleRate',
    'oppr' => 'AudioOutputPrecedence',
    #'loud' => 'AudioCurrentLoudnessStatistics',
    # others
    'vers' => 'AudioEncodingParamsVersion', #PH
    'cdcv' => { #PH
        Name => 'AudioComponentVersion',
        # int32u packed as int16u.int8u.int8u -> "major.minor.fix"
        ValueConv => 'join ".", unpack("ncc", pack("N",$val))',
    },
);
# print to video data block
%Image::ExifTool::QuickTime::Video = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    # byte 0: how the movie should be displayed
    0 => {
        Name => 'DisplaySize',
        PrintConv => {
            0 => 'Normal', 1 => 'Double Size', 2 => 'Half Size',
            3 => 'Full Screen', 4 => 'Current Size',
        },
    },
    # byte 6: slide-show flag
    6 => {
        Name => 'SlideShow',
        PrintConv => { 0 => 'No', 1 => 'Yes' },
    },
);
# 'hnti' atoms
%Image::ExifTool::QuickTime::HintInfo = (
    GROUPS => { 2 => 'Video' },
    PROCESS_PROC => \&ProcessMOV,
    # NOTE: the trailing space in these 4-character atom IDs is significant
    'rtp ' => {
        Name => 'RealtimeStreamingProtocol',
        PrintConv => '$val=~s/^sdp /(SDP) /; $val',
    },
    'sdp ' => 'StreamingDataProtocol',
);
# 'hinf' atoms
%Image::ExifTool::QuickTime::HintTrackInfo = (
    PROCESS_PROC => \&ProcessMOV,
    GROUPS => { 2 => 'Video' },
    # (both the documented uppercase and the observed lowercase atom IDs are
    # included below, along with the 32-bit 'totl'/'npck'/'tpay' variants)
    trpY => { Name => 'TotalBytes', Format => 'int64u' }, #(documented)
    trpy => { Name => 'TotalBytes', Format => 'int64u' }, #(observed)
    totl => { Name => 'TotalBytes', Format => 'int32u' },
    nump => { Name => 'NumPackets', Format => 'int64u' },
    npck => { Name => 'NumPackets', Format => 'int32u' },
    tpyl => { Name => 'TotalBytesNoRTPHeaders', Format => 'int64u' },
    tpaY => { Name => 'TotalBytesNoRTPHeaders', Format => 'int32u' }, #(documented)
    tpay => { Name => 'TotalBytesNoRTPHeaders', Format => 'int32u' }, #(observed)
    maxr => {
        Name => 'MaxDataRate',
        Format => 'int32u',
        Count => 2,
        # first value is the measurement interval in ms, second is the byte count
        PrintConv => 'my @a=split(" ",$val);sprintf("%d bytes in %.3f s",$a[1],$a[0]/1000)',
    },
    dmed => { Name => 'MediaTrackBytes', Format => 'int64u' },
    dimm => { Name => 'ImmediateDataBytes', Format => 'int64u' },
    drep => { Name => 'RepeatedDataBytes', Format => 'int64u' },
    tmin => {
        Name => 'MinTransmissionTime',
        Format => 'int32u',
        PrintConv => 'sprintf("%.3f s",$val/1000)', # stored in ms
    },
    tmax => {
        Name => 'MaxTransmissionTime',
        Format => 'int32u',
        PrintConv => 'sprintf("%.3f s",$val/1000)', # stored in ms
    },
    pmax => { Name => 'LargestPacketSize', Format => 'int32u' },
    dmax => {
        Name => 'LargestPacketDuration',
        Format => 'int32u',
        PrintConv => 'sprintf("%.3f s",$val/1000)', # stored in ms
    },
    payt => {
        Name => 'PayloadType',
        Format => 'undef', # (necessary to prevent decoding as string!)
        # int32u payload number followed by a counted string payload name
        # (the substr offset of 5 skips the string's count byte at offset 4)
        ValueConv => 'unpack("N",$val) . " " . substr($val, 5)',
        PrintConv => '$val=~s/ /, /;$val',
    },
);
# MP4 media box (ref 5)
%Image::ExifTool::QuickTime::Media = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 1 => 'Track#', 2 => 'Video' },
    NOTES => 'MP4 media box.',
    # the three standard children of the 'mdia' box
    mdhd => { Name => 'MediaHeader', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::MediaHeader' } },
    hdlr => { Name => 'Handler',     SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Handler' } },
    minf => { Name => 'MediaInfo',   SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::MediaInfo' } },
);
# MP4 media header box (ref 5)
%Image::ExifTool::QuickTime::MediaHeader = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    WRITE_PROC => \&Image::ExifTool::WriteBinaryData,
    CHECK_PROC => \&Image::ExifTool::CheckBinaryData,
    GROUPS => { 1 => 'Track#', 2 => 'Video' },
    FORMAT => 'int32u',
    # entries 0-4 are saved as data members because later entries depend on
    # them (the header version changes field sizes via the Hooks below, and
    # MediaTimeScale is needed to convert MediaDuration)
    DATAMEMBER => [ 0, 1, 2, 3, 4 ],
    0 => {
        Name => 'MediaHeaderVersion',
        RawConv => '$$self{MediaHeaderVersion} = $val',
    },
    1 => {
        Name => 'MediaCreateDate',
        Groups => { 2 => 'Time' },
        %timeInfo,
        # this is int64u if MediaHeaderVersion == 1 (ref 5/13)
        Hook => '$$self{MediaHeaderVersion} and $format = "int64u", $varSize += 4',
    },
    2 => {
        Name => 'MediaModifyDate',
        Groups => { 2 => 'Time' },
        %timeInfo,
        # this is int64u if MediaHeaderVersion == 1 (ref 5/13)
        Hook => '$$self{MediaHeaderVersion} and $format = "int64u", $varSize += 4',
    },
    3 => {
        Name => 'MediaTimeScale',
        # saved for use in the MediaDuration conversion below
        RawConv => '$$self{MediaTS} = $val',
    },
    4 => {
        Name => 'MediaDuration',
        # convert from media time-scale units to seconds when the scale is known
        RawConv => '$$self{MediaTS} ? $val / $$self{MediaTS} : $val',
        PrintConv => '$$self{MediaTS} ? ConvertDuration($val) : $val',
        # this is int64u if MediaHeaderVersion == 1 (ref 5/13)
        Hook => '$$self{MediaHeaderVersion} and $format = "int64u", $varSize += 4',
    },
    5 => {
        Name => 'MediaLanguageCode',
        Format => 'int16u',
        # a value of zero means "unspecified", so return nothing in that case
        RawConv => '$val ? $val : undef',
        # allow both Macintosh (for MOV files) and ISO (for MP4 files) language codes
        # (values below 0x400 are Macintosh codes; otherwise three packed 5-bit
        # values are offset by 0x60 to recover the ISO 639-2 letters)
        ValueConv => '$val < 0x400 ? $val : pack "C*", map { (($val>>$_)&0x1f)+0x60 } 10, 5, 0',
        PrintConv => q{
            return $val unless $val =~ /^\d+$/;
            require Image::ExifTool::Font;
            return $Image::ExifTool::Font::ttLang{Macintosh}{$val} || "Unknown ($val)";
        },
    },
);
# MP4 media information box (ref 5)
%Image::ExifTool::QuickTime::MediaInfo = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 1 => 'Track#', 2 => 'Video' },
    NOTES => 'MP4 media info box.',
    # media-type-specific headers
    vmhd => { Name => 'VideoHeader', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::VideoHeader' } },
    smhd => { Name => 'AudioHeader', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::AudioHeader' } },
    hmhd => { Name => 'HintHeader',  SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::HintHeader' } },
    nmhd => { Name => 'NullMediaHeader', Flags => ['Binary','Unknown'] },
    # (keep the name 'DataInfo' -- it is used to recognize this directory when writing)
    dinf => { Name => 'DataInfo', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::DataInfo' } },
    gmhd => { Name => 'GenMediaHeader', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::GenMediaHeader' } },
    hdlr => { Name => 'Handler', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Handler' } }, #PH
    stbl => { Name => 'SampleTable', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::SampleTable' } },
);
# MP4 video media header (ref 5)
%Image::ExifTool::QuickTime::VideoHeader = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Video' },
    FORMAT => 'int16u',
    NOTES => 'MP4 video media header.',
    # QuickTime graphics transfer mode
    2 => {
        Name => 'GraphicsMode',
        PrintHex => 1,
        PrintConv => \%graphicsMode,
        SeparateTable => 'GraphicsMode',
    },
    # red/green/blue components used by the transfer mode
    3 => {
        Name => 'OpColor',
        Format => 'int16u[3]',
    },
);
# MP4 audio media header (ref 5)
%Image::ExifTool::QuickTime::AudioHeader = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Audio' },
    FORMAT => 'int16u',
    NOTES => 'MP4 audio media header.',
    # stereo balance as a signed fixed-point value
    2 => {
        Name => 'Balance',
        Format => 'fixed16s',
    },
);
# MP4 hint media header (ref 5)
%Image::ExifTool::QuickTime::HintHeader = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    NOTES => 'MP4 hint media header.',
    FORMAT => 'int16u',
    2 => 'MaxPDUSize',
    3 => 'AvgPDUSize',
    # bitrates are stored as int32u and pretty-printed via ConvertBitrate()
    4 => {
        Name => 'MaxBitrate',
        Format => 'int32u',
        PrintConv => 'ConvertBitrate($val)',
    },
    6 => {
        Name => 'AvgBitrate',
        Format => 'int32u',
        PrintConv => 'ConvertBitrate($val)',
    },
);
# MP4 sample table box (ref 5)
%Image::ExifTool::QuickTime::SampleTable = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime,
    GROUPS => { 2 => 'Video' },
    NOTES => 'MP4 sample table box.',
    # the 'stsd' sample description is decoded differently according to the
    # HandlerType saved earlier while parsing the track
    stsd => [
        {
            Name => 'AudioSampleDesc',
            Condition => '$$self{HandlerType} and $$self{HandlerType} eq "soun"',
            SubDirectory => {
                TagTable => 'Image::ExifTool::QuickTime::AudioSampleDesc',
                ProcessProc => \&ProcessSampleDesc,
            },
        },{
            Name => 'VideoSampleDesc',
            Condition => '$$self{HandlerType} and $$self{HandlerType} eq "vide"',
            SubDirectory => {
                TagTable => 'Image::ExifTool::QuickTime::ImageDesc',
                ProcessProc => \&ProcessSampleDesc,
            },
        },{
            Name => 'HintSampleDesc',
            Condition => '$$self{HandlerType} and $$self{HandlerType} eq "hint"',
            SubDirectory => {
                TagTable => 'Image::ExifTool::QuickTime::HintSampleDesc',
                ProcessProc => \&ProcessSampleDesc,
            },
        },{
            Name => 'MetaSampleDesc',
            Condition => '$$self{HandlerType} and $$self{HandlerType} eq "meta"',
            SubDirectory => {
                TagTable => 'Image::ExifTool::QuickTime::MetaSampleDesc',
                ProcessProc => \&ProcessSampleDesc,
            },
        },{
            # fallback for any other handler type
            Name => 'OtherSampleDesc',
            SubDirectory => {
                TagTable => 'Image::ExifTool::QuickTime::OtherSampleDesc',
                ProcessProc => \&ProcessSampleDesc,
            },
        },
        # (Note: "alis" HandlerType handled by the parent audio or video handler)
    ],
    stts => [ # decoding time-to-sample table
        {
            Name => 'VideoFrameRate',
            Notes => 'average rate calculated from time-to-sample table for video media',
            Condition => '$$self{HandlerType} and $$self{HandlerType} eq "vide"',
            Format => 'undef', # (necessary to prevent decoding as string!)
            # (must be RawConv so appropriate MediaTS is used in calculation)
            RawConv => 'Image::ExifTool::QuickTime::CalcSampleRate($self, \$val)',
            # round to 3 decimal places
            PrintConv => 'int($val * 1000 + 0.5) / 1000',
        },
        {
            # for non-video media, keep the raw table as an unknown binary tag
            Name => 'TimeToSampleTable',
            Flags => ['Binary','Unknown'],
        },
    ],
    # the remaining sample tables are extracted as unknown binary data only
    ctts => {
        Name => 'CompositionTimeToSample',
        Flags => ['Binary','Unknown'],
    },
    stsc => {
        Name => 'SampleToChunk',
        Flags => ['Binary','Unknown'],
    },
    stsz => {
        Name => 'SampleSizes',
        Flags => ['Binary','Unknown'],
    },
    stz2 => {
        Name => 'CompactSampleSizes',
        Flags => ['Binary','Unknown'],
    },
    stco => {
        Name => 'ChunkOffset',
        Flags => ['Binary','Unknown'],
    },
    co64 => {
        Name => 'ChunkOffset64',
        Flags => ['Binary','Unknown'],
    },
    stss => {
        Name => 'SyncSampleTable',
        Flags => ['Binary','Unknown'],
    },
    stsh => {
        Name => 'ShadowSyncSampleTable',
        Flags => ['Binary','Unknown'],
    },
    padb => {
        Name => 'SamplePaddingBits',
        Flags => ['Binary','Unknown'],
    },
    stdp => {
        Name => 'SampleDegradationPriority',
        Flags => ['Binary','Unknown'],
    },
    sdtp => {
        Name => 'IdependentAndDisposableSamples', # (tag name sic)
        Flags => ['Binary','Unknown'],
    },
    sbgp => {
        Name => 'SampleToGroup',
        Flags => ['Binary','Unknown'],
    },
    sgpd => {
        Name => 'SampleGroupDescription',
        Flags => ['Binary','Unknown'],
        # bytes 4-7 give grouping type (ref ISO/IEC 14496-15:2014)
        #   tsas - temporal sublayer sample
        #   stsa - step-wise temporal layer access
        #   avss - AVC sample
        #   tscl - temporal layer scaleability
        #   sync - sync sample
    },
    subs => {
        Name => 'Sub-sampleInformation',
        Flags => ['Binary','Unknown'],
    },
    cslg => {
        Name => 'CompositionToDecodeTimelineMapping',
        Flags => ['Binary','Unknown'],
    },
    stps => {
        Name => 'PartialSyncSamples',
        # skip the 8-byte header then unpack the int32u sample numbers
        ValueConv => 'join " ",unpack("x8N*",$val)',
    },
    # mark - 8 bytes all zero (GoPro)
);
# MP4 audio sample description box (ref 5/AtomicParsley 0.9.4 parsley.cpp)
%Image::ExifTool::QuickTime::AudioSampleDesc = (
    PROCESS_PROC => \&ProcessHybrid,
    VARS => { ID_LABEL => 'ID/Index' },
    GROUPS => { 2 => 'Audio' },
    NOTES => q{
        MP4 audio sample description. This hybrid atom contains both data and child
        atoms.
    },
    # numeric tag IDs below are byte offsets into the data portion;
    # 4-character IDs are child atoms
    4 => {
        Name => 'AudioFormat',
        Format => 'undef[4]',
        RawConv => q{
            # save the format for use by the AudioVendorID condition below
            $$self{AudioFormat} = $val;
            return undef unless $val =~ /^[\w ]{4}$/i;
            # check for protected audio format
            $self->OverrideFileType('M4P') if $val eq 'drms' and $$self{VALUE}{FileType} eq 'M4A';
            return $val;
        },
        # see this link for print conversions (not complete):
        # https://github.com/yannickcr/brooser/blob/master/php/librairies/getid3/module.audio-video.quicktime.php
    },
    20 => { #PH
        Name => 'AudioVendorID',
        Condition => '$$self{AudioFormat} ne "mp4s"',
        Format => 'undef[4]',
        # all-zero vendor ID is treated as "not present"
        RawConv => '$val eq "\0\0\0\0" ? undef : $val',
        PrintConv => \%vendorID,
        SeparateTable => 'VendorID',
    },
    24 => { Name => 'AudioChannels', Format => 'int16u' },
    26 => { Name => 'AudioBitsPerSample', Format => 'int16u' },
    32 => { Name => 'AudioSampleRate', Format => 'fixed32u' },
    #
    # Observed offsets for child atoms of various AudioFormat types:
    #
    #   AudioFormat  Offset  Child atoms
    #   -----------  ------  ----------------
    #   mp4a         52 *    wave, chan, esds, SA3D(Insta360 spherical video params?)
    #   in24         52      wave, chan
    #   "ms\0\x11"   52      wave
    #   sowt         52      chan
    #   mp4a         36 *    esds, pinf
    #   drms         36      esds, sinf
    #   samr         36      damr
    #   alac         36      alac
    #   ac-3         36      dac3
    #
    # (* child atoms found at different offsets in mp4a)
    #
    pinf => {
        Name => 'PurchaseInfo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::ProtectionInfo' },
    },
    sinf => { # "protection scheme information"
        Name => 'ProtectionInfo', #3
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::ProtectionInfo' },
    },
    # f - 16/36 bytes
    # esds - 31/40/42/43 bytes - ES descriptor (ref 3)
    damr => { #3
        Name => 'DecodeConfig',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::DecodeConfig' },
    },
    wave => {
        Name => 'Wave',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Wave' },
    },
    chan => {
        Name => 'AudioChannelLayout',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::ChannelLayout' },
    }
    # alac - 28 bytes
    # adrm - AAX DRM atom? 148 bytes
    # aabd - AAX unknown 17kB (contains 'aavd' strings)
);
# AMR decode config box (ref 3)
%Image::ExifTool::QuickTime::DecodeConfig = (
    GROUPS => { 2 => 'Audio' },
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    # 4-character vendor code at the start of the AMR decode config
    0 => {
        Name => 'EncoderVendor',
        Format => 'undef[4]',
    },
    4 => 'EncoderVersion',
    # remaining fields not currently decoded:
    # 5 - int16u - packet modes
    # 7 - int8u - number of packet mode changes
    # 8 - int8u - bytes per packet
);
%Image::ExifTool::QuickTime::ProtectionInfo = (
    GROUPS => { 2 => 'Audio' },
    PROCESS_PROC => \&ProcessMOV,
    NOTES => 'Child atoms found in "sinf" and/or "pinf" atoms.',
    frma => 'OriginalFormat',
    schm => { Name => 'SchemeType', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::SchemeType' } },
    schi => { Name => 'SchemeInfo', SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::SchemeInfo' } },
    # not yet decoded: imif (IPMP information), skcr, enda
);
%Image::ExifTool::QuickTime::Wave = (
    PROCESS_PROC => \&ProcessMOV,
    # the 'frma' child of a purchase 'wave' atom gives the original file format
    frma => 'PurchaseFileFormat',
    # "ms\0\x11" - 20 bytes (not decoded)
);
# audio channel layout (ref CoreAudioTypes.h)
%Image::ExifTool::QuickTime::ChannelLayout = (
PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
GROUPS => { 2 => 'Audio' },
DATAMEMBER => [ 0, 8 ],
NOTES => 'Audio channel layout.',
# 0 - version and flags
4 => {
Name => 'LayoutFlags',
Format => 'int16u',
RawConv => '$$self{LayoutFlags} = $val',
PrintConvColumns => 2,
PrintConv => {
    0 => 'UseDescriptions',
    1 => 'UseBitmap',
    # (values 100-102 were accidentally listed twice; the duplicates were removed)
    100 => 'Mono',
    101 => 'Stereo',
    102 => 'StereoHeadphones',
    103 => 'MatrixStereo',
    104 => 'MidSide',
    105 => 'XY',
    106 => 'Binaural',
    107 => 'Ambisonic_B_Format',
    108 => 'Quadraphonic',
    109 => 'Pentagonal',
    110 => 'Hexagonal',
    111 => 'Octagonal',
    112 => 'Cube',
    113 => 'MPEG_3_0_A',
    114 => 'MPEG_3_0_B',
    115 => 'MPEG_4_0_A',
    116 => 'MPEG_4_0_B',
    117 => 'MPEG_5_0_A',
    118 => 'MPEG_5_0_B',
    119 => 'MPEG_5_0_C',
    120 => 'MPEG_5_0_D',
    121 => 'MPEG_5_1_A',
    122 => 'MPEG_5_1_B',
    123 => 'MPEG_5_1_C',
    124 => 'MPEG_5_1_D',
    125 => 'MPEG_6_1_A',
    126 => 'MPEG_7_1_A',
    127 => 'MPEG_7_1_B',
    128 => 'MPEG_7_1_C',
    129 => 'Emagic_Default_7_1',
    130 => 'SMPTE_DTV',
    131 => 'ITU_2_1',
    132 => 'ITU_2_2',
    133 => 'DVD_4',
    134 => 'DVD_5',
    135 => 'DVD_6',
    136 => 'DVD_10',
    137 => 'DVD_11',
    138 => 'DVD_18',
    139 => 'AudioUnit_6_0',
    140 => 'AudioUnit_7_0',
    141 => 'AAC_6_0',
    142 => 'AAC_6_1',
    143 => 'AAC_7_0',
    144 => 'AAC_Octagonal',
    145 => 'TMH_10_2_std',
    146 => 'TMH_10_2_full',
    147 => 'DiscreteInOrder',
    148 => 'AudioUnit_7_0_Front',
    149 => 'AC3_1_0_1',
    150 => 'AC3_3_0',
    151 => 'AC3_3_1',
    152 => 'AC3_3_0_1',
    153 => 'AC3_2_1_1',
    154 => 'AC3_3_1_1',
    155 => 'EAC_6_0_A',
    156 => 'EAC_7_0_A',
    157 => 'EAC3_6_1_A',
    158 => 'EAC3_6_1_B',
    159 => 'EAC3_6_1_C',
    160 => 'EAC3_7_1_A',
    161 => 'EAC3_7_1_B',
    162 => 'EAC3_7_1_C',
    163 => 'EAC3_7_1_D',
    164 => 'EAC3_7_1_E',
    165 => 'EAC3_7_1_F',
    166 => 'EAC3_7_1_G',
    167 => 'EAC3_7_1_H',
    168 => 'DTS_3_1',
    169 => 'DTS_4_1',
    170 => 'DTS_6_0_A',
    171 => 'DTS_6_0_B',
    172 => 'DTS_6_0_C',
    173 => 'DTS_6_1_A',
    174 => 'DTS_6_1_B',
    175 => 'DTS_6_1_C',
    176 => 'DTS_7_0',
    177 => 'DTS_7_1',
    178 => 'DTS_8_0_A',
    179 => 'DTS_8_0_B',
    180 => 'DTS_8_1_A',
    181 => 'DTS_8_1_B',
    182 => 'DTS_6_1_D',
    183 => 'AAC_7_1_B',
    0xffff => 'Unknown',
},
},
6 => {
Name => 'AudioChannels',
Condition => '$$self{LayoutFlags} != 0 and $$self{LayoutFlags} != 1',
Format => 'int16u',
},
8 => {
Name => 'AudioChannelTypes',
Condition => '$$self{LayoutFlags} == 1',
Format => 'int32u',
PrintConv => { BITMASK => {
0 => 'Left',
1 => 'Right',
2 => 'Center',
3 => 'LFEScreen',
4 => 'LeftSurround',
5 => 'RightSurround',
6 => 'LeftCenter',
7 => 'RightCenter',
8 => 'CenterSurround',
9 => 'LeftSurroundDirect',
10 => 'RightSurroundDirect',
11 => 'TopCenterSurround',
12 => 'VerticalHeightLeft',
13 => 'VerticalHeightCenter',
14 => 'VerticalHeightRight',
15 => 'TopBackLeft',
16 => 'TopBackCenter',
17 => 'TopBackRight',
}},
},
12 => {
Name => 'NumChannelDescriptions',
Condition => '$$self{LayoutFlags} == 1',
Format => 'int32u',
RawConv => '$$self{NumChannelDescriptions} = $val',
},
16 => {
Name => 'Channel1Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 0',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
20 => {
Name => 'Channel1Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 0',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
24 => {
Name => 'Channel1Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 0',
Notes => q{
3 numbers: for rectangular coordinates left/right, back/front, down/up; for
spherical coordinates left/right degrees, down/up degrees, distance
},
Format => 'float[3]',
},
36 => {
Name => 'Channel2Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 1',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
40 => {
Name => 'Channel2Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 1',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
44 => {
Name => 'Channel2Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 1',
Format => 'float[3]',
},
56 => {
Name => 'Channel3Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 2',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
60 => {
Name => 'Channel3Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 2',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
64 => {
Name => 'Channel3Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 2',
Format => 'float[3]',
},
76 => {
Name => 'Channel4Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 3',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
80 => {
Name => 'Channel4Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 3',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
84 => {
Name => 'Channel4Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 3',
Format => 'float[3]',
},
96 => {
Name => 'Channel5Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 4',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
100 => {
Name => 'Channel5Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 4',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
104 => {
Name => 'Channel5Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 4',
Format => 'float[3]',
},
116 => {
Name => 'Channel6Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 5',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
120 => {
Name => 'Channel6Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 5',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
124 => {
Name => 'Channel6Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 5',
Format => 'float[3]',
},
136 => {
Name => 'Channel7Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 6',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
140 => {
Name => 'Channel7Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 6',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
144 => {
Name => 'Channel7Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 6',
Format => 'float[3]',
},
156 => {
Name => 'Channel8Label',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 7',
Format => 'int32u',
SeparateTable => 'ChannelLabel',
PrintConv => \%channelLabel,
},
160 => {
Name => 'Channel8Flags',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 7',
Format => 'int32u',
PrintConv => { BITMASK => { 0 => 'Rectangular', 1 => 'Spherical', 2 => 'Meters' }},
},
164 => {
Name => 'Channel8Coordinates',
Condition => '$$self{LayoutFlags} == 1 and $$self{NumChannelDescriptions} > 7',
Format => 'float[3]',
},
# (arbitrarily decode only first 8 channels)
);
# scheme type atom
# ref http://xhelmboyx.tripod.com/formats/mp4-layout.txt
%Image::ExifTool::QuickTime::SchemeType = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Audio' },
    # (numeric tag ID's are byte offsets into the binary atom data)
    # 0 - 4 bytes version
    4 => { Name => 'SchemeType', Format => 'undef[4]' },
    8 => { Name => 'SchemeVersion', Format => 'int16u' },
    10 => { Name => 'SchemeURL', Format => 'string[$size-10]' }, # (runs to end of atom)
);
# scheme information atom: child atoms with details of the protection scheme
%Image::ExifTool::QuickTime::SchemeInfo = (
    PROCESS_PROC => \&ProcessMOV,
    GROUPS => { 2 => 'Audio' },
    user => {
        Name => 'UserID',
        Groups => { 2 => 'Author' },
        ValueConv => '"0x" . unpack("H*",$val)', # (display binary ID as hex)
    },
    cert => { # ref http://www.onvif.org/specs/stream/ONVIF-ExportFileFormat-Spec-v100.pdf
        Name => 'Certificate',
        ValueConv => '"0x" . unpack("H*",$val)',
    },
    'key ' => { # (4-character atom ID includes a trailing space)
        Name => 'KeyID',
        ValueConv => '"0x" . unpack("H*",$val)',
    },
    iviv => {
        Name => 'InitializationVector',
        ValueConv => 'unpack("H*",$val)',
    },
    righ => {
        Name => 'Rights',
        Groups => { 2 => 'Author' },
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Rights' },
    },
    name => { Name => 'UserName', Groups => { 2 => 'Author' } },
    # chtb
    # priv - private data
    # sign
    # adkm - Adobe DRM key management system (ref http://download.macromedia.com/f4v/video_file_format_spec_v10_1.pdf)
    # iKMS
    # iSFM
    # iSLT
);
# DRM rights atom (referenced by the 'righ' tag of the SchemeInfo table above)
%Image::ExifTool::QuickTime::Rights = (
    PROCESS_PROC => \&ProcessRights,
    GROUPS => { 2 => 'Audio' },
    veID => 'ItemVendorID', #PH ("VendorID" ref 19)
    plat => 'Platform', #18?
    aver => 'VersionRestrictions', #19 ("appversion?" ref 18)
    tran => 'TransactionID', #18
    song => 'ItemID', #19 ("appid" ref 18)
    tool => {
        Name => 'ItemTool', #PH (guess) ("itunes build?" ref 18)
        Format => 'string',
    },
    medi => 'MediaFlags', #PH (?)
    mode => 'ModeFlags', #PH (?) 0x04 is HD flag (https://compilr.com/heksesang/requiem-mac/UnDrm.java)
);
# MP4 hint sample description box (ref 5)
# (ref https://developer.apple.com/library/mac/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html#//apple_ref/doc/uid/TP40000939-CH205-SW1)
%Image::ExifTool::QuickTime::HintSampleDesc = (
    PROCESS_PROC => \&ProcessHybrid,
    VARS => { ID_LABEL => 'ID/Index' },
    NOTES => 'MP4 hint sample description.',
    # (hybrid table: numeric ID's are byte offsets, string ID's are child atoms)
    4 => { Name => 'HintFormat', Format => 'undef[4]' },
    # 14 - int16u DataReferenceIndex
    16 => { Name => 'HintTrackVersion', Format => 'int16u' },
    # 18 - int16u LastCompatibleHintTrackVersion
    20 => { Name => 'MaxPacketSize', Format => 'int32u' },
    #
    # Observed offsets for child atoms of various HintFormat types:
    #
    # HintFormat Offset Child atoms
    # ----------- ------ ----------------
    # "rtp " 24 tims
    #
    tims => { Name => 'RTPTimeScale', Format => 'int32u' },
    tsro => { Name => 'TimestampRandomOffset', Format => 'int32u' },
    snro => { Name => 'SequenceNumberRandomOffset', Format => 'int32u' },
);
# MP4 metadata sample description box
%Image::ExifTool::QuickTime::MetaSampleDesc = (
    PROCESS_PROC => \&ProcessHybrid,
    NOTES => 'MP4 metadata sample description.',
    4 => {
        Name => 'MetaFormat',
        Format => 'undef[4]',
        # save format for use when parsing the associated media samples
        RawConv => '$$self{MetaFormat} = $val',
    },
    #
    # Observed offsets for child atoms of various MetaFormat types:
    #
    # MetaFormat Offset Child atoms
    # ----------- ------ ----------------
    # mebx 24 keys,btrt,lidp,lidl
    # fdsc - -
    # gpmd - -
    # rtmd - -
    # CTMD - -
    #
    'keys' => { #PH (iPhone7+ hevc)
        Name => 'Keys',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::Keys',
            ProcessProc => \&ProcessMetaKeys,
        },
    },
    btrt => {
        Name => 'BitrateInfo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Bitrate' },
    },
);
# MP4 generic sample description box
%Image::ExifTool::QuickTime::OtherSampleDesc = (
    PROCESS_PROC => \&ProcessHybrid,
    4 => {
        Name => 'OtherFormat',
        Format => 'undef[4]',
        RawConv => '$$self{MetaFormat} = $val', # (yes, use MetaFormat for this too)
    },
    #
    # Observed offsets for child atoms of various OtherFormat types:
    #
    # OtherFormat Offset Child atoms
    # ----------- ------ ----------------
    # avc1 86 avcC
    # mp4a 36 esds
    # mp4s 16 esds
    # tmcd 34 name
    # data - -
    #
    # (ValueConv skips the leading length/type bytes of these values)
    ftab => { Name => 'FontTable', Format => 'undef', ValueConv => 'substr($val, 5)' },
    name => { Name => 'OtherName', Format => 'undef', ValueConv => 'substr($val, 4)' },
);
# MP4 data information box (ref 5)
%Image::ExifTool::QuickTime::DataInfo = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime, # (necessary to parse dref even though we don't change it)
    NOTES => 'MP4 data information box.',
    dref => {
        Name => 'DataRef',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::DataRef',
            Start => 8, # skip 4-byte version/flags and 4-byte entry count
        },
    },
);
# Generic media header
%Image::ExifTool::QuickTime::GenMediaHeader = (
    PROCESS_PROC => \&ProcessMOV,
    gmin => {
        Name => 'GenMediaInfo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::GenMediaInfo' },
    },
    text => {
        Name => 'Text',
        Flags => ['Binary','Unknown'],
    },
    tmcd => {
        Name => 'TimeCode',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TimeCode' },
    },
);
# TimeCode header
%Image::ExifTool::QuickTime::TimeCode = (
    PROCESS_PROC => \&ProcessMOV,
    tcmi => {
        Name => 'TCMediaInfo',
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::TCMediaInfo' },
    },
);
# TimeCode media info (ref 12)
%Image::ExifTool::QuickTime::TCMediaInfo = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Video' },
    # (numeric tag ID's are byte offsets into the atom data)
    4 => {
        Name => 'TextFont',
        Format => 'int16u',
        PrintConv => { 0 => 'System' },
    },
    6 => {
        Name => 'TextFace',
        Format => 'int16u',
        PrintConv => {
            0 => 'Plain',
            BITMASK => {
                0 => 'Bold',
                1 => 'Italic',
                2 => 'Underline',
                3 => 'Outline',
                4 => 'Shadow',
                5 => 'Condense',
                6 => 'Extend',
            },
        },
    },
    8 => {
        Name => 'TextSize',
        Format => 'int16u',
    },
    # 10 - reserved
    12 => {
        Name => 'TextColor',
        Format => 'int16u[3]', # (RGB components)
    },
    18 => {
        Name => 'BackgroundColor',
        Format => 'int16u[3]',
    },
    24 => {
        Name => 'FontName',
        Format => 'pstring', # (Pascal string: leading length byte)
        ValueConv => '$self->Decode($val, $self->Options("CharsetQuickTime"))',
    },
);
# Generic media info (ref http://sourceforge.jp/cvs/view/ntvrec/ntvrec/libqtime/gmin.h?view=co)
%Image::ExifTool::QuickTime::GenMediaInfo = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Video' },
    0 => 'GenMediaVersion',
    1 => { Name => 'GenFlags', Format => 'int8u[3]' },
    4 => { Name => 'GenGraphicsMode',
        Format => 'int16u',
        PrintHex => 1,
        SeparateTable => 'GraphicsMode',
        PrintConv => \%graphicsMode,
    },
    6 => { Name => 'GenOpColor', Format => 'int16u[3]' },
    12 => { Name => 'GenBalance', Format => 'fixed16s' },
);
# MP4 data reference box (ref 5)
%Image::ExifTool::QuickTime::DataRef = (
    PROCESS_PROC => \&ProcessMOV,
    WRITE_PROC => \&WriteQuickTime, # (necessary to parse dref even though we don't change it)
    NOTES => 'MP4 data reference box.',
    'url ' => {
        Name => 'URL',
        Format => 'undef', # (necessary to prevent decoding as string!)
        RawConv => q{
            # ignore if self-contained (flags bit 0 set)
            return undef if unpack("N",$val) & 0x01;
            # strip 4-byte version/flags, then truncate at the first null
            $_ = substr($val,4); s/\0.*//s; $_;
        },
    },
    "url\0" => { # (written by GoPro)
        Name => 'URL',
        Format => 'undef', # (necessary to prevent decoding as string!)
        RawConv => q{
            # ignore if self-contained (flags bit 0 set)
            return undef if unpack("N",$val) & 0x01;
            $_ = substr($val,4); s/\0.*//s; $_;
        },
    },
    'urn ' => {
        Name => 'URN',
        Format => 'undef', # (necessary to prevent decoding as string!)
        RawConv => q{
            return undef if unpack("N",$val) & 0x01;
            # join name and location parts, then truncate at the next null
            $_ = substr($val,4); s/\0+/; /; s/\0.*//s; $_;
        },
    },
);
# MP4 handler box (ref 5)
%Image::ExifTool::QuickTime::Handler = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    GROUPS => { 2 => 'Video' },
    4 => { #PH
        Name => 'HandlerClass',
        Format => 'undef[4]',
        RawConv => '$val eq "\0\0\0\0" ? undef : $val', # (all-zero means unspecified)
        PrintConv => {
            mhlr => 'Media Handler',
            dhlr => 'Data Handler',
        },
    },
    8 => {
        Name => 'HandlerType',
        Format => 'undef[4]',
        # remember the track handler type, but don't let data-handler values
        # ("alis"/"url ") overwrite it
        RawConv => '$$self{HandlerType} = $val unless $val eq "alis" or $val eq "url "; $val',
        PrintConvColumns => 2,
        PrintConv => {
            alis => 'Alias Data', #PH
            crsm => 'Clock Reference', #3
            hint => 'Hint Track',
            ipsm => 'IPMP', #3
            m7sm => 'MPEG-7 Stream', #3
            meta => 'NRT Metadata', #PH
            mdir => 'Metadata', #3
            mdta => 'Metadata Tags', #PH
            mjsm => 'MPEG-J', #3
            ocsm => 'Object Content', #3
            odsm => 'Object Descriptor', #3
            priv => 'Private', #PH
            sdsm => 'Scene Description', #3
            soun => 'Audio Track',
            text => 'Text', #PH (but what type? subtitle?)
            tmcd => 'Time Code', #PH
            'url '=> 'URL', #3
            vide => 'Video Track',
            subp => 'Subpicture', #http://www.google.nl/patents/US7778526
            nrtm => 'Non-Real Time Metadata', #PH (Sony ILCE-7S) [how is this different from "meta"?]
            pict => 'Picture', # (HEIC images)
            camm => 'Camera Metadata', # (Insta360 MP4)
            psmd => 'Panasonic Static Metadata', #PH (Leica C-Lux CAM-DC25)
            data => 'Data', #PH (GPS and G-sensor data from DataKam)
            sbtl => 'Subtitle', #PH (TomTom Bandit Action Cam)
        },
    },
    12 => { #PH
        Name => 'HandlerVendorID',
        Format => 'undef[4]',
        RawConv => '$val eq "\0\0\0\0" ? undef : $val',
        PrintConv => \%vendorID,
        SeparateTable => 'VendorID',
    },
    24 => {
        Name => 'HandlerDescription',
        Format => 'string',
        # (sometimes this is a Pascal string, and sometimes it is a C string)
        RawConv => q{
            # if the first byte is a control character, treat it as a Pascal length byte
            $val=substr($val,1,ord($1)) if $val=~/^([\0-\x1f])/ and ord($1)<length($val);
            length $val ? $val : undef;
        },
    },
);
# Flip uuid data (ref PH)
%Image::ExifTool::QuickTime::Flip = (
    PROCESS_PROC => \&Image::ExifTool::ProcessBinaryData,
    FORMAT => 'int32u',
    FIRST_ENTRY => 0,
    NOTES => 'Found in MP4 files from Flip Video cameras.',
    GROUPS => { 1 => 'MakerNotes', 2 => 'Image' },
    # (tag ID's are int32u indices into the data)
    1 => 'PreviewImageWidth',
    2 => 'PreviewImageHeight',
    13 => 'PreviewImageLength',
    14 => { # (confirmed for FlipVideoMinoHD)
        Name => 'SerialNumber',
        Groups => { 2 => 'Camera' },
        Format => 'string[16]',
    },
    28 => {
        Name => 'PreviewImage',
        Groups => { 2 => 'Preview' },
        Format => 'undef[$val{13}]', # (length taken from PreviewImageLength, tag 13)
        RawConv => '$self->ValidateImage(\$val, $tag)',
    },
);
# atoms in Pittasoft "free" atom
%Image::ExifTool::QuickTime::Pittasoft = (
    PROCESS_PROC => \&ProcessMOV,
    NOTES => 'Tags found in Pittasoft Blackvue dashcam "free" data.',
    cprt => 'Copyright',
    thum => {
        Name => 'PreviewImage',
        Groups => { 2 => 'Preview' },
        Binary => 1,
        RawConv => q{
            # value is a 4-byte big-endian length followed by the image data
            return undef unless length $val > 4;
            my $len = unpack('N', $val);
            return undef unless length $val >= 4 + $len;
            return substr($val, 4, $len);
        },
    },
    ptnm => {
        Name => 'OriginalFileName',
        ValueConv => 'substr($val, 4, -1)', # (strip 4-byte header and trailing null)
    },
    ptrh => {
        SubDirectory => { TagTable => 'Image::ExifTool::QuickTime::Pittasoft' },
        # contains these atoms:
        # ptvi - 27 bytes: '..avc1...'
        # ptso - 16 bytes: '..mp4a...'
    },
    'gps ' => {
        Name => 'GPSLog',
        Binary => 1, # (ASCII NMEA track log with leading timestamps)
        Notes => 'parsed to extract GPS separately when ExtractEmbedded is used',
        RawConv => q{
            $val =~ s/\0+$//; # remove trailing nulls
            if (length $val and $$self{OPTIONS}{ExtractEmbedded}) {
                my $tagTbl = GetTagTable('Image::ExifTool::QuickTime::Stream');
                Image::ExifTool::QuickTime::ProcessNMEA($self, { DataPt => \$val }, $tagTbl);
            }
            return $val;
        },
    },
    '3gf ' => {
        Name => 'AccelData',
        SubDirectory => {
            TagTable => 'Image::ExifTool::QuickTime::Stream',
            ProcessProc => \&Process_3gf,
        },
    },
    sttm => {
        Name => 'StartTime',
        Format => 'int64u',
        Groups => { 2 => 'Time' },
        RawConv => '$$self{StartTime} = $val',
        # (ms since Jan 1, 1970, in local time zone - PH)
        ValueConv => q{
            my $secs = int($val / 1000);
            return ConvertUnixTime($secs) . sprintf(".%03d",$val - $secs * 1000);
        },
        PrintConv => '$self->ConvertDateTime($val)',
    },
);
# QuickTime composite tags
%Image::ExifTool::QuickTime::Composite = (
    GROUPS => { 2 => 'Video' },
    Rotation => {
        Notes => q{
            writing this tag updates QuickTime MatrixStructure for all tracks with a
            non-zero image size
        },
        Require => {
            0 => 'QuickTime:MatrixStructure',
            1 => 'QuickTime:HandlerType',
        },
        Writable => 1,
        WriteAlso => {
            MatrixStructure => 'Image::ExifTool::QuickTime::GetRotationMatrix($val)',
        },
        # derived from the video track matrix (see CalcRotation below)
        ValueConv => 'Image::ExifTool::QuickTime::CalcRotation($self)',
        ValueConvInv => '$val',
    },
    AvgBitrate => {
        Priority => 0, # let QuickTime::AvgBitrate take priority
        # NOTE(review): these Require keys use '::' while all other entries in
        # this table use the single-colon "Group:Tag" form -- confirm intended
        Require => {
            0 => 'QuickTime::MovieDataSize',
            1 => 'QuickTime::Duration',
        },
        RawConv => q{
            return undef unless $val[1];
            # Duration is in TimeScale units unless TimeScale is unknown
            $val[1] /= $$self{TimeScale} if $$self{TimeScale};
            my $key = 'MovieDataSize';
            my $size = $val[0];
            # add any additional MovieDataSize values (duplicate tag keys)
            for (;;) {
                $key = $self->NextTagKey($key) or last;
                $size += $self->GetValue($key, 'ValueConv');
            }
            return int($size * 8 / $val[1] + 0.5);
        },
        PrintConv => 'ConvertBitrate($val)',
    },
    # GPSCoordinates is a space-separated "lat lon [alt]" string
    GPSLatitude => {
        Require => 'QuickTime:GPSCoordinates',
        Groups => { 2 => 'Location' },
        ValueConv => 'my @c = split " ", $val; $c[0]',
        PrintConv => 'Image::ExifTool::GPS::ToDMS($self, $val, 1, "N")',
    },
    GPSLongitude => {
        Require => 'QuickTime:GPSCoordinates',
        Groups => { 2 => 'Location' },
        ValueConv => 'my @c = split " ", $val; $c[1]',
        PrintConv => 'Image::ExifTool::GPS::ToDMS($self, $val, 1, "E")',
    },
    # split altitude into GPSAltitude/GPSAltitudeRef like EXIF and XMP
    GPSAltitude => {
        Require => 'QuickTime:GPSCoordinates',
        Groups => { 2 => 'Location' },
        Priority => 0, # (because it may not exist)
        ValueConv => 'my @c = split " ", $val; defined $c[2] ? abs($c[2]) : undef',
        PrintConv => '"$val m"',
    },
    GPSAltitudeRef => {
        Require => 'QuickTime:GPSCoordinates',
        Groups => { 2 => 'Location' },
        Priority => 0, # (because altitude information may not exist)
        ValueConv => 'my @c = split " ", $val; defined $c[2] ? ($c[2] < 0 ? 1 : 0) : undef',
        PrintConv => {
            0 => 'Above Sea Level',
            1 => 'Below Sea Level',
        },
    },
    # alternate GPS source: LocationInformation text containing
    # "Lat=...", "Lon=..." and "Alt=..." fields
    GPSLatitude2 => {
        Name => 'GPSLatitude',
        Require => 'QuickTime:LocationInformation',
        Groups => { 2 => 'Location' },
        ValueConv => '$val =~ /Lat=([-+.\d]+)/; $1',
        PrintConv => 'Image::ExifTool::GPS::ToDMS($self, $val, 1, "N")',
    },
    GPSLongitude2 => {
        Name => 'GPSLongitude',
        Require => 'QuickTime:LocationInformation',
        Groups => { 2 => 'Location' },
        ValueConv => '$val =~ /Lon=([-+.\d]+)/; $1',
        PrintConv => 'Image::ExifTool::GPS::ToDMS($self, $val, 1, "E")',
    },
    GPSAltitude2 => {
        Name => 'GPSAltitude',
        Require => 'QuickTime:LocationInformation',
        Groups => { 2 => 'Location' },
        ValueConv => '$val =~ /Alt=([-+.\d]+)/; abs($1)',
        PrintConv => '"$val m"',
    },
    GPSAltitudeRef2 => {
        Name => 'GPSAltitudeRef',
        Require => 'QuickTime:LocationInformation',
        Groups => { 2 => 'Location' },
        ValueConv => '$val =~ /Alt=([-+.\d]+)/; $1 < 0 ? 1 : 0',
        PrintConv => {
            0 => 'Above Sea Level',
            1 => 'Below Sea Level',
        },
    },
    # decode fields of the CDDB1 disc information string
    CDDBDiscPlayTime => {
        Require => 'CDDB1Info',
        Groups => { 2 => 'Audio' },
        ValueConv => '$val =~ /^..([a-z0-9]{4})/i ? hex($1) : undef',
        PrintConv => 'ConvertDuration($val)',
    },
    CDDBDiscTracks => {
        Require => 'CDDB1Info',
        Groups => { 2 => 'Audio' },
        ValueConv => '$val =~ /^.{6}([a-z0-9]{2})/i ? hex($1) : undef',
    },
);
# add our composite tags (registers the %Composite table above so these
# tags are calculated from the required tags after extraction)
Image::ExifTool::AddCompositeTags('Image::ExifTool::QuickTime');
#------------------------------------------------------------------------------
# AutoLoad our routines when necessary
#
sub AUTOLOAD
{
    # Process_mebx lives in QuickTimeStream.pl, so load that file on demand;
    # everything else goes through the standard ExifTool autoload mechanism
    # (Note: no need to autoload routines in QuickTimeStream that use Stream table)
    if ($AUTOLOAD eq 'Image::ExifTool::QuickTime::Process_mebx') {
        require 'Image/ExifTool/QuickTimeStream.pl';
        no strict 'refs';
        return &$AUTOLOAD(@_);
    } else {
        return Image::ExifTool::DoAutoLoad($AUTOLOAD, @_);
    }
}
#------------------------------------------------------------------------------
# Get rotation matrix
# Inputs: 0) angle in degrees
# Returns: 9-element rotation matrix as a string (with 0 x/y offsets)
sub GetRotationMatrix($)
{
    # convert the angle from degrees to radians
    my $theta = 3.1415926536 * shift() / 180;
    my ($c, $s) = (cos $theta, sin $theta);
    my $negSin = -$s;
    # row-major 3x3 rotation matrix with zero x/y offsets
    return "$c $s 0 $negSin $c 0 0 0 1";
}
#------------------------------------------------------------------------------
# Get rotation angle from a matrix
# Inputs: 0) rotation matrix as a string
# Return: positive rotation angle in degrees rounded to 3 decimal points,
# or undef on error
sub GetRotationAngle($)
{
    my @m = split ' ', $_[0];
    # a degenerate matrix (first two elements zero) has no defined angle
    return undef if $m[0] == 0 and $m[1] == 0;
    # assume a uniform rotation and derive the angle from the first row
    my $deg = atan2($m[1], $m[0]) * 180 / 3.14159;
    $deg += 360 if $deg < 0;        # normalize to [0,360)
    # round to 3 decimal places
    return int($deg * 1000 + 0.5) / 1000;
}
#------------------------------------------------------------------------------
# Calculate rotation of video track
# Inputs: 0) ExifTool object ref
# Returns: rotation angle or undef
sub CalcRotation($)
{
    my $et = shift;
    my $value = $$et{VALUE};    # hash of all extracted tag values, keyed by tag key
    my ($i, $track);
    # get the video track family 1 group (eg. "Track1");
    for ($i=0; ; ++$i) {
        # duplicate tag keys are stored as "Tag (1)", "Tag (2)", ...
        my $idx = $i ? " ($i)" : '';
        my $tag = "HandlerType$idx";
        last unless $$value{$tag};
        next unless $$value{$tag} eq 'vide';    # only want the video track
        $track = $et->GetGroup($tag, 1);
        last;
    }
    return undef unless $track;
    # get the video track matrix
    for ($i=0; ; ++$i) {
        my $idx = $i ? " ($i)" : '';
        my $tag = "MatrixStructure$idx";
        last unless $$value{$tag};
        # only use the matrix belonging to the video track found above
        next unless $et->GetGroup($tag, 1) eq $track;
        return GetRotationAngle($$value{$tag});
    }
    return undef;
}
#------------------------------------------------------------------------------
# Get MatrixStructure for a given rotation angle
# Inputs: 0) rotation angle (deg), 1) ExifTool ref
# Returns: matrix structure as a string, or undef if it can't be rotated
# - requires ImageSizeLookahead to determine the video image size, and doesn't
# rotate matrix unless image size is valid
sub GetMatrixStructure($$)
{
    my ($val, $et) = @_;
    my @a = split ' ', $val;
    # pass straight through if it already has an offset
    return $val unless $a[6] == 0 and $a[7] == 0;
    # NOTE(review): assumes elements 12/13 of ImageSizeLookahead are the image
    # width/height for this track -- confirm against the code that populates it
    my @s = split ' ', $$et{ImageSizeLookahead};
    my ($w, $h) = @s[12,13];
    return undef unless $w and $h; # don't rotate 0-sized track
    # correct dimensions written in the wrong format (see FixWrongFormat)
    $_ = Image::ExifTool::QuickTime::FixWrongFormat($_) foreach $w,$h;
    # apply necessary offsets for the standard rotations
    my $angle = GetRotationAngle($val);
    return undef unless defined $angle;
    if ($angle == 90) {
        @a[6,7] = ($h, 0);
    } elsif ($angle == 180) {
        @a[6,7] = ($w, $h);
    } elsif ($angle == 270) {
        @a[6,7] = (0, $w);
    }
    return "@a";
}
#------------------------------------------------------------------------------
# Determine the average sample rate from a time-to-sample table
# Inputs: 0) ExifTool object ref, 1) time-to-sample table data ref
# Returns: average sample rate (in Hz)
sub CalcSampleRate($$)
{
    my ($et, $dataPt) = @_;
    # stts data: 8-byte header, then (sample count, sample duration) pairs
    my @v = unpack 'N*', $$dataPt;
    my ($count, $total) = (0, 0);
    for (my $j = 2; $j < @v - 1; $j += 2) {
        $count += $v[$j];               # total number of samples
        $total += $v[$j] * $v[$j+1];    # total duration in media time units
    }
    return undef unless $count and $total and $$et{MediaTS};
    # samples per second = samples / (duration / timescale)
    return $count * $$et{MediaTS} / $total;
}
#------------------------------------------------------------------------------
# Fix incorrect format for ImageWidth/Height as written by Pentax
sub FixWrongFormat($)
{
    my $dim = shift;
    return undef unless $dim;
    # if any of the upper 12 bits are set, the value was written in the
    # wrong format, and the true dimension is in the high 16 bits
    return ($dim & 0xfff00000) ? unpack('n', pack('N', $dim)) : $dim;
}
#------------------------------------------------------------------------------
# Convert ISO 6709 string to standard lag/lon format
# Inputs: 0) ISO 6709 string (lat, lon, and optional alt)
# Returns: position in decimal degress with altitude if available
# Notes: Wikipedia indicates altitude may be in feet -- how is this specified?
sub ConvertISO6709($)
{
    my $iso = shift;
    if ($iso =~ /^([-+]\d{1,2}(?:\.\d*)?)([-+]\d{1,3}(?:\.\d*)?)([-+]\d+(?:\.\d*)?)?/) {
        # +DD.DDD+DDD.DDD+AA.AAA (decimal degrees)
        my @out = ($1 + 0, $2 + 0);
        push @out, $3 + 0 if $3;
        $iso = join ' ', @out;
    } elsif ($iso =~ /^([-+])(\d{2})(\d{2}(?:\.\d*)?)([-+])(\d{3})(\d{2}(?:\.\d*)?)([-+]\d+(?:\.\d*)?)?/) {
        # +DDMM.MMM+DDDMM.MMM+AA.AAA (degrees and minutes)
        my $lat = $2 + $3 / 60;
        $lat = -$lat if $1 eq '-';
        my $lon = $5 + $6 / 60;
        $lon = -$lon if $4 eq '-';
        my @out = ($lat, $lon);
        push @out, $7 + 0 if $7;
        $iso = join ' ', @out;
    } elsif ($iso =~ /^([-+])(\d{2})(\d{2})(\d{2}(?:\.\d*)?)([-+])(\d{3})(\d{2})(\d{2}(?:\.\d*)?)([-+]\d+(?:\.\d*)?)?/) {
        # +DDMMSS.SSS+DDDMMSS.SSS+AA.AAA (degrees, minutes and seconds)
        my $lat = $2 + $3 / 60 + $4 / 3600;
        $lat = -$lat if $1 eq '-';
        my $lon = $6 + $7 / 60 + $8 / 3600;
        $lon = -$lon if $5 eq '-';
        my @out = ($lat, $lon);
        push @out, $9 + 0 if $9;
        $iso = join ' ', @out;
    }
    # (unrecognized strings are returned unchanged)
    return $iso;
}
#------------------------------------------------------------------------------
# Convert Nero chapter list (ref ffmpeg libavformat/movenc.c)
# Inputs: 0) binary chpl data
# Returns: chapter list
sub ConvertChapterList($)
{
    my $dat = shift;
    my $dataLen = length $dat;
    return '<invalid>' if $dataLen < 9;
    # data is an 8-byte header, a 1-byte entry count, then for each entry:
    # int64u start time (100 ns units), int8u title length, title bytes
    my $entries = Get8u(\$dat, 8);
    my @chapterList;
    my $ptr = 9;
    for (my $n = 0; $n < $entries; ++$n) {
        last if $ptr + 9 > $dataLen;
        my $start = Get64u(\$dat, $ptr) / 10000000;     # convert to seconds
        my $titleLen = Get8u(\$dat, $ptr + 8);
        last if $ptr + 9 + $titleLen > $dataLen;
        push @chapterList, "$start " . substr($dat, $ptr + 9, $titleLen);
        $ptr += 9 + $titleLen;
    }
    return \@chapterList; # return as a list
}
#------------------------------------------------------------------------------
# Print conversion for a Nero chapter list item
# Inputs: 0) ValueConv chapter string
# Returns: formatted chapter string
sub PrintChapter($)
{
    my $chap = shift;
    # value is "<seconds> <title>" as produced by ConvertChapterList
    return $chap unless $chap =~ /^(\S+) (.*)/;
    my ($secs, $title) = ($1, $2);
    my $hr = int($secs / 3600);
    $secs -= $hr * 3600;
    my $min = int($secs / 60);
    my $fmtSec = sprintf('%06.3f', $secs - $min * 60);
    # handle seconds rounding up to a full minute
    if ($fmtSec >= 60) {
        $fmtSec = '00.000';
        if (++$min >= 60) {
            $min -= 60;
            ++$hr;
        }
    }
    return sprintf("[%d:%.2d:%s] %s", $hr, $min, $fmtSec, $title);
}
#------------------------------------------------------------------------------
# Format GPSCoordinates for printing
# Inputs: 0) string with numerical lat, lon and optional alt, separated by spaces
# 1) ExifTool object reference
# Returns: PrintConv value
sub PrintGPSCoordinates($$)
{
    # (prototype corrected from ($) to ($$): this routine takes two arguments,
    #  the coordinate string and the ExifTool object reference, as documented
    #  above and as unpacked below; it is invoked via a code reference, so the
    #  prototype is not applied at those call sites)
    my ($val, $et) = @_;
    my @v = split ' ', $val;
    # format latitude and longitude as degrees/minutes/seconds
    my $prt = Image::ExifTool::GPS::ToDMS($et, $v[0], 1, "N") . ', ' .
              Image::ExifTool::GPS::ToDMS($et, $v[1], 1, "E");
    # append optional altitude (negative values are below sea level)
    if (defined $v[2]) {
        $prt .= ', ' . ($v[2] < 0 ? -$v[2] . ' m Below' : $v[2] . ' m Above') . ' Sea Level';
    }
    return $prt;
}
#------------------------------------------------------------------------------
# Unpack packed ISO 639/T language code
# Inputs: 0) packed language code (or undef/0), 1) true to not treat 'und' and 'eng' as default
# Returns: language code, or undef/0 for default language, or 'err' for format error
sub UnpackLang($;$)
{
    my ($code, $keepDefault) = @_;
    # pass through undef/0 (the default language) unchanged
    return $code unless $code;
    # each of the 3 characters is stored in 5 bits, offset from 0x60
    my $str = join '', map chr((($code >> $_) & 0x1f) + 0x60), 10, 5, 0;
    # validate language code
    return 'err' unless $str =~ /^[a-z]+$/;     # invalid language code
    # treat 'eng' or 'und' as the default language
    return undef if ($str eq 'und' or $str eq 'eng') and not $keepDefault;
    return $str;
}
#------------------------------------------------------------------------------
# Get language code string given QuickTime language and country codes
# Inputs: 0) numerical language code, 1) numerical country code, 2) no defaults
# Returns: language code string (ie. "fra-FR") or undef for default language
sub GetLangCode($;$$)
{
    my ($lang, $ctry, $noDef) = @_;
    # values <= 255 are indices into country ('ctry') and language ('lang')
    # lists, which we don't support, so drop them
    $ctry = undef if $ctry and $ctry <= 255;
    $lang = undef if $lang and $lang <= 255;
    $lang = UnpackLang($lang, $noDef);
    # add country code if specified
    return $lang unless $ctry;
    $ctry = unpack('a2',pack('n',$ctry)); # unpack as ISO 3166-1
    # treat 'ZZ' like a default country (see ref 12)
    $ctry = undef if $ctry eq 'ZZ';
    if ($ctry and $ctry =~ /^[A-Z]{2}$/) {
        $lang = 'und' unless $lang;     # country without language
        $lang .= "-$ctry";
    }
    return $lang;
}
#------------------------------------------------------------------------------
# Get langInfo hash and save details about alt-lang tags
# Inputs: 0) ExifTool ref, 1) tagInfo hash ref, 2) locale code
# Returns: new tagInfo hash ref, or undef if invalid
sub GetLangInfoQT($$$)
{
    my ($et, $tagInfo, $langCode) = @_;
    my $info = Image::ExifTool::GetLangInfo($tagInfo, $langCode);
    # keep a record of the names of all alt-lang tags we have created
    push @{$$et{QTLang} ||= [ ]}, $$info{Name} if $info;
    return $info;
}
#------------------------------------------------------------------------------
# Get variable-length integer from data (used by ParseItemLocation)
# Inputs: 0) data ref, 1) start position, 2) integer size in bytes (0, 4 or 8),
# 3) default value
# Returns: integer value, and updates current position
sub GetVarInt($$$;$)
{
    my ($dataPt, $pos, $n, $default) = @_;
    $_[1] = $pos + $n;      # advance the caller's position (aliased argument)
    return undef if $pos + $n > length $$dataPt;
    return $default || 0 if $n == 0;    # zero-size field uses the default
    return Get32u($dataPt, $pos) if $n == 4;
    return Get64u($dataPt, $pos) if $n == 8;
    return undef;   # (sizes other than 0/4/8 are not valid)
}
#------------------------------------------------------------------------------
# Get null-terminated string from binary data (used by ParseItemInfoEntry)
# Inputs: 0) data ref, 1) start position
# Returns: string, and updates current position
sub GetString($$)
{
    my ($dataPt, $pos) = @_;
    my $end = length $$dataPt;
    my $str = '';
    # accumulate bytes until a null terminator or end of data
    while ($pos < $end) {
        my $c = substr($$dataPt, $pos++, 1);
        last if $c eq "\0";
        $str .= $c;
    }
    $_[1] = $pos;   # return updated position via the aliased argument
    return $str;
}
#------------------------------------------------------------------------------
# Get a printable version of the tag ID
# Inputs: 0) tag ID, 1) Flag: 0x01 - print as 4- or 8-digit hex value if necessary
# 0x02 - put leading backslash before escaped character
# Returns: Printable tag ID
sub PrintableTagID($;$)
{
    my ($id, $flags) = @_;  # ($id is a copy; $_[0] keeps the original bytes)
    # replace each unprintable byte with an "xNN" hex escape, counting them
    my $count = ($id =~ s/([\x00-\x1f\x7f-\xff])/'x'.unpack('H*',$1)/eg);
    if ($count and $flags) {
        if ($count > 2 and $flags & 0x01) {
            # mostly binary, so print the original ID as a hex number
            $id = '0x' . unpack('H8', $_[0]);
            $id =~ s/^0x0000/0x/;   # (4-digit hex if the high word is zero)
        } elsif ($flags & 0x02) {
            # escape with a leading backslash instead
            ($id = $_[0]) =~ s/([\x00-\x1f\x7f-\xff])/'\\x'.unpack('H*',$1)/eg;
        }
    }
    return $id;
}
#==============================================================================
# The following ParseXxx routines parse various boxes to extract this
# information about embedded items in a $$et{ItemInfo} hash, keyed by item ID:
#
# iloc:
# ConstructionMethod - offset type: 0=file, 1=idat, 2=item
# DataReferenceIndex - 0 for "this file", otherwise index in dref box
# BaseOffset - base for file offsets
# Extents - list of index,offset,length,nlen,lenPt details for data in file
# infe:
# ProtectionIndex - index if item is protected (0 for unprotected)
# Name - item name
# ContentType - mime type of item
# ContentEncoding - item encoding
# URI - URI of a 'uri '-type item
# ipma:
# Association - list of associated properties in the ipco container
# Essential - list of "essential" flags for the associated properties
# cdsc:
# RefersTo - hash lookup of flags based on referred item ID
# other:
# DocNum - exiftool document number for this item
#
#------------------------------------------------------------------------------
# Parse item location (iloc) box (ref ISO 14496-12:2015 pg.79)
# Inputs: 0) iloc data, 1) ExifTool ref
# Returns: undef, and fills in ExifTool ItemInfo hash
# Notes: see also Handle_iloc() in WriteQuickTime.pl
sub ParseItemLocation($$)
{
    my ($val, $et) = @_;
    my ($i, $j, $num, $pos, $id);
    my ($extent_index, $extent_offset, $extent_length);
    my $verbose = $$et{IsWriting} ? 0 : $et->Options('Verbose');
    my $items = $$et{ItemInfo} || ($$et{ItemInfo} = { });
    my $len = length $val;
    return undef if $len < 8;
    my $ver = Get8u(\$val, 0);
    my $siz = Get16u(\$val, 4);
    # this word packs four 4-bit field sizes (each 0, 4 or 8 bytes):
    my $noff = ($siz >> 12);        # offset size
    my $nlen = ($siz >> 8) & 0x0f;  # length size
    my $nbas = ($siz >> 4) & 0x0f;  # base-offset size
    my $nind = $siz & 0x0f;         # extent-index size (versions 1/2 only)
    if ($ver < 2) {
        $num = Get16u(\$val, 6);    # 16-bit item count
        $pos = 8;
    } else {
        return undef if $len < 10;
        $num = Get32u(\$val, 6);    # 32-bit item count in version 2
        $pos = 10;
    }
    for ($i=0; $i<$num; ++$i) {
        # item ID is 16-bit before version 2, 32-bit from version 2 on
        if ($ver < 2) {
            return undef if $pos + 2 > $len;
            $id = Get16u(\$val, $pos);
            $pos += 2;
        } else {
            return undef if $pos + 4 > $len;
            $id = Get32u(\$val, $pos);
            $pos += 4;
        }
        if ($ver == 1 or $ver == 2) {
            return undef if $pos + 2 > $len;
            # low nibble is the construction method (0=file, 1=idat, 2=item)
            $$items{$id}{ConstructionMethod} = Get16u(\$val, $pos) & 0x0f;
            $pos += 2;
        }
        return undef if $pos + 2 > $len;
        $$items{$id}{DataReferenceIndex} = Get16u(\$val, $pos);
        $pos += 2;
        # (GetVarInt advances $pos by the field size via its aliased argument)
        $$items{$id}{BaseOffset} = GetVarInt(\$val, $pos, $nbas);
        return undef if $pos + 2 > $len;
        my $ext_num = Get16u(\$val, $pos);
        $pos += 2;
        my @extents;
        for ($j=0; $j<$ext_num; ++$j) {
            if ($ver == 1 or $ver == 2) {
                $extent_index = GetVarInt(\$val, $pos, $nind, 1);
            }
            $extent_offset = GetVarInt(\$val, $pos, $noff);
            $extent_length = GetVarInt(\$val, $pos, $nlen);
            return undef unless defined $extent_length;
            $et->VPrint(1, "$$et{INDENT} Item $id: const_meth=",
                defined $$items{$id}{ConstructionMethod} ? $$items{$id}{ConstructionMethod} : '',
                sprintf(" base=0x%x offset=0x%x len=0x%x\n", $$items{$id}{BaseOffset},
                $extent_offset, $extent_length)) if $verbose;
            # also remember the length-field size/position so it can be rewritten
            push @extents, [ $extent_index, $extent_offset, $extent_length, $nlen, $pos-$nlen ];
        }
        # save item location information keyed on 1-based item ID:
        $$items{$id}{Extents} = \@extents;
    }
    return undef;
}
#------------------------------------------------------------------------------
# Parse content describes entry (cdsc) box
# Inputs: 0) cdsc data, 1) ExifTool ref
# Returns: undef, and fills in ExifTool ItemInfo hash
sub ParseContentDescribes($$)
{
    my ($val, $et) = @_;
    my ($id, $count, @to);
    # item reference entries use 32-bit ID's in iref version 1, 16-bit otherwise
    if ($$et{ItemRefVersion}) {
        return undef if length $val < 10;
        ($id, $count, @to) = unpack('NnN*', $val);
    } else {
        return undef if length $val < 6;
        ($id, $count, @to) = unpack('nnn*', $val);
    }
    if ($count > @to) {
        my $str = 'Missing values in ContentDescribes box';
        $$et{IsWriting} ? $et->Error($str) : $et->Warn($str);
    } elsif ($count < @to) {
        $et->Warn('Ignored extra values in ContentDescribes box', 1);
        # truncate to the declared count
        # (fixed: was "@to = $count", which replaced the referenced-ID list
        #  with a single element containing the count itself)
        splice @to, $count;
    }
    # add all referenced item ID's to a "RefersTo" lookup
    $$et{ItemInfo}{$id}{RefersTo}{$_} = 1 foreach @to;
    $et->VPrint(1, "$$et{INDENT} Item $id describes: @to\n") unless $$et{IsWriting};
    return undef;
}
#------------------------------------------------------------------------------
# Parse item information entry (infe) box (ref ISO 14496-12:2015 pg.82)
# Inputs: 0) infe data, 1) ExifTool ref
# Returns: undef, and fills in ExifTool ItemInfo hash
sub ParseItemInfoEntry($$)
{
    my ($val, $et) = @_;
    my $id;
    # verbose output is suppressed when writing
    my $verbose = $$et{IsWriting} ? 0 : $et->Options('Verbose');
    # get (or create) the ItemInfo lookup, keyed by item ID
    my $items = $$et{ItemInfo} || ($$et{ItemInfo} = { });
    my $len = length $val;
    return undef if $len < 4;
    my $ver = Get8u(\$val, 0);  # infe box version
    my $pos = 4;                # skip 1-byte version + 3-byte flags
    return undef if $pos + 4 > $len;
    if ($ver == 0 or $ver == 1) {
        $id = Get16u(\$val, $pos);
        $$items{$id}{ProtectionIndex} = Get16u(\$val, $pos + 2);
        $pos += 4;
        # (GetString is called with the same $pos for successive strings, so it
        #  presumably advances $pos through @_ aliasing -- NOTE(review): confirm)
        $$items{$id}{Name} = GetString(\$val, $pos);
        $$items{$id}{ContentType} = GetString(\$val, $pos);
        $$items{$id}{ContentEncoding} = GetString(\$val, $pos);
    } else {
        # item ID is 16 bits for version 2 and 32 bits for version 3
        if ($ver == 2) {
            $id = Get16u(\$val, $pos);
            $pos += 2;
        } elsif ($ver == 3) {
            $id = Get32u(\$val, $pos);
            $pos += 4;
        }
        return undef if $pos + 6 > $len;    # (2-byte protection index + 4-byte type)
        $$items{$id}{ProtectionIndex} = Get16u(\$val, $pos);
        my $type = substr($val, $pos + 2, 4);
        $$items{$id}{Type} = $type;
        $pos += 6;
        $$items{$id}{Name} = GetString(\$val, $pos);
        # 'mime' items carry content type/encoding strings; 'uri ' items a URI
        if ($type eq 'mime') {
            $$items{$id}{ContentType} = GetString(\$val, $pos);
            $$items{$id}{ContentEncoding} = GetString(\$val, $pos);
        } elsif ($type eq 'uri ') {
            $$items{$id}{URI} = GetString(\$val, $pos);
        }
    }
    $et->VPrint(1, "$$et{INDENT} Item $id: Type=", $$items{$id}{Type} || '',
        ' Name=', $$items{$id}{Name} || '',
        ' ContentType=', $$items{$id}{ContentType} || '',
        "\n") if $verbose > 1;
    return undef;
}
#------------------------------------------------------------------------------
# Parse item property association (ipma) box (ref https://github.com/gpac/gpac/blob/master/src/isomedia/iff.c)
# Inputs: 0) ipma data, 1) ExifTool ref
# Returns: undef, and fills in ExifTool ItemInfo hash
sub ParseItemPropAssoc($$)
{
    my ($val, $et) = @_;
    my ($i, $j, $id);
    # verbose output is suppressed when writing
    my $verbose = $$et{IsWriting} ? 0 : $et->Options('Verbose');
    # get (or create) the ItemInfo lookup, keyed by item ID
    my $items = $$et{ItemInfo} || ($$et{ItemInfo} = { });
    my $len = length $val;
    return undef if $len < 8;
    my $ver = Get8u(\$val, 0);
    # (this 32-bit read includes the version byte in its top 8 bits, but only
    #  the low-order flag bit 0x01 is tested below)
    my $flg = Get32u(\$val, 0);
    my $num = Get32u(\$val, 4); # number of association entries
    my $pos = 8;
    for ($i=0; $i<$num; ++$i) {
        # item ID is 16 bits for version 0, 32 bits otherwise
        if ($ver == 0) {
            return undef if $pos + 3 > $len;    # (2-byte ID + 1-byte count)
            $id = Get16u(\$val, $pos);
            $pos += 2;
        } else {
            return undef if $pos + 5 > $len;    # (4-byte ID + 1-byte count)
            $id = Get32u(\$val, $pos);
            $pos += 4;
        }
        my $n = Get8u(\$val, $pos++);   # number of property associations for this item
        my (@association, @essential);
        if ($flg & 0x01) {
            # 16-bit association entries: top bit is the "essential" flag
            return undef if $pos + $n * 2 > $len;
            for ($j=0; $j<$n; ++$j) {
                my $tmp = Get16u(\$val, $pos + $j * 2);
                push @association, $tmp & 0x7fff;
                push @essential, ($tmp & 0x8000) ? 1 : 0;
            }
            $pos += $n * 2;
        } else {
            # 8-bit association entries: top bit is the "essential" flag
            return undef if $pos + $n > $len;
            for ($j=0; $j<$n; ++$j) {
                my $tmp = Get8u(\$val, $pos + $j);
                push @association, $tmp & 0x7f;
                push @essential, ($tmp & 0x80) ? 1 : 0;
            }
            $pos += $n;
        }
        $$items{$id}{Association} = \@association;
        $$items{$id}{Essential} = \@essential;
        $et->VPrint(1, "$$et{INDENT} Item $id properties: @association\n") if $verbose > 1;
    }
    return undef;
}
#------------------------------------------------------------------------------
# Process item information now
# Inputs: 0) ExifTool ref
sub HandleItemInfo($)
{
    my $et = shift;
    my $raf = $$et{RAF};
    my $items = $$et{ItemInfo};
    my $verbose = $et->Options('Verbose');
    my $buff;
    # extract information from EXIF/XMP metadata items
    if ($items and $raf) {
        push @{$$et{PATH}}, 'ItemInformation';
        my $curPos = $raf->Tell();  # (file position is restored before returning)
        my $primary = $$et{PrimaryItem};
        my $id;
        $et->VerboseDir('Processing items from ItemInformation', scalar(keys %$items));
        foreach $id (sort { $a <=> $b } keys %$items) {
            my $item = $$items{$id};
            # skip items with no known content/item type
            my $type = $$item{ContentType} || $$item{Type} || next;
            if ($verbose) {
                # add up total length of this item for the verbose output
                my $len = 0;
                if ($$item{Extents} and @{$$item{Extents}}) {
                    $len += $$_[2] foreach @{$$item{Extents}};  # ([2] is extent length)
                }
                $et->VPrint(0, "$$et{INDENT}Item $id) '${type}' ($len bytes)\n");
            }
            # get ExifTool name for this item
            my $name = { Exif => 'EXIF', 'application/rdf+xml' => 'XMP' }->{$type} || '';
            my ($warn, $extent);
            # (later assignments take precedence when more than one condition applies)
            $warn = "Can't currently decode encoded $type metadata" if $$item{ContentEncoding};
            $warn = "Can't currently decode protected $type metadata" if $$item{ProtectionIndex};
            $warn = "Can't currently extract $type with construction method $$item{ConstructionMethod}" if $$item{ConstructionMethod};
            $et->WarnOnce($warn) if $warn and $name;
            $warn = 'Not this file' if $$item{DataReferenceIndex}; # (can only extract from "this file")
            unless (($$item{Extents} and @{$$item{Extents}}) or $warn) {
                $warn = "No Extents for $type item";
                $et->WarnOnce($warn) if $name;
            }
            if ($warn) {
                $et->VPrint(0, "$$et{INDENT} [not extracted] ($warn)\n") if $verbose > 2;
                next;
            }
            my $base = $$item{BaseOffset} || 0;
            if ($verbose > 2) {
                # do verbose hex dump
                my $len = 0;
                undef $buff;
                my $val = '';
                my $maxLen = $verbose > 3 ? 2048 : 96;  # dump more at higher verbosity
                foreach $extent (@{$$item{Extents}}) {
                    my $n = $$extent[2];        # extent length
                    my $more = $maxLen - $len;  # number of dump bytes still wanted
                    if ($more > 0 and $n) {
                        $more = $n if $more > $n;
                        $val .= $buff if defined $buff;
                        $raf->Seek($$extent[1] + $base, 0) or last;
                        $raf->Read($buff, $more) or last;
                    }
                    $len += $n; # ($len counts full extent sizes, even when not all bytes were read)
                }
                if (defined $buff) {
                    $buff = $val . $buff if length $val;
                    $et->VerboseDump(\$buff, DataPos => $$item{Extents}[0][1] + $base);
                    my $snip = $len - length $buff;
                    $et->VPrint(0, "$$et{INDENT} [snip $snip bytes]\n") if $snip;
                }
            }
            next unless $name;  # only extract recognized EXIF/XMP items
            # assemble the data for this item by concatenating all extents
            undef $buff;
            my $val = '';
            foreach $extent (@{$$item{Extents}}) {
                $val .= $buff if defined $buff;
                $raf->Seek($$extent[1] + $base, 0) or last;
                $raf->Read($buff, $$extent[2]) or last;
            }
            next unless defined $buff;
            $buff = $val . $buff if length $val;
            next unless length $buff; # ignore empty directories
            my ($start, $subTable, $proc);
            if ($name eq 'EXIF') {
                # (presumably skips a 10-byte header before the TIFF data -- TODO confirm)
                $start = 10;
                $subTable = GetTagTable('Image::ExifTool::Exif::Main');
                $proc = \&Image::ExifTool::ProcessTIFF;
            } else {
                $start = 0;
                $subTable = GetTagTable('Image::ExifTool::XMP::Main');
            }
            my $pos = $$item{Extents}[0][1] + $base;
            my %dirInfo = (
                DataPt => \$buff,
                DataLen => length $buff,
                DirStart => $start,
                DirLen => length($buff) - $start,
                DataPos => $pos,
                Base => $pos, # (needed for IsOffset tags in binary data)
            );
            # handle processing of metadata for sub-documents
            if (defined $primary and $$item{RefersTo} and not $$item{RefersTo}{$primary}) {
                # set document number if this doesn't refer to the primary document
                $$et{DOC_NUM} = ++$$et{DOC_COUNT};
                # associate this document number with the lowest item index
                my ($lowest) = sort { $a <=> $b } keys %{$$item{RefersTo}};
                $$items{$lowest}{DocNum} = $$et{DOC_NUM};
            }
            $et->ProcessDirectory(\%dirInfo, $subTable, $proc);
            delete $$et{DOC_NUM};
        }
        $raf->Seek($curPos, 0); # seek back to original position
        pop @{$$et{PATH}};
    }
    # process the item properties now that we should know their associations and document numbers
    if ($$et{ItemPropertyContainer}) {
        my ($dirInfo, $subTable, $proc) = @{$$et{ItemPropertyContainer}};
        $$et{IsItemProperty} = 1; # set item property flag
        $et->ProcessDirectory($dirInfo, $subTable, $proc);
        delete $$et{ItemPropertyContainer};
        delete $$et{IsItemProperty};
        delete $$et{DOC_NUM};
    }
    delete $$et{ItemInfo};
}
#------------------------------------------------------------------------------
# Warn if ExtractEmbedded option isn't used
# Inputs: 0) ExifTool ref
sub EEWarn($)
{
    # issue a one-time minor warning suggesting the ExtractEmbedded option
    my $exifTool = shift;
    $exifTool->WarnOnce('The ExtractEmbedded option may find more tags in the movie data',3);
}
#------------------------------------------------------------------------------
# Get quicktime format from flags word
# Inputs: 0) quicktime atom flags, 1) data length
# Returns: Exiftool format string
sub QuickTimeFormat($$)
{
    # Translate a QuickTime atom flags word and data length into an ExifTool
    # format string.  Returns undef for unrecognized flag/length combinations.
    my ($fmtFlags, $dataLen) = @_;
    return 'float'  if $fmtFlags == 0x17;
    return 'double' if $fmtFlags == 0x18;
    if ($fmtFlags == 0x15 or $fmtFlags == 0x16) {
        # signed (0x15) or unsigned (0x16) integer, sized by data length
        my %sizeToInt = ( 1 => 'int8', 2 => 'int16', 4 => 'int32' );
        my $base = $sizeToInt{$dataLen};
        return $base ? $base . ($fmtFlags == 0x15 ? 's' : 'u') : undef;
    }
    # flags of 0x00 imply an unsigned integer sized by the data length
    return $fmtFlags == 0x00 ? { 1 => 'int8u', 2 => 'int16u' }->{$dataLen} : undef;
}
#------------------------------------------------------------------------------
# Process MPEG-4 MTDT atom (ref 11)
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
sub ProcessMetaData($$$)
{
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $dirLen = length $$dataPt;
    my $verbose = $et->Options('Verbose');
    return 0 unless $dirLen >= 2;
    my $count = Get16u($dataPt, 0); # number of metadata entries
    $verbose and $et->VerboseDir('MetaData', $count);
    my $i;
    my $pos = 2;
    for ($i=0; $i<$count; ++$i) {
        # each entry has a 10-byte header: size, tag ID, language and encoding
        last if $pos + 10 > $dirLen;
        my $size = Get16u($dataPt, $pos);
        last if $size < 10 or $size + $pos > $dirLen;
        my $tag = Get32u($dataPt, $pos + 2);
        my $lang = Get16u($dataPt, $pos + 6);
        my $enc = Get16u($dataPt, $pos + 8);
        # $size includes the 10-byte entry header, so the value is $size-10 bytes
        # (bug fix: was reading $size bytes, overrunning into the next entry;
        #  this also matches the "Size => $size - 10" reported to VerboseInfo)
        my $val = substr($$dataPt, $pos + 10, $size - 10);
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
        if ($tagInfo) {
            # convert language code to ASCII (ignore read-only bit)
            $lang = UnpackLang($lang);
            # handle alternate languages
            if ($lang) {
                my $langInfo = GetLangInfoQT($et, $tagInfo, $lang);
                $tagInfo = $langInfo if $langInfo;
            }
            $verbose and $et->VerboseInfo($tag, $tagInfo,
                Value => $val,
                DataPt => $dataPt,
                Start => $pos + 10,
                Size => $size - 10,
            );
            # convert from UTF-16 BE if necessary
            $val = $et->Decode($val, 'UCS2') if $enc == 1;
            if ($enc == 0 and $$tagInfo{Unknown}) {
                # binary data
                $et->FoundTag($tagInfo, \$val);
            } else {
                $et->FoundTag($tagInfo, $val);
            }
        }
        $pos += $size;  # advance to the next entry
    }
    return 1;
}
#------------------------------------------------------------------------------
# Process sample description table
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
# (ref https://developer.apple.com/library/content/documentation/QuickTime/QTFF/QTFFChap2/qtff2.html#//apple_ref/doc/uid/TP40000939-CH204-25691)
sub ProcessSampleDesc($$$)
{
    # Walk the sample description table and process each entry as hybrid
    # binary/atom data.  Returns 1 on success.
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $offset = $$dirInfo{DirStart} || 0;
    my $dirLen = $$dirInfo{DirLen} || (length($$dataPt) - $offset);
    return 0 if $offset + 8 > $dirLen;
    # table header: 4 bytes version/flags, then a 32-bit entry count
    my $numEntries = Get32u($dataPt, 4);
    $offset += 8;
    my $entry = 0;
    while ($entry < $numEntries) {
        last if $offset + 8 > $dirLen;
        my $entryLen = Get32u($dataPt, $offset);    # size of this description
        last if $offset + $entryLen > $dirLen;
        # point dirInfo at this description before processing it
        $$dirInfo{DirStart} = $offset;
        $$dirInfo{DirLen} = $entryLen;
        ProcessHybrid($et, $dirInfo, $tagTablePtr);
        $offset += $entryLen;
        ++$entry;
    }
    return 1;
}
#------------------------------------------------------------------------------
# Process hybrid binary data + QuickTime container (ref PH)
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
sub ProcessHybrid($$$)
{
    my ($et, $dirInfo, $tagTablePtr) = @_;
    # brute-force search for child atoms after first 8 bytes of binary data
    my $dataPt = $$dirInfo{DataPt};
    my $dirStart = $$dirInfo{DirStart} || 0;
    my $dirLen = $$dirInfo{DirLen} || length($$dataPt) - $dirStart;
    my $end = $dirStart + $dirLen;
    my $pos = $dirStart + 8;    # skip length/version
    my $try = $pos;
    my $childPos;
    # $pos is the candidate start of the first child atom; $try walks the
    # chain of atom sizes from $pos to check that it ends exactly at $end
    while ($pos <= $end - 8) {
        my $tag = substr($$dataPt, $try+4, 4);
        # look only for well-behaved tag ID's
        $tag =~ /[^\w ]/ and $try = ++$pos, next;
        my $size = Get32u($dataPt, $try);
        if ($size + $try == $end) {
            # the atom ends exactly at the end of the parent -- this must be it
            $childPos = $pos;
            $$dirInfo{DirLen} = $pos; # the binary data ends at the first child atom
            last;
        }
        if ($size < 8 or $size + $try > $end - 8) {
            $try = ++$pos; # fail. try next position
        } else {
            $try += $size; # could be another atom following this
        }
    }
    # process binary data
    $$dirInfo{MixedTags} = 1; # ignore non-integer tag ID's
    $et->ProcessBinaryData($dirInfo, $tagTablePtr);
    # process child atoms if found
    if ($childPos) {
        $$dirInfo{DirStart} = $childPos;
        $$dirInfo{DirLen} = $end - $childPos;
        ProcessMOV($et, $dirInfo, $tagTablePtr);
    }
    return 1;
}
#------------------------------------------------------------------------------
# Process iTunes 'righ' atom (ref PH)
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
sub ProcessRights($$$)
{
    # Parse the iTunes 'righ' atom: a sequence of (4-char tag, 4-byte value)
    # pairs, terminated by a null tag.  Returns 1 on success.
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $dataPos = $$dirInfo{Base};
    my $dirLen = length $$dataPt;
    # unknown tags are reported if either the Unknown or Verbose option is set
    my $unknown = $$et{OPTIONS}{Unknown} || $$et{OPTIONS}{Verbose};
    $et->VerboseDir('righ', $dirLen / 8);
    my $off = 0;
    while ($off + 8 <= $dirLen) {
        my $tag = substr($$dataPt, $off, 4);
        last if $tag eq "\0\0\0\0";     # null tag terminates the list
        my $val = substr($$dataPt, $off + 4, 4);
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
        unless ($tagInfo) {
            unless ($unknown) {
                $off += 8;
                next;
            }
            # create an entry for this unknown tag on the fly
            my $name = PrintableTagID($tag);
            $tagInfo = {
                Name => "Unknown_$name",
                Description => "Unknown $name",
                Unknown => 1,
            };
            AddTagToTable($tagTablePtr, $tag, $tagInfo);
        }
        # present the raw value as hex unless the tag defines a format
        $val = '0x' . unpack('H*', $val) unless $$tagInfo{Format};
        $et->HandleTag($tagTablePtr, $tag, $val,
            DataPt => $dataPt,
            DataPos => $dataPos,
            Start => $off + 4,
            Size => 4,
        );
        $off += 8;
    }
    return 1;
}
#------------------------------------------------------------------------------
# Process iTunes Encoding Params (ref PH)
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
sub ProcessEncodingParams($$$)
{
    # Parse iTunes encoding params: a sequence of (4-char tag, 32-bit value)
    # pairs.  Returns 1 on success.
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $total = length $$dataPt;
    $et->VerboseDir('Encoding Params', $total / 8);
    my $offset = 0;
    while ($offset + 8 <= $total) {
        my ($tag, $value) = unpack("x${offset}a4N", $$dataPt);
        $et->HandleTag($tagTablePtr, $tag, $value);
        $offset += 8;
    }
    return 1;
}
#------------------------------------------------------------------------------
# Read Meta Keys and add tags to ItemList table ('mdta' handler) (ref PH)
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
sub ProcessKeys($$$)
{
    local $_;
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $dirLen = length $$dataPt;
    my $out;
    if ($et->Options('Verbose')) {
        $et->VerboseDir('Keys');
        $out = $et->Options('TextOut');
    }
    my $pos = 8;    # skip the 8-byte Keys box header
    my $index = 1;  # key indices are 1-based
    ++$$et{KeysCount}; # increment key count for this directory
    my $itemList = GetTagTable('Image::ExifTool::QuickTime::ItemList');
    my $userData = GetTagTable('Image::ExifTool::QuickTime::UserData');
    while ($pos < $dirLen - 4) {
        # each entry: 32-bit total length, 4-byte namespace, then the key name
        my $len = unpack("x${pos}N", $$dataPt);
        last if $len < 8 or $pos + $len > $dirLen;
        delete $$tagTablePtr{$index};
        my $ns = substr($$dataPt, $pos + 4, 4);
        my $tag = substr($$dataPt, $pos + 8, $len - 8);
        $tag =~ s/\0.*//s; # truncate at null
        $tag =~ s/^com\.apple\.quicktime\.// if $ns eq 'mdta'; # remove apple quicktime domain
        $tag = "Tag_$ns" unless $tag;
        # (I have some samples where the tag is a reversed ItemList or UserData tag ID)
        # look up the key in this table, then ItemList, then UserData
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
        unless ($tagInfo) {
            $tagInfo = $et->GetTagInfo($itemList, $tag);
            unless ($tagInfo) {
                $tagInfo = $et->GetTagInfo($userData, $tag);
                if (not $tagInfo and $tag =~ /^\w{3}\xa9$/) {
                    # try the byte-reversed tag ID
                    $tag = pack('N', unpack('V', $tag));
                    $tagInfo = $et->GetTagInfo($itemList, $tag);
                    $tagInfo or $tagInfo = $et->GetTagInfo($userData, $tag);
                }
            }
        }
        my ($newInfo, $msg);
        if ($tagInfo) {
            # build a new tag entry based on the existing definition
            $newInfo = {
                Name => $$tagInfo{Name},
                Format => $$tagInfo{Format},
                ValueConv => $$tagInfo{ValueConv},
                ValueConvInv => $$tagInfo{ValueConvInv},
                PrintConv => $$tagInfo{PrintConv},
                PrintConvInv => $$tagInfo{PrintConvInv},
                Writable => defined $$tagInfo{Writable} ? $$tagInfo{Writable} : 1,
                KeysInfo => $tagInfo,   # (remember the original tag definition)
            };
            my $groups = $$tagInfo{Groups};
            $$newInfo{Groups} = $groups ? { %$groups } : { };
            # fill in missing family 0-2 groups from the table defaults
            $$newInfo{Groups}{$_} or $$newInfo{Groups}{$_} = $$tagTablePtr{GROUPS}{$_} foreach 0..2;
            $$newInfo{Groups}{1} = 'Keys';
        } elsif ($tag =~ /^[-\w. ]+$/) {
            # create info for tags with reasonable id's
            my $name = ucfirst $tag;
            $name =~ s/[. ]+(.?)/\U$1/g;        # capitalize after dots/spaces
            $name =~ s/_([a-z])/_\U$1/g;        # capitalize after underscores
            $name =~ s/([a-z])_([A-Z])/$1$2/g;  # drop underscore at camelCase boundary
            $name = "Tag_$name" if length $name < 2;
            $newInfo = { Name => $name, Groups => { 1 => 'Keys' } };
            $msg = ' (Unknown)';
        }
        # substitute this tag in the ItemList table with the given index
        my $id = $$et{KeysCount} . '.' . $index;
        if (ref $$itemList{$id} eq 'HASH') {
            # delete other languages too if they exist
            my $oldInfo = $$itemList{$id};
            if ($$oldInfo{OtherLang}) {
                delete $$itemList{$_} foreach @{$$oldInfo{OtherLang}};
            }
            delete $$itemList{$id};
        }
        if ($newInfo) {
            AddTagToTable($itemList, $id, $newInfo);
            $msg or $msg = '';
            $out and print $out "$$et{INDENT}Added ItemList Tag $id = $tag$msg\n";
        }
        $pos += $len;
        ++$index;
    }
    return 1;
}
#------------------------------------------------------------------------------
# Process keys in MetaSampleDesc directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
sub ProcessMetaKeys($$$)
{
    # Keys in a MetaSampleDesc are only needed to decode timed metadata
    # samples, so save them only when the ExtractEmbedded option is enabled.
    my ($et, $dirInfo, $tagTablePtr) = @_;
    return 1 unless $$et{OPTIONS}{ExtractEmbedded};
    SaveMetaKeys($et, $dirInfo, $tagTablePtr);
    return 1;
}
#------------------------------------------------------------------------------
# Process a QuickTime atom
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) optional tag table ref
# Returns: 1 on success
sub ProcessMOV($$;$)
{
local $_;
my ($et, $dirInfo, $tagTablePtr) = @_;
my $raf = $$dirInfo{RAF};
my $dataPt = $$dirInfo{DataPt};
my $verbose = $et->Options('Verbose');
my $validate = $$et{OPTIONS}{Validate};
my $dataPos = $$dirInfo{Base} || 0;
my $dirID = $$dirInfo{DirID} || '';
my $charsetQuickTime = $et->Options('CharsetQuickTime');
my ($buff, $tag, $size, $track, $isUserData, %triplet, $doDefaultLang, $index);
my ($dirEnd, $ee, $unkOpt, %saveOptions, $atomCount);
my $topLevel = not $$et{InQuickTime};
$$et{InQuickTime} = 1;
$$et{HandlerType} = $$et{MetaFormat} = '' unless defined $$et{HandlerType};
unless (defined $$et{KeysCount}) {
$$et{KeysCount} = 0; # initialize ItemList key directory count
$doDefaultLang = 1; # flag to generate default language tags
}
# more convenient to package data as a RandomAccess file
unless ($raf) {
$raf = new File::RandomAccess($dataPt);
$dirEnd = $dataPos + $$dirInfo{DirLen} + ($$dirInfo{DirStart} || 0) if $$dirInfo{DirLen};
}
# skip leading bytes if necessary
if ($$dirInfo{DirStart}) {
$raf->Seek($$dirInfo{DirStart}, 1) or return 0;
$dataPos += $$dirInfo{DirStart};
}
# read size/tag name atom header
$raf->Read($buff,8) == 8 or return 0;
$dataPos += 8;
if ($tagTablePtr) {
$isUserData = ($tagTablePtr eq \%Image::ExifTool::QuickTime::UserData);
} else {
$tagTablePtr = GetTagTable('Image::ExifTool::QuickTime::Main');
}
($size, $tag) = unpack('Na4', $buff);
if ($dataPt) {
$verbose and $et->VerboseDir($$dirInfo{DirName});
} else {
# check on file type if called with a RAF
$$tagTablePtr{$tag} or return 0;
if ($tag eq 'ftyp' and $size >= 12) {
# read ftyp atom to see what type of file this is
my $fileType;
if ($raf->Read($buff, $size-8) == $size-8) {
$raf->Seek(-($size-8), 1);
my $type = substr($buff, 0, 4);
# see if we know the extension for this file type
if ($ftypLookup{$type} and $ftypLookup{$type} =~ /\(\.(\w+)/) {
$fileType = $1;
# check compatible brands
} elsif ($buff =~ /^.{8}(.{4})+(mp41|mp42|avc1)/s) {
$fileType = 'MP4';
} elsif ($buff =~ /^.{8}(.{4})+(f4v )/s) {
$fileType = 'F4V';
} elsif ($buff =~ /^.{8}(.{4})+(qt )/s) {
$fileType = 'MOV';
}
}
$fileType or $fileType = 'MP4'; # default to MP4
$et->SetFileType($fileType, $mimeLookup{$fileType} || 'video/mp4');
# temporarily set ExtractEmbedded option for CRX files
$saveOptions{ExtractEmbedded} = $et->Options(ExtractEmbedded => 1) if $fileType eq 'CRX';
} else {
$et->SetFileType(); # MOV
}
SetByteOrder('MM');
$$et{PRIORITY_DIR} = 'XMP'; # have XMP take priority
}
$$raf{NoBuffer} = 1 if $et->Options('FastScan'); # disable buffering in FastScan mode
if ($$et{OPTIONS}{ExtractEmbedded}) {
$ee = 1;
$unkOpt = $$et{OPTIONS}{Unknown};
require 'Image/ExifTool/QuickTimeStream.pl';
}
if ($$tagTablePtr{VARS}) {
$index = $$tagTablePtr{VARS}{START_INDEX};
$atomCount = $$tagTablePtr{VARS}{ATOM_COUNT};
}
for (;;) {
my ($eeTag, $ignore);
last if defined $atomCount and --$atomCount < 0;
if ($size < 8) {
if ($size == 0) {
if ($dataPt) {
# a zero size isn't legal for contained atoms, but Canon uses it to
# terminate the CNTH atom (eg. CanonEOS100D.mov), so tolerate it here
my $pos = $raf->Tell() - 4;
$raf->Seek(0,2);
my $str = $$dirInfo{DirName} . ' with ' . ($raf->Tell() - $pos) . ' bytes';
$et->VPrint(0,"$$et{INDENT}\[Terminator found in $str remaining]");
} else {
my $t = PrintableTagID($tag);
$et->VPrint(0,"$$et{INDENT}Tag '${t}' extends to end of file");
}
last;
}
$size == 1 or $et->Warn('Invalid atom size'), last;
# read extended atom size
$raf->Read($buff, 8) == 8 or $et->Warn('Truncated atom header'), last;
$dataPos += 8;
my ($hi, $lo) = unpack('NN', $buff);
if ($hi or $lo > 0x7fffffff) {
if ($hi > 0x7fffffff) {
$et->Warn('Invalid atom size');
last;
} elsif (not $et->Options('LargeFileSupport')) {
$et->Warn('End of processing at large atom (LargeFileSupport not enabled)');
last;
}
}
$size = $hi * 4294967296 + $lo - 16;
$size < 0 and $et->Warn('Invalid extended size'), last;
} else {
$size -= 8;
}
if ($validate) {
$$et{ValidatePath} or $$et{ValidatePath} = { };
my $path = join('-', @{$$et{PATH}}, $tag);
$path =~ s/-Track-/-$$et{SET_GROUP1}-/ if $$et{SET_GROUP1};
if ($$et{ValidatePath}{$path} and not $dupTagOK{$tag} and not $dupDirOK{$dirID}) {
my $i = Get32u(\$tag,0);
my $str = $i < 255 ? "index $i" : "tag '" . PrintableTagID($tag,2) . "'";
$et->WarnOnce("Duplicate $str at " . join('-', @{$$et{PATH}}));
$$et{ValidatePath} = { } if $path eq 'MOV-moov'; # avoid warnings for all contained dups
}
$$et{ValidatePath}{$path} = 1;
}
if ($isUserData and $$et{SET_GROUP1}) {
my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
# add track name to UserData tags inside tracks
$tag = $$et{SET_GROUP1} . $tag;
if (not $$tagTablePtr{$tag} and $tagInfo) {
my %newInfo = %$tagInfo;
foreach ('Name', 'Description') {
next unless $$tagInfo{$_};
$newInfo{$_} = $$et{SET_GROUP1} . $$tagInfo{$_};
$newInfo{$_} =~ s/^(Track\d+)Track/$1/; # remove duplicate "Track" in name
}
AddTagToTable($tagTablePtr, $tag, \%newInfo);
}
}
# set flag to store additional information for ExtractEmbedded option
my $handlerType = $$et{HandlerType};
if ($eeBox{$handlerType} and $eeBox{$handlerType}{$tag}) {
if ($ee) {
# (there is another 'gps ' box with a track log that doesn't contain offsets)
if ($tag ne 'gps ' or $eeBox{$handlerType}{$tag} eq $dirID) {
$eeTag = 1;
$$et{OPTIONS}{Unknown} = 1; # temporarily enable "Unknown" option
}
} elsif ($handlerType ne 'vide' and not $$et{OPTIONS}{Validate}) {
EEWarn($et);
}
}
my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
$$et{OPTIONS}{Unknown} = $unkOpt if $eeTag; # restore Unknown option
# allow numerical tag ID's
unless ($tagInfo) {
my $id = $$et{KeysCount} . '.' . unpack('N', $tag);
if ($$tagTablePtr{$id}) {
$tagInfo = $et->GetTagInfo($tagTablePtr, $id);
$tag = $id;
}
}
# generate tagInfo if Unknown option set
if (not defined $tagInfo and ($$et{OPTIONS}{Unknown} or
$verbose or $tag =~ /^\xa9/))
{
my $name = PrintableTagID($tag,1);
if ($name =~ /^xa9(.*)/) {
$tagInfo = {
Name => "UserData_$1",
Description => "User Data $1",
};
} else {
$tagInfo = {
Name => "Unknown_$name",
Description => "Unknown $name",
%unknownInfo,
};
}
AddTagToTable($tagTablePtr, $tag, $tagInfo);
}
# save required tag sizes
if ($$tagTablePtr{"$tag-size"}) {
$et->HandleTag($tagTablePtr, "$tag-size", $size);
$et->HandleTag($tagTablePtr, "$tag-offset", $raf->Tell()) if $$tagTablePtr{"$tag-offset"};
}
# load values only if associated with a tag (or verbose) and not too big
if ($size > 0x2000000) { # start to get worried above 32 MB
$ignore = 1;
if ($tagInfo and not $$tagInfo{Unknown} and not $eeTag) {
my $t = PrintableTagID($tag);
if ($size > 0x8000000) {
$et->Warn("Skipping '${t}' atom > 128 MB", 1);
} else {
$et->Warn("Skipping '${t}' atom > 32 MB", 2) or $ignore = 0;
}
}
}
if (defined $tagInfo and not $ignore) {
# set document number for this item property if necessary
if ($$et{IsItemProperty}) {
my $items = $$et{ItemInfo};
my ($id, $prop, $docNum, $lowest);
my $primary = $$et{PrimaryItem} || 0;
ItemID: foreach $id (keys %$items) {
next unless $$items{$id}{Association};
my $item = $$items{$id};
foreach $prop (@{$$item{Association}}) {
next unless $prop == $index;
if ($id == $primary or (not $dontInherit{$tag} and
(not $$item{RefersTo} or $$item{RefersTo}{$primary})))
{
# this is associated with the primary item or an item describing
# the primary item, so consider this part of the main document
undef $docNum;
undef $lowest;
last ItemID;
} elsif ($$item{DocNum}) {
# this property is already associated with an item that has
# an ExifTool document number, so use the lowest assocated DocNum
$docNum = $$item{DocNum} if not defined $docNum or $docNum > $$item{DocNum};
} elsif (not defined $lowest or $lowest > $id) {
# keep track of the lowest associated item ID
$lowest = $id;
}
}
}
if (not defined $docNum and defined $lowest) {
# this is the first time we've seen metadata from this item,
# so use a new document number
$docNum = ++$$et{DOC_COUNT};
$$items{$lowest}{DocNum} = $docNum;
}
$$et{DOC_NUM} = $docNum;
}
my $val;
my $missing = $size - $raf->Read($val, $size);
if ($missing) {
my $t = PrintableTagID($tag);
$et->Warn("Truncated '${t}' data (missing $missing bytes)");
last;
}
# use value to get tag info if necessary
$tagInfo or $tagInfo = $et->GetTagInfo($tagTablePtr, $tag, \$val);
my $hasData = ($$dirInfo{HasData} and $val =~ /\0...data\0/s);
if ($verbose and not $hasData) {
my $tval;
if ($tagInfo and $$tagInfo{Format}) {
$tval = ReadValue(\$val, 0, $$tagInfo{Format}, $$tagInfo{Count}, length($val));
}
$et->VerboseInfo($tag, $tagInfo,
Value => $tval,
DataPt => \$val,
DataPos => $dataPos,
Size => $size,
Format => $tagInfo ? $$tagInfo{Format} : undef,
Index => $index,
);
}
# extract metadata from stream if ExtractEmbedded option is enabled
if ($eeTag) {
ParseTag($et, $tag, \$val);
# forget this tag if we generated it only for ExtractEmbedded
undef $tagInfo if $tagInfo and $$tagInfo{Unknown} and not $unkOpt;
}
# handle iTunesInfo mean/name/data triplets
if ($tagInfo and $$tagInfo{Triplet}) {
if ($tag eq 'data' and $triplet{mean} and $triplet{name}) {
$tag = $triplet{name};
# add 'mean' to name unless it is 'com.apple.iTunes'
$tag = $triplet{mean} . '/' . $tag unless $triplet{mean} eq 'com.apple.iTunes';
$tagInfo = $et->GetTagInfo($tagTablePtr, $tag, \$val);
unless ($tagInfo) {
my $name = $triplet{name};
my $desc = $name;
$name =~ tr/-_a-zA-Z0-9//dc;
$desc =~ tr/_/ /;
$tagInfo = {
Name => $name,
Description => $desc,
};
AddTagToTable($tagTablePtr, $tag, $tagInfo);
}
# ignore 8-byte header
$val = substr($val, 8) if length($val) >= 8;
unless ($$tagInfo{Format} or $$tagInfo{SubDirectory}) {
# extract as binary if it contains any non-ASCII or control characters
if ($val =~ /[^\x20-\x7e]/) {
my $buff = $val;
$val = \$buff;
}
}
undef %triplet;
} else {
undef %triplet if $tag eq 'mean';
$triplet{$tag} = substr($val,4) if length($val) > 4;
undef $tagInfo; # don't store this tag
}
}
if ($tagInfo) {
my $subdir = $$tagInfo{SubDirectory};
if ($subdir) {
my $start = $$subdir{Start} || 0;
my ($base, $dPos) = ($dataPos, 0);
if ($$subdir{Base}) {
$dPos -= eval $$subdir{Base};
$base -= $dPos;
}
my %dirInfo = (
DataPt => \$val,
DataLen => $size,
DirStart => $start,
DirLen => $size - $start,
DirName => $$subdir{DirName} || $$tagInfo{Name},
DirID => $tag,
HasData => $$subdir{HasData},
Multi => $$subdir{Multi},
IgnoreProp => $$subdir{IgnoreProp}, # (XML hack)
DataPos => $dPos,
Base => $base, # (needed for IsOffset tags in binary data)
);
$dirInfo{BlockInfo} = $tagInfo if $$tagInfo{BlockExtract};
if ($$subdir{ByteOrder} and $$subdir{ByteOrder} =~ /^Little/) {
SetByteOrder('II');
}
my $oldGroup1 = $$et{SET_GROUP1};
if ($$tagInfo{Name} eq 'Track') {
$track or $track = 0;
$$et{SET_GROUP1} = 'Track' . (++$track);
}
my $subTable = GetTagTable($$subdir{TagTable});
my $proc = $$subdir{ProcessProc};
# make ProcessMOV() the default processing procedure for subdirectories
$proc = \&ProcessMOV unless $proc or $$subTable{PROCESS_PROC};
if ($size > $start) {
# delay processing of ipco box until after all other boxes
if ($tag eq 'ipco' and not $$et{IsItemProperty}) {
$$et{ItemPropertyContainer} = [ \%dirInfo, $subTable, $proc ];
$et->VPrint(0,"$$et{INDENT}\[Process ipco box later]");
} else {
$et->ProcessDirectory(\%dirInfo, $subTable, $proc);
}
}
if ($tag eq 'stbl') {
# process sample data when exiting SampleTable box if extracting embedded
ProcessSamples($et) if $ee;
} elsif ($tag eq 'minf') {
$$et{HandlerType} = ''; # reset handler type at end of media info box
}
$$et{SET_GROUP1} = $oldGroup1;
SetByteOrder('MM');
} elsif ($hasData) {
# handle atoms containing 'data' tags
# (currently ignore contained atoms: 'itif', 'name', etc.)
my $pos = 0;
for (;;) {
last if $pos + 16 > $size;
my ($len, $type, $flags, $ctry, $lang) = unpack("x${pos}Na4Nnn", $val);
last if $pos + $len > $size;
my ($value, $langInfo, $oldDir);
my $format = $$tagInfo{Format};
if ($type eq 'data' and $len >= 16) {
$pos += 16;
$len -= 16;
$value = substr($val, $pos, $len);
# format flags (ref 12):
# 0x0=binary, 0x1=UTF-8, 0x2=UTF-16, 0x3=ShiftJIS,
# 0x4=UTF-8 0x5=UTF-16, 0xd=JPEG, 0xe=PNG,
# 0x15=signed int, 0x16=unsigned int, 0x17=float,
# 0x18=double, 0x1b=BMP, 0x1c='meta' atom
if ($stringEncoding{$flags}) {
# handle all string formats
$value = $et->Decode($value, $stringEncoding{$flags});
# (shouldn't be null terminated, but some software writes it anyway)
$value =~ s/\0$// unless $$tagInfo{Binary};
} else {
$format = QuickTimeFormat($flags, $len) unless $format;
if ($format) {
$value = ReadValue(\$value, 0, $format, $$tagInfo{Count}, $len);
} elsif (not $$tagInfo{ValueConv}) {
# make binary data a scalar reference unless a ValueConv exists
my $buf = $value;
$value = \$buf;
}
}
}
if ($ctry or $lang) {
$lang = GetLangCode($lang, $ctry);
if ($lang) {
# get tagInfo for other language
$langInfo = GetLangInfoQT($et, $tagInfo, $lang);
# save other language tag ID's so we can delete later if necessary
if ($langInfo) {
$$tagInfo{OtherLang} or $$tagInfo{OtherLang} = [ ];
push @{$$tagInfo{OtherLang}}, $$langInfo{TagID};
}
}
}
$langInfo or $langInfo = $tagInfo;
$et->VerboseInfo($tag, $langInfo,
Value => ref $value ? $$value : $value,
DataPt => \$val,
DataPos => $dataPos,
Start => $pos,
Size => $len,
Format => $format,
Index => $index,
Extra => sprintf(", Type='${type}', Flags=0x%x%s",$flags,($lang ? ", Lang=$lang" : '')),
) if $verbose;
# use "Keys" in path instead of ItemList if this was defined by a Keys tag
my $isKey = $$tagInfo{Groups} && $$tagInfo{Groups}{1} && $$tagInfo{Groups}{1} eq 'Keys';
if ($isKey) {
$oldDir = $$et{PATH}[-1];
$$et{PATH}[-1] = 'Keys';
}
$et->FoundTag($langInfo, $value) if defined $value;
$$et{PATH}[-1] = $oldDir if $isKey;
$pos += $len;
}
} elsif ($tag =~ /^\xa9/ or $$tagInfo{IText}) {
# parse international text to extract all languages
my $pos = 0;
if ($$tagInfo{Format}) {
$et->FoundTag($tagInfo, ReadValue(\$val, 0, $$tagInfo{Format}, undef, length($val)));
$pos = $size;
}
for (;;) {
my ($len, $lang);
if ($$tagInfo{IText} and $$tagInfo{IText} == 6) {
last if $pos + 6 > $size;
$pos += 4;
$lang = unpack("x${pos}n", $val);
$pos += 2;
$len = $size - $pos;
} else {
last if $pos + 4 > $size;
($len, $lang) = unpack("x${pos}nn", $val);
$pos += 4;
# according to the QuickTime spec (ref 12), $len should include
# 4 bytes for length and type words, but nobody (including
# Apple, Pentax and Kodak) seems to add these in, so try
# to allow for either
if ($pos + $len > $size) {
$len -= 4;
last if $pos + $len > $size or $len < 0;
}
}
# ignore any empty entries (or null padding) after the first
next if not $len and $pos;
my $str = substr($val, $pos, $len);
my $langInfo;
if ($lang < 0x400 and $str !~ /^\xfe\xff/) {
# this is a Macintosh language code
# a language code of 0 is Macintosh english, so treat as default
if ($lang) {
# use Font.pm to look up language string
require Image::ExifTool::Font;
$lang = $Image::ExifTool::Font::ttLang{Macintosh}{$lang};
}
# the spec says only "Macintosh text encoding", but
# allow this to be configured by the user
$str = $et->Decode($str, $charsetQuickTime);
} else {
# convert language code to ASCII (ignore read-only bit)
$lang = UnpackLang($lang);
# may be either UTF-8 or UTF-16BE
my $enc = $str=~s/^\xfe\xff// ? 'UTF16' : 'UTF8';
$str = $et->Decode($str, $enc);
}
$str =~ s/\0+$//; # remove any trailing nulls (eg. 3gp tags)
$langInfo = GetLangInfoQT($et, $tagInfo, $lang) if $lang;
$et->FoundTag($langInfo || $tagInfo, $str);
$pos += $len;
}
} else {
my $format = $$tagInfo{Format};
if ($format) {
$val = ReadValue(\$val, 0, $format, $$tagInfo{Count}, length($val));
}
my $oldBase;
if ($$tagInfo{SetBase}) {
$oldBase = $$et{BASE};
$$et{BASE} = $dataPos;
}
my $key = $et->FoundTag($tagInfo, $val);
$$et{BASE} = $oldBase if defined $oldBase;
# decode if necessary (NOTE: must be done after RawConv)
if (defined $key and (not $format or $format =~ /^string/) and
not $$tagInfo{Unknown} and not $$tagInfo{ValueConv} and
not $$tagInfo{Binary} and defined $$et{VALUE}{$key} and not ref $val)
{
my $vp = \$$et{VALUE}{$key};
if (not ref $$vp and length($$vp) <= 65536 and $$vp =~ /[\x80-\xff]/) {
# the encoding of this is not specified, so use CharsetQuickTime
# unless the string is valid UTF-8
require Image::ExifTool::XMP;
my $enc = Image::ExifTool::XMP::IsUTF8($vp) > 0 ? 'UTF8' : $charsetQuickTime;
$$vp = $et->Decode($$vp, $enc);
}
}
}
}
} else {
$et->VerboseInfo($tag, $tagInfo,
Size => $size,
Extra => sprintf(' at offset 0x%.4x', $raf->Tell()),
) if $verbose;
if ($size and (not $raf->Seek($size-1, 1) or $raf->Read($buff, 1) != 1)) {
my $t = PrintableTagID($tag);
$et->Warn("Truncated '${t}' data");
last;
}
}
$dataPos += $size + 8; # point to start of next atom data
last if $dirEnd and $dataPos >= $dirEnd; # (note: ignores last value if 0 bytes)
$raf->Read($buff, 8) == 8 or last;
($size, $tag) = unpack('Na4', $buff);
++$index if defined $index;
}
# fill in missing defaults for alternate language tags
# (the first language is taken as the default)
if ($doDefaultLang and $$et{QTLang}) {
QTLang: foreach $tag (@{$$et{QTLang}}) {
next unless defined $$et{VALUE}{$tag};
my $langInfo = $$et{TAG_INFO}{$tag} or next;
my $tagInfo = $$langInfo{SrcTagInfo} or next;
my $infoHash = $$et{TAG_INFO};
my $name = $$tagInfo{Name};
# loop through all instances of this tag name and generate the default-language
# version only if we don't already have a QuickTime tag with this name
my ($i, $key);
for ($i=0, $key=$name; $$infoHash{$key}; ++$i, $key="$name ($i)") {
next QTLang if $et->GetGroup($key, 0) eq 'QuickTime';
}
$et->FoundTag($tagInfo, $$et{VALUE}{$tag});
}
delete $$et{QTLang};
}
# process item information now that we are done processing its 'meta' container
HandleItemInfo($et) if $topLevel or $dirID eq 'meta';
ScanMovieData($et) if $ee and $topLevel; # brute force scan for metadata embedded in movie data
# restore any changed options
$et->Options($_ => $saveOptions{$_}) foreach keys %saveOptions;
return 1;
}
#------------------------------------------------------------------------------
# Process a QuickTime Image File
# Inputs: 0) ExifTool object reference, 1) directory information reference
# Returns: 1 on success
sub ProcessQTIF($$)
{
    my ($et, $dirInfo) = @_;
    # A QTIF file shares the MOV atom structure, so simply delegate to
    # ProcessMOV using the QuickTime ImageFile tag table.
    return ProcessMOV($et, $dirInfo,
                      GetTagTable('Image::ExifTool::QuickTime::ImageFile'));
}
1; # end
__END__
=head1 NAME
Image::ExifTool::QuickTime - Read QuickTime and MP4 meta information
=head1 SYNOPSIS
This module is used by Image::ExifTool
=head1 DESCRIPTION
This module contains routines required by Image::ExifTool to extract
information from QuickTime and MP4 video, M4A audio, and HEIC image files.
=head1 AUTHOR
Copyright 2003-2019, Phil Harvey (phil at owl.phy.queensu.ca)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 REFERENCES
=over 4
=item L<http://developer.apple.com/mac/library/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html>
=item L<http://search.cpan.org/dist/MP4-Info-1.04/>
=item L<http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt>
=item L<http://wiki.multimedia.cx/index.php?title=Apple_QuickTime>
=item L<http://atomicparsley.sourceforge.net/mpeg-4files.html>
=item L<http://wiki.multimedia.cx/index.php?title=QuickTime_container>
=item L<http://code.google.com/p/mp4v2/wiki/iTunesMetadata>
=item L<http://www.canieti.com.mx/assets/files/1011/IEC_100_1384_DC.pdf>
=item L<http://www.adobe.com/devnet/flv/pdf/video_file_format_spec_v10.pdf>
=back
=head1 SEE ALSO
L<Image::ExifTool::TagNames/QuickTime Tags>,
L<Image::ExifTool(3pm)|Image::ExifTool>
=cut
| philmoz/Focus-Points | focuspoints.lrdevplugin/bin/exiftool/lib/Image/ExifTool/QuickTime.pm | Perl | apache-2.0 | 377,884 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
package Bio::EnsEMBL::Variation::RegulationVariation;
use strict;
use warnings;
use base qw(Bio::EnsEMBL::Variation::VariationFeatureOverlap);
# Combined getter/setter for the feature label.
# Only a true (non-empty, non-zero) argument updates the stored value;
# the current value is always returned.
sub feature_label {
    my $self = shift;
    if (@_ and $_[0]) {
        $self->{feature_label} = $_[0];
    }
    return $self->{feature_label};
}
# NOTE(review): unimplemented stub — currently returns undef/empty list.
# Presumably intended to fetch the regulatory target feature object from a
# database adaptor (cf. target_feature_stable_id below); confirm intended
# behaviour with callers before relying on this method.
sub target_feature {
    # XXX: fetch the target feature
}
# Combined getter/setter for the stable ID of the target feature.
# A true argument replaces the stored ID; the current ID is returned.
sub target_feature_stable_id {
    my $self = shift;
    my ($new_id) = @_;
    $self->{target_feature_stable_id} = $new_id if $new_id;
    return $self->{target_feature_stable_id};
}
1;
| dbolser-ebi/ensembl-variation | modules/Bio/EnsEMBL/Variation/RegulationVariation.pm | Perl | apache-2.0 | 1,489 |
#!/usr/bin/env perl
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use strict;
use warnings;
use English;
use Getopt::Long;
use FileHandle;
use DBI;
use Cwd;
use Data::Dumper;
use Schema;
use CPAN::Meta;
use File::Find::Rule;
use YAML;
use YAML qw(LoadFile);
use DBIx::Class::Schema::Loader qw/make_schema_at/;
use Env;
use Env qw(HOME);
my $usage = "\n"
. "Usage: $PROGRAM_NAME [--env (development|test|production|integration)] [arguments]\t\n\n"
. "Example: $PROGRAM_NAME --env=test reset\n\n"
. "Purpose: This script is used to manage database. The environments are\n"
. " defined in the dbconf.yml, as well as the database names.\n\n"
. "NOTE: \n"
. "Postgres Superuser: The 'postgres' superuser needs to be created to run $PROGRAM_NAME and setup databases.\n"
. "If the 'postgres' superuser has not been created or password has not been set then run the following commands accordingly. \n\n"
. "Create the 'postgres' user as a super user (if not created):\n\n"
. " \$ createuser postgres --superuser --createrole --createdb --login --pwprompt\n\n"
. "Modify your $HOME/.pgpass file which allows for easy command line access by defaulting the user and password for the database\n"
. "without prompts.\n\n"
. " Postgres .pgpass file format:\n"
. " hostname:port:database:username:password\n\n"
. " ----------------------\n"
. " Example Contents\n"
. " ----------------------\n"
. " *:*:*:postgres:your-postgres-password \n"
. " *:*:*:traffic_ops:the-password-in-dbconf.yml \n"
. " ----------------------\n\n"
. " Save the following example into this file $HOME/.pgpass with the permissions of this file\n"
. " so only $USER can read and write.\n\n"
. " \$ chmod 0600 $HOME/.pgpass\n\n"
. "===================================================================================================================\n"
. "$PROGRAM_NAME arguments: \n\n"
. "createdb - Execute db 'createdb' the database for the current environment.\n"
. "create_user - Execute 'create_user' the user for the current environment (traffic_ops).\n"
. "dropdb - Execute db 'dropdb' on the database for the current environment.\n"
. "down - Roll back a single migration from the current version.\n"
. "drop_user - Execute 'drop_user' the user for the current environment (traffic_ops).\n"
. "redo - Roll back the most recently applied migration, then run it again.\n"
. "reset - Execute db 'dropdb', 'createdb', load_schema, migrate on the database for the current environment.\n"
. "reverse_schema - Reverse engineer the lib/Schema/Result files from the environment database.\n"
. "seed - Execute sql from db/seeds.sql for loading static data.\n"
. "show_users - Execute sql to show all of the user for the current environment.\n"
. "status - Print the status of all migrations.\n"
. "upgrade - Execute migrate then seed on the database for the current environment.\n";
# ---- Connection defaults (populated from db/dbconf.yml below) ---------------
my $environment = 'development';
my $db_protocol;

# Defaulted to 'to_development' so dev workstations need not pass
# --env=development explicitly.
my $db_name       = 'to_development';
my $db_super_user = 'postgres';
my $db_user       = '';
my $db_password   = '';
my $host_ip       = '';
my $host_port     = '';

GetOptions( "env=s" => \$environment );
$ENV{'MOJO_MODE'} = $environment;
parse_dbconf_yml_pg_driver();
STDERR->autoflush(1);

# Dispatch table mapping each supported command-line argument to the action(s)
# that implement it. Anything missing or unrecognised falls through to usage.
my %command_for = (
    createdb       => \&createdb,
    dropdb         => \&dropdb,
    create_user    => \&create_user,
    drop_user      => \&drop_user,
    show_users     => \&show_users,
    reset          => sub { create_user(); dropdb(); createdb(); load_schema(); migrate('up'); },
    upgrade        => sub { migrate('up'); seed(); },
    migrate        => sub { migrate('up'); },
    down           => sub { migrate('down'); },
    redo           => sub { migrate('redo'); },
    status         => sub { migrate('status'); },
    dbversion      => sub { migrate('dbversion'); },
    seed           => \&seed,
    load_schema    => \&load_schema,
    reverse_schema => \&reverse_schema,
);

my $argument = shift(@ARGV);
if ( defined($argument) and $command_for{$argument} ) {
    $command_for{$argument}->();
}
else {
    print $usage;
}
exit(0);
# Read db/dbconf.yml for the current $environment and populate the
# file-scoped connection globals ($db_protocol, $host_ip, $host_port,
# $db_user, $db_password, $db_name).
sub parse_dbconf_yml_pg_driver {
    my $db_conf       = LoadFile('db/dbconf.yml');
    my $db_connection = $db_conf->{$environment};
    $db_protocol = $db_connection->{driver};

    # Goose requires the 'open' line in the dbconf file to be a scalar, e.g.:
    #   open: host=127.0.0.1 port=5432 user=to_user password=twelve dbname=to_development sslmode=disable
    # To access the individual fields, rewrite each "key=value" token as a
    # "key : value" YAML line and re-parse the result into a hash.
    my @tokens = split " ", $db_connection->{open};
    s/=/ : / for @tokens;
    my $hash = Load( join "\n", @tokens );

    $host_ip     = $hash->{host};
    $host_port   = $hash->{port};
    $db_user     = $hash->{user};
    $db_password = $hash->{password};
    $db_name     = $hash->{dbname};
}
# Run a goose migration command ('up', 'down', 'redo', 'status', 'dbversion')
# against the current environment. Dies if goose exits non-zero.
sub migrate {
    my ($command) = @_;
    print "Migrating database...\n";
    system("goose --env=$environment $command") == 0
        or die "Can't run goose\n";
}
# Load static data from db/seeds.sql via psql. PGPASSWORD is localised so
# the password never leaks into the parent environment.
sub seed {
    print "Seeding database.\n";
    local $ENV{PGPASSWORD} = $db_password;
    system("psql -h $host_ip -p $host_port -d $db_name -U $db_user -e < db/seeds.sql") == 0
        or die "Can't seed database\n";
}
# Create the database tables from db/create_tables.sql via psql.
# PGPASSWORD is localised so the password never leaks into the parent
# environment.
sub load_schema {
    print "Creating database tables.\n";
    local $ENV{PGPASSWORD} = $db_password;
    system("psql -h $host_ip -p $host_port -d $db_name -U $db_user -e < db/create_tables.sql") == 0
        or die "Can't create database tables\n";
}
# Drop the current environment's database (no-op if absent, via --if-exists).
sub dropdb {
    print "Dropping database: $db_name\n";
    system("dropdb -h $host_ip -p $host_port -U $db_super_user -e --if-exists $db_name;") == 0
        or die "Can't drop db $db_name\n";
}
# Create the current environment's database, owned by $db_user.
# Idempotent: bails out early when the database already exists.
sub createdb {
    my $db_exists = `psql -h $host_ip -U $db_super_user -p $host_port -tAc "SELECT 1 FROM pg_database WHERE datname='$db_name'"`;
    if ($db_exists) {
        print "Database $db_name already exists\n";
        return;
    }
    system("createdb -h $host_ip -p $host_port -U $db_super_user -e --owner $db_user $db_name;") == 0
        or die "Can't create db $db_name\n";
}
# Create the application database user with login rights and an encrypted
# password. Idempotent: does nothing if the role already exists.
sub create_user {
    print "Creating user: $db_user\n";
    my $user_exists = `psql -h $host_ip -p $host_port -U $db_super_user -tAc "SELECT 1 FROM pg_roles WHERE rolname='$db_user'"`;
    return if $user_exists;
    my $cmd = "CREATE USER $db_user WITH LOGIN ENCRYPTED PASSWORD '$db_password'";
    system(qq{psql -h $host_ip -p $host_port -U $db_super_user -etAc "$cmd"}) == 0
        or die "Can't create user $db_user\n";
}
# Drop the application database user via the dropuser client.
# Dies if dropuser exits non-zero.
sub drop_user {
	print "Dropping user: $db_user\n";
	# BUG FIX: a space is required between "$db_super_user" and "-i"; the
	# previous string interpolated to e.g. "-U postgres-i", gluing the
	# interactive flag onto the superuser name so dropuser connected as a
	# nonexistent role and never received -i at all.
	if ( system("dropuser -h $host_ip -p $host_port -U $db_super_user -i -e $db_user;") != 0 ) {
		die "Can't drop user $db_user\n";
	}
}
# List all database roles for the current cluster using psql's \du command.
sub show_users {
    system("psql -h $host_ip -p $host_port -U $db_super_user -ec '\\du';") == 0
        or die "Can't show users";
}
# Reverse engineer the lib/Schema/Result classes from the live database
# using DBIx::Class::Schema::Loader.
sub reverse_schema {
    my $db_info = Schema->get_dbinfo();
    make_schema_at(
        'Schema',
        {
            debug                   => 1,
            dump_directory          => './lib',
            overwrite_modifications => 1,
        },
        [ Schema->get_dsn(), $db_info->{user}, $db_info->{password} ],
    );
}
| weifensh/incubator-trafficcontrol | traffic_ops/app/db/admin.pl | Perl | apache-2.0 | 7,915 |
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::netapp::snmp::mode::fan;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: builds on the generic centreon plugin mode and re-blesses
# the result into this class.
sub new {
    my ($class, %options) = @_;
    my $self = bless $class->SUPER::new(package => __PACKAGE__, %options), $class;
    $self->{version} = '1.0';
    # This mode defines no extra command-line arguments of its own.
    $options{options}->add_options(arguments =>
        {
        });
    return $self;
}
# Validate command-line options by delegating to the base mode class.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);
}
# Main entry point: query NetApp fan-failure counters over SNMP and emit
# plugin output. Two layouts are handled:
#   - a single global failed-fan counter (envFailedFanCount), or
#   - per-node counters in the cluster table (nodeEnvFailedFanCount).
# Any non-zero failed-fan count escalates the plugin severity to CRITICAL.
sub run {
    my ($self, %options) = @_;
    # $options{snmp} = snmp session object used for all queries
    $self->{snmp} = $options{snmp};
    # Global (non-clustered) counters — NOTE(review): OIDs presumably from
    # NETAPP-MIB; confirm against the MIB if these ever need updating.
    my $oid_envFailedFanCount = '.1.3.6.1.4.1.789.1.2.4.2';
    my $oid_envFailedFanMessage = '.1.3.6.1.4.1.789.1.2.4.3';
    # Per-node (cluster) table columns.
    my $oid_nodeName = '.1.3.6.1.4.1.789.1.25.2.1.1';
    my $oid_nodeEnvFailedFanCount = '.1.3.6.1.4.1.789.1.25.2.1.19';
    my $oid_nodeEnvFailedFanMessage = '.1.3.6.1.4.1.789.1.25.2.1.20';
    # Fetch all five tables in one request; abort if nothing comes back.
    my $results = $self->{snmp}->get_multiple_table(oids => [
                                                        { oid => $oid_envFailedFanCount },
                                                        { oid => $oid_envFailedFanMessage },
                                                        { oid => $oid_nodeName },
                                                        { oid => $oid_nodeEnvFailedFanCount },
                                                        { oid => $oid_nodeEnvFailedFanMessage }
                                                    ], nothing_quit => 1);
    # Prefer the global counter when present (single-system layout).
    if (defined($results->{$oid_envFailedFanCount}->{$oid_envFailedFanCount . '.0'})) {
        # Default to OK; a later CRITICAL output_add escalates the severity.
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => 'Fans are ok.');
        if ($results->{$oid_envFailedFanCount}->{$oid_envFailedFanCount . '.0'} != 0) {
            $self->{output}->output_add(severity => 'CRITICAL',
                                        short_msg => sprintf("'%d' fans are failed [message: %s].",
                                                             $results->{$oid_envFailedFanCount}->{$oid_envFailedFanCount . '.0'},
                                                             $results->{$oid_envFailedFanMessage}->{$oid_envFailedFanMessage . '.0'}));
        }
    } else {
        # Cluster layout: walk every node's failed-fan counter.
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => 'Fans are ok on all nodes');
        foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$results->{$oid_nodeEnvFailedFanCount}})) {
            # Extract the table instance (row index) from the full OID.
            $oid =~ /^$oid_nodeEnvFailedFanCount\.(.*)$/;
            my $instance = $1;
            my $name = $results->{$oid_nodeName}->{$oid_nodeName . '.' . $instance};
            my $count = $results->{$oid_nodeEnvFailedFanCount}->{$oid};
            my $message = $results->{$oid_nodeEnvFailedFanMessage}->{$oid_nodeEnvFailedFanMessage . '.' . $instance};
            # Per-node detail always goes to the long output; only failures
            # escalate the short message/severity.
            $self->{output}->output_add(long_msg => sprintf("'%d' fans are failed on node '%s' [message: %s]",
                                                            $count, $name, defined($message) ? $message : '-'));
            if ($count != 0) {
                $self->{output}->output_add(severity => 'CRITICAL',
                                            short_msg => sprintf("'%d' fans are failed on node '%s' [message: %s]",
                                                                 $count, $name, defined($message) ? $message : '-'));
            }
        }
    }
    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check if fans are failed (not operating within the recommended RPM range).
=over 8
=back
=cut
| bcournaud/centreon-plugins | storage/netapp/snmp/mode/fan.pm | Perl | apache-2.0 | 4,396 |
#! /usr/bin/env perl
##**************************************************************
##
## Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
## University of Wisconsin-Madison, WI.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##**************************************************************
##########################################################################
# For information on command line options of this script, call it with -h
#
# The logic in this code is divided into two functions. They are parse()
# and reconstitute().
# parse() takes one string as an argument. It will parse the string into
# one giant associative array, and return a reference.
# reconstitute() takes
#
# Because this deals a great deal with specifying meta-information on
# parameters, comments and variable naming can be confusing. So I will
# try to use the term "property" to only refer to the meta-information,
# and the term "parameter" or "params" to refer to those parameters that
# are actually referenced in Condor code, and (will be) actually
# configured by users.
# This main sub simply specifies a default, and calls parse.
# NOTE: $option contains values of command line options. See configure()
# at the bottom of this script if you want more info.
sub main {
	# fetch contents of input file into string $input_contents
	my $input_contents = file_get_contents($options{input});
	# parse contents, and put into associative array $params
	my $params = &parse($input_contents);
	# set defaults
	my $defaults = {
		version => '7.1.0',
	};
	# call reconstitute on the params to do all of the outputting.
	# BUG FIX: previously passed the undeclared variable $default (missing
	# "s"); because "use strict" only takes effect later in the file, that
	# silently evaluated to undef, so reconstitute() never saw the defaults
	# hash declared above.
	reconstitute($params, $defaults) unless $options{on_the_fly};
	# The reason that it is not called if on_the_fly is set is because
	# on_the_fly will cause &parse to call many reconstitute commands
	# "on the fly" as it parses the string. If it were called anyway,
	# then it would end up repeating output.

	# hack for our build system
	# This #includes param_info_init.c
	`touch param_info.c`;
}
##########################################################################
use strict;
use warnings;
no warnings 'closure';
use Data::Dumper;
use Getopt::Std;
# Global variables. The first three are used internally by &parse,
# and the %options is set immediately after execution with the command
# line options for easy access, as specified in &configure.
use vars qw($remaining_text $parameters $current_parameter %options);
# You may be surprised to see $remaining_text, $parameters, and
# $current_parameter listed here as global variables, even though they
# are used exclusively by the &parse sub. While it probably isn't as
# clean as it (c|sh)ould be, it ended up being a step in the simplest
# solution to making recursive calls to &parse function as expected,
# due to a variety of subtleties involving scoping in subs contained
# Here we define the syntax rules for the parser. Each character class
# consists of an array ref of two elements, the first being a regular
# expression meant to match at least one of those characters, the latter
# being a string name for the character class.
# These constants will be used in the &parse function.
use constant {
WHITESPACE => ['[\r\n\s]+','whitespace'], # TODO: combine these into
COMMENTS => ['\#[^\n]+\n', 'comments'],
LINEBREAK => ['\n\r?','linebreak'],
SPACES => ['[ ]+','spaces'],
ASSIGNMENT => ['\:?\=?','assignment operator'],
ASSIGNMENT_EQUALS => ['\=?','assignment equals operator'],
ASSIGNMENT_COLON => ['\:?','assignment colon operator'],
ASSIGNMENT_HEREDOC => ['[A-Za-z]+', 'heredoc deliminator'],
PARAMETER_TITLE => ['[a-zA-Z0-9_\.]+','parameter title'],
PROPERTY_NAME => ['[a-zA-Z0-9_-]+','property name'],
PROPERTY_VALUE => ['[^\n]+','property value'],
DATACLASS_NAME => ['[a-zA-Z0-9_-]+','dataclass name'],
OPEN_BRACKET => ['\[','open bracket'],
CLOSE_BRACKET => ['\]','close bracket'],
OPEN_PARENTHESIS => ['\(', 'open parenthesis'],
CLOSE_PARENTHESIS => ['\)','close parenthesis'],
};
##################################################################################
# This is the template to be used when substituting for the parameters properties.
# The string that is substituted is in the format of %property%, where property is
# the name of the property to be substituted.
# (property types and names are defined farther below in $property_types)
##################################################################################
use constant { RECONSTITUTE_TEMPLATE =>
'param_info_insert(%parameter_name%, %aliases%, %default%, %version%, %range%,
%state%, %type%, %is_macro%, %reconfig%, %customization%,
%friendly_name%, %usage%,
%url%,
%tags%);
'
};
##################################################################################
# $property_types customizes the type and options of the properties. Each property is
# pointing toward a hash, containing the following metadata:
# type => (String specifying the type of that property. Types are defined in
# the $type_subs variable below)
# optional => (Set this to 1 to make this property optional.)
# dont_trim => (Set this to 1 to not trim trailing whitespace on value.)
##################################################################################
# Per-property metadata. Each property name maps to a hash with:
#   type      => key into $type_subs selecting the formatter for the value
#   optional  => 1 when the property may be absent from a parameter
#   dont_trim => 1 to keep trailing whitespace on the raw value
my $property_types = {
    # Required properties.
    parameter_name => { type => 'char[]' },
    default        => { type => 'char[]', dont_trim => 1 },
    friendly_name  => { type => 'char[]' },
    type           => { type => 'param_type' },
    state          => { type => 'state_type' },
    tags           => { type => 'char[]' },
    usage          => { type => 'char[]' },

    # Optional properties.
    version        => { type => 'char[]',            optional => 1 },
    # id           => { type => 'int',               optional => 1 },
    aliases        => { type => 'char[]',            optional => 1 },
    range          => { type => 'char[]',            optional => 1 },
    is_macro       => { type => 'is_macro_type',     optional => 1 },
    reconfig       => { type => 'reconfig_type',     optional => 1 },
    customization  => { type => 'customization_type', optional => 1 },
    url            => { type => 'char[]',            optional => 1 },
};
##################################################################################
# $type_subs tells this script how to treat all the different types of parameters
# Each sub takes the value as an argument and returns the properly formatted value.
# It should be formatted such that it can be inserted without problem in the
# RECONSTITUTE_TEMPLATE.
# Also, it should be in charge of dieing if it encounters a bad value.
# When writing these subs, you have the following subs available:
# escape( $ ): takes one argument, escapes all potentially problematic characters.
# enum($, @_ ...): The first argument should be the input value. The remaining
# arguments should be acceptable values. If will try to
# (case-insensitively) match the user input with the remaining
# acceptable values. If it cannot find a match, it will die.
# Otherwise, it will correct the capitalization.
# type_error($, $): Dies with a nice error message. The first argument should be
# the value, the second the type.
##################################################################################
# Formatter subs, one per supported property type. Each receives the raw
# user-supplied value and returns text ready for direct insertion into
# RECONSTITUTE_TEMPLATE, dieing (via type_error/param_err) on bad input.
# Helper subs escape(), enum() and type_error() are defined inside
# &reconstitute below.
my $type_subs = {
    'char[]' => sub { return '"'.escape($_[0]).'"'; },
    'bool'   => sub { return enum($_[0],'true','false'); },
    'int'    => sub { return $_[0]=~/^\d+$/ ? $_[0] : type_error($_[0], 'int'); },
    'float'  => sub { return $_[0]=~/^\d+\.\d+$/ ? $_[0] : type_error($_[0], 'float'); },
    'double' => sub { return $_[0]=~/^\d+\.\d+$/ ? $_[0] : type_error($_[0], 'double'); },
    # BUG FIX: the original body was
    #     return $_[0]=~/^\d+$/ and $_[0]<256 ? $_[0]:type_error(...);
    # Low-precedence 'and' binds more loosely than 'return', so it parsed as
    # (return $_[0]=~/^\d+$/) and ... — the sub returned the bare match
    # result (1 or '') and the <256 range check / type_error were dead code.
    # Parenthesize the full condition so the value (or an error) is returned.
    'char'   => sub { return ($_[0]=~/^\d+$/ && $_[0]<256) ? $_[0] : type_error($_[0], 'char'); },
    'state_type' => sub {
        my $state = enum($_[0],'USER','AUTODEFAULT','DEFAULT', 'RUNTIME');
        return "STATE_".$state;
    },
    'param_type' => sub {
        my $type = enum($_[0],'STRING','INT','BOOL', 'DOUBLE');
        return "PARAM_TYPE_".$type;
    },
    'is_macro_type' => sub {
        my $is_macro = enum($_[0],'true','false');
        return ($is_macro =~ /true/) ? 1 : 0;
    },
    'reconfig_type' => sub {
        my $reconfig = enum($_[0],'true', 'false');
        return ($reconfig =~ /true/) ? 1 : 0;
    },
    'customization_type' => sub {
        my $customization = enum($_[0], 'NORMAL', 'SELDOM', 'EXPERT');
        return "CUSTOMIZATION_".$customization;
    },
};
###############################################################################################
# The reconstitute function takes a hash of parameters as its first argument, and a default
# parameter structure as its second. The hash of parameters should be in the same format as
# the one that is generated by the &parse function. The default parameters should be a hash,
# with the keys being property names and the values being the actual default property
# values.
# Possible TODO: Allow &parse to load default structure from a magic "[_default]" parameter.
# reconstitute: render every parameter in $structure (a hashref in the shape
# produced by &parse) into C source via RECONSTITUTE_TEMPLATE, writing to the
# file named in $options{output} (or stdout when -O is set).
# Args: hashref of parameters; optional hashref of per-property defaults used
#       when a parameter omits a property.
# NOTE(review): the named subs nested below are package-level in Perl, NOT
# per-call closures. They bind the enclosing lexicals ($output_filename, and
# $s/$i/$p inside do_one_property) from the FIRST execution only ("Variable
# will not stay shared"). In particular escape()/enum() keep reading the
# $i/$p of the first do_one_property call — confirm this is benign for
# dont_trim handling and enum error text on later properties.
sub reconstitute {
my $structure = shift;
my $default_structure = shift;
# Output path is read once per call from the global %options.
my $output_filename = $options{output};
###########################################################################
## All of the actual file output is contained in this section. ##
###########################################################################
# Opens the output file (clobbering or appending per $options{append}) and
# flips append on so any later begin_output() call appends instead.
# NOTE(review): two-arg open on the bareword handle REC_OUT; mode string
# becomes '>>' once append is set. Open result is not checked — confirm.
sub begin_output {
open REC_OUT, ($options{append}?'>':'').">$output_filename" unless $options{stdout};
$options{append} = 1;
}
# Writes one chunk of text to stdout or to the open output handle.
sub continue_output {
if ($options{stdout}) { print $_[0]; }
else { print REC_OUT $_[0]; }
}
# Closes the output handle (no-op when printing to stdout).
sub end_output {
close REC_OUT unless $options{stdout};
}
############################################################################
# replace_by_hash takes a hash as the first argument and a string as a second.
# It then replaces all keys contained in the hash by their respective values.
sub replace_by_hash {
my ($replace,$context) = @_;
while(my($key, $val) = each %{$replace} ) {
# Backslash-escape every non-word char so the key is a literal regex.
$key =~ s/\W/\\$&/mg;
# Only the first occurrence of each key is replaced (no /g) — the template
# uses each placeholder once.
$context =~ s/$key/$val/ ;
}
return $context;
}
# param_err is just a simple wrapper for errors encountered.
# NOTE(review): this tests $options{ignore}, but the switch table in
# configure() stores the '-e' flag under the key 'errors' — so '-e' appears
# to have no effect here. Confirm which key was intended.
sub param_err {
print STDERR $_[0]."\n\t\t" and die unless($options{ignore});
}
#####################################################################
# do_one_property
# This function gets the correct replacement value of one property.
# It is called with a ref to the parameters substructure, with the
# type (ie, char[], int, etc) of the property, and with the name
# of the property. If it cannot find the property, it will return
# the default value instead.
sub do_one_property {
# $s is a ref to the structure of this parameter (ie, {name=>'foo',usage=>'bar'})
# $i is the metadata of the field (ie, {type=>'char[]',optional=1})
# $p is the name of the property (ie, 'friendly_name')
my ($s,$i,$p) = @_;
##############################################################################
# escape and enum are two functions useful for subs contained in type_subs. #
# They assist with common user input formatting needs. #
##############################################################################
# type_error generates a nice error message for wrong types
sub type_error {
my ($value, $type) = @_;
param_err("PARAMETER TYPE ERROR: '$value' is not a valid type $type.");
}
# escape will escape various control characters from a string so that it
# can be safely used in quotes in C code.
sub escape {
my $input = shift;
# Undef/empty/zero values pass through untouched.
return $input unless $input;
# trim trailing whitespace
# NOTE(review): as written, trimming only happens when dont_trim EXISTS and
# is != 1; a property whose metadata lacks dont_trim is never trimmed. That
# looks inverted relative to the banner comment for $property_types — confirm.
if (exists($i->{dont_trim})) {
$input =~ s/\s+$// if $i->{dont_trim} != 1;
}
$input =~ s/\\/\\\\/g;
$input =~ s/\n/\\n/g;
$input =~ s/\t/\\t/g;
$input =~ s/\r/\\r/g;
$input =~ s/\f/\\f/g;
$input =~ s/'/\\\'/g;
$input =~ s/"/\\\"/g;
$input =~ s/\?/\\\?/g;
return $input;
}
# The first argument of enum is a user inputted value that is matched
# in a case-insensitive manner with the remaining arguments. If there is
# a match, then it returns the match, using the capitalization of the
# latter argument. If there is not a match, it will explode with an error.
sub enum {
my $c = shift;
my @list = @_;
foreach (@list) { return $_ if lc($c) eq lc($_); }
# When $options{ignore} suppresses the die, this returns param_err's value.
return param_err("$p isn't valid ".$i->{type}.". Use one of '@_' instead of $c.");
}
# All the logic in this function is contained in the line below. It calls the
# type_sub for proper type, with either the param's value for that property,
# or the default value for that property (if the param does not contain that
# property).
return $type_subs->{$i->{type}}(exists $s->{$p} ? $s->{$p} : $default_structure->{$p} );
}
#####################################################################
# Here we have the main logic of this function.
begin_output(); # opening the file, and beginning output
# Loop through each of the parameters in the structure passed as an argument
# (hash iteration order, and hence output order, is unspecified).
while(my ($param_name, $sub_structure) = each %{$structure}){
my %replace=();
# Quickly add the pseudo-property "parameter_name" for the name of the
# parameter, so that it can be treated just like any other property.
$sub_structure->{'parameter_name'} = $param_name;
print Dumper($sub_structure) if $options{debug};
# Loop through each of the properties in the hash specifying property
# rules. (This hash is defined at the top of this file and it details
# how every property should be treated).
while(my($name, $info) = each %{$property_types}){
# unless the $sub_structure contains the property or if that property
# is optional, summon an error.
unless(defined $sub_structure->{$name} or $info->{'optional'}){
param_err ("$param_name does not have required property $name.");}
# Get the property value; procesed, formatted, and ready for insertion
# by do_one_property().
$replace{"%$name%"}=do_one_property($sub_structure,$info,$name);
# TYPECHECK: certain parameters types must have a non-empty default
if ($name eq "type")
{
# Integer parameters
if ($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}) eq "PARAM_TYPE_INT")
{
if ($sub_structure->{'default'} eq "") {
print "ERROR: Integer parameter $param_name needs " .
"a default!\n";
}
}
# Boolean parameters
if ($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}) eq "PARAM_TYPE_BOOL")
{
if ($sub_structure->{'default'} eq "") {
print "ERROR: Boolean parameter $param_name needs " .
"a default!\n";
}
}
# Double parameters
if ($type_subs->{$info->{type}}(exists $sub_structure->{type} ? $sub_structure->{type} : $default_structure->{type}) eq "PARAM_TYPE_DOUBLE")
{
if ($sub_structure->{'default'} eq "") {
print "ERROR: Double parameter $param_name needs " .
"a default!\n";
}
}
}
}
# Here we actually apply the template and output the parameter.
continue_output(replace_by_hash(\%replace, RECONSTITUTE_TEMPLATE));
}
# wrap things up.
end_output();
}
##########################################################################
# &parse parses a string. It is totally self-contained, using no outside functions (although
# it does use the character type constants such as PARAMETER_NAME defined in the top of this
# file). It accepts a string as its only argument, and returns a hash structure. No attempt
# is made (in *this* function) to check any of the data; it ONLY parses strings into more
# readable formats.
# The following string...
# - - - - - - - - - -
# [TEST_PARAM]
# # Comment, I am ignored
# var1 = vala
# var2 = valb
#
# [NEXT_PARAM]
# var1 = blah a
# var2 : EOF
# multiline string line 1
# line 2
# EOF
# recursive_structure : (classname) EOF
# sub_val1 = 1
# sub_val2 = 2
# EOF
# - - - - - - - - - -
# ...would be parsed into...
# - - - - - - - - - -
# {
# NEXT_PARAM => { var1 => 'blah a', var2 => "multiline string line 1\nline 2"
# recursive_structure=>{ '_dataclass'=>'classname', sub_val1=>'1', sub_val2=>'2' }
# },
# TEST_PARAM => { var1 => 'vala', var2 => 'valb'}
# }
# - - - - - - - - - -
##########################################################################
# parse: recursive-descent parser for the param-info config format. Accepts
# the full text as its only argument and returns a hashref of
# { PARAM_NAME => { property => value, ... }, ... }.
# NOTE(review): the parser state ($remaining_text, $current_parameter,
# $parameters) lives in file-scoped globals so the nested helper subs (which
# are package-level, not closures) can share it; add_property manually
# freezes/restores these globals around the recursive dataclass call.
sub parse {
# TODO:
# it would be best if WHITESPACE and COMMENT types were
# combined, as anywhere there is a WHITESPACE ignored, comments should be
# ignored also.
# get argument and put into $remaining_text
$remaining_text = shift;
# reset current_parameter and parameters variables
$current_parameter = {};
$parameters = {};
#########################################################
# Main parser logic functions #
#########################################################
sub error { die ("ERROR! Expected valid ".$_[0]." at '".$_[1]."'\n"); }
# ignore will ignore the supplied character class
# (each character-class argument is a [regex, human_name] pair, as defined
# in the constants at the top of this file).
sub ignore {
my $regex = $_[0]->[0];
$remaining_text =~ s/\A$regex//s;
}
# next_is will look ahead and return true if the next character(s) match
# the given chracter class
sub next_is {
my $regex = $_[0]->[0];
$remaining_text =~ /^($regex)/s;
return $1 && length($1) > 0;
}
# not_ignore is used by &accept and &until. It requires that the next characters
# be of the supplied regex and will return them, otherwise erroring.
# Relies on $1 being set by the s/// on the previous line; on failure it
# reports up to 90 characters of the remaining text for context.
sub not_ignore {
my ($regex, $context, $chartype) = @_;
($context ? $context : $remaining_text) =~ s/$regex//s;
return (length($1)>0) ? $1 : error($chartype, substr($remaining_text,0,90));
}
# accept will require that the next characters be of the supplied character class,
# returning the matching string.
# NOTE: &accept shadows the CORE::accept builtin, so it is always invoked
# with a leading '&'.
sub accept { return not_ignore("^(".$_[0]->[0].")", $_[1], $_[0]->[1]); }
# until will require that there be somewhere ahead the supplied character class, and
# will return the text leading up to that supplied class
# NOTE: this user-defined &until shadows Perl's `until` keyword; calls use
# '&until(...)', while the bare `until(...){...}` in the main loop below
# still parses as the flow-control keyword.
sub until { return not_ignore("^(.*?)(?=".$_[0]->[0].")", $_[1], $_[0]->[1]); }
##########################################
# Array building functions here #
##########################################
# add_property will add a property to $current_parameter. It is called with the
# property name, the property value, and the dataclass name (only if there is one.
# The dataclass name is not specified for normal strings or normal multiline strings).
sub add_property {
my ($property, $value, $dataclass) = @_;
if ($dataclass) {
# TODO: the [FOOBAR] thing is sloppy. It is only added to make
# the recursive call to &parse happy with the text and parse it.
# Actually, this entire block is rather sloppy...
$value = "[FOOBAR]$value";
# All of the $h_.+ type variables below are used to temporarily "freeze"
# the global variables in the execution stack, so that calling &parse
# recursively below will function as expected. It's pretty messy,
# but it works for now at least...
# TODO: Very sloppy
my $h_remaining_text = $remaining_text;
my %h_current_parameter = %{$current_parameter};
my %h_parameters = %{$parameters};
$remaining_text = $value; # reassigning $remaining_text to equal $value
$value = parse("$value")->{'FOOBAR'}; # actual parse call
$value->{'_dataclass'} = $dataclass;
$remaining_text = $h_remaining_text;
$current_parameter = \%h_current_parameter;
$parameters = \%h_parameters;
}
$current_parameter->{$property} = $value;
}
# add_parameter is called after a parameter is added. It resets $current_parameter.
# It then adds $current_parameter to the %parameters hash.
# If on_the_fly is set to 1, it will call reconstitute on the parameter right away.
sub add_parameter {
my ($title) = @_;
$parameters->{$title} = $current_parameter;
reconstitute({"$title"=>$current_parameter}) if $options{on_the_fly};
$current_parameter = {};
}
#################################################################
# Actual parser logic contained here... #
#################################################################
&ignore(WHITESPACE); # First, ignore all whitespace and comments
&ignore(COMMENTS);
&ignore(WHITESPACE);
while(length($remaining_text)>1){ ### Main loop, through the entire text
# We first get the name of the next parameter, enclosed in brackets
&accept(OPEN_BRACKET);
my $parameter_title = &accept(PARAMETER_TITLE);
&accept(CLOSE_BRACKET);
&ignore(WHITESPACE);
&ignore(COMMENTS);
&ignore(WHITESPACE);
until(&next_is(OPEN_BRACKET)){
# Now we get all of its properties, looping through until we hit the
# next parameter definition.
if(length($remaining_text)<1){ last; } # End of file
# Get the property name...
my $property_name = &accept(PROPERTY_NAME);
&ignore(WHITESPACE);
my $assignment = &accept(ASSIGNMENT);
# Get the assignment operator
my ($property_value, $dataclass_name);
if($assignment eq '=') {
# If it is an equals sign (normal assignment)...
&ignore(SPACES);
$property_value = "" if &next_is(LINEBREAK);
$property_value = &accept(PROPERTY_VALUE) unless &next_is(LINEBREAK);
&ignore(LINEBREAK);
} else {
# If it is a colon (multiline and special
# dataclass assignment, such as for roles)...
&ignore(SPACES);
if(&next_is(OPEN_PARENTHESIS)){
# This means that it is NOT simply a multiline string,
# but rather a dataclass (such as, default : (role) EOF)
&accept(OPEN_PARENTHESIS);
&ignore(SPACES);
$dataclass_name = &accept(DATACLASS_NAME);
&ignore(SPACES);
&accept(CLOSE_PARENTHESIS);
&ignore(SPACES);
}
# This code grabs heredoc delimiter, and then the text until
# the heredoc delimiter. It will be used for both multiline
# strings and dataclass assignments.
my $heredoc = &accept(ASSIGNMENT_HEREDOC);
&ignore(SPACES);
&accept(LINEBREAK);
# Build an ad-hoc [regex, name] character class for this delimiter.
my $heredoc_charclass = ['\r?\n'.$heredoc.'\r?\n', $heredoc];
$property_value = &until($heredoc_charclass);
&ignore($heredoc_charclass);
}
# add_property will add the newly created property to
# @current_parameter. If it is a single or multiline string, it
# will simply set the new parameter to equal the string.
# However, if $dataclass is longer than 0 characters, it will
# attempt to parse the string.
add_property($property_name, $property_value, $dataclass_name);
ignore(WHITESPACE);
&ignore(COMMENTS);
&ignore(WHITESPACE);
if(length($remaining_text)<1){ last; } # End of file
}
# add_parameter will add @current_parameter (the parameter implicitly
# constructed with add_property) to the hash $parameters. If on_the_fly
# is set, it will also call the reconstruct function on this structure
# and output the results on the fly.
add_parameter($parameter_title);
}
return $parameters;
}
# Really simple function that just brutally gets the contents of an entire file into
# a string.
# If, however, the option stdin is set, then it will instead get input from
# standard in.
# Slurp the entire contents of a file (or of standard input when the global
# $options{stdin} flag is set) and return it as a single string.
#
# Args:    $file_path - path of the file to read (ignored under -I/stdin)
# Returns: the complete text as one scalar
# Dies:    when the file cannot be opened
sub file_get_contents {
    my $file_path = shift;
    my @text;
    if ($options{stdin}) {
        @text = <STDIN>;
    }
    else {
        # BUG FIX: was a two-arg open ("<$file_path") on the bareword global
        # handle FILE_NAME. Two-arg open lets a crafted path inject an open
        # mode (e.g. a leading '>' or trailing '|'); the three-arg form with
        # a lexical handle is safe and auto-closes on scope exit.
        open my $fh, '<', $file_path or die "Cannot find $file_path...";
        @text = <$fh>;
        close $fh;
    }
    return join "", @text;
}
############################################################
# Some generic configuration code... #
# This makes adding / removing new switches much easier #
# To add new command line options, just add them to the #
# list @switches contained below. #
############################################################
# configure: declare the command-line switch table, parse @ARGV with getopts,
# and populate the global %options hash (keyed by each switch's long name).
# For flag-style switches, supplying the flag INVERTS the declared default.
sub configure {
my @switches = (
# flag, arg, short name, default, usage description
['h', 0, 'help', 0, 'print this usage information'],
['f', 0, 'on_the_fly', 0, 'output the result as it is parsed'],
['i', 1, 'input', 'param_info.in', 'input file (default: "param_info.in")'],
# NOTE(review): the actual default is 'param_info_init.c' but the usage
# text says "param_info.c" — one of the two looks stale; confirm.
['o', 1, 'output', 'param_info_init.c', 'output file (default: "param_info.c")'],
['I', 0, 'stdin', 0, 'input from standard in instead of file'],
['O', 0, 'stdout', 0, 'print to standard out instead of file'],
['a', 0, 'append', 0, "append: don't clobber output file"],
# NOTE(review): '-e' is stored under $options{errors}, yet param_err()
# (defined earlier in this file) tests $options{ignore}; as written '-e'
# appears to have no effect — confirm the intended key.
['e', 0, 'errors', 0, 'do not die on some errors'],
['d', 0, 'debug', 0, 0], # 0 makes it hidden on -h
);
# Print the generated usage/help text built from the switch table.
# NOTE(review): $s->[2] is modified in place, so this permanently rewrites
# the long names inside @switches (aliasing through the loop variable).
sub usage {
my $switches;
# goes through all of the flags, generating a "help" line for each item
foreach my $s(@_) {
$s->[2]=~s/_/ /g; # replace underscores
# (the "$switch->[4] and" allows options to be disabled from display by setting the usage description to a false value)
$s->[4] and $switches .= "\t-".$s->[0].($s->[1]?" [".($s->[2])."]\t":"\t\t")."\t".$s->[4]."\n";
}
print << "EOF";
Parameter Parser for Condor
Example usage:
perl $0 -i param_table -o output_source.C -f
Full argument list:
$switches
EOF
}
# Print usage and exit cleanly (used for -h).
sub bomb { usage(@_); exit 0; }
my %opts;
getopts(join ('', map { $_->[0].($_->[1]?':':'') } @switches),\%opts); # get CLI options, with ':' properly specifying arguments
$opts{'h'} and bomb(@switches);
for my $switch (@switches) {
if( !defined $opts{$switch->[0]} or $opts{$switch->[0]}=~/^$/ ) { # If argument was not set...
$options{$switch->[2]} = $switch->[3]; # ...set the options value equal to the default value.
} else { # Otherwise, set the options value equal to either the argument value, or in the case of...
$options{$switch->[2]} = $switch->[1] ? $opts{$switch->[0]} : !$switch->[3]; # ...a flag style switch...
} # ...instead invert the default value.
}
}
# Script entry point: parse command-line options into the global %options,
# run the main driver (main() is defined earlier in this file), then exit
# with success.
configure();
main();
exit(0);
| clalancette/condor-dcloud | src/condor_utils/param_info_c_generator.pl | Perl | apache-2.0 | 26,453 |
#!/usr/bin/env perl
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::RunnableDB::Exonerate2Genes -
=head1 SYNOPSIS
my $exonerate2genes = Bio::EnsEMBL::Analysis::RunnableDB::Exonerate2Genes->new(
-db => $refdb,
-analysis => $analysis_obj,
-input_id => $chunk_file_name
);
$exonerate2genes->fetch_input();
$exonerate2genes->run();
$exonerate2genes->output();
$exonerate2genes->write_output(); #writes to DB
=head1 DESCRIPTION
This object wraps Bio::EnsEMBL::Analysis::Runnable::ExonerateTranscript
It is meant to provide the interface for mapping ESTs to the genome
sequence and writing the results as genes. By the way Exonerate is run
we do not cluster transcripts into genes and only write one transcript per gene.
We then create a dbadaptor for the target database.
=head1 METHODS
=head1 APPENDIX
The rest of the documentation details each of the object methods.
Internal methods are usually preceded with a '_'
=cut
package Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveExonerate2Genes;
use warnings ;
use strict;
use feature 'say';
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::GeneUtils qw(empty_Gene);
use Bio::EnsEMBL::Analysis::Runnable::ExonerateTranscript;
use Bio::EnsEMBL::Gene;
use Bio::EnsEMBL::KillList::KillList;
use Bio::SeqIO;
use Data::Dumper;
use parent ('Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBaseRunnableDB');
# fetch_input: prepare everything the exonerate runnables need — connect the
# target (and optional DNA) database adaptors, apply the analysis config,
# resolve the genome target file(s), select the query sequence/file according
# to the 'iid_type' param, and create one ExonerateTranscript runnable per
# genome database file.
sub fetch_input {
my($self) = @_;
my $dba = $self->hrdb_get_dba($self->param('target_db'));
my $dna_dba = $self->hrdb_get_dba($self->param('dna_db'));
# Attach the DNA database (if any) so sequence can be fetched through $dba.
if($dna_dba) {
$dba->dnadb($dna_dba);
}
$self->hrdb_set_con($dba,'target_db');
# This call will set the config file parameters. Note this will set REFGB (which overrides the
# value in $self->db and OUTDB
$self->hive_set_config;
##########################################
# set up the target (genome)
##########################################
my @db_files;
my @target_list = $self->GENOMICSEQS;
foreach my $target (@target_list){
if(ref $target eq 'ARRAY'){
#check to see if we have multiple files or directories:
my $dir = 0;
foreach my $alt_target (@$target){
if (-d $alt_target){
$dir = 1;
last;
}
}
# genome is in multiple directories; the order of directories determines
# which file is used in case of duplicates. New versions should therefore
# be in the directory listed first.
if ($dir) {
foreach my $chr_name ($self->get_chr_names) {
my $found = 0;
DIRCHECK:
foreach my $alt_target (@$target){
if (-s "$alt_target/$chr_name.fa") {
push @db_files, "$alt_target/$chr_name.fa";
$found = 1;
last DIRCHECK;
}
}
if(!$found){
$self->warning( "Could not find fasta file for '$chr_name' in directories:\n".
join("\n\t", @$target)."\n");
}
}
}else{
# List of plain files: keep every non-empty one.
foreach my $alt_target (@$target){
if (-s $alt_target){
push @db_files, $alt_target;
}
}
}
} # // end target is a directory
else {
# Strip leading whitespace from the configured path.
$target =~s/^\s+//;
if (-e $target and -d $target) {
# genome is in a directory; the directory must contain the complete
# genome else we cannot do best-in-genome filtering.
# We would like to use exonerate's ability to accept a directory as
# target (because bestn then works), but we must check that the directory
# contains only toplevel sequence files
# NOTE(review): bareword dir handle DIR is a global; a lexical handle
# would be safer but this is harmless as used here.
my %dir_contents;
opendir DIR, $target;
while(my $entry = readdir DIR) {
if ($entry ne '.' and $entry ne '..') {
$dir_contents{$entry} = 0;
}
}
closedir(DIR);
foreach my $chr ($self->get_chr_names) {
my $seq_fname = "$chr.fa";
if (-s "$target/$seq_fname") {
$dir_contents{$seq_fname}++;
push @db_files, "$target/$seq_fname";
} else {
$self->warning( "Could not find fasta file for '$chr' in '$target'\n");
}
}
# if all files in dir were expected, we can revert to having
# the whole directory as target
if (not grep { $dir_contents{$_} == 0 } keys %dir_contents) {
@db_files = ($target);
}
}
elsif (-e $target and -s $target) {
# genome sequence is in a single file
@db_files = ($target);
} else {
$self->throw("'$target' refers to something that could not be made sense of");
}
}
}
##########################################
# set up the query (est/cDNA/protein)
##########################################
my $iid_type = $self->param('iid_type');
my ($query_file,$query_seq,$chunk_number,$chunk_total);
unless($iid_type) {
$self->throw("You haven't provided an input id type. Need to provide one via the 'iid_type' param");
}
if($iid_type eq 'db_seq') {
# Input id is an accession (array); fetch its sequence directly.
my $accession_array = $self->param('iid');
$query_seq = $self->get_query_seq($accession_array);
$self->peptide_seq($query_seq->seq);
$self->calculate_coverage_and_pid($self->param('calculate_coverage_and_pid'));
@db_files = ($self->GENOMICSEQS);
} elsif($iid_type eq 'feature_region') {
# Input id encodes a slice plus accession(s); write both to temp files.
my $feature_region_id = $self->param('iid');
my ($slice,$accession_array) = $self->parse_feature_region_id($feature_region_id);
$query_file = $self->output_query_file($accession_array);
@db_files = ($self->output_db_file($slice,$accession_array));
} elsif($iid_type eq 'feature_id') {
my $feature_type = $self->param('feature_type');
if($feature_type eq 'transcript') {
# Input id is a transcript dbID in a separate transcript database.
my $transcript_id = $self->param('iid');
my $transcript_dba = $self->hrdb_get_dba($self->param('transcript_db'));
if($dna_dba) {
$transcript_dba->dnadb($dna_dba);
}
$self->hrdb_set_con($transcript_dba,'transcript_db');
my ($slice,$accession_array) = $self->get_transcript_region($transcript_id);
#$query_file = $self->output_query_file($accession_array);
$query_seq = $self->get_query_seq($accession_array);
$self->peptide_seq($query_seq->seq);
$self->calculate_coverage_and_pid($self->param('calculate_coverage_and_pid'));
@db_files = ($self->output_db_file($slice,$accession_array));
} else {
$self->throw("The feature_type you passed in is not supported! Type:\n".$feature_type);
}
} elsif($iid_type eq 'chunk_file') {
my $query = $self->QUERYSEQS;
if(-e $query and -d $query) {
# query seqs is a directory; input id will be a file in that directory
# As input_id returns a string, I've made it parse out the file name. I don't
# like this solution but it is the quickest for the moment
my $input_id = $self->input_id;
$query_file = "$query/" . $input_id;
if (not -e $query_file) {
$self->throw( "Query file '$query_file' does not exist'\n");
}
if ($self->USE_KILL_LIST) {
$query_file = filter_killed_entries($query_file,$self->KILL_TYPE,$self->REFDB,$self->KILLLISTDB,$self->KILL_LIST_FILTER,$self->input_id);
$self->filtered_query_file($query_file);
}
}
elsif (-e $query and -s $query) {
# query seqs is a single file; input id will correspond to a chunk number
$query_file = $query;
my $iid_regexp = $self->IIDREGEXP;
$self->throw("When your input ids are not filenames, you must define ".
"IIDREGEXP in config to enable inference of chunk number and total")
if not defined $iid_regexp;
($chunk_number, $chunk_total) = $self->input_id =~ /$iid_regexp/;
###
### DO THE KILL LIST FILTER FOR QUERY FILE. AGAIN THE FILE CAN CONTAIN MULTIPLE ENTIRES
###
if ($self->USE_KILL_LIST) {
$query_file = filter_killed_entries($query_file,$self->KILL_TYPE,$self->REFDB,$self->KILLLISTDB,$self->KILL_LIST_FILTER);
}
} else {
$self->throw("'$query' refers to something that could not be made sense of\n");
}
} else {
$self->throw("You provided an input id type that was not recoginised via the 'iid_type' param. Type provided:\n".$iid_type);
}
##########################################
# Annotation file with CDS positions
##########################################
##########################################
# setup the runnables
##########################################
my %parameters = %{$self->parameters_hash};
if (not exists($parameters{-options}) and
defined $self->OPTIONS) {
$parameters{-options} = $self->OPTIONS;
}
if (not exists($parameters{-coverage_by_aligned}) and
defined $self->COVERAGE_BY_ALIGNED) {
$parameters{-coverage_by_aligned} = $self->COVERAGE_BY_ALIGNED;
}
# Old code, will leave here as I could activate it again
if (defined $self->PROGRAM && defined $self->analysis->program_file) {
if ($self->PROGRAM ne $self->analysis->program_file) {
# I'm just warning because for debugging it's easier to change just the PROGRAM parameters...
$self->warning("CONFLICT: You have defined -program in your config file and ".
"-program_file in your analysis table.");
}
}
# my $transcript_biotype = $self->transcript_biotype();
my $biotypes_hash = $self->get_biotype();
# One runnable per genome database file. NOTE(review): depending on
# iid_type only one of $query_file / $query_seq is populated; the other is
# passed as undef — presumably the Runnable tolerates that. Confirm.
foreach my $database ( @db_files ){
my $runnable = Bio::EnsEMBL::Analysis::Runnable::ExonerateTranscript->new(
-program => $self->PROGRAM ? $self->PROGRAM : $self->analysis->program_file,
-analysis => $self->analysis,
-target_file => $database,
-query_type => $self->QUERYTYPE,
-query_file => $query_file,
-query_seqs => [$query_seq],
-annotation_file => $self->QUERYANNOTATION ? $self->QUERYANNOTATION : undef,
-query_chunk_number => $chunk_number ? $chunk_number : undef,
-query_chunk_total => $chunk_total ? $chunk_total : undef,
-biotypes => $biotypes_hash,
-calculate_coverage_and_pid => $self->param('calculate_coverage_and_pid'),
%parameters,
);
$self->runnable($runnable);
}
}
# Run every prepared exonerate runnable, collect their transcript output,
# optionally filter it, build genes and stash them in the 'output_genes'
# param for write_output().
sub run {
    my ($self) = @_;

    $self->throw("Can't run - no runnable objects") unless ($self->runnable);

    my @collected;
    foreach my $runnable (@{$self->runnable}) {
        # Guard the run call: exonerate sometimes fails while closing its
        # command pipe, and there is currently no real fix for that, so such
        # failures are downgraded to a warning; anything else is rethrown.
        eval { $runnable->run; };
        my $error = $@;
        if ($error) {
            if ($error =~ /Error closing exonerate command/) {
                warn("Error closing exonerate command, this input id was not analysed successfully:\n".$self->input_id);
            }
            else {
                $self->throw($error);
            }
        }
        else {
            push(@collected, @{$runnable->output});
        }
    }

    # Remove the temporary kill-list-filtered query file, if one was made.
    if ($self->USE_KILL_LIST) {
        unlink $self->filtered_query_file;
        # print "Removed temporary query file ".$self->filtered_query_file."\n";
    }

    # Optionally run the configured result filter over the raw transcripts.
    if ($self->filter) {
        @collected = @{ $self->filter->filter_results(\@collected) };
    }

    my @genes = $self->make_genes(@collected);
    $self->param('output_genes', \@genes);
}
# Stores the genes accumulated by run() into the target database. Failed
# stores are counted and only reported at the end so that one bad gene
# does not prevent the others from being written; if any store failed the
# job is still failed with a throw. Temporary files flagged via
# files_to_delete() are removed afterwards.
sub write_output {
    my ($self) = @_;

    my $outdb        = $self->hrdb_get_con('target_db');
    my $gene_adaptor = $outdb->get_GeneAdaptor;

    my @output = @{$self->param('output_genes')};
    $self->param('output_genes',undef);

    my $fails = 0;
    my $total = 0;
    foreach my $gene (@output){
        # Strip adaptors/dbIDs so the gene can be stored in a fresh db
        empty_Gene($gene);
        eval {
            $gene_adaptor->store($gene);
        };
        if ($@){
            $self->warning("Unable to store gene!!\n$@");
            $fails++;
        }
        $total++;
    }
    if ($fails > 0) {
        $self->throw("Not all genes could be written successfully " .
                     "($fails fails out of $total)");
    }

    # Clean up temporary files. unlink is used instead of shelling out to
    # `rm` (no shell involved, so odd characters in paths are safe, and it
    # is cheaper than spawning a process per file).
    if($self->files_to_delete()) {
        my $files_to_delete = $self->files_to_delete();
        foreach my $file_to_delete (@{$files_to_delete}) {
            unlink $file_to_delete
                or $self->warning("Could not delete file: ".$file_to_delete);
        }
    }
}
# Builds an analysis object from the 'logic_name'/'module' params (so the
# module can write genes to the output db), then pushes every key of the
# 'config_settings' hash through its same-named accessor sub. Also
# validates and instantiates the optional FILTER module.
sub hive_set_config {
    my $self = shift;
    # Throw if these aren't present as they should both be defined
    unless($self->param_is_defined('logic_name') && $self->param_is_defined('module')) {
        $self->throw("You must define 'logic_name' and 'module' in the parameters hash of your analysis in the pipeline config file, ".
                     "even if they are already defined in the analysis hash itself. This is because the hive will not allow the runnableDB ".
                     "to read values of the analysis hash unless they are in the parameters hash. However we need to have a logic name to ".
                     "write the genes to and this should also include the module name even if it isn't strictly necessary"
                     );
    }
    # Make an analysis object and set it, this will allow the module to write to the output db
    my $analysis = new Bio::EnsEMBL::Analysis(
        -logic_name => $self->param('logic_name'),
        -module => $self->param('module'),
    );
    $self->analysis($analysis);
    # Now loop through all the keys in the parameters hash and set anything that can be set
    my $config_hash = $self->param('config_settings');
    foreach my $config_key (keys(%{$config_hash})) {
        # NOTE(review): "defined &$config_key" is a symbolic lookup for a sub
        # of that name in the current package; $self->can($config_key) would
        # also honour inheritance -- confirm the current behaviour is intended
        if(defined &$config_key) {
            $self->$config_key($config_hash->{$config_key});
        } else {
            $self->throw("You have a key defined in the config_settings hash (in the analysis hash in the pipeline config) that does ".
                         "not have a corresponding getter/setter subroutine. Either remove the key or add the getter/setter. Offending ".
                         "key:\n".$config_key
                         );
        }
    }
    # FILTER is optional; when present it must describe a module (OBJECT)
    # and its constructor arguments (PARAMETERS), which are loaded and
    # instantiated here
    if($self->FILTER) {
        if(not ref($self->FILTER) eq "HASH" or not exists($self->FILTER->{OBJECT}) or not exists($self->FILTER->{PARAMETERS})) {
            $self->throw("FILTER in config of ".$analysis->logic_name." must be a hash ref with elements:\n" .
                         "  OBJECT : qualified name of the filter module;\n" .
                         "  PARAMETERS : anonymous hash of parameters to pass to the filter");
        } else {
            my $module = $self->FILTER->{OBJECT};
            my $pars   = $self->FILTER->{PARAMETERS};
            # Convert Foo::Bar to Foo/Bar so it can be require'd as a file
            (my $class = $module) =~ s/::/\//g;
            eval {
                require "$class.pm";
            };
            $self->throw("Couldn't require ".$class." Exonerate2Genes:require_module $@") if($@);
            $self->filter($module->new(%{$pars}));
        }
    }
}
# Given a transcript dbID, fetches the transcript from 'transcript_db' and
# returns a padded genomic slice around it plus the accession of its first
# supporting feature: ($slice, [$accession]).
# Padding defaults to 10000 when the 'region_padding' param is unset, and
# the padded region is clamped to the slice boundaries.
sub get_transcript_region {
    my ($self,$transcript_id) = @_;

    my $transcript_dba = $self->hrdb_get_con('transcript_db');
    my $transcript = $transcript_dba->get_TranscriptAdaptor()->fetch_by_dbID($transcript_id);

    # The accession comes from the first transcript supporting feature
    my $tsf = $transcript->get_all_supporting_features();
    my $feature_pair = ${$tsf}[0];
    my $accession = $feature_pair->hseqname();

    # For genblast input the "_not_best" logic-name suffix marks hits that
    # are not the best in the genome
    if($self->param('use_genblast_best_in_genome')) {
        my $logic_name = $transcript->analysis->logic_name();
        if($logic_name =~ /_not_best$/) {
            $self->best_in_genome_transcript(0);
        } else {
            $self->best_in_genome_transcript(1);
        }
    }

    my $padding = $self->param('region_padding');
    unless($padding) {
        $self->warning("You didn't pass in any value for padding. Defaulting to 10000");
        $padding = 10000;
    }

    my $start  = $transcript->seq_region_start;
    my $end    = $transcript->seq_region_end;
    my $strand = $transcript->strand;
    my $slice  = $transcript->slice();
    my $slice_length = $slice->length();

    if($padding) {
        # Extend the region by the padding, clamped to [1, slice length].
        # (The original re-fetched $slice/$slice_length into shadowing
        # lexicals inside this block; those redundant duplicates are gone.)
        $start = $start - $padding;
        if($start < 1) {
            $start = 1;
        }
        $end = $end + $padding;
        if($end > $slice_length) {
            $end = $slice_length;
        }
    }

    # Rebuild the slice name with the padded coordinates
    # (coord_system:version:seq_region:start:end:strand)
    my @slice_array = split(':',$slice->name());
    $slice_array[3] = $start;
    $slice_array[4] = $end;
    $slice_array[5] = $strand;
    my $new_slice_name = join(':',@slice_array);

    my $sa = $transcript_dba->get_SliceAdaptor();
    my $transcript_slice = $sa->fetch_by_name($new_slice_name);

    return($transcript_slice,[$accession]);
}
# Getter/setter for the best-in-genome flag. Only explicit 0 or 1 values
# are stored; anything else leaves the stored flag untouched.
sub best_in_genome_transcript {
    my ($self,$val) = @_;
    if (defined($val) && ($val == 0 || $val == 1)) {
        $self->param('_best_in_genome_transcript', $val);
    }
    return($self->param('_best_in_genome_transcript'));
}
# Wraps each result transcript in a single-transcript Bio::EnsEMBL::Gene,
# attaching the right analysis/biotype (with a "_not_best" suffix when the
# best-in-genome flag says so), putting the genomic slice on the transcript,
# its exons and all supporting evidence, and optionally recalculating
# coverage/percent-id. Returns the list of genes.
sub make_genes{
    my ($self,@transcripts) = @_;

    my (@genes);
    my $slice_adaptor = $self->hrdb_get_con('target_db')->get_SliceAdaptor;
    # Cache slices by name so each seq region is fetched only once
    my %genome_slices;

    foreach my $tran ( @transcripts ){
        my $gene = Bio::EnsEMBL::Gene->new();
        $gene->analysis($self->analysis);
        $gene->biotype($self->analysis->logic_name);
        $tran->analysis($self->analysis);

        # Mark transcripts that are not the best hit in the genome
        if(defined($self->best_in_genome_transcript()) && $self->best_in_genome_transcript() == 0) {
            my $analysis = $self->analysis;
            my $logic_name = $analysis->logic_name."_not_best";
            $analysis->logic_name($logic_name);
            $gene->analysis($analysis);
            $gene->biotype($logic_name);
            $tran->analysis($analysis);
        }

        ############################################################
        # put a slice on the transcript
        my $slice_id = $tran->start_Exon->seqname;
        if (not exists $genome_slices{$slice_id}) {
            # assumes genome seqs were named in the Ensembl API Slice naming
            # convention, i.e. coord_syst:version:seq_reg_id:start:end:strand
            $genome_slices{$slice_id} = $slice_adaptor->fetch_by_name($slice_id);
        }
        my $slice = $genome_slices{$slice_id};

        # Diagnose a missing slice *before* attaching it anywhere (the
        # original only checked after undef had already been set on every
        # exon and evidence feature)
        if (!$slice){
            my ($sf);
            if (@{$tran->get_all_supporting_features}) {
                ($sf) = @{$tran->get_all_supporting_features};
            } else {
                my @exons = @{$tran->get_all_Exons};
                ($sf) = @{$exons[0]->get_all_supporting_features};
            }
            print $sf->hseqname."\t$slice_id\n";
            $self->throw("Have no slice");
        }

        foreach my $exon (@{$tran->get_all_Exons}){
            $exon->slice($slice);
            foreach my $evi (@{$exon->get_all_supporting_features}){
                $evi->slice($slice);
                $evi->analysis($self->analysis);
            }
        }
        foreach my $evi (@{$tran->get_all_supporting_features}) {
            $evi->slice($slice);
            $evi->analysis($self->analysis);
        }
        $tran->slice($slice);

        # Biotype recorded per accession when the query table carried one
        my $accession = $tran->{'accession'};
        my $transcript_biotype = $self->get_biotype->{$accession};
        $tran->biotype($transcript_biotype);

        if($self->calculate_coverage_and_pid) {
            $self->realign_translation($tran);
        }

        $gene->add_Transcript($tran);
        push( @genes, $gene);
    }
    return @genes;
}
############################################################
# Returns the seq_region names of all toplevel slices in the target db.
# When NONREF_REGIONS is set in the config, non-reference regions (such as
# human DR52) are included as well.
sub get_chr_names{
    my ($self) = @_;

    my $chr_adaptor = $self->hrdb_get_con('target_db')->get_SliceAdaptor;

    my $chromosomes = defined($self->NONREF_REGIONS)
        ? $chr_adaptor->fetch_all('toplevel', undef, 1)
        : $chr_adaptor->fetch_all('toplevel');

    return map { $_->seq_region_name } @{$chromosomes};
}
# Resolves the output database adaptor: a connection-parameter hash in
# OUTDB builds a fresh DBAdaptor (with the target db attached for DNA),
# a non-hash OUTDB value is handed to get_dbadaptor, and with no OUTDB
# the target db itself is used. Both connections are marked
# disconnect_when_inactive.
sub get_output_db {
    my ($self) = @_;

    my $outdb;
    if ($self->OUTDB) {
        if ( ref($self->OUTDB)=~m/HASH/) {
            # Arrow-form constructor instead of indirect object syntax
            # ("new Class(...)"), which Perl can mis-parse
            $outdb = Bio::EnsEMBL::DBSQL::DBAdaptor->new(
                %{$self->OUTDB},
                -dnadb => $self->hrdb_get_con('target_db')
            );
        }else{
            $outdb = $self->get_dbadaptor($self->OUTDB);
        }
    } else {
        $outdb = $self->hrdb_get_con('target_db');
    }
    $self->hrdb_get_con('target_db')->dbc->disconnect_when_inactive(1) ;
    $outdb->dbc->disconnect_when_inactive(1) ;
    return $outdb;
}
# Overrides the input_id inherited from Bio::EnsEMBL::Hive::Process (a
# stringified hash) and extracts just the double-quoted value from it,
# i.e. the chunk file name. Note that RunnableDB also provides an
# input_id sub, which would not work here as no input id object is
# attached.
sub input_id {
    my $self = shift;

    my $stringified = $self->Bio::EnsEMBL::Hive::Process::input_id;
    unless($stringified =~ /.+\=\>.+\"(.+)\"/) {
        $self->throw("Could not find the chunk file in the input id. Input id:\n".$stringified);
    }
    return $1;
}
# Splits a feature region id of the form "<slice_name>:<accession>" into a
# slice (fetched from the target db) and the accession, returned as
# ($slice, [$accession]).
sub parse_feature_region_id {
    my ($self,$feature_region_id) = @_;

    my $slice_adaptor = $self->hrdb_get_con('target_db')->get_SliceAdaptor();

    # Strip the trailing ":<accession>"; what remains is a normal slice name
    unless($feature_region_id =~ s/\:([^\:]+)$//) {
        $self->throw("Could not parse the accession from the feature region id. Expecting a normal slice id, with an extra colon ".
                     "followed by the accession. Offending feature_region_id:\n".$feature_region_id);
    }
    my $accession = $1;

    my $slice = $slice_adaptor->fetch_by_name($feature_region_id);
    return($slice,[$accession]);
}
############################################################
#
# get/set methods
#
############################################################
# Config accessor backed by the '_CONFIG_QUERYSEQS' pipeline param.
sub QUERYSEQS {
    my ($self,$value) = @_;
    $self->param('_CONFIG_QUERYSEQS',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_QUERYSEQS')
         ? $self->param('_CONFIG_QUERYSEQS')
         : undef;
}
# Config accessor backed by the '_CONFIG_QUERYTYPE' pipeline param.
sub QUERYTYPE {
    my ($self,$value) = @_;
    $self->param('_CONFIG_QUERYTYPE',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_QUERYTYPE')
         ? $self->param('_CONFIG_QUERYTYPE')
         : undef;
}
# Config accessor backed by the '_CONFIG_QUERYANNOTATION' pipeline param.
sub QUERYANNOTATION {
    my ($self,$value) = @_;
    $self->param('_CONFIG_QUERYANNOTATION',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_QUERYANNOTATION')
         ? $self->param('_CONFIG_QUERYANNOTATION')
         : undef;
}
# Config accessor backed by the '_CONFIG_GENOMICSEQS' pipeline param.
sub GENOMICSEQS {
    my ($self,$value) = @_;
    $self->param('_CONFIG_GENOMICSEQS',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_GENOMICSEQS')
         ? $self->param('_CONFIG_GENOMICSEQS')
         : undef;
}
# Config accessor backed by the '_CONFIG_IIDREGEXP' pipeline param.
sub IIDREGEXP {
    my ($self,$value) = @_;
    $self->param('_CONFIG_IIDREGEXP',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_IIDREGEXP')
         ? $self->param('_CONFIG_IIDREGEXP')
         : undef;
}
# Config accessor for the reference db: a connection-parameter hash is
# converted into a live DBAdaptor and cached in the '_CONFIG_REFDB' param.
sub REFDB {
    my ($self,$value) = @_;
    if (defined $value) {
        # Arrow-form constructor instead of indirect object syntax
        # ("new Class(...)"), which Perl can mis-parse
        my $dba = Bio::EnsEMBL::DBSQL::DBAdaptor->new(
            %$value
        );
        $self->param('_CONFIG_REFDB',$dba);
        # Set this to override the default dbc which is inherited from Process and is to the Hive db
        #$self->db($dba);
    }
    if ($self->param_is_defined('_CONFIG_REFDB')) {
        return $self->param('_CONFIG_REFDB');
    } else {
        return undef;
    }
}
# Config accessor backed by the '_CONFIG_OUTDB' pipeline param.
sub OUTDB {
    my ($self,$value) = @_;
    $self->param('_CONFIG_OUTDB',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_OUTDB')
         ? $self->param('_CONFIG_OUTDB')
         : undef;
}
# Config accessor backed by the '_CONFIG_KILLLISTDB' pipeline param.
sub KILLLISTDB {
    my ($self,$value) = @_;
    $self->param('_CONFIG_KILLLISTDB',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_KILLLISTDB')
         ? $self->param('_CONFIG_KILLLISTDB')
         : undef;
}
# Config accessor; note the backing param is '_CONFIG_COVERAGE', not
# '_CONFIG_COVERAGE_BY_ALIGNED'.
sub COVERAGE_BY_ALIGNED {
    my ($self,$value) = @_;
    $self->param('_CONFIG_COVERAGE',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_COVERAGE')
         ? $self->param('_CONFIG_COVERAGE')
         : undef;
}
# Config accessor backed by the '_CONFIG_FILTER' pipeline param.
sub FILTER {
    my ($self,$value) = @_;
    $self->param('_CONFIG_FILTER',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_FILTER')
         ? $self->param('_CONFIG_FILTER')
         : undef;
}
# Config accessor backed by the '_CONFIG_OPTIONS' pipeline param.
sub OPTIONS {
    my ($self,$value) = @_;
    $self->param('_CONFIG_OPTIONS',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_OPTIONS')
         ? $self->param('_CONFIG_OPTIONS')
         : undef;
}
# Config accessor backed by the '_CONFIG_NONREF_REGIONS' pipeline param.
sub NONREF_REGIONS {
    my ($self,$value) = @_;
    $self->param('_CONFIG_NONREF_REGIONS',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_NONREF_REGIONS')
         ? $self->param('_CONFIG_NONREF_REGIONS')
         : undef;
}
# Config accessor backed by the '_CONFIG_PROGRAM' pipeline param.
sub PROGRAM {
    my ($self,$value) = @_;
    $self->param('_CONFIG_PROGRAM',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_PROGRAM')
         ? $self->param('_CONFIG_PROGRAM')
         : undef;
}
# Config accessor backed by the '_CONFIG_USE_KILL_LIST' pipeline param.
sub USE_KILL_LIST {
    my ($self,$value) = @_;
    $self->param('_CONFIG_USE_KILL_LIST',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_USE_KILL_LIST')
         ? $self->param('_CONFIG_USE_KILL_LIST')
         : undef;
}
# Config accessor backed by the '_CONFIG_KILL_TYPE' pipeline param.
sub KILL_TYPE {
    my ($self,$value) = @_;
    $self->param('_CONFIG_KILL_TYPE',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_KILL_TYPE')
         ? $self->param('_CONFIG_KILL_TYPE')
         : undef;
}
# Config accessor backed by the '_CONFIG_KILL_LIST_FILTER' pipeline param.
sub KILL_LIST_FILTER {
    my ($self,$value) = @_;
    $self->param('_CONFIG_KILL_LIST_FILTER',$value) if defined $value;
    return $self->param_is_defined('_CONFIG_KILL_LIST_FILTER')
         ? $self->param('_CONFIG_KILL_LIST_FILTER')
         : undef;
}
# Config accessor; backing param is '_SOFT_MASKED_REPEATS' (no _CONFIG
# prefix, unlike most accessors in this module).
sub SOFT_MASKED_REPEATS {
    my ($self,$value) = @_;
    $self->param('_SOFT_MASKED_REPEATS',$value) if defined $value;
    return $self->param_is_defined('_SOFT_MASKED_REPEATS')
         ? $self->param('_SOFT_MASKED_REPEATS')
         : undef;
}
# Config accessor; backing param is '_SEQFETCHER_PARAMS' (no _CONFIG prefix).
sub SEQFETCHER_PARAMS {
    my ($self,$value) = @_;
    $self->param('_SEQFETCHER_PARAMS',$value) if defined $value;
    return $self->param_is_defined('_SEQFETCHER_PARAMS')
         ? $self->param('_SEQFETCHER_PARAMS')
         : undef;
}
# Config accessor; backing param is '_SEQFETCHER_OBJECT' (no _CONFIG prefix).
sub SEQFETCHER_OBJECT {
    my ($self,$value) = @_;
    $self->param('_SEQFETCHER_OBJECT',$value) if defined $value;
    return $self->param_is_defined('_SEQFETCHER_OBJECT')
         ? $self->param('_SEQFETCHER_OBJECT')
         : undef;
}
# Accumulating getter/setter for the query sequences: new seqs are appended
# to the '_query_seqs' param and the full list is returned. Only the first
# element of a batch is type-checked.
sub query_seqs {
    my ($self, @seqs) = @_;
    if( @seqs ) {
        unless ($seqs[0]->isa("Bio::PrimarySeqI") || $seqs[0]->isa("Bio::SeqI")){
            $self->throw("query seq must be a Bio::SeqI or Bio::PrimarySeqI");
        }
        # NOTE(review): assumes the '_query_seqs' param already holds an
        # array ref; dereferencing an undef return value here would die
        # under strict refs -- confirm it is initialised before first use
        push( @{$self->param('_query_seqs')}, @seqs);
    }
    return @{$self->param('_query_seqs')};
}
############################################################
# Getter/setter for the genomic sequence; the value must be a Bio::Seq-like
# object and is stored in the '_genomic' param.
sub genomic {
    my ($self, $seq) = @_;
    if ($seq){
        $self->throw("query seq must be a Bio::SeqI or Bio::PrimarySeqI")
            unless $seq->isa("Bio::PrimarySeqI") || $seq->isa("Bio::SeqI");
        $self->param('_genomic',$seq);
    }
    return $self->param('_genomic');
}
############################################################
# Getter/setter for the target database, stored in the '_database' param.
sub database {
    my ($self, $database) = @_;
    if ($database) {
        # BUGFIX: this used to read
        #   $self->param('_database',$database) = $database;
        # which tries to assign to a non-lvalue method call and dies with
        # "Can't modify non-lvalue subroutine call" as soon as a value is set.
        $self->param('_database',$database);
    }
    return $self->param('_database');
}
############################################################
# Getter/setter for the transcript filter object ('_transcript_filter' param).
sub filter {
    my ($self, $val) = @_;
    $self->param('_transcript_filter',$val) if $val;
    return $self->param('_transcript_filter');
}
############################################################
# Getter/setter for the kill-list-filtered query file path
# ('_filtered_query_file' param).
sub filtered_query_file {
    my ($self, $val) = @_;
    $self->param('_filtered_query_file',$val) if $val;
    return $self->param('_filtered_query_file');
}
#############################################################
# Declare and set up config variables
#############################################################
#sub read_and_check_config {
# my $self = shift;
# $self->SUPER::read_and_check_config($EXONERATE_CONFIG_BY_LOGIC);
##########
# CHECKS
##########
# my $logic = $self->analysis->logic_name;
# check that compulsory options have values
# foreach my $config_var (qw(QUERYSEQS
# QUERYTYPE
# GENOMICSEQS)) {
# $self->throw("You must define $config_var in config for logic '$logic'")
# if not defined $self->$config_var;
# }
# $self->throw("QUERYANNOTATION '" . $self->QUERYANNOTATION . "' in config must be readable")
# if $self->QUERYANNOTATION and not -e $self->QUERYANNOTATION;
# filter does not have to be defined, but if it is, it should
# give details of an object and its parameters
# if ($self->FILTER) {
# if (not ref($self->FILTER) eq "HASH" or
# not exists($self->FILTER->{OBJECT}) or
# not exists($self->FILTER->{PARAMETERS})) {
# $self->throw("FILTER in config of '$logic' must be a hash ref with elements:\n" .
# " OBJECT : qualified name of the filter module;\n" .
# " PARAMETERS : anonymous hash of parameters to pass to the filter");
# } else {
# my $module = $self->FILTER->{OBJECT};
# my $pars = $self->FILTER->{PARAMETERS};
# (my $class = $module) =~ s/::/\//g;
# eval{
# require "$class.pm";
# };
# $self->throw("Couldn't require ".$class." Exonerate2Genes:require_module $@") if($@);
#
# $self->filter($module->new(%{$pars}));
# }
# }
#}
###############################################
### end of config
###############################################
# Writes a copy of the query fasta file to /tmp with all kill-listed
# entries removed and returns the path of the filtered file. Note this is
# a plain sub, not a method.
sub filter_killed_entries {
    my ($orig_query_filename, $mol_type,$input_db,$killlist_db,$filter_params,$inputID) = @_;

    my $kill_list_object = Bio::EnsEMBL::KillList::KillList
        ->new(-TYPE => $mol_type, -GB_REF_DB => $input_db, -KILL_LIST_DB => $killlist_db, -FILTER_PARAMS => $filter_params);
    my %kill_list = %{ $kill_list_object->get_kill_list() };

    # Arrow-form constructors instead of indirect object syntax
    # ("new Bio::SeqIO(...)"), which Perl can mis-parse
    my $seqin = Bio::SeqIO->new(
        -file   => "<$orig_query_filename",
        -format => "Fasta",
    );

    my $filtered_seqout_filename = "/tmp/$inputID"."_filtered";
    print "Filename for my filtered sequence: $filtered_seqout_filename.\n";

    my $seqout = Bio::SeqIO->new(
        -file   => ">$filtered_seqout_filename",
        -format => "Fasta"
    );

    while( my $query_entry = $seqin->next_seq ){
        my $display_id = $query_entry->display_id;
        my $no_ver_id;
        # Depending on the display ID's format, strip off the
        # version number because the kill_list hash keys are
        # without version numbers
        if ($display_id =~/\w+\.\d/) {
            ($no_ver_id) = $display_id =~/(\w+)\.\d/;
        } elsif ($display_id =~/\w+\-\d/) {
            ($no_ver_id) = $display_id =~/(\w+)\-\d/;
        } elsif ($display_id =~/\w+/ ) {
            ($no_ver_id) = $display_id;
        }

        # Keep entries not on the kill list, report the ones discarded
        # (was an if/elsif on the same condition; a plain else is equivalent)
        if ( !$kill_list{$no_ver_id} ) {
            $seqout->write_seq($query_entry);
        } else {
            print "$mol_type $display_id is in the kill_list. Discarded from analysis.\n";
        }
    }
    return $filtered_seqout_filename;
}
# Fetches the sequence for the first accession in $accession_array from
# the hive naked table named by the 'query_table_name' param, records its
# biotype via get_biotype and returns the sequence as a Bio::Seq object.
sub get_query_seq {
    my ($self,$accession_array) = @_;

    my $query_table_name = $self->param('query_table_name');
    my $table_adaptor = $self->db->get_NakedTableAdaptor();
    $table_adaptor->table_name($query_table_name);

    my $accession = $$accession_array[0];
    my $db_row = $table_adaptor->fetch_by_dbID($accession);
    unless($db_row) {
        $self->throw("Did not find an entry in the ".$query_table_name." table matching the accession. Accession:\n".$accession);
    }

    my $seq = $db_row->{'seq'};

    # If the table has a biotype col set the value
    my $biotypes_hash = {};
    $biotypes_hash->{$accession} = $db_row->{'biotype'};
    $self->get_biotype($biotypes_hash);

    # Arrow-form constructor instead of indirect object syntax
    # ("new Bio::Seq(...)"), which Perl can mis-parse
    my $peptide_obj = Bio::Seq->new( -display_id => $accession,
                                     -seq => $seq);
    return($peptide_obj);
}
# Writes the sequences for all accessions in $accession_array to a fasta
# file in the 'query_seq_dir' directory, recording biotypes and flagging
# the file for deletion. Returns the path of the written file.
sub output_query_file {
    my ($self,$accession_array) = @_;

    my $query_table_name = $self->param('query_table_name');
    my $table_adaptor = $self->db->get_NakedTableAdaptor();
    $table_adaptor->table_name($query_table_name);

    my $output_dir = $self->param('query_seq_dir');

    # Note as each accession will occur in only one file, there should be no problem using the first one
    my $outfile_name = "exonerate_".${$accession_array}[0].".".$$.".fasta";
    my $outfile_path = $output_dir."/".$outfile_name;

    my $biotypes_hash = {};

    # Perl's mkdir instead of shelling out via backticks; a failure is
    # tolerated the same way (the checked open below will catch it)
    unless(-e $output_dir) {
        mkdir $output_dir;
    }

    if(-e $outfile_path) {
        $self->warning("Found the query file in the query dir already. Overwriting. File path\n:".$outfile_path);
    }

    # Lexical filehandle, 3-arg open and an error check (the old bareword
    # open was unchecked)
    open(my $query_fh, '>', $outfile_path)
        or $self->throw("Could not open query file for writing. Path:\n".$outfile_path);

    foreach my $accession (@{$accession_array}) {
        my $db_row = $table_adaptor->fetch_by_dbID($accession);
        unless($db_row) {
            $self->throw("Did not find an entry in the uniprot_sequences table matching the accession. Accession:\n".$accession);
        }

        my $seq = $db_row->{'seq'};
        $biotypes_hash->{$accession} = $db_row->{'biotype'};

        my $record = ">".$accession."\n".$seq;
        say $query_fh $record;
    }
    # Buffered write errors only surface at close, so check it too
    close($query_fh)
        or $self->throw("Could not close query file. Path:\n".$outfile_path);

    $self->files_to_delete($outfile_path);
    $self->get_biotype($biotypes_hash);

    return($outfile_path);
}
# Writes the sequence of $slice as a single-entry fasta file (named after
# the first accession) in the 'query_seq_dir' directory, flags it for
# deletion and returns its path.
sub output_db_file {
    my ($self,$slice,$accession_array) = @_;

    my $output_dir = $self->param('query_seq_dir');
    # Note as each accession will occur in only one file, there should be no problem using the first one
    my $outfile_name = "exonerate_db_".${$accession_array}[0].".".$$.".fasta";
    my $outfile_path = $output_dir."/".$outfile_name;

    my $header = ">".$slice->name();
    my $seq = $slice->seq;

    # Lexical filehandle, 3-arg open and error checks (the old bareword
    # open/close were unchecked)
    open(my $db_fh, '>', $outfile_path)
        or $self->throw("Could not open db file for writing. Path:\n".$outfile_path);
    say $db_fh $header;
    say $db_fh $seq;
    close($db_fh)
        or $self->throw("Could not close db file. Path:\n".$outfile_path);

    $self->files_to_delete($outfile_path);

    return($outfile_path);
}
# Getter/setter for the accession-to-biotype hash ('_biotype_hash' param).
sub get_biotype {
    my ($self,$biotype_hash) = @_;
    $self->param('_biotype_hash',$biotype_hash) if $biotype_hash;
    return($self->param('_biotype_hash'));
}
# Accumulating getter/setter for temporary file paths to remove in
# write_output; the backing '_files_to_delete' param is lazily initialised
# to an empty array ref on first access.
sub files_to_delete {
    my ($self,$val) = @_;
    $self->param('_files_to_delete',[]) unless $self->param('_files_to_delete');
    push(@{$self->param('_files_to_delete')}, $val) if $val;
    return($self->param('_files_to_delete'));
}
# Getter/setter for the query peptide sequence, kept directly on the
# object hash (not a pipeline param).
sub peptide_seq {
    my ($self, $value) = @_;
    $self->{_peptide_seq} = $value if $value;
    return $self->{_peptide_seq};
}
# Getter/setter for the coverage/percent-id recalculation flag, kept
# directly on the object hash (not a pipeline param).
sub calculate_coverage_and_pid {
    my ($self, $value) = @_;
    $self->{_calculate_coverage_and_pid} = $value if $value;
    return $self->{_calculate_coverage_and_pid};
}
# Re-aligns the original query peptide against the transcript's translation
# using muscle, derives coverage and percent identity from the pairwise
# alignment, and stamps both values onto every transcript- and exon-level
# supporting feature of $transcript.
sub realign_translation {
    my ($self,$transcript) = @_;

    my $query_seq = $self->peptide_seq;
    my $translation = $transcript->translate->seq();

    my $align_input_file = "/tmp/exonerate_align_".$$.".fa";
    my $align_output_file = "/tmp/exonerate_align_".$$.".aln";

    # Lexical filehandles with 3-arg, checked opens (the old bareword
    # opens were unchecked)
    open(my $align_in_fh, '>', $align_input_file)
        or $self->throw("Could not open alignment input file for writing: ".$align_input_file);
    say $align_in_fh ">query";
    say $align_in_fh $query_seq;
    say $align_in_fh ">target";
    say $align_in_fh $translation;
    close($align_in_fh)
        or $self->throw("Could not close alignment input file: ".$align_input_file);

    my $align_program_path = 'muscle';
    my $cmd = $align_program_path." -in ".$align_input_file." -out ".$align_output_file;
    my $result = system($cmd);
    if($result) {
        $self->throw("Got a non-zero exit code from alignment. Commandline used:\n".$cmd);
    }

    # Slurp the alignment output
    my $file = "";
    open(my $align_out_fh, '<', $align_output_file)
        or $self->throw("Could not open alignment output file: ".$align_output_file);
    while(<$align_out_fh>) {
        $file .= $_;
    }
    close($align_out_fh)
        or $self->throw("Could not close alignment output file: ".$align_output_file);

    # Capture the two aligned sequences (query first, then target)
    unless($file =~ /\>.+\n(([^>]+\n)+)\>.+\n(([^>]+\n)+)/) {
        $self->throw("Could not parse the alignment file for the alignment sequences. Alignment file: ".$align_output_file);
    }
    my $aligned_query_seq = $1;
    my $aligned_target_seq = $3;
    $aligned_query_seq =~ s/\n//g;
    $aligned_target_seq =~ s/\n//g;

    say "Aligned query:\n".$aligned_query_seq;
    say "Aligned target:\n".$aligned_target_seq;

    # unlink instead of shelling out to `rm` (no shell, no extra processes)
    unlink $align_input_file;
    unlink $align_output_file;

    # Work out coverage: 100 minus the percentage of target gaps relative
    # to the ungapped query length
    my $coverage;
    my $temp = $aligned_target_seq;
    my $target_gap_count = $temp =~ s/\-//g;
    my $ungapped_query_seq = $aligned_query_seq;
    $ungapped_query_seq =~ s/\-//g;
    if(length($ungapped_query_seq) == 0) {
        $coverage = 0;
    } else {
        $coverage = 100 - (($target_gap_count/length($ungapped_query_seq)) * 100);
    }

    # Work out percent identity over positions aligned in both sequences
    my $match_count = 0;
    my $aligned_positions = 0;
    for(my $j=0; $j<length($aligned_query_seq); $j++) {
        my $char_query = substr($aligned_query_seq,$j,1);
        my $char_target = substr($aligned_target_seq,$j,1);
        if($char_query eq '-' || $char_target eq '-') {
            next;
        }
        if($char_query eq $char_target) {
            $match_count++;
        }
        $aligned_positions++;
    }
    unless($aligned_positions) {
        $self->throw("Pairwise alignment between the query sequence and the translation shows zero aligned positions. Something has gone wrong");
    }
    my $percent_id = ($match_count / $aligned_positions) * 100;

    # Get all exons and transcript supporting features
    my $transcript_supporting_features = $transcript->get_all_supporting_features();
    my $exons = $transcript->get_all_Exons();

    # Now clean these out
    $transcript->flush_Exons();
    $transcript->flush_supporting_features();

    # Loop through the TSFs and add the coverage and pid, then add back into transcript
    foreach my $transcript_supporting_feature (@{$transcript_supporting_features}) {
        $transcript_supporting_feature->hcoverage($coverage);
        $transcript_supporting_feature->percent_id($percent_id);
        $transcript->add_supporting_features($transcript_supporting_feature);
    }

    # Loop through exons, get supporting features for each, flush existing SF,
    # add coverage and pid, add back to exon, add exon to transcript
    foreach my $exon (@{$exons}) {
        my $exon_supporting_features = $exon->get_all_supporting_features();
        $exon->flush_supporting_features();
        foreach my $exon_supporting_feature (@{$exon_supporting_features}) {
            $exon_supporting_feature->hcoverage($coverage);
            $exon_supporting_feature->percent_id($percent_id);
            $exon->add_supporting_features($exon_supporting_feature);
        }
        $transcript->add_Exon($exon);
    }
}
1;
| james-monkeyshines/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/RunnableDB/HiveExonerate2Genes.pm | Perl | apache-2.0 | 39,172 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::rudder::restapi::plugin;

use strict;
use warnings;
use base qw(centreon::plugins::script_custom);

# Centreon plugin entry point for Rudder: registers the available check
# modes and the REST API custom mode.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '0.1';

    # Map each mode name to the module implementing it
    %{$self->{modes}} = (
        'discovery'                 => 'apps::rudder::restapi::mode::discovery',
        'global-compliance'         => 'apps::rudder::restapi::mode::globalcompliance',
        'list-nodes'                => 'apps::rudder::restapi::mode::listnodes',
        'list-rules'                => 'apps::rudder::restapi::mode::listrules',
        'node-compliance'           => 'apps::rudder::restapi::mode::nodecompliance',
        'nodes-overall-compliance'  => 'apps::rudder::restapi::mode::nodesoverallcompliance',
        'rule-compliance'           => 'apps::rudder::restapi::mode::rulecompliance',
        'statistics'                => 'apps::rudder::restapi::mode::statistics',
    );

    $self->{custom_modes}{api} = 'apps::rudder::restapi::custom::api';
    return $self;
}

1;
__END__
=head1 PLUGIN DESCRIPTION
Check Rudder.
=cut
| centreon/centreon-plugins | apps/rudder/restapi/plugin.pm | Perl | apache-2.0 | 1,906 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::OfflineUserDataJobService::CreateOfflineUserDataJobResponse;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Response message for CreateOfflineUserDataJob; its only field is the
# resource name of the created job.
sub new {
    my ($class, $args) = @_;

    my $self = {resourceName => $args->{resourceName}};

    # Delete the unassigned fields in this object for a more concise JSON payload
    remove_unassigned_fields($self, $args);

    return bless $self, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/OfflineUserDataJobService/CreateOfflineUserDataJobResponse.pm | Perl | apache-2.0 | 1,074 |
package Bio::EnsEMBL::Variation::Pipeline::ProteinFunction::RunWeka;
use strict;
use Bio::EnsEMBL::Hive::Utils qw(stringify);
use base ('Bio::EnsEMBL::Variation::Pipeline::BaseVariationProcess');
my $DEBUG = 0;
# Runs the PolyPhen-2 Weka classifier over the pre-computed feature file
# for one transcript and stores each per-position/per-amino-acid
# prediction (plus the compressed full result hash) in the variation db.
sub run {
    my $self = shift;

    my $transcript_stable_id = $self->param('transcript_stable_id');
    my $feature_file = $self->param('feature_file');
    # BUGFIX: this declaration previously ended with a comma, silently
    # chaining it onto the next statement via the comma operator; it is now
    # terminated with a semicolon as intended.
    my $pph_dir = $self->param('pph_dir');

    my $var_dba = $self->get_species_adaptor('variation');
    my $dbh = $var_dba->dbc->db_handle;
    my $aa = $var_dba->get_AttributeAdaptor;

    # One row per (position, alternate amino acid) prediction
    my $save_sth = $dbh->prepare_cached(qq{
        INSERT INTO polyphen_prediction (
            protein_position_id,
            amino_acid,
            prediction_attrib_id,
            probability
        )
        VALUES (?,?,?,?)
    }) or die "DB error: ".$dbh->errstr;

    # The remaining result fields are stored compressed as supplementary data
    my $save_extra_sth = $dbh->prepare_cached(qq{
        INSERT INTO polyphen_supplementary_data (
            polyphen_prediction_id,
            compressed_result_hash
        )
        VALUES (?,COMPRESS(?))
    }) or die "DB error: ".$dbh->errstr;

    if ($feature_file =~ /\.gz$/ && -e $feature_file) {
        system("gunzip -f $feature_file") == 0 or die "Failed to gunzip feature_file: $feature_file";
    }

    $feature_file =~ s/.gz$//;

    my ($output_dir) = $feature_file =~ /(.+)\/${transcript_stable_id}.features$/;

    my @to_delete;

    my $output_file = "${output_dir}/${transcript_stable_id}.out";
    my $error_file = "${output_dir}/${transcript_stable_id}.weka_stderr";

    push @to_delete, $feature_file, $output_file;

    my $cmd = "$pph_dir/bin/run_weka.pl $feature_file 1> $output_file 2> $error_file";

    if ($DEBUG) {
        $cmd = "cp $feature_file $output_file";
    }

    system($cmd) == 0 or die "Failed to run $cmd: $?";

    # Keep the stderr file around for inspection if anything was written to it
    if (-s $error_file) {
        warn "run_weka.pl STDERR output in $error_file\n";
    }
    else {
        push @to_delete, $error_file;
    }

    my $get_pos_sth = $dbh->prepare_cached(qq{
        SELECT pp.protein_position_id
        FROM protein_position pp, protein_info pi
        WHERE pp.protein_info_id = pi.protein_info_id
        AND pi.transcript_stable_id = ?
        AND pp.position = ?
    }) or die "DB error: ".$dbh->errstr;

    # Lexical filehandle with a 3-arg open instead of the old bareword handle
    open(my $result_fh, '<', $output_file) or die "Failed to open output file: $!";

    my @fields;

    while (<$result_fh>) {
        if (/^#/) {
            # Header line: column names, after stripping the '#' markers
            s/#//g;
            @fields = split /\s+/;
            next;
        }

        die "No header line in result file $output_file?" unless @fields;

        my @values = split /\t/;

        # trim whitespace
        map { $_ =~ s/^\s+//; $_ =~ s/\s+$// } @values;

        # parse the results into a hash
        my %results = map { $fields[$_] => $values[$_] } (0 .. @fields-1);

        # fetch and delete information we store in columns
        my $tran_ver = delete $results{o_acc};
        my $alt_aa = delete $results{o_aa2};
        my $prediction = delete $results{prediction};
        my $prob = delete $results{pph2_prob};
        my $position = delete $results{o_pos};

        # delete results we don't need
        for my $val (qw{
            o_snp_id
            o_pos
            o_aa1
            snp_id
            acc
            pos
            aa1
            aa2
            nt1
            nt2
            based_on
            effect
        }) {
            delete $results{$val};
        }

        # get rid of any fields with no results
        for my $key (keys %results) {
            delete $results{$key} unless length $results{$key};
        }

        # serialize the hash (if anything remains in it)
        my $result_string = keys %results ? stringify(\%results) : undef;

        # fetch the relevant protein_position_id
        my ($transcript_stable_id_from_file, $transcript_version) = split /\./, $tran_ver;

        die "Mismatching transcript stable ids in $feature_file"
            unless $transcript_stable_id_from_file eq $transcript_stable_id;

        $get_pos_sth->execute(
            $transcript_stable_id,
            $position
        );

        my ($pos_id) = $get_pos_sth->fetchrow_array;

        die "No protein_position for $transcript_stable_id pos $position" unless $pos_id;

        # store the results in the database
        my $pred_attrib_id = $aa->attrib_id_for_type_value('polyphen_prediction', $prediction);

        die "No attrib_id for polyphen prediction: '$prediction'!" unless defined $pred_attrib_id;

        $save_sth->execute(
            $pos_id,
            $alt_aa,
            $pred_attrib_id,
            $prob
        );

        my $pph_pred_id = $dbh->last_insert_id(undef, undef, undef, undef);

        $save_extra_sth->execute(
            $pph_pred_id,
            $result_string
        );
    }

    close($result_fh);

    # Re-compress the feature file and remove the intermediates
    system("gzip -f $feature_file") == 0 or warn "Failed to gzip $feature_file: $?";

    unlink @to_delete;
}
# No-op: all database writes are performed directly in run(), so there is
# nothing left to do here.
sub write_output {
    my $self = shift;
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-variation/modules/Bio/EnsEMBL/Variation/Pipeline/ProteinFunction/RunWeka.pm | Perl | apache-2.0 | 5,128 |
package VMOMI::HostParallelScsiTargetTransport;
use parent 'VMOMI::HostTargetTransport';

use strict;
use warnings;

# Ancestor type names, most-derived first.
our @class_ancestors = (
    'HostTargetTransport',
    'DynamicData',
);

# This type declares no members of its own beyond those inherited.
our @class_members = ( );

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    my @inherited = $class->SUPER::get_class_members();
    return (@inherited, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/HostParallelScsiTargetTransport.pm | Perl | apache-2.0 | 436 |
package VMOMI::TaskReasonUser;
use parent 'VMOMI::TaskReason';

use strict;
use warnings;

# Ancestor type names, most-derived first.
our @class_ancestors = (
    'TaskReason',
    'DynamicData',
);

# Single member: the name of the user that queued the task.
our @class_members = (
    ['userName', undef, 0, ],
);

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    my @inherited = $class->SUPER::get_class_members();
    return (@inherited, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/TaskReasonUser.pm | Perl | apache-2.0 | 432 |
package Sisimai::Reason::Delivered;
use feature ':5.10';
use strict;
use warnings;

# Constant accessors: the reason keyword and its human-readable summary.
sub text        { return 'delivered' }
sub description { return 'Email delivered successfully' }

# "delivered" is never detected by pattern matching; Sisimai assigns it
# directly from the Status field, so both detection hooks return undef.
sub match { return undef }
sub true  { return undef }

1;
__END__
=encoding utf-8
=head1 NAME
Sisimai::Reason::Delivered - Email delivered successfully
=head1 SYNOPSIS
use Sisimai::Reason::Delivered;
print Sisimai::Reason::Delivered->text; # delivered
=head1 DESCRIPTION
Sisimai::Reason::Delivered checks the email you sent is delivered successfully
or not by matching diagnostic messages with message patterns. Sisimai will set
"delivered" to the value of "reason" when Status: field in the bounce message
begins with "2" like following:
Final-Recipient: rfc822; kijitora@neko.nyaan.jp
Action: deliverable
Status: 2.1.5
Diagnostic-Code: SMTP; 250 2.1.5 OK
This class is called only from the Sisimai->reason method. This is NOT AN ERROR reason.
=head1 CLASS METHODS
=head2 C<B<text()>>
C<text()> returns string: C<delivered>.
print Sisimai::Reason::Delivered->text; # delivered
=head2 C<B<match(I<string>)>>
C<match()> always return undef
=head2 C<B<true(I<Sisimai::Data>)>>
C<true()> always return undef
=head1 AUTHOR
azumakuniyuki
=head1 COPYRIGHT
Copyright (C) 2016 azumakuniyuki, All rights reserved.
=head1 LICENSE
This software is distributed under The BSD 2-Clause License.
=cut
| sisimai/p5-Sisimai | lib/Sisimai/Reason/Delivered.pm | Perl | bsd-2-clause | 1,391 |
package OpenResty::Handler::View;
use strict;
use warnings;
#use Smart::Comments;
use OpenResty::Util;
use Params::Util qw( _HASH _STRING );
use OpenResty::Limits;
use OpenResty::RestyScript::View;
use OpenResty::Handler::Model;
use OpenResty::QuasiQuote::SQL;
use base 'OpenResty::Handler::Base';
__PACKAGE__->register('view');
# Map a numeric URL level (the last argument) to its handler name.
sub level2name {
    my @names = qw(view_list view view_param view_exec);
    return $names[ $_[-1] ];
}
# Create a new view from the POSTed schema.  The view name comes from
# the URL unless the URL component is '~', in which case it is taken
# from the request body.
sub POST_view {
    my ($self, $openresty, $bits) = @_;

    my $data = _HASH($openresty->{_req_data}) or
        die "The view schema must be a HASH.\n";

    my $view = $bits->[1];
    $view = $data->{name} if $view eq '~';

    # A name in the body that disagrees with the URL is ignored (with a
    # warning); the URL wins.
    my $posted_name = delete $data->{name};
    if ($posted_name and $posted_name ne $view) {
        $openresty->warning("name \"$posted_name\" in POST content ignored.");
    }

    $data->{name} = $view;
    return $self->new_view($openresty, $data);
}
# Return [{name => ..., description => ...}, ...] for every saved view,
# ordered by creation order (id).  The [:sql| ... |] construct is an
# OpenResty::QuasiQuote::SQL quasiquote expanded at compile time.
sub get_views {
    my ($self, $openresty, $params) = @_;
    my $sql = [:sql|
        select name, description
        from _views
        order by id |];
    return $openresty->select($sql, { use_hash => 1 });
}

# Return a flat arrayref of all view names (select() returns an
# arrayref of row arrayrefs, which is flattened here).
sub get_view_names {
    my ($self, $openresty) = @_;
    my $sql = [:sql|
        select name
        from _views |];
    my $res = $openresty->select($sql);
    ### $res
    if ($res && ref $res && ref $res->[0]) {
        @$res = map { @$_ } @$res;
    }
    $res;
}
# GET /=/view : list all views, decorating each row with its URL.
sub GET_view_list {
    my ($self, $openresty, $bits) = @_;
    my $views = $self->get_views($openresty) || [];
    for my $row (@$views) {
        $row->{src} = "/=/view/$row->{name}";
    }
    return $views;
}
# GET /=/view/NAME : fetch one view's name, definition and description.
# The wildcard name '~' lists all views instead.
sub GET_view {
    my ($self, $openresty, $bits) = @_;
    my $view = $bits->[1];
    if ($view eq '~') {
        return $self->get_views($openresty);
    }
    if (!$openresty->has_view($view)) {
        die "View \"$view\" not found.\n";
    }
    my $sql = [:sql|
        select name, definition, description
        from _views
        where name = $view |];
    return $openresty->select($sql, {use_hash => 1})->[0];
}
# PUT /=/view/NAME : partially update a view's name, definition and/or
# description.  Builds an UPDATE statement from whichever keys are
# present in the request body; unknown keys are an error.
sub PUT_view {
    my ($self, $openresty, $bits) = @_;
    my $user = $openresty->current_user;
    my $view = $bits->[1];
    my $data = _HASH($openresty->{_req_data}) or
        die "column spec must be a non-empty HASH.\n";
    ### $view
    ### $data
    die "View \"$view\" not found.\n" unless $openresty->has_view($view);
    my $update = OpenResty::SQL::Update->new('_views');
    $update->where(name => Q($view));
    my $new_name = delete $data->{name};
    if (defined $new_name) {
        _IDENT($new_name) or
            die "Bad view name: ", $OpenResty::Dumper->($new_name), "\n";
        # renaming invalidates the cached existence flag
        $OpenResty::Cache->remove_has_view($user, $view);
        $update->set( name => Q($new_name) );
    }
    my $new_def = delete $data->{definition};
    if (defined $new_def) {
        _STRING($new_def) or
            die "Bad view definition: ", $OpenResty::Dumper->($new_def), "\n";
        # validate the new definition by parsing it before accepting it
        my $restyscript = OpenResty::RestyScript::View->new;
        my $res;
        eval {
            $res = $restyscript->parse(
                $new_def,
                { quote => \&Q, quote_ident => \&QI }
            );
        };
        if ($@) { die "minisql: $@\n"; }
        # every model referenced by the definition must already exist
        my @models = @{ $res->{models} };
        foreach my $model (@models){
            next if $model =~ /^\s*$/;
            if (!$openresty->has_model($model)) {
                die "Model \"$model\" not found.\n";
            }
        }
        $OpenResty::Cache->remove_view_def($user, $view);
        $update->set(definition => Q($new_def));
    }
    my $new_desc = delete $data->{description};
    if (defined $new_desc) {
        _STRING($new_desc) or die "Bad view description: ", $OpenResty::Dumper->($new_desc), "\n";
        $update->set(description => Q($new_desc));
    }
    ### Update SQL: "$update"
    # anything left over in the body is an unrecognized key
    if (%$data) {
        die "Unknown keys in POST data: ", join(' ', keys %$data), "\n";
    }
    my $retval = $openresty->do("$update") + 0;
    return { success => $retval >= 0 ? 1 : 0 };
}
# Execute a stored view: fetch its (cached) minisql definition, bind
# URL query parameters as view variables, compile it to SQL and run it
# read-only.  $bits->[2]/$bits->[3] optionally carry one fixed
# parameter name/value embedded in the URL path ('~' means absent).
sub exec_view {
    my ($self, $openresty, $view, $bits, $cgi) = @_;
    my $user = $openresty->current_user;
    my $select = OpenResty::RestyScript::View->new;
    # definitions are cached per user; fall back to the database on miss
    my $view_def = $OpenResty::Cache->get_view_def($user, $view);
    if (!$view_def) {
        my $sql = "select definition from _views where name = " . Q($view);
        ### laser exec_view: "$sql"
        $view_def = $openresty->select($sql)->[0][0];
        $OpenResty::Cache->set_view_def($user, $view, $view_def);
    }
    my $fix_var = $bits->[2];
    _IDENT($fix_var) or $fix_var eq '~' or die "Bad parameter name: ", $OpenResty::Dumper->($fix_var), "\n";
    my $fix_var_value = $bits->[3];
    my $exists;  # NOTE(review): unused variable
    # collect bindable variables from the query string (skip _-prefixed
    # control parameters)
    my %vars;
    foreach my $var ($openresty->url_param) {
        $vars{$var} = $openresty->url_param($var) unless $var =~ /^_/;
    }
    if ($fix_var ne '~' and $fix_var_value ne '~') {
        $vars{$fix_var} = $fix_var_value;
    }
    my $role = $openresty->get_role;
    # yup...this part is hacky...we'll remove it once we have views running on the Haskell compiler...
    $view_def =~ s/\$_ACCOUNT\b/Q($user)/seg;
    $view_def =~ s/\$_ROLE\b/Q($role)/seg;
    #warn $view_def;
    my $res;
    eval {
        $res = $select->parse(
            $view_def,
            { quote => \&Q,
              quote_ident => \&QI,
              vars => \%vars },
        );
    };
    if ($@) {
        die "minisql: $@\n";
    }
    # refuse to run if any view parameter was left unbound
    my @unbound = @{ $res->{unbound} };
    if (@unbound) {
        die "Parameters required: @unbound\n";
    }
    #warn "view SQL: ", $res->{sql}, "\n";
    return $openresty->select($res->{sql}, { use_hash => 1, read_only => 1 });
}
# GET /=/view/NAME/PARAM/VALUE : run a stored view.
sub GET_view_exec {
    my ($self, $openresty, $bits) = @_;
    # NOTE(review): $user is unused here; call kept in case current_user
    # has side effects -- confirm before removing.
    my $user = $openresty->current_user;
    my $view = $bits->[1];
    if (!$openresty->has_view($view)) {
        die "View \"$view\" not found.\n";
    }
    return $self->exec_view($openresty, $view, $bits, $openresty->{_cgi});
}
# Total number of views stored for the current account.
sub view_count {
    my ($self, $openresty) = @_;
    my $rows = $openresty->select("select count(*) from _views");
    return $rows->[0][0];
}
# Create a new view record.  Validates the quota, the name, the minisql
# definition (by parsing it) and the models it references, then inserts
# the row.  Dies with a user-facing message on any validation failure.
sub new_view {
    my ($self, $openresty, $data) = @_;
    # enforce the per-account view quota unless the account is unlimited
    if (!$openresty->is_unlimited) {
        my $nviews = $self->view_count($openresty);
        if ($nviews >= $VIEW_LIMIT) {
            die "Exceeded view count limit $VIEW_LIMIT.\n";
        }
    }
    my $res;
    my $name = delete $data->{name} or
        die "No 'name' specified.\n";
    _IDENT($name) or die "Bad view name: ", $OpenResty::Dumper->($name), "\n";
    if ($openresty->has_view($name)) {
        die "View \"$name\" already exists.\n";
    }
    my $minisql = delete $data->{definition};
    if (!defined $minisql) {
        die "No 'definition' specified.\n";
    }
    _STRING($minisql) or die "Bad definition: ", $OpenResty::Dumper->($minisql), "\n";
    my $desc = delete $data->{description};
    if (defined $desc) {
        _STRING($desc) or die "View description must be a string.\n";
    }
    # anything left over in the body is an unrecognized key
    if (%$data) {
        die "Unknown keys: ", join(" ", keys %$data), "\n";
    }
    # validate the definition by compiling it
    my $select = OpenResty::RestyScript::View->new;
    eval {
        $res = $select->parse(
            $minisql,
            { quote => \&Q, quote_ident => \&QI }
        );
    };
    if ($@) {
        die "minisql: $@\n";
    }
    #
    # check that every referenced model exists
    #
    my @models = @{ $res->{models} };
    foreach my $model (@models){
        next if $model =~ /^\s*$/;
        if (!$openresty->has_model($model)) {
            die "Model \"$model\" not found.\n";
        }
    }
    my $sql = [:sql|
        insert into _views (name, definition, description)
        values($name, $minisql, $desc) |];
    return { success => $openresty->do($sql) ? 1 : 0 };
}
# DELETE /=/view/NAME : remove one view ('~' removes them all).
sub DELETE_view {
    my ($self, $openresty, $bits) = @_;
    my $user = $openresty->current_user;
    my $view = $bits->[1];

    unless (_IDENT($view) or $view eq '~') {
        die "Bad view name: ", $OpenResty::Dumper->($view), "\n";
    }

    # wildcard form deletes every view
    return $self->DELETE_view_list($openresty) if $view eq '~';

    die "View \"$view\" not found.\n" unless $openresty->has_view($view);

    # drop cached existence/definition entries before touching the table
    $OpenResty::Cache->remove_has_view($user, $view);
    $OpenResty::Cache->remove_view_def($user, $view);

    my $delete_sql = "delete from _views where name = " . Q($view);
    my $ok = $openresty->do($delete_sql) >= 0 ? 1 : 0;
    return { success => $ok };
}
# Remove every view for the current account, invalidating the cache
# entry of each one before truncating the table.
sub DELETE_view_list {
    my ($self, $openresty, $bits) = @_;
    my $user = $openresty->current_user;

    my $names = $self->get_view_names($openresty);
    for my $name (@$names) {
        #warn "View $name...\n";
        $OpenResty::Cache->remove_has_view($user, $name);
        $OpenResty::Cache->remove_view_def($user, $name);
    }

    my $ok = $openresty->do("truncate _views;") >= 0 ? 1 : 0;
    return { success => $ok };
}
1;
__END__
=head1 NAME
OpenResty::Handler::View - The view handler for OpenResty
=head1 SYNOPSIS
=head1 DESCRIPTION
This OpenResty handler class implements the View API, i.e., the C</=/view/*> stuff.
=head1 METHODS
=head1 AUTHORS
Laser Henry (laser) C<< <laserhenry at gmail dot com> >>,
Yichun Zhang (agentzh) C<< <agentzh@gmail.com> >>
=head1 SEE ALSO
L<OpenResty::Handler::Model>, L<OpenResty::Handler::Role>, L<OpenResty::Handler::Action>, L<OpenResty::Handler::Feed>, L<OpenResty::Handler::Version>, L<OpenResty::Handler::Captcha>, L<OpenResty::Handler::Login>, L<OpenResty>.
| agentzh/old-openresty | lib/OpenResty/Handler/View.pm | Perl | bsd-3-clause | 9,390 |
package App::Wubot::Util::Tail;
use Moose;
our $VERSION = '0.3.4'; # VERSION
use Fcntl qw( SEEK_END SEEK_CUR SEEK_SET O_NONBLOCK O_RDONLY );
use App::Wubot::Logger;
=head1 NAME
App::Wubot::Util::Tail - follow the tail of a growing file
=head1 VERSION
version 0.3.4
=head1 SYNOPSIS
use App::Wubot::Util::Tail;
# for a complete example, see App::Wubot::Plugin::FileTail
=head1 DESCRIPTION
This class helps build plugins that need to monitor a log file that is
being continuously updated, and execute a bit of code for each new
line.
Once initialized, it holds the filehandle open while wubot is running.
The position in the file can be cached using the standard wubot
caching mechanism.
Plugins that use this library can call get_lines() in the check()
method to process all lines that showed up in the file since the last
time check() was called. This will execute a callback for each new
line. In addition, a callback can be defined to run if the filehandle
was reset (i.e. the filehandle was reopened or the file was
truncated).
=cut
# Path of the file to follow (required).
has 'path' => ( is => 'rw',
                isa => 'Str',
                required => 1,
            );

# Invoked with each complete new line read from the file.
has 'callback' => ( is => 'rw',
                    isa => 'CodeRef',
                );

# Invoked with an explanatory message when the path is unreadable or
# the file was truncated/renamed.
has 'reset_callback' => ( is => 'rw',
                          isa => 'CodeRef',
                      );

# Lazily opened handle on 'path' (see get_fh).
has 'tail_fh' => ( is => 'rw',
                   lazy => 1,
                   default => sub {
                       return $_[0]->get_fh();
                   },
               );

# Epoch time of the last successful read.
has 'lastread' => ( is => 'rw',
                    isa => 'Num',
                );

# Check for truncation/rename every N calls to get_lines().
has 'refresh' => ( is => 'rw',
                   isa => 'Num',
                   default => sub {
                       # default behavior is to recheck if file was
                       # renamed or truncated every time we go to
                       # check and don't find any new lines.
                       return 1;
                   }
               );

# Number of get_lines() invocations so far (used with 'refresh').
has 'count' => ( is => 'rw',
                 isa => 'Num',
                 default => 0,
             );

# Partial (newline-less) tail data carried over between reads.
has 'leftover' => ( is => 'rw',
                    isa => 'Str',
                    default => '',
                );

# Byte offset of the read position; undef until the first read.
has 'position' => ( is => 'rw',
                    default => undef,
                );
=head1 SUBROUTINES/METHODS
=over 8
=item $obj->get_lines();
Look for new lines in the file, and run the callback on each.
If no new lines are found in the file, then the filehandle is checked
to see if the file was truncated or the filehandle was closed and then
a new one was re-opened. In either case, the reset_callback is
executed and is passed the appropriate text:
filehandle was truncated: {$path}
file was renamed: {$path}
=cut
# Check the followed file for new lines, firing the line callback for
# each one.  Returns the number of lines processed (0 if none) or undef
# when the path is unreadable.  Every 'refresh' calls with no new data,
# also checks for truncation or rename; either fires reset_callback and
# reopens the file.
sub get_lines {
    my ( $self ) = @_;

    $self->count( $self->count + 1 );

    my $path = $self->path;

    unless ( -r $path ) {
        $self->reset_callback->( "path not readable: $path" );
        return;
    }

    my $fh = $self->tail_fh;

    my $mtime = ( stat $path )[9];

    # first, try to pick up any newly appended lines
    if ( my $lines = $self->_get_lines_nonblock( $fh ) ) {
        return $lines;
    }

    # only re-check for truncation/rename every 'refresh' calls
    return 0 unless $self->count % $self->refresh == 0;

    my $end_pos = sysseek( $fh, 0, SEEK_END );

    # 'position' is undef until the first successful read; guard so the
    # numeric comparison does not warn
    my $was_truncated
        = ( defined $self->position && $end_pos < $self->position ) ? 1 : 0;
    my $was_renamed = $self->lastread && $mtime > $self->lastread ? 1 : 0;

    if ( $was_truncated || $was_renamed ) {

        if ( $was_truncated ) {
            $self->reset_callback->( "file was truncated: $path" );
        }
        else {
            $self->reset_callback->( "file was renamed: $path" );
        }

        $self->position( 0 );
        $self->tail_fh( $self->get_fh() );
        $fh = $self->tail_fh;
        return $self->_get_lines_nonblock( $fh );
    }

    # file was not truncated: seek back to where we left off.
    # BUGFIX: sysseek takes (FH, POSITION, WHENCE); the original call
    # sysseek($fh, 0, $self->position) passed the saved offset as
    # WHENCE, which is not a valid whence value.
    sysseek( $fh, $self->position, SEEK_SET ) if defined $self->position;

    return 0;
}
=item $obj->get_fh()
Use sysopen to open the filehandle in non-blocking read-only mode. If
a position was defined on the object, seeks to that position.
=cut
# Open 'path' non-blocking and read-only, resuming at the saved offset
# when one exists, otherwise starting at end-of-file.  Returns the
# lexical filehandle; dies if the file cannot be opened.
sub get_fh {
    my ( $self ) = @_;

    my $path = $self->path;

    sysopen( my $handle, $path, O_NONBLOCK | O_RDONLY )
        or die "can't open $path: $!";

    if ( defined( my $offset = $self->position ) ) {
        sysseek( $handle, $offset, SEEK_SET );
    }
    else {
        sysseek( $handle, 0, SEEK_END );
    }

    return $handle;
}
=item $obj->_get_lines_nonblock( $fh )
Private method, code adapted from:
http://www.perlmonks.org/?node_id=55241
=cut
# Private: non-blocking read of up to 1MB from $fh, splitting the data
# into lines and firing the line callback for each complete, non-empty
# line.  Updates 'position', 'lastread' and 'leftover'.  Returns the
# number of lines processed, or nothing when no data was available.
# Code adapted from: http://www.perlmonks.org/?node_id=55241
sub _get_lines_nonblock {
    my ( $self, $fh ) = @_;

    my $timeout = 0;
    my $rfd = '';
    vec($rfd,fileno($fh),1) = 1;

    return unless select($rfd, undef, undef, $timeout)>=0;

    # I'm not sure the following is necessary?
    return unless vec($rfd,fileno($fh),1);

    my $buf = '';
    my $n = sysread($fh,$buf,1024*1024);

    # record the current offset so get_lines() can detect truncation
    $self->position( sysseek( $fh, 0, SEEK_CUR) );

    # No new lines found
    return unless $n;

    $self->lastread( time );

    # Prepend the last unfinished line
    $buf = $self->leftover . $buf;

    # And save any newly unfinished lines
    $self->leftover( (substr($buf,-1) !~ /[\r\n]/ && $buf =~ s/([^\r\n]*)$//) ? $1 : '' );

    return unless $buf;

    my $count = 0;
    for my $line ( split( /\n/, $buf ) ) {
        chomp $line;
        next unless $line;    # NOTE: empty lines are silently skipped
        $self->callback->( $line );
        $count++;
    }

    return $count;
}
__PACKAGE__->meta->make_immutable;
1;
__END__
=back
=head1 SIMILAR MODULES
I looked at a lot of other similar modules, but ended up having to
roll my own due some some specific requirements of wubot.
L<File::Tail> - I was unable to tweak the frequency at which this
module checks for updates or filehandle resets to work the way I
wanted. I wanted to do this reliably every time the check() method
was executed.
L<POE::Wheel::FollowTail> - I have used this module in the past and
love it. While old versions of wubot were based on POE, the current
version of wubot uses AnyEvent.
L<File::Tail> - this module has great mechanisms for detecting if the
file was replaced or the file was truncated, but unfortunately it does
not pass that information on to programs that use the module.
=begin Pod::Coverage
SEEK_END
SEEK_CUR
SEEK_SET
O_NONBLOCK
O_RDONLY
=end Pod::Coverage
| gitpan/wubot | lib/App/Wubot/Util/Tail.pm | Perl | bsd-3-clause | 6,483 |
#!/usr/bin/perl
# original: http://www.angelfire.com/darkside/wbz2jpg/wbz2jpg.c
use strict;
use warnings;
use bytes;
use Getopt::Long qw/:config pass_through/;
# Command-line options:
#   --input/--infile   single .wb? file to decode
#   --output/--outfile destination .jpg (or base name for collections)
#   --find DIR         batch mode: recurse DIR converting every .wb0/.wb1/.wbc
GetOptions(
    'input|infile=s'   => \(my $infile = ''),
    'output|outfile=s' => \(my $outfile = ''),
    'find=s'           => \(my $find_dir = ''),
) or die 'options';

use File::Find;

$infile ||= shift;
die 'options' if @ARGV;

# Batch mode: walk the tree and re-invoke this script once per file,
# skipping files that already have a converted .jpg next to them.
if ($find_dir) {
    find {
        wanted => sub {
            return unless /\.wb[01c]$/;
            my $in = $File::Find::name;
            # skip when "$in.jpg" or any numbered "$in.<i>.jpg" exists
            return if grep -f,
                "$in.jpg",
                map sprintf("%s.%0*d.jpg", $in, $_, 0),
                1..4;
            print "$in\n";
            my $ret = system { $^X } $^X, $0,
                '--infile', $in,
                '--outfile', "$in.jpg";
            $ret and die "$in\n";
        },
        preprocess => sub { sort @_ },
    }, $find_dir;
    exit;
}
# Magic byte signatures used by the Webshots file formats.
my $COLLECTION = "\xab\x16\xfa\x95";    # multi-image .wbc container header
my $NORMAL_IMG = "\xe2\xcd\x71\xf0";    # per-image record inside a container
my $JFIF_MARKER= "\xff\xd8\xff\xe0";    # plain JPEG/JFIF start-of-image

# Slurp the whole input file (or stdin when no --infile) into $_.
{
    undef local $/;
    local @ARGV = grep $_, $infile;
    $_ = <>;
}
my @collection;

# Debug helper: pipe bytes to an external "myodc" hex dumper starting
# at offset $f.  NOTE(review): the early "return" makes this a no-op --
# debugging code left disabled.
sub myodf {
    my $f = shift;
    return;
    open my $od, "| myodc -I -f $f" or die "|myodc: $!";
    print $od @_;
    close $od;
}
sub myod { myodf 0, @_; }

# Split the slurped input into one or more raw image blobs.
{
    if (/^WWBB(\d)\1\1\1/) {
        # single-image .wb0/.wb1 file: the whole buffer is one image
        @collection = ($_);
    } elsif (/^\Q$COLLECTION\E/) {
        # .wbc container: walk the chain of image records
        open my $in, '<', \$_ or die "<ref: $!";
        myod(substr $_, 0, 16);
        # byte 4..7: offset of the first image record
        my $off = unpack 'I', substr $_, 4, 4;
        my $total = length;
        while ($off < $total) {
            my $hdr_info = substr $_, $off, 12;
            my $type = substr $hdr_info, 0, 4;
            my ($hdr_len, $len) = unpack 'II', substr $hdr_info, 4;
            myod($hdr_info);
            myodf($off, substr $_, $off, 64);
            # sanity checks: record type and header bounds
            my @errors;
            push @errors, ($type ne $NORMAL_IMG) ? 1 : 0;
            push @errors, ($off+$hdr_len > $total) ? 1 : 0;
            if (grep $_, @errors) {
                # record which check failed next to the input file
                my $errfile = $infile . '.err';
                open my $err, '>', $errfile or die ">$errfile: $!";
                print $err "@errors\n";
                close $err;
                last;
            }
            my $img_data = substr $_, $off+$hdr_len, $len;
            myodf($off+$hdr_len, substr $img_data, 0, 16);
            push @collection, $img_data;
            $off += $len;
        }
        warn "FOUND ", 0+@collection, " imgs in $infile\n";
    }
}

die "Couldn't find photos\n" unless @collection;

# Collections need a base output name to derive numbered filenames from.
if (@collection > 1) {
    $outfile ||= $infile . '.jpg' if $infile;
    die "Must specify --outfile=BASE for collections\n" unless $outfile;
    $outfile =~ s/$/\.jpg/ unless $outfile =~ /.\.\w\w\w\w?$/;
}
# XOR key for each WWBB format version tag.
my %decode_const = (
    '1111' => 0xf2,
    '0000' => 0xa4,
);

# Decode one WWBB-obfuscated image back to JPEG bytes.  Data that
# already starts with the JFIF marker is returned untouched.  The first
# 100 bytes are recovered by XOR-combining them pairwise with the next
# 100 bytes; everything past byte 200 is passed through unchanged.
sub wb2jpeg {
    my ($data) = @_;

    return $data if $data =~ /^\Q$JFIF_MARKER\E/;

    $data =~ s/^WWBB(0000|1111)//
        or die "Bad WWBB version?\n";
    my $xor_key = $decode_const{$1};

    my @head   = split //, substr( $data, 0,   100 );
    my @second = split //, substr( $data, 100, 100 );
    my $tail   = substr( $data, 200 );

    my @decoded;
    for my $i ( 0 .. $#head ) {
        my $a = ord $head[$i];
        my $b = ord $second[$i];
        push @decoded, chr( ( $b ^ ( 255 - $a ) ) ^ $xor_key );
    }

    return join '', @decoded, @second, $tail;
}
# Write each decoded image; collections get zero-padded index suffixes.
my $N = @collection;
my $l = length $N;    # pad width = digits in the collection size
for my $i (0..$#collection) {
    my $out = *STDOUT;    # default: write to stdout when no --outfile
    my $outfn = $outfile ? $outfile : "";
    # turn "base.jpg" into "base.<i>.jpg" when there are multiple images
    $outfn =~ s/(\.\w\w\w\w?)$/sprintf ".%0*d%s", $l, $i, $1/e if $N > 1;
    open $out, '>', $outfn if $outfn;
    print $out wb2jpeg $collection[$i];
}
__END__
perl -nwe 'BEGIN{undef$/} die "WWBB" unless s/^WWBB(0000|1111)//; $magic = ($1 eq "1111") ? 0xf2 : 0xa4; $jpg=substr$_,200; @a=split//,substr $_, 0, 100; @c=(@b=split//,substr $_,100,100); while (@a) { $a=ord shift@a; $b=ord shift@b; push @d, map chr, ($b ^ (255-$a)) ^ $magic; } print @d, @c, $jpg' $l | display jpg:-
| benizi/dotfiles | bin/webshots-to-jpeg.pl | Perl | mit | 3,354 |
/*************************************************************************
name: semOntology.pl (Chapter 6)
version: July 10, 1999
description: Predicates for working with the semantic ontology
authors: Patrick Blackburn & Johan Bos
*************************************************************************/
:- module(semOntology,[generateOntology/1,
generateOntology/2,
consistent/2]).
:- ensure_loaded(comsemOperators).
:- use_module(comsemPredicates,[memberList/2,
appendLists/3,
basicFormula/1,
selectFromList/3,
compose/3]).
:- use_module(englishLexicon,[lexicon/4]).
/*========================================================================
Generating Ontology in First-Order Formulas
========================================================================*/
%% generateOntology(-Formula)
%% Build one conjunction covering every isa/2 and disjoint/2 relation
%% derived from the lexicon.
generateOntology(Formula1 & Formula2):-
   isaRelations(Isa),
   disjointRelations(Disjoint),
   relations2fol(Isa,Formula1),
   relations2fol(Disjoint,Formula2).

%% generateOntology(+Input, -Formula)
%% Restrict the ontology to relations relevant to the predicate symbols
%% actually occurring in the input formula.
generateOntology(Input,Formula):-
   isaRelations(Isa),
   disjointRelations(Disjoint),
   selectRelations(Input,Isa-_,Disjoint-_,[]-Relations),
   relations2fol(Relations,Formula).
/*========================================================================
Generating isa/2 and disjoint/2 relations from english lexicon
========================================================================*/
%% isaRelations(-Isa)
%% Collect isa(Concept,SuperConcept) pairs from the noun entries of the
%% English lexicon (one pair per listed superconcept).
isaRelations(Isa):-
   setof(isa(Concept,SuperConcept),
         SuperConcepts^Words^(
              lexicon(noun,Concept,Words,SuperConcepts),
              memberList(SuperConcept,SuperConcepts)
         ),
         Isa).

%% disjointRelations(-Disjoint)
%% Collect disjoint(Concept1,Concept2) pairs from the adjective entries
%% of the English lexicon.
disjointRelations(Disjoint):-
   setof(disjoint(Concept1,Concept2),
         DisjointConcepts^Words^(
              lexicon(adj,Concept1,Words,DisjointConcepts),
              memberList(Concept2,DisjointConcepts)
         ),
         Disjoint).
/*========================================================================
Translating ISA-relations to first-order formulas
========================================================================*/
%% relations2fol(+Relations, -Formula)
%% Translate a list of isa/2 and disjoint/2 relations into a conjunction
%% of universally quantified implications; the empty list yields the
%% tautology p v ~p.
relations2fol([],p v ~p).

%% isa(S1,S2)      -->  forall X. S1(X) -> S2(X)
relations2fol([isa(S1,S2)],forall(X,F1 > F2)):-
   compose(F1,S1,[X]),
   compose(F2,S2,[X]).

%% disjoint(S1,S2) -->  forall X. S1(X) -> ~S2(X)
relations2fol([disjoint(S1,S2)],forall(X,F1 > ~ F2)):-
   compose(F1,S1,[X]),
   compose(F2,S2,[X]).

%% two or more relations: conjoin the translations recursively
relations2fol([A,B|L],Formula1 & Formula2):-
   relations2fol([A],Formula1),
   relations2fol([B|L],Formula2).
/*========================================================================
Select isa/disjoint relations from a Formula
========================================================================*/
%% selectRelations(+Formula, IsaIn-IsaOut, DisjIn-DisjOut, RelIn-RelOut)
%% Walk a formula; for every predicate symbol encountered, move the
%% isa/disjoint relations mentioning it from the input lists into the
%% accumulated result (difference-pair style, so each relation is
%% consumed at most once).
selectRelations(forall(_,F),I1-I2,D1-D2,R1-R2):-
   selectRelations(F,I1-I2,D1-D2,R1-R2).

selectRelations(exists(_,F),I1-I2,D1-D2,R1-R2):-
   selectRelations(F,I1-I2,D1-D2,R1-R2).

selectRelations(lambda(_,F),I1-I2,D1-D2,R1-R2):-
   selectRelations(F,I1-I2,D1-D2,R1-R2).

selectRelations(~ F,I1-I2,D1-D2,R1-R2):-
   selectRelations(F,I1-I2,D1-D2,R1-R2).

selectRelations(F1 & F2,I1-I3,D1-D3,R1-R3):-
   selectRelations(F1,I1-I2,D1-D2,R1-R2),
   selectRelations(F2,I2-I3,D2-D3,R2-R3).

selectRelations(F1 v F2,I1-I3,D1-D3,R1-R3):-
   selectRelations(F1,I1-I2,D1-D2,R1-R2),
   selectRelations(F2,I2-I3,D2-D3,R2-R3).

selectRelations(F1 > F2,I1-I3,D1-D3,R1-R3):-
   selectRelations(F1,I1-I2,D1-D2,R1-R2),
   selectRelations(F2,I2-I3,D2-D3,R2-R3).

%% Base case: an atomic formula.  Pull in any isa relation for the
%% symbol (recursing into the superconcept), otherwise any disjoint
%% relation, otherwise leave the accumulators unchanged.
selectRelations(Basic,I1-I2,D1-D2,R1-R2):-
   basicFormula(Basic),
   compose(Basic,Symbol,_),
   (
       selectFromList(isa(Symbol,Hyper),I1,I3), !,
       selectRelations(Hyper,I3-I4,D1-D3,R1-R3),
       selectRelations(Symbol,I4-I2,D3-D2,R3-R4),
       R2=[isa(Symbol,Hyper)|R4]
   ;
       selectFromList(disjoint(Symbol,Concept),D1,D2), !,
       I2=I1,
       R2=[disjoint(Symbol,Concept)|R1]
   ;
       I2=I1,
       D2=D1,
       R2=R1
   ).
/*========================================================================
Consistency Check
========================================================================*/
%% consistent(+X, +Y)
%% Succeeds when concepts X and Y are not (transitively) disjoint
%% according to the lexicon-derived ontology.
consistent(X,Y):-
   isaRelations(Isa),
   disjointRelations(Disjoint),
   \+ inconsistent(X,Y,Isa,Disjoint).

%% inconsistent(+X, +Y, +Isa, +Disjoint)
%% X and Y clash if they are directly disjoint (in either order) ...
inconsistent(X,Y,_,Disjoint):-
   memberList(disjoint(X,Y),Disjoint).
inconsistent(X,Y,_,Disjoint):-
   memberList(disjoint(Y,X),Disjoint).
%% ... or if some superconcept of either one clashes with the other.
inconsistent(X,Y,Isa,Disjoint):-
   memberList(isa(X,Z),Isa),
   inconsistent(Z,Y,Isa,Disjoint).
inconsistent(X,Y,Isa,Disjoint):-
   memberList(isa(Y,Z),Isa),
   inconsistent(X,Z,Isa,Disjoint).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/CURT/bb0/semOntology1.pl | Perl | mit | 4,328 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package Apache::TestConfig;
use strict;
use warnings FATAL => 'all';
use constant WIN32 => $^O eq 'MSWin32';
use constant OSX => $^O eq 'darwin';
use constant CYGWIN => $^O eq 'cygwin';
use constant NETWARE => $^O eq 'NetWare';
use constant SOLARIS => $^O eq 'solaris';
use constant AIX => $^O eq 'aix';
use constant WINFU => WIN32 || NETWARE;
use constant COLOR => ($ENV{APACHE_TEST_COLOR} && -t STDOUT) ? 1 : 0;
use constant DEFAULT_PORT => 8529;
use constant IS_MOD_PERL_2 =>
eval { require mod_perl2 } || 0;
use constant IS_MOD_PERL_2_BUILD => IS_MOD_PERL_2 &&
eval { require Apache2::Build && Apache2::Build::IS_MOD_PERL_BUILD() };
use constant IS_APACHE_TEST_BUILD =>
grep { -e "$_/lib/Apache/TestConfig.pm" }
qw(Apache-Test . .. ../Apache-Test);
use lib ();
use File::Copy ();
use File::Find qw(finddepth);
use File::Basename qw(dirname);
use File::Path ();
use File::Spec::Functions qw(catfile abs2rel splitdir canonpath
catdir file_name_is_absolute devnull);
use Cwd qw(fastcwd);
use Socket ();
use Symbol ();
use Apache::TestConfigPerl ();
use Apache::TestConfigParse ();
use Apache::TestTrace;
use Apache::TestServer ();
use Apache::TestRun ();
use vars qw(%Usage);
%Usage = (
top_dir => 'top-level directory (default is $PWD)',
t_dir => 'the t/ test directory (default is $top_dir/t)',
t_conf => 'the conf/ test directory (default is $t_dir/conf)',
t_logs => 'the logs/ test directory (default is $t_dir/logs)',
t_pid_file => 'location of the pid file (default is $t_logs/httpd.pid)',
t_conf_file => 'test httpd.conf file (default is $t_conf/httpd.conf)',
src_dir => 'source directory to look for mod_foos.so',
serverroot => 'ServerRoot (default is $t_dir)',
documentroot => 'DocumentRoot (default is $ServerRoot/htdocs',
port => 'Port [port_number|select] (default ' . DEFAULT_PORT . ')',
servername => 'ServerName (default is localhost)',
user => 'User to run test server as (default is $USER)',
group => 'Group to run test server as (default is $GROUP)',
bindir => 'Apache bin/ dir (default is apxs -q BINDIR)',
sbindir => 'Apache sbin/ dir (default is apxs -q SBINDIR)',
httpd => 'server to use for testing (default is $bindir/httpd)',
target => 'name of server binary (default is apxs -q TARGET)',
apxs => 'location of apxs (default is from Apache2::BuildConfig)',
startup_timeout => 'seconds to wait for the server to start (default is 60)',
httpd_conf => 'inherit config from this file (default is apxs derived)',
httpd_conf_extra=> 'inherit additional config from this file',
minclients => 'minimum number of concurrent clients (default is 1)',
maxclients => 'maximum number of concurrent clients (default is minclients+1)',
perlpod => 'location of perl pod documents (for testing downloads)',
proxyssl_url => 'url for testing ProxyPass / https (default is localhost)',
sslca => 'location of SSL CA (default is $t_conf/ssl/ca)',
sslcaorg => 'SSL CA organization to use for tests (default is asf)',
libmodperl => 'path to mod_perl\'s .so (full or relative to LIBEXECDIR)',
defines => 'values to add as -D defines (for example, "VAR1 VAR2")',
(map { $_ . '_module_name', "$_ module name"} qw(cgi ssl thread access auth php)),
);
# Lookup set of configuration options whose values are filesystem paths.
my %filepath_conf_opts;
$filepath_conf_opts{$_} = 1
    for qw(top_dir t_dir t_conf t_logs t_pid_file t_conf_file src_dir serverroot
           documentroot bindir sbindir httpd apxs httpd_conf httpd_conf_extra
           perlpod sslca libmodperl);

# True when the given option name denotes a filesystem path.
sub conf_opt_is_a_filepath {
    my $opt = shift;
    return $opt && exists $filepath_conf_opts{$opt};
}
# Print every known configuration option and its description, sorted
# by option name, to STDOUT.
sub usage {
    for my $opt (sort keys %Usage) {
        printf "   -%-18s %s\n", $opt, $Usage{$opt};
    }
}
# Split an argument list into options we recognize and everything else.
# $args is an arrayref of raw arguments; $wanted_args maps recognized
# option names to true.  Recognized keys (with optional '-'/'--' prefix)
# consume the following element as their value; all other elements pass
# through untouched.  Returns (\@passthrough, \%extracted).
sub filter_args {
    my ($args, $wanted_args) = @_;

    my @remaining = @$args;
    my (@passthrough, %extracted);

    # a leading reference (e.g. a config hashref) is passed through as-is
    if (ref $remaining[0]) {
        push @passthrough, shift @remaining;
    }

    while (@remaining) {
        my $key = shift @remaining;

        # optional - or -- prefix on recognized option names
        if (defined $key and $key =~ /^-?-?(.+)/ and exists $wanted_args->{$1}) {
            my $name = $1;
            die "key $name requires a matching value" unless @remaining;
            $extracted{$name} = shift @remaining;
        }
        else {
            push @passthrough, $key;
        }
    }

    return (\@passthrough, \%extracted);
}
# Environment variables that are propagated into generated Makefiles.
my %passenv = map { $_ => 1 } qw{
    APACHE_TEST_APXS
    APACHE_TEST_HTTPD
    APACHE_TEST_GROUP
    APACHE_TEST_USER
    APACHE_TEST_PORT
};

# Accessor for the pass-through environment variable set.
sub passenv {
    return \%passenv;
}

# Render the set as "VAR=$(VAR) ..." assignments for make.
sub passenv_makestr {
    return join ' ', map { "$_=\$($_)" } keys %passenv;
}
sub server { shift->{server} }
# Return the Apache2::Build config object when running against an
# httpd/mod_perl 2.x environment, or nothing otherwise.  Works both as
# an instance method and as a plain call (builds a throwaway server).
sub modperl_build_config {
    my $self = shift;

    my $server = ref $self ? $self->server : new_test_server();

    # we can't do this if we're using httpd 1.3.X
    # even if mod_perl2 is installed on the box
    # similarly, we shouldn't be loading mp2 if we're not
    # absolutely certain we're in a 2.X environment yet
    # (such as mod_perl's own build or runtime environment)
    if (($server->{rev} && $server->{rev} == 2) ||
        IS_MOD_PERL_2_BUILD || $ENV{MOD_PERL_API_VERSION}) {
        eval {
            require Apache2::Build;
        } or return;
        return Apache2::Build->build_config;
    }

    return;
}
# Build an Apache::TestServer, configured from $args when given,
# otherwise from this config object itself.
sub new_test_server {
    my ($self, $override) = @_;
    return Apache::TestServer->new($override || $self);
}
# setup httpd-independent components
# for httpd-specific call $self->httpd_config()
#
# Constructor.  Accepts either a single hashref of options or a flat
# key => value list.  Returns a blessed Apache::TestConfig object, or
# the thawed previously-saved config when -thaw was requested and a
# saved t/conf/apache_test_config.pm could be loaded.
sub new {
my $class = shift;
my $args;
$args = shift if $_[0] and ref $_[0];
$args = $args ? {%$args} : {@_}; #copy
#see Apache::TestMM::{filter_args,generate_script}
#we do this so 'perl Makefile.PL' can be passed options such as apxs
#without forcing regeneration of configuration and recompilation of c-modules
#as 't/TEST apxs /path/to/apache/bin/apxs' would do
while (my($key, $val) = each %Apache::TestConfig::Argv) {
$args->{$key} = $val;
}
my $top_dir = fastcwd;
$top_dir = pop_dir($top_dir, 't');
# untaint as we are going to use it a lot later on in -T sensitive
# operations (.e.g @INC)
$top_dir = $1 if $top_dir =~ /(.*)/;
# make sure that t/conf/apache_test_config.pm is found
# (unfortunately sometimes we get thrown into / by Apache so we
# can't just rely on $top_dir
lib->import($top_dir);
my $thaw = {};
#thaw current config
for (qw(conf t/conf)) {
last if eval {
require "$_/apache_test_config.pm";
$thaw = 'apache_test_config'->new;
delete $thaw->{save};
#incase class that generated the config was
#something else, which we can't be sure how to load
bless $thaw, 'Apache::TestConfig';
};
}
# -thaw mode: reuse the saved config as-is.  ref($thaw) ne 'HASH'
# means the eval above really loaded and re-blessed a saved config
# (otherwise $thaw is still the plain {} initialized above).
if ($args->{thaw} and ref($thaw) ne 'HASH') {
#dont generate any new config
$thaw->{vars}->{$_} = $args->{$_} for keys %$args;
$thaw->{server} = $thaw->new_test_server;
$thaw->add_inc;
return $thaw;
}
#regenerating config, so forget old
if ($args->{save}) {
for (qw(vhosts inherit_config modules inc cmodules)) {
delete $thaw->{$_} if exists $thaw->{$_};
}
}
# %$thaw is spliced in *before* the keys that must always start
# fresh (mpm, httpd_defines, vars, pre/postambles), so those
# override any stale thawed values
my $self = bless {
clean => {},
vhosts => {},
inherit_config => {},
modules => {},
inc => [],
%$thaw,
mpm => "",
httpd_defines => {},
vars => $args,
postamble => [],
preamble => [],
postamble_hooks => [],
preamble_hooks => [],
}, ref($class) || $class;
my $vars = $self->{vars}; #things that can be overridden
# 'save' and 'verbose' are object-level flags, not config vars:
# move them out of the vars hash
for (qw(save verbose)) {
next unless exists $args->{$_};
$self->{$_} = delete $args->{$_};
}
$vars->{top_dir} ||= $top_dir;
$self->add_inc;
#help to find libmodperl.so
unless ($vars->{src_dir}) {
my $src_dir = catfile $vars->{top_dir}, qw(.. src modules perl);
if (-d $src_dir) {
$vars->{src_dir} = $src_dir;
} else {
$src_dir = catfile $vars->{top_dir}, qw(src modules perl);
$vars->{src_dir} = $src_dir if -d $src_dir;
}
}
# defaults for every path/identity variable not given by the caller
$vars->{t_dir} ||= catfile $vars->{top_dir}, 't';
$vars->{serverroot} ||= $vars->{t_dir};
$vars->{documentroot} ||= catfile $vars->{serverroot}, 'htdocs';
$vars->{perlpod} ||= $self->find_in_inc('pods') ||
$self->find_in_inc('pod');
$vars->{perl} ||= $^X;
$vars->{t_conf} ||= catfile $vars->{serverroot}, 'conf';
$vars->{sslca} ||= catfile $vars->{t_conf}, 'ssl', 'ca';
$vars->{sslcaorg} ||= 'asf';
$vars->{t_logs} ||= catfile $vars->{serverroot}, 'logs';
$vars->{t_conf_file} ||= catfile $vars->{t_conf}, 'httpd.conf';
$vars->{t_pid_file} ||= catfile $vars->{t_logs}, 'httpd.pid';
if (WINFU) {
# normalize all paths to forward slashes on win32-like platforms
for (keys %$vars) {
$vars->{$_} =~ s|\\|\/|g if defined $vars->{$_};
}
}
$vars->{scheme} ||= 'http';
$vars->{servername} ||= $self->default_servername;
# note: plain assignment (not ||=) -- select_first_port() itself
# consults any caller-supplied $vars->{port}
$vars->{port} = $self->select_first_port;
$vars->{remote_addr} ||= $self->our_remote_addr;
$vars->{user} ||= $self->default_user;
$vars->{group} ||= $self->default_group;
$vars->{serveradmin} ||= $self->default_serveradmin;
$vars->{minclients} ||= 1;
$vars->{maxclients_preset} = $vars->{maxclients} || 0;
# if maxclients wasn't explicitly passed try to
# prevent 'server reached MaxClients setting' errors
$vars->{maxclients} ||= $vars->{minclients} + 1;
# if a preset maxclients valus is smaller than minclients,
# maxclients overrides minclients
if ($vars->{maxclients_preset} &&
$vars->{maxclients_preset} < $vars->{minclients}) {
$vars->{minclients} = $vars->{maxclients_preset};
}
# for threaded mpms MaxClients must be a multiple of
# ThreadsPerChild (i.e. maxclients % minclients == 0)
# so unless -maxclients was explicitly specified use a double of
# minclients
$vars->{maxclientsthreadedmpm} =
$vars->{maxclients_preset} || $vars->{minclients} * 2;
$vars->{proxy} ||= 'off';
$vars->{proxyssl_url} ||= '';
$vars->{defines} ||= '';
$self->{hostport} = $self->hostport;
$self->{server} = $self->new_test_server;
return $self;
}
# figure out where httpd is and run extra config hooks which require
# knowledge of where httpd is
#
# Returns $self.  Exits (or dies) when neither -httpd nor -apxs can
# be determined, since nothing downstream can work without them.
sub httpd_config {
my $self = shift;
$self->configure_apxs;
$self->configure_httpd;
my $vars = $self->{vars};
unless ($vars->{httpd} or $vars->{apxs}) {
# mod_perl 2.0 build (almost) always knows the right httpd
# location (and optionally apxs). if we get here we can't
# continue because the interactive config can't work with
# mod_perl 2.0 build (by design)
if (IS_MOD_PERL_2_BUILD){
my $mp2_build = $self->modperl_build_config();
# if mod_perl 2 was built against the httpd source it
# doesn't know where to find apxs/httpd, so in this case
# fall back to interactive config
unless ($mp2_build->{MP_APXS}) {
die "mod_perl 2 was built against Apache sources, we " .
"don't know where httpd/apxs executables are, therefore " .
"skipping the test suite execution"
}
# not sure what else could go wrong but we can't continue
# (reached when MP_APXS *is* recorded by the build and yet
# httpd/apxs still came out unset -- inconsistent build data)
die "something is wrong, mod_perl 2.0 build should have " .
"supplied all the needed information to run the tests. " .
"Please post lib/Apache/BuildConfig.pm along with the " .
"bug report";
}
$self->clean(1);
error "You must explicitly specify -httpd and/or -apxs options, " .
"or set \$ENV{APACHE_TEST_HTTPD} and \$ENV{APACHE_TEST_APXS}, " .
"or set your \$PATH to include the httpd and apxs binaries.";
Apache::TestRun::exit_perl(1);
}
else {
debug "Using httpd: $vars->{httpd}";
}
$self->inherit_config; #see TestConfigParse.pm
$self->configure_httpd_eapi; #must come after inherit_config
# pick concrete providers for the generic module roles; the first
# candidate present in the configured module list wins
$self->default_module(cgi => [qw(mod_cgi mod_cgid)]);
$self->default_module(thread => [qw(worker threaded)]);
$self->default_module(ssl => [qw(mod_ssl)]);
$self->default_module(access => [qw(mod_access mod_authz_host)]);
$self->default_module(auth => [qw(mod_auth mod_auth_basic)]);
$self->default_module(php => [qw(sapi_apache2 mod_php4 mod_php5)]);
$self->{server}->post_config;
return $self;
}
# Resolve a generic module role (e.g. 'ssl', 'cgi') to a concrete
# module: sets $vars->{<name>_module_name} to the first candidate
# found among the configured modules (falling back to the first
# candidate), and $vars->{<name>_module} to its "<module>.c" form.
sub default_module {
    my ($self, $name, $choices) = @_;

    my $vars     = $self->{vars};
    my $name_key = $name . '_module_name';

    unless ($vars->{$name_key}) {
        my $picked;
        for my $candidate (@$choices) {
            if ($self->{modules}->{"$candidate.c"}) {
                $picked = $candidate;
                last;
            }
        }
        $vars->{$name_key} = defined $picked ? $picked : $choices->[0];
    }

    $vars->{$name . '_module'} = $vars->{$name_key} . '.c'
}
# Locate apxs and, when found, fill in the standard installation
# directories (bindir, sbindir, target, conf_dir, httpd_conf) that
# apxs can report.  A no-op when apxs cannot be located.
sub configure_apxs {
    my $self = shift;

    $self->{APXS} = $self->default_apxs;
    return unless $self->{APXS};

    # native path separators for win32
    $self->{APXS} =~ s{/}{\\}g if WIN32;

    my $vars = $self->{vars};

    # BINDIR may legitimately be missing (ok_fail flag), the rest warn
    $vars->{bindir}   ||= $self->apxs('BINDIR', 1);
    $vars->{sbindir}  ||= $self->apxs('SBINDIR');
    $vars->{target}   ||= $self->apxs('TARGET');
    $vars->{conf_dir} ||= $self->apxs('SYSCONFDIR');

    if ($vars->{conf_dir}) {
        $vars->{httpd_conf} ||= catfile $vars->{conf_dir}, 'httpd.conf';
    }
}
# Determine the httpd executable to test against ($vars->{httpd}) and
# the installation base directory ($self->{httpd_basedir}), then
# register httpd's runtime droppings for cleanup.
sub configure_httpd {
my $self = shift;
my $vars = $self->{vars};
debug "configuring httpd";
$vars->{target} ||= (WIN32 ? 'Apache.EXE' : 'httpd');
unless ($vars->{httpd}) {
#sbindir should be bin/ with the default layout
#but its eaiser to workaround apxs than fix apxs
# try the apxs-reported dirs first, then fall back to the
# build-config/env/PATH search in default_httpd()
for my $dir (map { $vars->{$_} } qw(sbindir bindir)) {
next unless defined $dir;
my $httpd = catfile $dir, $vars->{target};
next unless -x $httpd;
$vars->{httpd} = $httpd;
last;
}
$vars->{httpd} ||= $self->default_httpd;
}
if ($vars->{httpd}) {
my @chunks = splitdir $vars->{httpd};
#handle both $prefix/bin/httpd and $prefix/Apache.exe
# strip up to two path components, stopping at the first
# candidate that has a bin/ subdir
for (1,2) {
pop @chunks;
last unless @chunks;
$self->{httpd_basedir} = catfile @chunks;
last if -d "$self->{httpd_basedir}/bin";
}
}
#cleanup httpd droppings
# register the semaphore file for cleanup only when it does not
# exist yet (i.e. it will be created by *our* server run);
# a pre-existing one is not ours to delete
my $sem = catfile $vars->{t_logs}, 'apache_runtime_status.sem';
unless (-e $sem) {
$self->clean_add_file($sem);
}
}
# If the server was built with EAPI (mod_ssl's Extended API) and
# defines EAPI_MM_CORE_PATH, make sure the directory that is supposed
# to hold the MM shared-memory file exists.
sub configure_httpd_eapi {
    my $self = shift;
    my $vars = $self->{vars};

    #deal with EAPI_MM_CORE_PATH if defined.
    my $mm_core_path = $self->{httpd_defines}->{EAPI_MM_CORE_PATH};
    return unless defined $mm_core_path;

    # strip the file component, keeping only the directory part
    my @parts = splitdir $mm_core_path;
    pop @parts;
    my $dir = catdir @parts;

    # a relative path is taken to be relative to the server root
    unless (file_name_is_absolute $dir) {
        $dir = catdir $vars->{serverroot}, $dir;
    }

    $self->gendir($dir);
}
# When '-proxy on' was requested, point $vars->{proxy} at the
# mod_proxy vhost's host:port and return it; returns undef otherwise.
sub configure_proxy {
    my $self = shift;
    my $vars = $self->{vars};

    return undef unless $vars->{proxy} =~ /^on$/i;

    #if we proxy to ourselves, must bump the maxclients
    unless ($vars->{maxclients_preset}) {
        $vars->{minclients}++;
        $vars->{maxclients}++;
    }

    $vars->{proxy} = $self->{vhosts}->{'mod_proxy'}->{hostport};
    return $vars->{proxy};
}
# adds the config to the head of the group instead of the tail
# XXX: would be even better to add to a different sub-group
# (e.g. preamble_first) of only those that want to be first and then,
# make sure that they are dumped to the config file first in the same
# group (e.g. preamble)
sub add_config_first {
    my ($self, $where, @args) = @_;
    my @entries = $self->massage_config_args(@args);
    unshift @{ $self->{$where} }, @entries;
}
# adds the config to the tail of the named group (the common case)
sub add_config_last {
    my ($self, $where, @args) = @_;
    my @entries = $self->massage_config_args(@args);
    push @{ $self->{$where} }, @entries;
}
# Turn the various add_config calling conventions into a config-file
# string.  Three shapes are accepted:
#   ($directive, $arg, $data) -- a <$directive $arg> container; $data
#       is a hashref (key/value lines), an arrayref (balanced
#       key => value pairs, kept in order) or a plain string
#   (\@lines)                 -- literal lines, joined as-is
#   ($directive, $arg)        -- a single-line directive; $arg may be
#       an arrayref of words
sub massage_config_args {
my $self = shift;
my($directive, $arg, $data) = @_;
my $args = "";
if ($data) {
$args = "<$directive $arg>\n";
if (ref($data) eq 'HASH') {
while (my($k,$v) = each %$data) {
$args .= " $k $v\n";
}
}
elsif (ref($data) eq 'ARRAY') {
# balanced (key=>val) list
# NOTE: integer division of pairs -- a trailing odd element
# is silently dropped
my $pairs = @$data / 2;
for my $i (0..($pairs-1)) {
$args .= sprintf " %s %s\n", $data->[$i*2], $data->[$i*2+1];
}
}
else {
# indent continuation lines; (?!\z) keeps the final newline bare
$data=~s/\n(?!\z)/\n /g;
$args .= " $data";
}
$args .= "</$directive>\n";
}
elsif (ref($directive) eq 'ARRAY') {
$args = join "\n", @$directive;
}
else {
$args = join " ", grep length($_), $directive,
(ref($arg) && (ref($arg) eq 'ARRAY') ? "@$arg" : $arg || "");
}
return $args;
}
# Convenience wrappers: queue config chunks onto the head/tail of the
# preamble/postamble groups, and register hooks (method names or code
# refs) to run at config-generation time.
sub postamble_first {
    my $self = shift;
    $self->add_config_first(postamble => @_);
}

sub postamble {
    my $self = shift;
    $self->add_config_last(postamble => @_);
}

sub preamble_first {
    my $self = shift;
    $self->add_config_first(preamble => @_);
}

sub preamble {
    my $self = shift;
    $self->add_config_last(preamble => @_);
}

sub postamble_register {
    my $self = shift;
    push @{ $self->{postamble_hooks} }, @_;
}

sub preamble_register {
    my $self = shift;
    push @{ $self->{preamble_hooks} }, @_;
}
# Run the hooks registered for $where ('preamble'/'postamble') --
# each is either a code ref or a method name -- then expand @token@
# variables in every queued config chunk and print it to $out.
# NOTE: replace() operates on $_, which aliases the stored chunk
# here, so the queued config is rewritten in place.
sub add_config_hooks_run {
my($self, $where, $out) = @_;
for (@{ $self->{"${where}_hooks"} }) {
if ((ref($_) and ref($_) eq 'CODE') or $self->can($_)) {
$self->$_();
}
else {
error "cannot run configure hook: `$_'";
}
}
for (@{ $self->{$where} }) {
$self->replace;
s/\n?$/\n/;
print $out "$_";
}
}
# run hooks + dump the queued chunks for each group (see
# add_config_hooks_run)
sub postamble_run {
    my $self = shift;
    $self->add_config_hooks_run(postamble => @_);
}

sub preamble_run {
    my $self = shift;
    $self->add_config_hooks_run(preamble => @_);
}
# Pick the group name the test server should run under: the
# APACHE_TEST_GROUP override, else the current effective group.
# Returns nothing on win32-like platforms.
sub default_group {
return if WINFU;
my $gid = $);
#use only first value if $) contains more than one
$gid =~ s/^(\d+).*$/$1/;
my $group = $ENV{APACHE_TEST_GROUP} || (getgrgid($gid) || "#$gid");
if ($group eq 'root') {
# similar to default_user, we want to avoid perms problems,
# when the server is started with group 'root'. When running
# under group root it may fail to create dirs and files,
# writable only by user
my $user = default_user();
# (getpwnam)[3] is the user's primary gid
my $gid = $user ? (getpwnam($user))[3] : '';
$group = (getgrgid($gid) || "#$gid") if $gid;
}
$group;
}
# Pick the user name the test server should run under: the
# APACHE_TEST_USER override, else the current effective user; when
# that is root, fall back to 'nobody' (httpd refuses to serve as
# root).  Returns nothing on win32-like platforms.
sub default_user {
    return if WINFU;

    my $euid = $>;
    my $user = $ENV{APACHE_TEST_USER} || (getpwuid($euid) || "#$euid");

    return $user unless $user eq 'root';

    my $fallback = (getpwnam('nobody'))[0];
    unless ($fallback) {
        die "cannot run tests as User root";
        #XXX: prompt for another username
    }
    return $fallback;
}
# default ServerAdmin address: <user>@<servername>
sub default_serveradmin {
    my $vars = shift->{vars};
    my $user = $vars->{user} || 'unknown';
    return join '@', $user, $vars->{servername};
}
# Locate apxs, in order of preference: the explicit -apxs option, the
# location recorded by the mod_perl build, the APACHE_TEST_APXS
# environment variable, and finally apxs/apxs2 found on the PATH.
sub default_apxs {
    my $self = shift;

    return $self->{vars}->{apxs} if $self->{vars}->{apxs};

    if (my $build_config = $self->modperl_build_config()) {
        return $build_config->{MP_APXS};
    }

    return $ENV{APACHE_TEST_APXS} if $ENV{APACHE_TEST_APXS};

    # look in PATH as a last resort
    for my $name (qw(apxs apxs2)) {
        my $found = which($name);
        return $found if $found;
    }

    return;
}
# Locate the httpd executable, in order of preference: the Apache
# prefix recorded by the mod_perl build (bin/ then sbin/), the
# APACHE_TEST_HTTPD environment variable, and finally
# httpd/httpd2/apache/apache2 found on the PATH.
sub default_httpd {
my $self = shift;
my $vars = $self->{vars};
if (my $build_config = $self->modperl_build_config()) {
if (my $p = $build_config->{MP_AP_PREFIX}) {
for my $bindir (qw(bin sbin)) {
my $httpd = catfile $p, $bindir, $vars->{target};
return $httpd if -e $httpd;
# The executable on Win32 in Apache/2.2 is httpd.exe,
# so try that if Apache.exe doesn't exist
# NOTE: on success $vars->{target} is updated as a side
# effect so later path construction stays consistent
if (WIN32) {
$httpd = catfile $p, $bindir, 'httpd.EXE';
if (-e $httpd) {
$vars->{target} = 'httpd.EXE';
return $httpd;
}
}
}
}
}
if ($ENV{APACHE_TEST_HTTPD}) {
return $ENV{APACHE_TEST_HTTPD};
}
# look in PATH as a last resort
if (my $httpd = which('httpd')) {
return $httpd;
} elsif ($httpd = which('httpd2')) {
return $httpd;
} elsif ($httpd = which('apache')) {
return $httpd;
} elsif ($httpd = which('apache2')) {
return $httpd;
}
return;
}
# cache for default_servername(): resolve the local host name once
my $localhost;
# reverse-resolve 127.0.0.1, falling back to the literal 'localhost'
sub default_localhost {
my $localhost_addr = pack('C4', 127, 0, 0, 1);
gethostbyaddr($localhost_addr, Socket::AF_INET()) || 'localhost';
}
# Return the default server name (the cached result of
# default_localhost()), dying when it cannot be determined.
sub default_servername {
    my $self = shift;
    $localhost ||= $self->default_localhost;
    die "Can't figure out the default localhost's server name"
        unless $localhost;
    # the previous code fell off the end of the 'die ... unless'
    # statement modifier, so callers only got the name back via the
    # obscure fact that an untaken modifier yields its condition's
    # value -- return it explicitly
    return $localhost;
}
# memoize the selected value (so we make sure that the same port is used
# via select). The problem is that select_first_port() is called 3 times after
# -clean, and it's possible that a lower port will get released
# between calls, leading to various places in the test suite getting a
# different base port selection.
#
# XXX: There is still a problem if two t/TEST's configure at the same
# time, so they both see the same port free, but only the first one to
# bind() will actually get the port. So there is a need in another
# check and reconfiguration just before the server starts.
#
my $port_memoized;

# Return the base port to use: the memoized value, the
# APACHE_TEST_PORT env var, the configured port, or DEFAULT_PORT.
# The special value 'select' triggers a search for a free port.
# Returns 0 when no free port could be found in select mode.
sub select_first_port {
    my $self = shift;

    # FIX: this used to read "my $port ||= ...", which declares a
    # fresh (undef) lexical and then ||='s into it -- a plain
    # assignment is what was meant
    my $port = $port_memoized || $ENV{APACHE_TEST_PORT}
        || $self->{vars}{port} || DEFAULT_PORT;

    # memoize
    $port_memoized = $port;
    return $port unless $port eq 'select';

    # port select mode: try to find another available port, take into
    # account that each instance of the test suite may use more than
    # one port for virtual hosts, therefore try to check ports in big
    # steps (20?).
    my $step  = 20;
    my $tries = 20;
    $port = DEFAULT_PORT;
    until (Apache::TestServer->port_available($port)) {
        unless (--$tries) {
            error "no ports available";
            error "tried ports @{[DEFAULT_PORT]} - $port in $step increments";
            return 0;
        }
        $port += $step;
    }

    info "the default base port is used, using base port $port instead"
        unless $port == DEFAULT_PORT;

    # memoize
    $port_memoized = $port;
    return $port;
}
# cache: our own address is resolved at most once per process
my $remote_addr;

# Return the dotted-quad address of the configured server name.
# Exits when the name cannot be resolved, since nothing else in the
# test setup can work without it.
sub our_remote_addr {
    my $self = shift;

    # FIX: the memoized value was previously ignored on the resolve
    # path -- gethostbyname() ran on every call and only the final
    # ||= used the cache; short-circuit instead
    return $remote_addr if defined $remote_addr;

    my $name = $self->default_servername;
    # (gethostbyname)[-1]: the last (packed) address entry
    my $iaddr = (gethostbyname($name))[-1];
    unless (defined $iaddr) {
        error "Can't resolve host: '$name' (check /etc/hosts)";
        exit 1;
    }
    $remote_addr = Socket::inet_ntoa($iaddr);
    return $remote_addr;
}
# the loopback address, used when a literal local IP is required
sub default_loopback {
    return '127.0.0.1';
}
# Return the port for a given vhost module.  With no module: plain
# http uses the base port; https falls through to the ssl module's
# vhost port.
sub port {
    my ($self, $module) = @_;

    if (!$module) {
        my $vars = $self->{vars};
        return $self->select_first_port() if $vars->{scheme} ne 'https';
        $module = $vars->{ssl_module_name};
    }

    return $self->{vhosts}->{$module}->{port};
}
# Return "servername:port" for the given vars (default: our own) and
# optional vhost module.
sub hostport {
    my $self   = shift;
    my $vars   = shift || $self->{vars};
    my $module = shift || '';

    my $host = $vars->{servername};
    my $port = $self->port($module || '');
    return join ':', $host, $port;
}
#look for mod_foo.so
#
# Search the usual DSO locations (src dir, apxs LIBEXECDIR,
# serverroot modules/ and libexec/) for the named module file and
# return its full path.  When not found on disk, fall back to the
# module list inherited from the system-wide httpd.conf (keyed by the
# "mod_foo.c" source name).  Returns nothing when the module is
# unknown.
sub find_apache_module {
my($self, $module) = @_;
die "find_apache_module: module name argument is required"
unless $module;
my $vars = $self->{vars};
my $sroot = $vars->{serverroot};
my @trys = grep { $_ }
($vars->{src_dir},
$self->apxs('LIBEXECDIR'),
catfile($sroot, 'modules'),
catfile($sroot, 'libexec'));
for (@trys) {
my $file = catfile $_, $module;
if (-e $file) {
debug "found $module => $file";
return $file;
}
}
# if the module wasn't found try to lookup in the list of modules
# inherited from the system-wide httpd.conf
my $name = $module;
$name =~ s/\.s[ol]$/.c/; #mod_info.so => mod_info.c
$name =~ s/^lib/mod_/; #libphp4.so => mod_php4.c
return $self->{modules}->{$name} if $self->{modules}->{$name};
}
#generate files and directories
#
# Comment-style formatters used by genwarning() to wrap the
# "generated file, do not edit" banner in syntax valid for the target
# file type.
# NOTE: the 'default' entry relies on s/// inside grep for its side
# effect -- it prefixes every line of the *arguments* with '# '
# (grep's $_ aliases the caller's values) and filters on the
# substitution count.
my %warn_style = (
html => sub { "<!-- @_ -->" },
c => sub { "/* @_ */" },
php => sub { "<?php /* \n@_ \n*/ ?>" },
default => sub { join '', grep {s/^/\# /gm} @_ },
);
# map filename extensions to the warning style above
my %file_ext = (
map({$_ => 'html'} qw(htm html)),
map({$_ => 'c' } qw(c h)),
map({$_ => 'php' } qw(php)),
);
# return the passed file's extension or '' if there is no one
# note: that '/foo/bar.conf.in' returns an extension: 'conf.in';
# note: a hidden file .foo will be recognized as an extension 'foo'
sub filename_ext {
    my ($self, $filename) = @_;

    # '\..*' makes fileparse treat everything from the first dot on
    # as the suffix
    my $suffix = (File::Basename::fileparse($filename, '\..*'))[2];
    return '' unless defined $suffix && length $suffix;

    $suffix =~ s{^\.}{};
    return lc $suffix;
}
# return the %warn_style formatter appropriate for $filename's
# extension (falling back to the '#'-comment default)
sub warn_style_sub_ref {
    my ($self, $filename) = @_;
    my $ext   = $self->filename_ext($filename);
    my $style = $file_ext{$ext} || 'default';
    return $warn_style{$style};
}
# Build the "this file is generated, do not edit" banner for
# $filename (optionally naming the template $from_filename), with a
# timestamp and a caller trace, formatted in the comment style
# matching the file's extension.  Returns nothing without a filename.
sub genwarning {
    my ($self, $filename, $from_filename) = @_;
    return unless $filename;

    my $stamp = scalar localtime;

    my $warning = "WARNING: this file is generated";
    $warning .= " (from $from_filename)" if defined $from_filename;
    $warning .= ", do not edit\n";
    $warning .= "generated on $stamp\n";
    $warning .= calls_trace();

    return $self->warn_style_sub_ref($filename)->($warning);
}
# Return a multi-line string describing the current call stack,
# one "NN: file:line" entry per frame (starting above our caller),
# or '' when there are no frames.
sub calls_trace {
    my @frames;

    for (my $depth = 1; ; $depth++) {
        my (undef, $filename, $line) = caller($depth);
        last unless $filename;
        push @frames, sprintf "%02d: %s:%d", $depth, $filename, $line;
    }

    return @frames ? join("\n", @frames) . "\n" : '';
}
# remember a generated file (by absolute path) so clean() can
# unlink it later
sub clean_add_file {
    my ($self, $file) = @_;
    my $abs = rel2abs($file);
    $self->{clean}->{files}->{$abs} = 1;
}
# Remember a to-be-created directory -- and every not-yet-existing
# ancestor -- so clean() can rmdir them later.
sub clean_add_path {
    my ($self, $path) = @_;

    my $dir = rel2abs($path);
    # walk upwards until we hit a directory that already exists
    do {
        $self->{clean}->{dirs}->{$dir} = 1;
        $dir = dirname $dir;
    } until -e $dir;
}
# debug-log which file is being generated (path shown relative to
# the t/ dir), and from which template if any
sub genfile_trace {
    my ($self, $file, $from_file) = @_;

    my $rel_name = abs2rel $file, $self->{vars}->{t_dir};
    my $msg = "generating $rel_name";
    $msg .= " from $from_file" if defined $from_file;
    debug $msg;
}
# print the generated-file banner (if one applies to this file type)
# to the open handle $fh
sub genfile_warning {
    my ($self, $file, $from_file, $fh) = @_;

    my $banner = $self->genwarning($file, $from_file);
    if ($banner) {
        print $fh $banner, "\n";
    }
}
# $from_file == undef if there was no templates used
#
# Create $file (and any missing parent dirs), print the generated-file
# banner unless $nowarning, register the file for cleanup, and return
# a filehandle open for writing.  Dies when the file cannot be opened.
sub genfile {
    my ($self, $file, $from_file, $nowarning) = @_;

    # create the parent dir if it doesn't exist yet
    my $dir = dirname $file;
    $self->makepath($dir);

    $self->genfile_trace($file, $from_file);

    my $fh = Symbol::gensym();
    # FIX: three-arg open instead of "> $file" interpolation -- the
    # mode can no longer be affected by unusual filenames
    open $fh, '>', $file or die "open $file: $!";

    $self->genfile_warning($file, $from_file, $fh) unless $nowarning;

    $self->clean_add_file($file);

    return $fh;
}
# gen + write file
#
# Generate $file via genfile() and write $content (if any) to it.
# FIX: the write handle is now close-checked -- buffered write errors
# (e.g. a full disk) surface at close and used to be silently lost;
# generate_httpd_conf() already close-checks the same way.
sub writefile {
    my ($self, $file, $content, $nowarning) = @_;

    my $fh = $self->genfile($file, undef, $nowarning);
    print $fh $content if $content;
    close $fh or die "close $file: $!";
}
# Build the header shared by all generated perl scripts: strictures
# plus a 'use lib' line listing every relevant live/bundled lib/ dir
# that exists relative to the script's own location.
sub perlscript_header {
    require FindBin;

    my @lib_dirs;
    # push the dir composed of the given parts if it exists;
    # returns true when it was added
    my $maybe_add = sub {
        my $candidate = canonpath catdir @_;
        return 0 unless -d $candidate;
        push @lib_dirs, $candidate;
        return 1;
    };

    # mp2 needs its modper-2.0/lib before blib was created
    if (IS_MOD_PERL_2_BUILD || $ENV{APACHE_TEST_LIVE_DEV}) {
        # the live 'lib/' dir of the distro
        # (e.g. modperl-2.0/ModPerl-Registry/lib)
        $maybe_add->($FindBin::Bin, "lib");
        # the live dir of the top dir if any (e.g. modperl-2.0/lib)
        if (-e catfile($FindBin::Bin, "..", "Makefile.PL")) {
            $maybe_add->($FindBin::Bin, "..", "lib");
        }
    }

    # the bundled Apache-Test/lib beside or above the script (first hit wins)
    for my $where (qw(. ..)) {
        last if $maybe_add->($FindBin::Bin, $where, "Apache-Test", "lib");
    }

    # the test suite's own t/lib
    $maybe_add->($FindBin::Bin, "t", "lib");

    my $dirs = join("\n ", '', @lib_dirs) . "\n";
    return <<"EOF";
use strict;
use warnings FATAL => 'all';
use lib qw($dirs);
EOF
}
# gen + write executable perl script file
#
# Like writefile(), but puts a portable #! line first (before the
# generated-file banner) and makes the result executable.
# FIX: close and chmod results are now checked -- both used to fail
# silently, leaving a truncated or non-executable script.
sub write_perlscript {
    my ($self, $file, $content) = @_;

    my $fh = $self->genfile($file, undef, 1);

    # shebang must be the very first line
    my $shebang = make_shebang();
    print $fh $shebang;

    $self->genfile_warning($file, undef, $fh);

    print $fh $content if $content;
    close $fh or die "close $file: $!";

    chmod 0755, $file or warn "chmod 0755 $file: $!";
}
# Build a portable #! line for generated scripts: a plain shebang for
# short interpreter paths, or the classic startperl + exec-eval
# trampoline when the path is too long for some shells.
sub make_shebang {
# if perlpath is longer than 62 chars, some shells on certain
# platforms won't be able to run the shebang line, so when seeing
# a long perlpath use the eval workaround.
# see: http://en.wikipedia.org/wiki/Shebang
# http://homepages.cwi.nl/~aeb/std/shebang/
my $shebang = length $Config{perlpath} < 62
? "#!$Config{perlpath}\n"
: <<EOI;
$Config{'startperl'}
eval 'exec $Config{perlpath} -S \$0 \${1+"\$@"}'
if \$running_under_some_shell;
EOI
return $shebang;
}
# Copy $from to $to and register $to for cleanup.
# FIX: a failed copy used to be silently ignored; die instead, since
# continuing with a missing file only fails more obscurely later.
sub cpfile {
    my ($self, $from, $to) = @_;
    File::Copy::copy($from, $to)
        or die "failed to copy $from to $to: $!";
    $self->clean_add_file($to);
}
# Symlink $from to $to and register $to for cleanup.
# FIX: a failed symlink used to be silently ignored; warn (not die)
# since the links made here (e.g. the ssl certs/keys dirs in
# generate_ssl_conf) are best-effort conveniences.
sub symlink {
    my ($self, $from, $to) = @_;
    CORE::symlink($from, $to)
        or warn "failed to symlink $from to $to: $!";
    $self->clean_add_file($to);
}
# ensure a directory exists (thin wrapper over makepath)
sub gendir {
    my ($self, $path) = @_;
    $self->makepath($path);
}
# returns a list of dirs successfully created
sub makepath {
    my ($self, $path) = @_;

    return unless defined $path;
    return if -e $path;

    # remember what we create so clean() can remove it later
    $self->clean_add_path($path);
    return File::Path::mkpath($path, 0, 0755);
}
# Run $cmd and return a filehandle reading its stdout.
# Dies when the command cannot be started.
sub open_cmd {
    my ($self, $cmd) = @_;

    # untaint some %ENV fields
    local @ENV{ qw(IFS CDPATH ENV BASH_ENV) };
    local $ENV{PATH} = untaint_path($ENV{PATH});

    # launder for -T
    $cmd = $1 if $cmd =~ /(.*)/;

    my $handle = Symbol::gensym();
    # FIX: three-arg piped open instead of the two-arg "$cmd|" form;
    # same shell semantics, but the mode can't be injected via $cmd
    open $handle, '-|', $cmd or die "$cmd failed: $!";

    return $handle;
}
# Remove everything this config generated: server artifacts,
# c-modules, the SSL CA, then all registered files and (empty)
# directories.  $_[0] is the clean level:
sub clean {
my $self = shift;
$self->{clean_level} = shift || 2; #2 == really clean, 1 == reconfigure
$self->new_test_server->clean;
$self->cmodules_clean;
$self->sslca_clean;
for (keys %{ $self->{clean}->{files} }) {
if (-e $_) {
debug "unlink $_";
unlink $_;
}
else {
debug "unlink $_: $!"; # NOTE(review): $! is stale here -- nothing set it for a merely-missing file
}
}
# if /foo comes before /foo/bar, /foo will never be removed
# hence ensure that sub-dirs are always treated before a parent dir
for (reverse sort keys %{ $self->{clean}->{dirs} }) {
if (-d $_) {
# only remove a dir that ended up empty
my $dh = Symbol::gensym();
opendir($dh, $_);
my $notempty = grep { ! /^\.{1,2}$/ } readdir $dh;
closedir $dh;
next if $notempty;
debug "rmdir $_";
rmdir $_;
}
}
}
# tokens with computed (rather than config-var) expansions;
# keys are the lowercased form of the @Token@ name
my %special_tokens = (
nextavailableport => sub { shift->server->select_next_port }
);
# Expand every @token@ in $_ (in place -- $_ aliases the caller's
# string): special tokens run their callback, anything else is looked
# up in $self->{vars}; an unknown token is fatal.
sub replace {
my $self = shift;
my $file = $Apache::TestConfig::File
? "in file $Apache::TestConfig::File" : '';
s[@(\w+)@]
[ my $key = lc $1;
if (my $callback = $special_tokens{$key}) {
$self->$callback;
}
elsif (exists $self->{vars}->{$key}) {
$self->{vars}->{$key};
}
else {
die "invalid token: \@$1\@ $file\n";
}
]ge;
}
#need to configure the vhost port for redirects and $ENV{SERVER_PORT}
#to have the correct values
#
# ServerName/Port directive generators keyed by server generation
# (dispatched through $self->server->version_of); each returns a list
# of [Directive => value] pairs.  Httpd 2.x takes "name:port" in a
# single ServerName; older servers take separate ServerName and Port
# directives.
my %servername_config = (
0 => sub {
my($name, $port) = @_;
[ServerName => ''], [Port => 0];
},
1 => sub {
my($name, $port) = @_;
[ServerName => $name], [Port => $port];
},
2 => sub {
my($name, $port) = @_;
[ServerName => "$name:$port"];
},
);
# pick the generator matching the server version and run it
sub servername_config {
my $self = shift;
$self->server->version_of(\%servername_config)->(@_);
}
# Parse a "<VirtualHost ...>" template line, allocate a port for the
# module it names, and return a hashref describing how to emit the
# vhost (port, rewritten line, extra config that belongs inside and
# outside the container) -- or undef when the line is not a vhost
# opener or its module isn't configured.
sub parse_vhost {
my($self, $line) = @_;
my($indent, $module, $namebased);
# captures: $1 leading indent, $2 optional name-based address
# (before ':'), $3 the module token; '_default_:' is skipped
if ($line =~ /^(\s*)<VirtualHost\s+(?:_default_:|([^:]+):(?!:))?(.*?)\s*>\s*$/) {
$indent = $1 || "";
$namebased = $2 || "";
$module = $3;
}
else {
return undef;
}
my $vars = $self->{vars};
my $mods = $self->{modules};
my $have_module = "$module.c";
my $ssl_module = $vars->{ssl_module};
#if module ends with _ssl and it is not the module that implements ssl,
#then assume this module is a vhost with SSLEngine On (or similar)
#see mod_echo in extra.conf.in for example
if ($module =~ /^(mod_\w+)_ssl$/ and $have_module ne $ssl_module) {
$have_module = "$1.c"; #e.g. s/mod_echo_ssl.c/mod_echo.c/
return undef unless $mods->{$ssl_module};
}
#don't allocate a port if this module is not configured
#assumes the configuration is inside an <IfModule $have_module>
if ($module =~ /^mod_/ and not $mods->{$have_module}) {
return undef;
}
#allocate a port and configure this module into $self->{vhosts}
my $port = $self->new_vhost($module, $namebased);
#extra config that should go *inside* the <VirtualHost ...>
my @in_config = $self->servername_config($namebased
? $namebased
: $vars->{servername},
$port);
my @out_config = ();
if ($self->{vhosts}->{$module}->{namebased} < 2) {
#extra config that should go *outside* the <VirtualHost ...>
@out_config = ([Listen => '0.0.0.0:' . $port]);
if ($self->{vhosts}->{$module}->{namebased}) {
# NameVirtualHost was removed in httpd 2.3.11 -- guard it
push @out_config => ["<IfVersion < 2.3.11>\n".
"${indent}${indent}NameVirtualHost"
=> "*:$port\n${indent}</IfVersion>"];
}
}
# expose the allocated port as @<module>_port@ for templates
$self->{vars}->{$module . '_port'} = $port;
#there are two ways of building a vhost
#first is when we parse test .pm and .c files
#second is when we scan *.conf.in
my $form_postamble = sub {
my $indent = shift;
for my $pair (@_) {
$self->postamble("$indent@$pair");
}
};
my $form_string = sub {
my $indent = shift;
join "\n", map { "$indent@$_\n" } @_;
};
my $double_indent = $indent ? $indent x 2 : ' ' x 4;
return {
port => $port,
#used when parsing .pm and .c test modules
in_postamble => sub { $form_postamble->($double_indent, @in_config) },
out_postamble => sub { $form_postamble->($indent, @out_config) },
#used when parsing *.conf.in files
in_string => $form_string->($double_indent, @in_config),
out_string => $form_string->($indent, @out_config),
line => "$indent<VirtualHost " . ($namebased ? '*' : '_default_') .
":$port>",
};
}
# Locate the DSO for $name (e.g. 'mod_mime.so') and queue a guarded
# LoadModule line in the preamble when the file exists on disk.
# Returns false when the module can't be found at all, 1 otherwise
# (even when no LoadModule was queued).
sub find_and_load_module {
my ($self, $name) = @_;
my $mod_path = $self->find_apache_module($name) or return;
# derive the module symbol from 'mod_<sym>.<ext>'
# NOTE(review): $sym stays undef for names not matching mod_*.*
# (e.g. 'libphp4.so'); the LoadModule branch would then emit a
# bogus symbol -- presumably callers only pass mod_* names; verify
my ($sym) = $name =~ m/mod_(\w+)\./;
if ($mod_path && -e $mod_path) {
$self->preamble(IfModule => "!mod_$sym.c",
qq{LoadModule ${sym}_module "$mod_path"\n});
}
return 1;
}
# Rewrite a template line held in $_: when it opens a <VirtualHost>,
# replace it in place with the generated out-of-container config, the
# rewritten vhost line and the in-container config (see parse_vhost).
# Leaves $_ untouched for non-vhost lines.
sub replace_vhost_modules {
my $self = shift;
if (my $cfg = $self->parse_vhost($_)) {
$_ = '';
for my $key (qw(out_string line in_string)) {
next unless $cfg->{$key};
$_ .= "$cfg->{$key}\n";
}
}
}
# Copy the template on handle $in to $out line by line, expanding
# @token@ variables and <VirtualHost> openers on the way.  replace()
# and replace_vhost_modules() both operate on $_, hence the local.
sub replace_vars {
my($self, $in, $out) = @_;
local $_;
while (<$in>) {
$self->replace;
$self->replace_vhost_modules;
print $out $_;
}
}
# content for the generated htdocs/index.html
sub index_html_template {
    my $self = shift;
    my $server_name = $self->{server}->{name};
    return "welcome to $server_name\n";
}
# Create documentroot/index.html from the template unless it already
# exists.  FIX: the write handle used to be left to be closed
# implicitly at scope exit, discarding any write error; close-check
# it explicitly (as writefile/generate_httpd_conf do).
sub generate_index_html {
    my $self = shift;

    my $dir = $self->{vars}->{documentroot};
    $self->gendir($dir);

    my $file = catfile $dir, 'index.html';
    return if -e $file;

    my $fh = $self->genfile($file);
    print $fh $self->index_html_template;
    close $fh or die "close $file: $!";
}
# minimal mime.types content, used when no TypesConfig was inherited
# from the system-wide httpd.conf (see generate_types_config)
sub types_config_template {
return <<EOF;
text/html html htm
image/gif gif
image/jpeg jpeg jpg jpe
image/png png
text/plain asc txt
EOF
}
# Make sure a TypesConfig is available: load mod_mime if needed and,
# unless one was inherited from the system httpd.conf, generate a
# minimal t/conf/mime.types and Include it via the postamble.
sub generate_types_config {
my $self = shift;
# handle the case when mod_mime is built as a shared object
# but wasn't included in the system-wide httpd.conf
$self->find_and_load_module('mod_mime.so');
unless ($self->{inherit_config}->{TypesConfig}) {
my $types = catfile $self->{vars}->{t_conf}, 'mime.types';
unless (-e $types) {
my $fh = $self->genfile($types);
print $fh $self->types_config_template;
close $fh;
}
$self->postamble(<<EOI);
<IfModule mod_mime.c>
TypesConfig "$types"
</IfModule>
EOI
}
}
# various dup bugs in older perl and perlio in perl < 5.8.4 need a
# workaround to explicitly rewind the dupped DATA fh before using it
my $DATA_pos = tell DATA;
# Return a read handle for the httpd.conf template: the file $try
# when it can be opened, otherwise a rewound dup of this module's
# __DATA__ section (dupped so the caller's close never closes DATA).
sub httpd_conf_template {
my($self, $try) = @_;
my $in = Symbol::gensym();
if (open $in, $try) {
return $in;
}
else {
my $dup = Symbol::gensym();
open $dup, "<&DATA" or die "Can't dup DATA: $!";
seek $dup, $DATA_pos, 0; # rewind to the beginning
return $dup; # so we don't close DATA
}
}
#certain variables may not be available until certain config files
#are generated. for example, we don't know the ssl port until ssl.conf.in
#is parsed. ssl port is needed for proxyssl testing
sub check_vars {
    my $self = shift;
    my $vars = $self->{vars};

    # nothing to derive once proxyssl_url is known
    return if $vars->{proxyssl_url};

    # pick up the ssl vhost's host:port, if that vhost exists by now
    my $ssl_vhost = $self->{vhosts}->{ $vars->{ssl_module_name} };
    if ($ssl_vhost) {
        $vars->{proxyssl_url} ||= $ssl_vhost->{hostport};
    }

    # proxying ssl to ourselves needs one extra client slot
    if ($vars->{proxyssl_url} && !$vars->{maxclients_preset}) {
        $vars->{minclients}++;
        $vars->{maxclients}++;
    }
}
# Return the list of generated config files under t/conf that are
# missing or older than their corresponding *.in template.
sub extra_conf_files_needing_update {
    my $self = shift;

    my @stale;
    my $inspect = sub {
        return unless /\.in$/;
        my $target = $File::Find::name;
        $target =~ s/\.in$//;
        # regenerate when the target is missing or the template is
        # newer (-M: smaller age means more recently modified)
        push @stale, $target
            unless -e $target && -M $target < -M $File::Find::name;
    };
    finddepth($inspect, $self->{vars}->{t_conf});

    return @stale;
}
# Find every *.in template under t/conf, regenerate the derived
# config files (expanding @tokens@ and vhosts), and return an
# arrayref of the generated filenames for Include'ing.
sub generate_extra_conf {
my $self = shift;
my(@extra_conf, @conf_in, @conf_files);
finddepth(sub {
return unless /\.in$/;
push @conf_in, catdir $File::Find::dir, $_;
}, $self->{vars}->{t_conf});
#make ssl port always be 8530 when available
# (ssl templates are processed first so their vhost gets the first
# allocated port)
for my $file (@conf_in) {
if (basename($file) =~ /^ssl/) {
unshift @conf_files, $file;
}
else {
push @conf_files, $file;
}
}
for my $file (@conf_files) {
(my $generated = $file) =~ s/\.in$//;
debug "Will 'Include' $generated config file";
push @extra_conf, $generated;
}
# regenerate .conf files
for my $file (@conf_files) {
# $Apache::TestConfig::File is used by replace() in error messages
local $Apache::TestConfig::File = $file;
my $in = Symbol::gensym();
open($in, $file) or next;
(my $generated = $file) =~ s/\.in$//;
my $out = $self->genfile($generated, $file);
$self->replace_vars($in, $out);
close $in;
close $out;
# some vars (e.g. the ssl port) only become known mid-generation
$self->check_vars;
}
#we changed order to give ssl the first port after DEFAULT_PORT
#but we want extra.conf Included first so vhosts inherit base config
#such as LimitRequest*
return [ sort @extra_conf ];
}
# Can we do anything SSL-CA related?  Requires the ssl module to be
# configured and a t/conf/ssl tree.  With $check_openssl true, also
# verify the openssl binary is runnable (exits when it is not).
sub sslca_can {
    my ($self, $check_openssl) = @_;
    my $vars = $self->{vars};

    return 0 unless $self->{modules}->{ $vars->{ssl_module} };
    return 0 unless -d "$vars->{t_conf}/ssl";

    require Apache::TestSSLCA;

    if ($check_openssl) {
        my $openssl = Apache::TestSSLCA::openssl();
        unless (which($openssl)) {
            error "cannot locate '$openssl' program required to generate SSL CA";
            exit(1);
        }
    }

    return 1;
}
# generate the test SSL CA unless it already exists
sub sslca_generate {
    my $self = shift;

    my $ca_dir = $self->{vars}->{sslca};
    return if $ca_dir and -d $ca_dir; #t/conf/ssl/ca

    return unless $self->sslca_can(1);
    Apache::TestSSLCA::generate($self);
}
# remove the generated test SSL CA, when ssl was configured at all
sub sslca_clean {
    my $self = shift;

    # XXX: httpd config is required, for now just skip ssl clean if
    # there is none. should probably add some flag which will tell us
    # when httpd_config was already run
    my $vars = $self->{vars};
    return unless $vars->{httpd} && $vars->{ssl_module};

    return unless $self->sslca_can;
    Apache::TestSSLCA::clean($self);
}
#XXX: just a quick hack to support t/TEST -ssl
#outside of httpd-test/perl-framework
sub generate_ssl_conf {
    my $self = shift;
    my $vars = $self->{vars};

    my $dst = "$vars->{t_conf}/ssl";
    my $src = "$vars->{top_dir}/../httpd-test/perl-framework/t/conf/ssl";

    # only when the framework's ssl tree exists and ours doesn't yet
    return unless -d $src and not -d $dst;

    $self->gendir($dst);
    # the template is copied (it gets filled in); support dirs are linked
    $self->cpfile("$src/$_", "$dst/$_") for qw(ssl.conf.in);
    $self->symlink("$src/$_", "$dst/$_") for qw(certs keys crl);
}
# return the first @INC entry containing subdirectory $dir
# (as "<inc>/<dir>"), or '' when none does
sub find_in_inc {
    my ($self, $dir) = @_;

    foreach my $inc_entry (@INC) {
        my $candidate = "$inc_entry/$dir";
        return $candidate if -d $candidate;
    }

    return "";
}
# make sure the t/conf directory exists before config generation
sub prepare_t_conf {
    my $self = shift;
    my $t_conf = $self->{vars}->{t_conf};
    $self->gendir($t_conf);
}
# map public /getfiles-* alias names to the config variable holding
# the corresponding local path (used by generate_httpd_conf)
my %aliases = (
"perl-pod" => "perlpod",
"binary-httpd" => "httpd",
"binary-perl" => "perl",
);
# Generate t/conf/httpd.conf: prerequisites (mime types, index.html,
# log/conf dirs), extra *.conf.in derived configs, the preamble, the
# base ServerName/Port config, the template body with @tokens@
# expanded, the /getfiles-* aliases, and finally the postamble with
# any *.last.* includes at the very end.
sub generate_httpd_conf {
my $self = shift;
my $vars = $self->{vars};
#generated httpd.conf depends on these things to exist
$self->generate_types_config;
$self->generate_index_html;
$self->gendir($vars->{t_logs});
$self->gendir($vars->{t_conf});
my @very_last_postamble = ();
if (my $extra_conf = $self->generate_extra_conf) {
for my $file (@$extra_conf) {
my $entry;
if ($file =~ /\.conf$/) {
# the main config itself is not Include'd
next if $file =~ m|/httpd\.conf$|;
$entry = qq(Include "$file");
}
elsif ($file =~ /\.pl$/) {
$entry = qq(<IfModule mod_perl.c>\n PerlRequire "$file"\n</IfModule>\n);
}
else {
next;
}
# put the .last includes very last
if ($file =~ /\.last\.(conf|pl)$/) {
push @very_last_postamble, $entry;
}
else {
$self->postamble($entry);
}
}
}
$self->configure_proxy;
my $conf_file = $vars->{t_conf_file};
my $conf_file_in = join '.', $conf_file, 'in';
# falls back to the __DATA__ template when no httpd.conf.in exists
my $in = $self->httpd_conf_template($conf_file_in);
my $out = $self->genfile($conf_file);
$self->find_and_load_module('mod_alias.so');
$self->preamble_run($out);
for my $name (qw(user group)) { #win32
if ($vars->{$name}) {
print $out qq[\u$name "$vars->{$name}"\n];
}
}
#2.0: ServerName $ServerName:$Port
#1.3: ServerName $ServerName
# Port $Port
my @name_cfg = $self->servername_config($vars->{servername},
$vars->{port});
for my $pair (@name_cfg) {
print $out "@$pair\n";
}
$self->replace_vars($in, $out);
# handle the case when mod_alias is built as a shared object
# but wasn't included in the system-wide httpd.conf
print $out "<IfModule mod_alias.c>\n";
for (keys %aliases) {
next unless $vars->{$aliases{$_}};
print $out " Alias /getfiles-$_ $vars->{$aliases{$_}}\n";
}
print $out "</IfModule>\n";
print $out "\n";
$self->postamble_run($out);
print $out join "\n", @very_last_postamble;
close $in;
# close-check the write handle: buffered errors surface here
close $out or die "close $conf_file: $!";
}
# Decide whether the saved configuration must be regenerated.
# Returns a list of human-readable reasons (empty list means the
# existing config is still good).
sub need_reconfiguration {
my($self, $conf_opts) = @_;
my @reasons = ();
my $vars = $self->{vars};
# if '-port select' we need to check from scratch which ports are
# available
if (my $port = $conf_opts->{port} || $Apache::TestConfig::Argv{port}) {
if ($port eq 'select') {
push @reasons, "'-port $port' requires reconfiguration";
}
}
my $exe = $vars->{apxs} || $vars->{httpd} || '';
# if httpd.conf is older than executable
# (-M: smaller age means more recently modified)
push @reasons,
"$exe is newer than $vars->{t_conf_file}"
if -e $exe &&
-e $vars->{t_conf_file} &&
-M $exe < -M $vars->{t_conf_file};
# any .in files are newer than their derived versions?
if (my @files = $self->extra_conf_files_needing_update) {
# invalidate the vhosts cache, since a different port could be
# assigned on reparse
$self->{vhosts} = {};
for my $file (@files) {
push @reasons, "$file.in is newer than $file";
}
}
# if special env variables are used (since they can change any time)
# XXX: may be we could check whether they have changed since the
# last run and thus avoid the reconfiguration?
{
my $passenv = passenv();
if (my @env_vars = grep { $ENV{$_} } keys %$passenv) {
push @reasons, "environment variables (@env_vars) are set";
}
}
# if the generated config was created with a version of Apache-Test
# less than the current version
{
my $current = Apache::Test->VERSION;
my $config = $self->{apache_test_version};
if (! $config || $config < $current) {
push @reasons, "configuration generated with old Apache-Test";
}
}
return @reasons;
}
# Path of the test server's error_log.  In list context returns both
# the absolute and top_dir-relative paths; in scalar context returns
# the relative path when $want_rel is true, the absolute otherwise.
sub error_log {
    my ($self, $want_rel) = @_;

    my $abs = catfile $self->{vars}->{t_logs}, 'error_log';
    my $rel = abs2rel $abs, $self->{vars}->{top_dir};

    return ($abs, $rel) if wantarray;
    return $want_rel ? $rel : $abs;
}
#utils
#For Win32 systems, stores the extensions used for executable files
#They may be . prefixed, so we will strip the leading periods.
# (used by which() to probe "$base.$ext" candidates)
my @path_ext = ();
if (WIN32) {
if ($ENV{PATHEXT}) {
push @path_ext, split ';', $ENV{PATHEXT};
# strip leading dots in place ($ext aliases the array element)
for my $ext (@path_ext) {
$ext =~ s/^\.*(.+)$/$1/;
}
}
else {
#Win9X: doesn't have PATHEXT
push @path_ext, qw(com exe bat);
}
}
# Locate an executable $program on the search path (PATH plus perl's
# own bin directory).  Returns the full path of the first executable
# candidate found, or undef when none is found.  On Win32 also tries
# each of the @path_ext extensions (see above).
sub which {
    my $program = shift;

    return undef unless $program;

    my @dirs = File::Spec->path();

    require Config;
    my $perl_bin = $Config::Config{bin} || '';
    push @dirs, $perl_bin if $perl_bin and -d $perl_bin;

    for my $base (map { catfile $_, $program } @dirs) {
        if ($ENV{HOME} and not WIN32) {
            # only works on Unix, but that's normal:
            # on Win32 the shell doesn't have special treatment of '~'
            $base =~ s/~/$ENV{HOME}/o;
        }

        # -f _ reuses the stat buffer populated by the -x test
        return $base if -x $base && -f _;

        if (WIN32) {
            for my $ext (@path_ext) {
                return "$base.$ext" if -x "$base.$ext" && -f _;
            }
        }
    }

    # FIX: previously the sub fell off the end of the for loop with an
    # implicit return; make the not-found result explicit and consistent
    # with the early "return undef unless $program" above.
    return undef;
}
# Query apxs(1) for the value of variable $q (e.g. 'PREFIX'), caching
# successful answers in $self->{_apxs}.  When the query fails:
# returns "" if $ok_fail is true, otherwise warns and returns the
# (empty/undef) raw value.
sub apxs {
    my($self, $q, $ok_fail) = @_;

    return unless $self->{APXS};

    my $val;
    unless (exists $self->{_apxs}{$q}) {
        # neutralize dangerous shell env vars and untaint PATH before
        # shelling out to apxs
        local @ENV{ qw(IFS CDPATH ENV BASH_ENV) };
        local $ENV{PATH} = untaint_path($ENV{PATH});
        my $devnull = devnull();
        my $apxs = shell_ready($self->{APXS});
        $val = qx($apxs -q $q 2>$devnull);
        chomp $val if defined $val; # apxs post-2.0.40 adds a new line
        if ($val) {
            $self->{_apxs}{$q} = $val;
        }
        unless ($val) {
            # NOTE(review): failures are deliberately not cached, so a
            # later call will retry the query
            if ($ok_fail) {
                return "";
            }
            else {
                warn "APXS ($self->{APXS}) query for $q failed\n";
                return $val;
            }
        }
    }

    $self->{_apxs}{$q};
}
# return an untainted PATH
# Return an untainted copy of $path suitable for use under perl -T:
# the taint flag is laundered and any relative or empty PATH entries
# are dropped (taint mode disallows them).
sub untaint_path {
    my $path = shift;

    return '' unless defined $path;

    # launder the taint flag via a regex capture
    ($path) = ( $path =~ /(.*)/ );

    # win32 uses ';' for a path separator, assume others use ':'
    my $separator = WIN32 ? ';' : ':';

    my @safe_entries =
        grep { length($_) && File::Spec->file_name_is_absolute($_) }
        split /$separator/, $path;

    return join $separator, @safe_entries;
}
# Strip trailing path components from $dir.  Each remaining argument
# names a component to remove, in order, but only when it matches the
# current last component.  Returns the resulting path.
sub pop_dir {
    my $dir = shift;

    my @chunks = splitdir $dir;
    while (my $remove = shift) {
        # FIX: guard against popping past the start of the path; the
        # original compared $chunks[-1] on an empty array, producing
        # "uninitialized value" warnings.
        pop @chunks if @chunks && $chunks[-1] eq $remove;
    }

    return catfile @chunks;
}
# Prepend the project's library directories to @INC via lib.pm, so the
# test server picks up Apache-Test/lib, then blib/lib and blib/arch.
# No-op under mod_perl (the server has already arranged @INC).
sub add_inc {
    my $self = shift;
    return if $ENV{MOD_PERL}; #already setup by mod_perl
    require lib;
    # make sure that Apache-Test/lib will be first in @INC,
    # followed by modperl-2.0/lib (or some other project's lib/),
    # followed by blib/ and finally system-wide libs.
    my $top_dir = $self->{vars}->{top_dir};
    my @dirs = map { catdir $top_dir, "blib", $_ } qw(lib arch);
    my $apache_test_dir = catdir $top_dir, "Apache-Test";
    unshift @dirs, $apache_test_dir if -d $apache_test_dir;
    lib::->import(@dirs);

    if ($ENV{APACHE_TEST_LIVE_DEV}) {
        # add lib/ in a separate call to ensure that it'll end up on
        # top of @INC
        my $lib_dir = catdir $top_dir, "lib";
        lib::->import($lib_dir) if -d $lib_dir;
    }

    #print join "\n", "add_inc", @INC, "";
}
#freeze/thaw so other processes can access config
#freeze/thaw so other processes can access config

# Class method: reconstruct a config object from the frozen on-disk
# data by passing thaw => 1 through to the constructor.
sub thaw {
    my $class = shift;
    return $class->new({thaw => 1, @_});
}
# Serialize a data structure (or, when called as a method, the object
# itself) to a chomped, terse Data::Dumper string that can later be
# eval'ed back to life.
sub freeze {
    my $struct = shift;
    require Data::Dumper;
    local $Data::Dumper::Terse = 1;  # no leading '$VAR1 ='
    my $dumped = Data::Dumper::Dumper($struct);
    chomp $dumped;
    return $dumped;
}
# Propagate the values of the given keys from this (live) config into
# the cached/thawed config, rewriting the on-disk cache only when at
# least one key actually changed.
sub sync_vars {
    my $self = shift;

    return if $self->{save}; #this is not a cached config

    my $changed = 0;
    my $thaw = $self->thaw;
    my $tvars = $thaw->{vars};
    my $svars = $self->{vars};

    for my $key (@_) {
        # normalize undef to '' on both sides so the eq below can't warn
        for my $v ($tvars, $svars) {
            if (exists $v->{$key} and not defined $v->{$key}) {
                $v->{$key} = ''; #rid undef
            }
        }
        # skip keys already in sync
        next if exists $tvars->{$key} and exists $svars->{$key} and
            $tvars->{$key} eq $svars->{$key};
        $tvars->{$key} = $svars->{$key};
        $changed = 1;
    }

    return unless $changed;

    # mark the thawed copy saveable and persist it
    $thaw->{save} = 1;
    $thaw->save;
}
# Persist this configuration as t/conf/apache_test_config.pm so other
# processes can thaw() it later.  The generated file is a tiny package
# whose new() returns the frozen data structure.
sub save {
    my($self) = @_;

    return unless $self->{save};

    # add in the Apache-Test version for later comparisons
    $self->{apache_test_version} = Apache::Test->VERSION;

    my $name = 'apache_test_config';
    my $file = catfile $self->{vars}->{t_conf}, "$name.pm";
    my $fh = $self->genfile($file);

    debug "saving config data to $name.pm";

    # indent the frozen dump so it forms the body of the generated new()
    (my $obj = $self->freeze) =~ s/^/ /;

    print $fh <<EOF;
package $name;
sub new {
$obj;
}
1;
EOF

    close $fh or die "failed to write $file: $!";
}
# Build a diagnostic report string: `httpd -V` output (plus its shared
# library dependencies) and `perl -V` output.  Used for bug reports.
sub as_string {
    my $cfg = '';
    my $command = '';

    # httpd opts
    my $test_config = Apache::TestConfig->new({thaw=>1});
    # XXX: need to run httpd config to get the value of httpd
    if (my $httpd = $test_config->{vars}->{httpd}) {
        $httpd = shell_ready($httpd);
        $command = "$httpd -V";
        $cfg .= "\n*** $command\n";
        $cfg .= qx{$command};

        $cfg .= ldd_as_string($httpd);
    }
    else {
        $cfg .= "\n\n*** The httpd binary was not found\n";
    }

    # perl opts
    my $perl = shell_ready($^X);
    $command = "$perl -V";
    $cfg .= "\n\n*** $command\n";
    $cfg .= qx{$command};

    return $cfg;
}
# Report the shared-library dependencies of the given httpd binary
# using otool on OSX or ldd elsewhere (except Win32, which has
# neither).  Returns '' when no suitable tool is available.
sub ldd_as_string {
    my $httpd = shift;

    my $command;
    if (OSX) {
        if (my $otool = which('otool')) {
            $command = "$otool -L $httpd";
        }
    }
    elsif (!WIN32) {
        if (my $ldd = which('ldd')) {
            $command = "$ldd $httpd";
        }
    }

    my $report = '';
    if ($command) {
        $report .= "\n*** $command\n";
        $report .= qx{$command};
    }

    return $report;
}
# make a string suitable for feed to shell calls (wrap in quotes and
# escape quotes)
# make a string suitable for feed to shell calls (wrap in quotes and
# escape quotes)
sub shell_ready {
    my ($arg) = @_;
    # escape every double quote, leaving already-escaped ones alone
    $arg =~ s{\\?"}{\\"}g;
    return qq{"$arg"};
}
1;
=head1 NAME
Apache::TestConfig -- Test Configuration setup module
=head1 SYNOPSIS
use Apache::TestConfig;
my $cfg = Apache::TestConfig->new(%args)
my $fh = $cfg->genfile($file);
$cfg->writefile($file, $content);
$cfg->gendir($dir);
...
=head1 DESCRIPTION
C<Apache::TestConfig> is used in creating the C<Apache::Test>
configuration files.
=head1 FUNCTIONS
=over
=item genwarning()
my $warn = $cfg->genwarning($filename)
genwarning() returns a warning string as a comment, saying that the
file was autogenerated and that it's not a good idea to modify this
file. After the warning a perl trace of calls to this function is
appended. This trace is useful for finding what code has created the
file.
my $warn = $cfg->genwarning($filename, $from_filename)
If C<$from_filename> is specified it'll be used in the warning to tell
which file it was generated from.
genwarning() automatically recognizes the comment type based on the
file extension. If the extension is not recognized, the default C<#>
style is used.
Currently it supports C<E<lt>!-- --E<gt>>, C</* ... */> and C<#>
styles.
=item genfile()
my $fh = $cfg->genfile($file);
genfile() creates a new file C<$file> for writing and returns a file
handle.
If parent directories of C<$file> don't exist they will be
automagically created.
The file C<$file> and any created parent directories (if found empty)
will be automatically removed on cleanup.
A comment with a warning and calls trace is added to the top of this
file. See genwarning() for more info about this comment.
my $fh = $cfg->genfile($file, $from_file);
If C<$from_filename> is specified it'll be used in the warning to tell
which file it was generated from.
my $fh = $cfg->genfile($file, $from_file, $nowarning);
If C<$nowarning> is true, the warning won't be added. If using this
optional argument and there is no C<$from_file> you must pass undef as
in:
my $fh = $cfg->genfile($file, undef, $nowarning);
=item writefile()
$cfg->writefile($file, $content, [$nowarning]);
writefile() creates a new file C<$file> with the content of
C<$content>.
A comment with a warning and calls trace is added to the top of this
file unless C<$nowarning> is passed and set to a true value. See
genwarning() for more info about this comment.
If parent directories of C<$file> don't exist they will be
automagically created.
The file C<$file> and any created parent directories (if found empty)
will be automatically removed on cleanup.
=item write_perlscript()
$cfg->write_perlscript($filename, @lines);
Similar to writefile() but creates an executable Perl script with
correctly set shebang line.
=item gendir()
$cfg->gendir($dir);
gendir() creates a new directory C<$dir>.
If parent directories of C<$dir> don't exist they will be
automagically created.
The directory C<$dir> and any created parent directories will be
automatically removed on cleanup if found empty.
=back
=head1 Environment Variables
The following environment variables affect the configuration and the
run-time of the C<Apache::Test> framework:
=head2 APACHE_TEST_COLOR
To aid visual control over the configuration process and the run-time
phase, C<Apache::Test> uses coloured fonts when the environment
variable C<APACHE_TEST_COLOR> is set to a true value.
=head2 APACHE_TEST_LIVE_DEV
When using C<Apache::Test> during the project development phase, it's
often convenient to have the I<project/lib> (live) directory appearing
first in C<@INC> so any changes to the Perl modules, residing in it,
immediately affect the server, without a need to rerun C<make> to
update I<blib/lib>. When the environment variable
C<APACHE_TEST_LIVE_DEV> is set to a true value during the
configuration phase (C<t/TEST -config>), C<Apache::Test> will
automatically unshift the I<project/lib> directory into C<@INC>, via
the autogenerated I<t/conf/modperl_inc.pl> file.
=head1 Special Placeholders
When generating configuration files from the I<*.in> templates,
special placeholder variables get substituted. To embed a placeholder
use the C<@foo@> syntax. For example in I<extra.conf.in> you can
write:
Include @ServerRoot@/conf/myconfig.conf
When I<extra.conf> is generated, C<@ServerRoot@> will get replaced
with the location of the server root.
Placeholders are case-insensitive.
Available placeholders:
=head2 Configuration Options
All configuration variables that can be passed to C<t/TEST>, such as
C<MaxClients>, C<DocumentRoot>, C<ServerRoot>, etc. To see the
complete list run:
% t/TEST --help
and you will find them in the C<configuration options> sections.
=head2 NextAvailablePort
Every time this placeholder is encountered it'll be replaced with the
next available port. This is very useful if you need to allocate a
special port, but not hardcode it. Later when running:
% t/TEST -port=select
it's possible to run several concurrent test suites on the same
machine, w/o having port collisions.
=head1 AUTHOR
=head1 SEE ALSO
perl(1), Apache::Test(3)
=cut
__DATA__
Listen 0.0.0.0:@Port@
ServerRoot "@ServerRoot@"
DocumentRoot "@DocumentRoot@"
PidFile @t_pid_file@
ErrorLog @t_logs@/error_log
LogLevel debug
<IfModule mod_version.c>
<IfVersion > 2.4.1>
DefaultRunTimeDir "@t_logs@"
</IfVersion>
</IfModule>
<IfModule mod_log_config.c>
TransferLog @t_logs@/access_log
</IfModule>
<IfModule mod_cgid.c>
ScriptSock @t_logs@/cgisock
</IfModule>
ServerAdmin @ServerAdmin@
#needed for http/1.1 testing
KeepAlive On
HostnameLookups Off
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
<IfModule @THREAD_MODULE@>
<IfModule mod_version.c>
<IfVersion < 2.3.4>
LockFile @t_logs@/accept.lock
</IfVersion>
</IfModule>
StartServers 1
MinSpareThreads @MinClients@
MaxSpareThreads @MinClients@
ThreadsPerChild @MinClients@
MaxClients @MaxClientsThreadedMPM@
MaxRequestsPerChild 0
</IfModule>
<IfModule perchild.c>
<IfModule mod_version.c>
<IfVersion < 2.3.4>
LockFile @t_logs@/accept.lock
</IfVersion>
</IfModule>
NumServers 1
StartThreads @MinClients@
MinSpareThreads @MinClients@
MaxSpareThreads @MinClients@
MaxThreadsPerChild @MaxClients@
MaxRequestsPerChild 0
</IfModule>
<IfModule prefork.c>
<IfModule mod_version.c>
<IfVersion < 2.3.4>
LockFile @t_logs@/accept.lock
</IfVersion>
</IfModule>
StartServers @MinClients@
MinSpareServers @MinClients@
MaxSpareServers @MinClients@
MaxClients @MaxClients@
MaxRequestsPerChild 0
</IfModule>
<IfDefine APACHE1>
LockFile @t_logs@/accept.lock
StartServers @MinClients@
MinSpareServers @MinClients@
MaxSpareServers @MinClients@
MaxClients @MaxClients@
MaxRequestsPerChild 0
</IfDefine>
<IfModule mpm_winnt.c>
ThreadsPerChild 50
MaxRequestsPerChild 0
</IfModule>
<Location /server-info>
SetHandler server-info
</Location>
<Location /server-status>
SetHandler server-status
</Location>
| Distrotech/Apache-Test | lib/Apache/TestConfig.pm | Perl | apache-2.0 | 60,701 |
# Generated by SOAP::WSDL from the AdWords v201409 WSDL -- a typelib
# class for the SharedCriterionReturnValue complexType.  Do not edit
# by hand; regenerate from the WSDL instead.
package Google::Ads::AdWords::v201409::SharedCriterionReturnValue;

use strict;
use warnings;

# elements of this type are namespace-qualified in the serialized XML
__PACKAGE__->_set_element_form_qualified(1);

sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' };

# this complexType carries no XML attributes
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201409::ListReturnValue);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables
    # inside-out attribute storage (Class::Std::Fast::Storable style)
    my %ListReturnValue__Type_of :ATTR(:get<ListReturnValue__Type>);
    my %value_of :ATTR(:get<value>);

    # register accessors, per-field classes and XML element names
    __PACKAGE__->_factory(
        [ qw( ListReturnValue__Type
              value
        ) ],
        {
            'ListReturnValue__Type' => \%ListReturnValue__Type_of,
            'value' => \%value_of,
        },
        {
            'ListReturnValue__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
            'value' => 'Google::Ads::AdWords::v201409::SharedCriterion',
        },
        {
            'ListReturnValue__Type' => 'ListReturnValue.Type',
            'value' => 'value',
        }
    );
} # end BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::SharedCriterionReturnValue
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
SharedCriterionReturnValue from the namespace https://adwords.google.com/api/adwords/cm/v201409.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * value
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/SharedCriterionReturnValue.pm | Perl | apache-2.0 | 1,676 |
package CoGe::Graphics::Feature::Block;
use strict;
use base qw(CoGe::Graphics::Feature);
=head1 AUTHOR
Eric Lyons
elyons@nature.berkeley.edu
=head1 COPYRIGHT
Permission to use, copy, modify, and distribute this software and its documentation for educational, research, and not-for-profit purposes, without fee and without a signed licensing agreement, is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201, for commercial licensing opportunities.
The full text of the license can be found in the
LICENSE file included with this module.
=cut
BEGIN {
    # package-level defaults: overall feature image height and the
    # height of a drawn block, in pixels
    use vars qw($VERSION $HEIGHT $BLOCK_HEIGHT);
    $VERSION = '0.1';
    $HEIGHT = 25;
    $BLOCK_HEIGHT = 20;
    # generate read/write accessors on top of the base Feature class
    __PACKAGE__->mk_accessors(
        "block_height",
        "segments",
        "print_label", #flag for printing feature label in gene
        "add_type", #flag to set if gene type should be appended to label
    );
}
# Append a (start, stop) segment to the feature, accepting any of the
# usual key spellings (start/begin/START/BEGIN and stop/end/STOP/END).
# Coordinates are normalized so start <= stop, and the segment list is
# kept sorted by start position.
sub add_segment
{
    my $self = shift;
    my %opts = @_;
    # FIX: use defined-ness rather than truth when picking the
    # coordinate -- the previous `||` chain silently discarded a
    # legitimate coordinate of 0.  Precedence order is unchanged.
    my ($start) = grep { defined $_ } @opts{qw(start begin START BEGIN)};
    my ($stop)  = grep { defined $_ } @opts{qw(stop end STOP END)};
    # normalize reversed coordinates
    ($start, $stop) = ($stop, $start) if $start > $stop;
    my @segs;
    push @segs, @{$self->segments} if $self->segments;
    push @segs, [$start, $stop];
    $self->segments([sort {$a->[0]<=>$b->[0]} @segs]);
}
# Compute the feature's bounding coordinates from its segments (or
# from start/stop when no segments were added yet) and set drawing
# defaults: image size, colors, block height and type.
sub _initialize
{
    my $self = shift;
    my %opts = @_;
    my $h = $HEIGHT; #total image height
    my $s;
    my $e;
    unless ($self->segments)
    {
        $self->segments([]);
        push @{$self->segments}, [$self->start, $self->stop] if defined $self->start && defined $self->stop;
    }
    # find the overall min start ($s) and max stop ($e) across segments
    foreach my $seg (sort {$a->[0] <=> $b->[0]} @{$self->segments})
    {
        $s = $seg->[0] unless $s;
        $e = $seg->[1] unless $e;
        $s = $seg->[0] if $seg->[0] < $s;
        $e = $seg->[1] if $seg->[1] > $e;
    }
    my $w = $e-$s;
    $w =1 unless $w;
    $self->start($s);
    $self->stop($e);
    $self->image_width($w);
    $self->image_height($h);
    $self->merge_percent(100);
    $self->bgcolor([255,255,255]) unless $self->bgcolor;
    # FIX: the default foreground color was guarded by
    # `unless $self->bgcolor` (copy-paste error) -- since bgcolor was
    # just set above, the default color was never applied.
    $self->color([255,100,255]) unless $self->color;
    $self->skip_overlap_search(0);
    # $self->font_size(1);
    $self->block_height($BLOCK_HEIGHT) unless $self->block_height;
    $self->print_label(0) unless defined $self->print_label();
    $self->type('block');
}
# Draw the feature onto its GD canvas: one filled rectangle per
# segment, with dashed "intron-style" peaks connecting consecutive
# segments.  Runs after _initialize has sized the canvas.
sub _post_initialize
{
    my $self = shift;
    my %opts = @_;
    $self->label($self->label." (".$self->type.")") if $self->add_type && $self->type;
    # my $label_loc = $self->strand =~ /-/ ? "bot" : "top";
    # $self->label_location($label_loc);
    my $gd = $self->gd;
    $gd->fill(0,0, $self->get_color($self->bgcolor));
    # $gd->transparent($self->get_color($self->bgcolor));
    my $s = $self->start;
    my $black = $self->get_color(0,0,0);
    my $color = $self->get_color($self->color);
    my $last;    # x of the previous segment's right edge (for connectors)
    my $c = $self->ih()/2;                # vertical center of the image
    my $bh = $self->image_height/2;       # half-height of a drawn block
    my @sorted = sort {$a->[0] <=> $b->[0]} @{$self->segments};
    foreach my $seg (@sorted)
    {
        # translate genomic coordinates to pixel offsets from $s
        my $x1 = $seg->[0] - $s;
        my $x2 = $seg->[1] - $s;
        $x2 = $x1+1 if $x1 == $x2;   # ensure at least 1px wide
        my $y1 = $c-$bh;
        my $y2 = $c+$bh;
        $gd->filledRectangle($x1,$y1, $x2, $y2, $color);
        $gd->rectangle($x1,$y1, $x2, $y2, $black);
        # dashed style for the connector lines
        $gd->setStyle($black, $black, $black, GD::gdTransparent, GD::gdTransparent);
        if ($last)
        {
            # draw a peaked connector from the previous segment to this one
            my $liney = $y1+($y2-$y1)/2;
            my $mid = ($x1-$last)/2+$last;
            $gd->line($last, $liney, $mid, 0, GD::gdStyled);
            $gd->line($mid, 0, $x1, $liney, GD::gdStyled);
        }
        $last = $x2;
    }
    # $self->_gd_string(y=>$c-$bh+2, x=>$x, text=>$self->label, size=>$self->block_height-4) if $self->print_label;
}
#################### subroutine header begin ####################
=head2 sample_function
Usage : How to use this function/method
Purpose : What it does
Returns : What it returns
Argument : What it wants to know
Throws : Exceptions and other anomalies
Comment : This is a sample subroutine header.
: It is polite to include more pod and fewer comments.
See Also :
=cut
#################### subroutine header end ####################
#################### main pod documentation begin ###################
## Below is the stub of documentation for your module.
## You better edit it!
=head1 NAME
CoGe::Graphics::Feature::Base
=head1 SYNOPSIS
use CoGe::Graphics::Feature::Base
=head1 DESCRIPTION
=head1 USAGE
=head1 BUGS
=head1 SUPPORT
=head1 AUTHOR
Eric Lyons
elyons@nature.berkeley.edu
=head1 COPYRIGHT
This program is free software licensed under the...
The Artistic License
The full text of the license can be found in the
LICENSE file included with this module.
=head1 SEE ALSO
perl(1).
=cut
#################### main pod documentation end ###################
1;
# The preceding line will help the module return a true value
| asherkhb/coge | modules/Graphics/lib/CoGe/Graphics/Feature/Block.pm | Perl | bsd-2-clause | 5,071 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::fastpath::mode::components::fan;
use strict;
use warnings;
# integer boxServicesFanItemState -> human-readable status
my %map_fan_status = (
    1 => 'notpresent',
    2 => 'operational',
    3 => 'failed',
    4 => 'powering',
    5 => 'nopower',
    6 => 'notpowering',
    7 => 'incompatible',
);

# SNMP columns of interest in the Dell/FASTPATH fan table
my $mapping = {
    boxServicesFanItemState => { oid => '.1.3.6.1.4.1.674.10895.5000.2.6132.1.1.43.1.6.1.3', map => \%map_fan_status },
    boxServicesFanSpeed => { oid => '.1.3.6.1.4.1.674.10895.5000.2.6132.1.1.43.1.6.1.4' },
};
# table entry root walked by load()
my $oid_boxServicesFansEntry = '.1.3.6.1.4.1.674.10895.5000.2.6132.1.1.43.1.6.1';
# Queue the SNMP walk of the fan table (starting at the state column)
# for the hardware component framework to fetch in one pass.
sub load {
    my ($self) = @_;

    my $walk_request = {
        oid   => $oid_boxServicesFansEntry,
        begin => $mapping->{boxServicesFanItemState}->{oid},
    };
    push @{$self->{request}}, $walk_request;
}
# Evaluate every fan found in the walked SNMP table: report its state,
# apply status severity mapping, and check/graph its speed thresholds.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking fans");
    $self->{components}->{fan} = {name => 'fans', total => 0, skip => 0};
    return if ($self->check_filter(section => 'fan'));

    my ($exit, $warn, $crit, $checked);
    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_boxServicesFansEntry}})) {
        # only iterate the state column; other columns are read by instance
        next if ($oid !~ /^$mapping->{boxServicesFanItemState}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_boxServicesFansEntry}, instance => $instance);

        next if ($self->check_filter(section => 'fan', instance => $instance));
        # /i match covers the 'notpresent' value produced by %map_fan_status
        if ($result->{boxServicesFanItemState} =~ /notPresent/i) {
            $self->absent_problem(section => 'fan', instance => $instance);
            next;
        }

        $self->{components}->{fan}->{total}++;

        $self->{output}->output_add(long_msg => sprintf("fan '%s' status is '%s' [instance = %s, speed = %s]",
            $instance, $result->{boxServicesFanItemState}, $instance, defined($result->{boxServicesFanSpeed}) ? $result->{boxServicesFanSpeed} : 'unknown'));
        # map the textual state onto a plugin severity
        $exit = $self->get_severity(label => 'default', section => 'fan', value => $result->{boxServicesFanItemState});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                short_msg => sprintf("Fan '%s' status is '%s'", $instance, $result->{boxServicesFanItemState}));
            next;
        }

        # numeric speed thresholds (rpm), per-instance overridable
        ($exit, $warn, $crit, $checked) = $self->get_severity_numeric(section => 'fan', instance => $instance, value => $result->{boxServicesFanSpeed});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                short_msg => sprintf("Fan '%s' is '%s' rpm", $instance, $result->{boxServicesFanSpeed}));
        }
        $self->{output}->perfdata_add(label => 'fan_' . $instance, unit => 'rpm',
            value => $result->{boxServicesFanSpeed},
            warning => $warn,
            critical => $crit, min => 0
        );
    }
}
1; | nichols-356/centreon-plugins | centreon/common/fastpath/mode/components/fan.pm | Perl | apache-2.0 | 3,997 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::cisco::standard::snmp::mode::memory;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Plugin-mode constructor: registers the warning/critical threshold
# options and the optional pool-name filter.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
        {
            "warning:s"     => { name => 'warning' },
            "critical:s"    => { name => 'critical' },
            "filter-pool:s" => { name => 'filter_pool' },
        });

    return $self;
}
# Validate the user-supplied warning/critical thresholds; abort with
# an option error when either fails to parse.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    for my $label ('warning', 'critical') {
        my $threshold = $self->{option_results}->{$label};
        # threshold_validate() returns 0 on an unparseable threshold
        next if $self->{perfdata}->threshold_validate(label => $label, value => $threshold) != 0;
        $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $threshold . "'.");
        $self->{output}->option_exit();
    }
}
# Check memory pools exposed via CISCO-MEMORY-POOL-MIB (byte counters).
# Returns 1 when at least one pool was evaluated, 0 otherwise (so the
# caller can fall back to the percent-based MIB).
sub check_table_memory {
    my ($self, %options) = @_;

    my $checked = 0;
    foreach my $oid (keys %{$self->{results}->{$options{entry}}}) {
        # iterate only the pool-name column; used/free are read by instance
        next if ($oid !~ /^$options{poolName}/);
        $oid =~ /\.([0-9]+)$/;
        my $instance = $1;
        my $memory_name = $self->{results}->{$options{entry}}->{$oid};
        my $memory_used = $self->{results}->{$options{entry}}->{$options{poolUsed} . '.' . $instance};
        my $memory_free = $self->{results}->{$options{entry}}->{$options{poolFree} . '.' . $instance};

        next if ($memory_name eq '');
        if (defined($self->{option_results}->{filter_pool}) && $self->{option_results}->{filter_pool} ne '' &&
            $memory_name !~ /$self->{option_results}->{filter_pool}/) {
            $self->{output}->output_add(long_msg => "Skipping pool '" . $memory_name . "'.");
            next;
        }

        $checked = 1;
        # the MIB exposes used/free only; total is derived
        my $total_size = $memory_used + $memory_free;
        my $prct_used = $memory_used * 100 / $total_size;
        my $prct_free = 100 - $prct_used;

        # thresholds are expressed against percent used
        my $exit = $self->{perfdata}->threshold_check(value => $prct_used, threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
        my ($total_value, $total_unit) = $self->{perfdata}->change_bytes(value => $total_size);
        my ($used_value, $used_unit) = $self->{perfdata}->change_bytes(value => $memory_used);
        my ($free_value, $free_unit) = $self->{perfdata}->change_bytes(value => $memory_free);

        $self->{output}->output_add(long_msg => sprintf("Memory '%s' Total: %s Used: %s (%.2f%%) Free: %s (%.2f%%)", $memory_name,
            $total_value . " " . $total_unit,
            $used_value . " " . $used_unit, $prct_used,
            $free_value . " " . $free_unit, $prct_free));
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                short_msg => sprintf("Memory '%s' Total: %s Used: %s (%.2f%%) Free: %s (%.2f%%)", $memory_name,
                    $total_value . " " . $total_unit,
                    $used_value . " " . $used_unit, $prct_used,
                    $free_value . " " . $free_unit, $prct_free));
        }

        # perfdata in bytes, with thresholds scaled to the pool's total
        $self->{output}->perfdata_add(label => "used_" . $memory_name, unit => 'B',
            value => $memory_used,
            warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning', total => $total_size),
            critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical', total => $total_size),
            min => 0, max => $total_size);
    }

    return $checked;
}
# Check memory utilization exposed as a percentage (Nexus,
# CISCO-SYSTEM-EXT-MIB).  Returns 1 when at least one row was
# evaluated, 0 otherwise.
sub check_percent_memory {
    my ($self, %options) = @_;

    my $checked = 0;
    foreach my $oid (keys %{$self->{results}->{$options{entry}}}) {
        next if ($oid !~ /^$options{memUsage}/);
        $oid =~ /\.([0-9]+)$/;
        my $instance = $1;
        my $memory_usage = $self->{results}->{$options{entry}}->{$oid};

        next if ($memory_usage eq '');

        $checked = 1;
        # value is already a percentage; thresholds apply directly
        my $exit = $self->{perfdata}->threshold_check(value => $memory_usage,
            threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                short_msg => sprintf("Memory used : %.2f%%", $memory_usage));
        }
        $self->{output}->perfdata_add(label => "utilization",
            value => $memory_usage,
            unit => "%",
            warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
            critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
            min => 0, max => 100);
    }

    return $checked;
}
# Entry point: fetch both MIB variants in one SNMP request, then try
# the pool-table check first and fall back to the percent-based check;
# report UNKNOWN when neither MIB returned usable data.
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    my $oid_ciscoMemoryPoolEntry = '.1.3.6.1.4.1.9.9.48.1.1.1';
    my $oid_ciscoMemoryPoolName = '.1.3.6.1.4.1.9.9.48.1.1.1.2';
    my $oid_ciscoMemoryPoolUsed = '.1.3.6.1.4.1.9.9.48.1.1.1.5'; # in B
    my $oid_ciscoMemoryPoolFree = '.1.3.6.1.4.1.9.9.48.1.1.1.6'; # in B
    # OIDs for Nexus
    my $oid_cseSysMemoryEntry = '.1.3.6.1.4.1.9.9.305.1.1';
    my $oid_cseSysMemoryUtilization = '.1.3.6.1.4.1.9.9.305.1.1.2';

    $self->{results} = $self->{snmp}->get_multiple_table(oids => [
        { oid => $oid_ciscoMemoryPoolEntry,
          start => $oid_ciscoMemoryPoolName, end => $oid_ciscoMemoryPoolFree
        },
        { oid => $oid_cseSysMemoryEntry,
          start => $oid_cseSysMemoryUtilization, end => $oid_cseSysMemoryUtilization }],
        nothing_quit => 1);

    # default OK; the checks below escalate severity as needed
    $self->{output}->output_add(severity => 'OK',
        short_msg => 'Memory is ok.');

    if (!$self->check_table_memory(entry => $oid_ciscoMemoryPoolEntry, poolName => $oid_ciscoMemoryPoolName, poolUsed => $oid_ciscoMemoryPoolUsed, poolFree => $oid_ciscoMemoryPoolFree)
        && !$self->check_percent_memory(entry => $oid_cseSysMemoryEntry, memUsage => $oid_cseSysMemoryUtilization)
    ) {
        $self->{output}->output_add(severity => 'UNKNOWN',
            short_msg => sprintf("Cannot find Memory informations."));
    }

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check memory usage (CISCO-MEMORY-POOL-MIB and CISCO-SYSTEM-EXT-MIB).
=over 8
=item B<--warning>
Threshold warning in percent.
=item B<--critical>
Threshold critical in percent.
=item B<--filter-pool>
Filter pool to check (can use regexp).
=back
=cut
| Shini31/centreon-plugins | centreon/common/cisco/standard/snmp/mode/memory.pm | Perl | apache-2.0 | 8,658 |
#!/usr/bin/env perl
use strict;
use warnings;
use Carp;
use Getopt::Long qw(:config posix_default no_ignore_case bundling pass_through);
use FindBin;
use lib "$FindBin::RealBin/../../PerlLib";
use Pipeliner;
use File::Basename;
my $usage = <<__EOUSAGE__;
#######################################################################################
#
# --outfmt6_grouped <string> outfmt6 grouped output
#
# --min_pct_len <int> minimum percent length covered by pairwise matches
#
# --min_per_id <int> minimum percent identity
#
# --inflation_factor <float> inflation factor for MCL clustering
#
#######################################################################################
__EOUSAGE__
;
my $help_flag;
my $outfmt6_grouped_file;
my $inflation_factor;
my $min_pct_len;
my $min_per_id;
&GetOptions ( 'h' => \$help_flag,
'outfmt6_grouped=s' => \$outfmt6_grouped_file,
'min_pct_len=i' => \$min_pct_len,
'min_per_id=i' => \$min_per_id,
'inflation_factor=f' => \$inflation_factor,
);
if ($help_flag) {
die $usage;
}
if (@ARGV) {
die "Error, dont understand parameters @ARGV";
}
unless ($outfmt6_grouped_file && $inflation_factor && $min_pct_len && $min_per_id) {
die $usage;
}
# add MCL to PATH setting
$ENV{PATH} = "/seq/regev_genome_portal/SOFTWARE/MCL/bin/:$ENV{PATH}";
# Pipeline: filter pairwise hits by length/identity, keep the best
# (lowest e-value) hit per gene pair, then run MCL clustering on the
# resulting .abc graph and dump the clusters.
main: {

    my $filtered_hits = basename($outfmt6_grouped_file . ".minLEN_${min_pct_len}_pct_len.minPID_${min_per_id}.abc");
    my $checkpoint = ".$filtered_hits.ok";
    # skip the filtering step when its checkpoint file already exists
    if (! -e $checkpoint) {

        my %best_hits;   # "geneA\x1cgeneB" ($;-joined) -> lowest e-value seen

        open (my $fh, $outfmt6_grouped_file) or die "Error, cannot open file $outfmt6_grouped_file";
        while (<$fh>) {
            if (/^\#/) { next; }   # skip comment lines
            chomp;
            # NOTE(review): @x is assigned but never used; the second
            # split below re-parses the same $_
            my @x = split(/\t/);
            my ($transA, $transB, $per_id, $E_value, @rest) = split(/\t/);
            my $per_len_match = pop @rest;   # last column: percent length matched
            if ($per_len_match >= $min_pct_len && $per_id >= $min_per_id) {

                my $geneA = &parse_gene_name($transA);
                my $geneB = &parse_gene_name($transB);

                if ($geneA eq $geneB) { next; }   # ignore self-hits

                # canonical order so A-B and B-A collapse to one pair
                ($geneA, $geneB) = sort ($geneA, $geneB);

                my $gene_pair_token = join("$;", $geneA, $geneB);

                my $lowest_evalue = $best_hits{$gene_pair_token};
                if ( (! defined $lowest_evalue) || $lowest_evalue > $E_value) {
                    $best_hits{$gene_pair_token} = $E_value;
                }
            }
        }

        # write best hits file (.abc: geneA geneB weight)
        open (my $ofh, ">$filtered_hits") or die "Error, cannot write to $filtered_hits";
        foreach my $gene_pair_token (keys %best_hits) {
            my ($geneA, $geneB) = split(/$;/, $gene_pair_token);
            my $E_value = $best_hits{$gene_pair_token};
            print $ofh join("\t", $geneA, $geneB, $E_value) . "\n";
        }
        close $ofh;

        `touch $checkpoint`;
    }

    # MCL stage: load the graph (e-values -> -log10 weights, capped at 200),
    # cluster with the requested inflation, then dump readable clusters
    my $pipeliner = new Pipeliner(-verbose => 1);

    my $cmd = "mcxload -abc $filtered_hits --stream-mirror --stream-neg-log10 "
        . " -stream-tf 'ceil(200)' -o $filtered_hits.mci -write-tab $filtered_hits.tab";

    $pipeliner->add_commands( new Command($cmd, ".$filtered_hits.tab.ok") );

    $inflation_factor = sprintf("%.1f", $inflation_factor);
    my $inflation_factor_dec_removed = $inflation_factor;
    $inflation_factor_dec_removed =~ s/\.//;   # MCL names outputs like I20 for -I 2.0

    $cmd = "mcl $filtered_hits.mci -I $inflation_factor";
    my $mcl_outfile = "out.$filtered_hits.mci.I$inflation_factor_dec_removed";
    $pipeliner->add_commands( new Command($cmd, ".$mcl_outfile.ok"));

    $cmd = "mcxdump -icl $mcl_outfile -tabr $filtered_hits.tab -o dump.$mcl_outfile";
    $pipeliner->add_commands( new Command($cmd, ".dump.$mcl_outfile.ok"));

    $pipeliner->run();

    exit(0);
}
####
####
# Extract the gene symbol from a transcript identifier of the form
# "trans_id;gene_symbol" or "gene_symbol|trans_id".  Falls back to the
# whole identifier when no gene symbol can be extracted.
sub parse_gene_name {
    my ($trans_info) = @_;

    my ($gene_symbol, $trans_id);
    # FIX: the original matched /;/ and /\|/ against $_ instead of
    # $trans_info, so the branch taken depended on the caller's $_
    # (the current input line) rather than on the identifier itself.
    if ($trans_info =~ /;/) {
        ($trans_id, $gene_symbol) = split(/;/, $trans_info);
    }
    elsif ($trans_info =~ /\|/) {
        ($gene_symbol, $trans_id) = split(/\|/, $trans_info);
    }

    if ($gene_symbol) {
        return($gene_symbol);
    }
    else {
        return($trans_info);
    }
}
| g1o/trinityrnaseq | util/misc/blast_outfmt6_group_segments.to_Markov_Clustering.pl | Perl | bsd-3-clause | 4,446 |
#! /usr/bin/env perl
use strict;
use warnings;
use File::Temp qw/tempfile/;
my $topdir = shift;
processallfiles($topdir);
print "Success\n";
# processallfiles: recursively rewrite every .c/.h/.h.in/.pod/.pod.in file
# under $dir, converting *_with_libctx naming to the *_ex convention via
# processfile().  Each file is rewritten into a temp file (by temporarily
# redirecting STDOUT) which is then renamed over the original.
sub processallfiles {
    my $dir = shift;
    # BUG FIX: the last glob pattern was "*dir/*.pod.in" (literal "*dir"),
    # so *.pod.in files were silently never processed.
    my @files = glob "$dir/*.c $dir/*.h $dir/*.h.in $dir/*.pod $dir/*.pod.in";
    # Keep a duplicate of the real STDOUT so it can be restored per file.
    open (my $STDOUT_ORIG, '>&', STDOUT) or die "Cannot dup STDOUT: $!";
    foreach my $file (@files) {
        my ($tmpfh, $tmpfile) = tempfile();
        print "Processing $file\n";
        # Redirect STDOUT so processfile()'s prints land in the temp file.
        open(STDOUT, '>>', $tmpfile) or die "Cannot redirect STDOUT to $tmpfile: $!";
        open(INFILE, '<', $file) or die "Cannot open $file: $!";
        processfile(\*INFILE);
        close(INFILE);
        close(STDOUT);
        # Replace the original with the rewritten content.
        rename($tmpfile, $file) or die "Cannot rename $tmpfile to $file: $!";
        unlink($tmpfile);
        # restore STDOUT
        open (STDOUT, '>&', $STDOUT_ORIG) or die "Cannot restore STDOUT: $!";
    }
    #Recurse through subdirs
    opendir my $dh, $dir or die "Cannot open directory";
    while (defined(my $subdir = readdir $dh)) {
        next unless -d "$dir/$subdir";
        # Skip ".", ".." and any other dot-directory.
        next if (rindex $subdir, ".", 0) == 0;
        processallfiles("$dir/$subdir");
    }
    closedir $dh;
}
# processfile: stream one source file from $fh to STDOUT, renaming every
# *_with_libctx identifier to *_ex.  When a renamed call's argument list
# spans several physical lines, the parameters are accumulated until the
# balancing ')' is found (extracttoclose) and then re-flowed with
# formatparams() so continuation lines align under the renamed call.
# Dies if EOF is reached while still inside an unterminated argument list.
sub processfile {
    my $fh = shift;
    # State for an argument list that continues past the current line:
    my $multiline = 0;    # true while gathering continuation lines
    my @params;           # split-out parameters of the call being re-flowed
    my $indent;           # column where continuation lines should start
    my $paramstr = "";    # argument text accumulated so far
    foreach my $line (<$fh>) {
        chomp($line);
        if (!$multiline) {
            # A _with_libctx call whose '(' is on this line and which does
            # not end in a backslash (i.e. not a macro continuation line).
            if ($line =~ /^(.+)_with_libctx\((.*[^\\])$/) {
                my $preline = $1;
                my $postline = $2;
                #Strip trailing whitespace
                $postline =~ s/\s+$//;
                print $preline.'_ex(';
                my @rets = extracttoclose($postline);
                if (@rets) {
                    # Argument list closes on this same line: emit verbatim.
                    print "$postline\n";
                    $multiline = 0;
                } else {
                    # No balancing ')': start gathering continuation lines.
                    $multiline = 1;
                    $paramstr = $postline;
                    # Continuations align just after the renamed call's '('.
                    $indent = (length $preline) + (length '_ex(');
                }
            } else {
                #Any other reference to _with_libctx we just replace
                $line =~ s/_with_libctx/_ex/g;
                print $line."\n";
            }
        } else {
            #Strip leading whitespace
            $line =~ s/^\s+//;
            #Strip trailing whitespace
            $line =~ s/\s+$//;
            my @rets = extracttoclose($paramstr.$line);
            if (@rets) {
                my $pre = shift @rets;
                my $post = shift @rets;
                @params = split(",", $pre);
                # NOTE(review): this grep trims whitespace via the side
                # effect of s/// on the aliased elements; the pattern always
                # matches (zero-width ^\s*), so no element is dropped.  The
                # "my" shadows the outer @params from here on, and it is the
                # trimmed copy that formatparams() receives.
                my @params = grep(s/^\s*|\s*$//g, @params);
                formatparams($indent, @params);
                print ')'.$post."\n";
                $multiline = 0;
            } else {
                # Still no balancing ')': keep accumulating argument text.
                $paramstr .= $line;
            }
        }
    }
    die "End of multiline not found" if $multiline;
}
# formatparams: print a comma-separated parameter list to STDOUT, wrapping
# onto a new line (indented by $indent spaces) whenever appending the next
# parameter would push the current line past 80 columns.  No trailing
# newline or comma is printed after the final parameter.
sub formatparams {
    my $indent = shift;
    my @params = @_;
    if (@params) {
        my $param = shift @params;
        # BUG FIX: this was "my $lensofar += ...", which applied += to a
        # freshly declared undef and emitted an uninitialized-value warning
        # under "use warnings" (the value was coincidentally correct).
        my $lensofar = $indent + (length $param) + 1;
        print "$param";
        print "," if @params;
        while (@params) {
            my $param = shift @params;
            if (($lensofar + (length $param) + 2) > 80) {
                # Wrap: start a new line aligned under the opening paren.
                print "\n".(" " x $indent);
                print $param;
                $lensofar = $indent + (length $param) + 1;
            } else {
                print ' '.$param;
                $lensofar += (length $param) + 2;
            }
            print "," if @params;
        }
    }
}
# Scan $remaining for the ')' that balances an already-seen '('.  Nested
# parenthesised groups are consumed recursively.  On success returns a
# two-element list: everything up to (but excluding) the balancing ')',
# and everything after it.  Returns the empty list when no balancing ')'
# exists (i.e. parentheses are unbalanced so far).
sub extracttoclose {
    my $remaining = shift;
    my $collected = "";

    # Consume every '(' that occurs before the next ')': recurse to swallow
    # the whole nested group, then continue scanning after it.
    while ($remaining =~ /^([^\)]*?)\((.*)$/) {
        my ($before, $after_open) = ($1, $2);
        my ($inner, $tail) = extracttoclose($after_open);
        return () unless defined $inner;    # nested group never closed
        $collected .= $before . '(' . $inner . ')';
        $remaining = $tail;
    }

    # No nested groups remain: the first ')' left is the balancing one.
    return ($collected . $1, $2) if $remaining =~ /^(.*?)\)(.*)$/;
    return ();
}
| openssl/openssl | util/withlibctx.pl | Perl | apache-2.0 | 3,728 |
#!/usr/bin/env perl
use strict;
use warnings;
use WebApplicationDBHandle;
use Getopt::Long;
# Print a short usage summary for this script to STDOUT.
sub usage {
    my @lines = (
        "user_add.pl >>> add a user to the web application database\n",
        "user_add.pl -firstname <fn> -lastname <ln> -login <login> -email <email> [ -comment <text> ]\n",
    );
    print @lines;
}
# read in parameters
my $firstname = '';
my $lastname = '';
my $login = '';
my $email = '';
my $comment = '';
# NOTE(review): -comment is accepted and parsed but never used below --
# confirm whether it was meant to be passed to User->create.
GetOptions ( 'firstname=s' => \$firstname,
             'lastname=s' => \$lastname,
             'login=s' => \$login,
             'email=s' => \$email,
             'comment=s' => \$comment,
           );
# firstname, lastname, login and email are all mandatory.
unless ($firstname and $lastname and $login and $email) {
    &usage();
    exit 0;
}
# initialize db-master
my ($dbmaster, $error) = WebApplicationDBHandle->new();
# check if we got a dbmaster
if ($error) {
    print $error."\n";
    exit 0;
}
print "Creating user $login ($firstname $lastname, $email).\n";
# sanity checks
# Abort if either the email address or the login is already registered.
my $user;
if ($user = $dbmaster->User->init({ email => $email })) {
    print "This email has already been registered for ".$user->login.", aborting.\n";
    exit 0;
}
if ($user = $dbmaster->User->init({ login => $login })) {
    print "This login has already been registered for ".
        $user->firstname." ".$user->lastname.", aborting.\n";
    exit 0;
}
# create the user in the db
$user = $dbmaster->User->create( { email => $email,
                                   firstname => $firstname,
                                   lastname => $lastname,
                                   login => $login,
                                   active => 1,
                                 } );
# Presumably create() returns undef/an error scalar on failure; only a
# blessed WebServerBackend::User object counts as success -- TODO confirm.
unless (ref $user and $user->isa('WebServerBackend::User')) {
    print "Unable to create user. Quit.\n";
    exit 0;
}
print "Done.\n";
| teharrison/MG-RAST | src/WebApplication/scripts/user_add.pl | Perl | bsd-2-clause | 1,628 |
#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
use strict;
use warnings;
use File::Spec::Functions;
use File::Basename;
use File::Copy;
use File::Path;
use if $^O ne "VMS", 'File::Glob' => qw/glob/;
use Getopt::Long;
use Pod::Usage;
use lib '.';
use configdata;
# We know we are in the 'util' directory and that our perl modules are
# in util/perl
use lib catdir(dirname($0), "perl");
use OpenSSL::Util::Pod;
my %options = ();
GetOptions(\%options,
'sourcedir=s', # Source directory
'subdir=s%', # Subdirectories to look through,
# with associated section numbers
'destdir=s', # Destination directory
#'in=s@', # Explicit files to process (ignores sourcedir)
#'section=i', # Default section used for --in files
'type=s', # The result type, 'man' or 'html'
'remove', # To remove files rather than writing them
'dry-run|n', # Only output file names on STDOUT
'debug|D+',
);
# Default subdir map: each doc/ subdirectory maps to a man section number.
unless ($options{subdir}) {
    $options{subdir} = { apps => '1',
                         crypto => '3',
                         ssl => '3' };
}
# Default source directory is <configured sourcedir>/doc.
unless ($options{sourcedir}) {
    $options{sourcedir} = catdir($config{sourcedir}, "doc");
}
# --destdir and a valid --type (man|html) are mandatory; otherwise show usage.
pod2usage(1) unless ( defined $options{subdir}
                      && defined $options{sourcedir}
                      && defined $options{destdir}
                      && defined $options{type}
                      && ($options{type} eq 'man'
                          || $options{type} eq 'html') );
# With --debug, dump the effective option values to STDERR.
if ($options{debug}) {
    print STDERR "DEBUG: options:\n";
    print STDERR "DEBUG: --sourcedir = $options{sourcedir}\n"
        if defined $options{sourcedir};
    print STDERR "DEBUG: --destdir = $options{destdir}\n"
        if defined $options{destdir};
    print STDERR "DEBUG: --type = $options{type}\n"
        if defined $options{type};
    foreach (keys %{$options{subdir}}) {
        print STDERR "DEBUG: --subdir = $_=$options{subdir}->{$_}\n";
    }
    print STDERR "DEBUG: --remove = $options{remove}\n"
        if defined $options{remove};
    print STDERR "DEBUG: --debug = $options{debug}\n"
        if defined $options{debug};
    print STDERR "DEBUG: --dry-run = $options{\"dry-run\"}\n"
        if defined $options{"dry-run"};
}
# Probe whether this platform supports symlink(); if not, alias pages are
# emulated below by copying files instead of linking them.
my $symlink_exists = eval { symlink("",""); 1 };
foreach my $subdir (keys %{$options{subdir}}) {
    my $section = $options{subdir}->{$subdir};
    my $podsourcedir = catfile($options{sourcedir}, $subdir);
    my $podglob = catfile($podsourcedir, "*.pod");
    foreach my $podfile (glob $podglob) {
        my $podname = basename($podfile, ".pod");
        my $podpath = catfile($podfile);
        # Extract names/section from the pod itself (section may override
        # the subdir default).
        my %podinfo = extract_pod_info($podpath,
                                       { debug => $options{debug},
                                         section => $section });
        # All other names this page documents; each becomes a link/copy.
        my @podfiles = grep { $_ ne $podname } @{$podinfo{names}};
        my $updir = updir();
        my $name = uc $podname;
        my $suffix = { man => ".$podinfo{section}",
                       html => ".html" } -> {$options{type}};
        # Shell command that renders this pod into the requested format.
        my $generate = { man => "pod2man --name=$name --section=$podinfo{section} --center=OpenSSL --release=$config{version} \"$podpath\"",
                         html => "pod2html \"--podroot=$options{sourcedir}\" --htmldir=$updir --podpath=apps:crypto:ssl \"--infile=$podpath\" \"--title=$podname\""
                         } -> {$options{type}};
        my $output_dir = catdir($options{destdir}, "man$podinfo{section}");
        my $output_file = $podname . $suffix;
        my $output_path = catfile($output_dir, $output_file);
        if (! $options{remove}) {
            my @output;
            print STDERR "DEBUG: Processing, using \"$generate\"\n"
                if $options{debug};
            unless ($options{"dry-run"}) {
                @output = `$generate`;
                # Rewrite pod2html's absolute man.he.net links into
                # relative links inside our own tree.
                map { s|href="http://man\.he\.net/man|href="../man|g; } @output
                    if $options{type} eq "html";
            }
            print STDERR "DEBUG: Done processing\n" if $options{debug};
            if (! -d $output_dir) {
                print STDERR "DEBUG: Creating directory $output_dir\n" if $options{debug};
                unless ($options{"dry-run"}) {
                    mkpath $output_dir
                        or die "Trying to create directory $output_dir: $!\n";
                }
            }
            print STDERR "DEBUG: Writing $output_path\n" if $options{debug};
            unless ($options{"dry-run"}) {
                open my $output_fh, '>', $output_path
                    or die "Trying to write to $output_path: $!\n";
                foreach (@output) {
                    print $output_fh $_;
                }
                close $output_fh;
            }
            print STDERR "DEBUG: Done writing $output_path\n" if $options{debug};
        } else {
            print STDERR "DEBUG: Removing $output_path\n" if $options{debug};
            unless ($options{"dry-run"}) {
                # Loop: remove repeatedly until unlink reports nothing left.
                while (unlink $output_path) {}
            }
        }
        print "$output_path\n";
        # Create (or remove) one alias per additional documented name.
        foreach (@podfiles) {
            my $link_file = $_ . $suffix;
            my $link_path = catfile($output_dir, $link_file);
            if (! $options{remove}) {
                if ($symlink_exists) {
                    print STDERR "DEBUG: Linking $link_path -> $output_file\n"
                        if $options{debug};
                    unless ($options{"dry-run"}) {
                        symlink $output_file, $link_path;
                    }
                } else {
                    # No symlink support: fall back to a plain file copy.
                    print STDERR "DEBUG: Copying $output_path to link_path\n"
                        if $options{debug};
                    unless ($options{"dry-run"}) {
                        copy $output_path, $link_path;
                    }
                }
            } else {
                print STDERR "DEBUG: Removing $link_path\n" if $options{debug};
                unless ($options{"dry-run"}) {
                    while (unlink $link_path) {}
                }
            }
            print "$link_path -> $output_path\n";
        }
    }
}
__END__
=pod
=head1 NAME
process_docs.pl - A script to process OpenSSL docs
=head1 SYNOPSIS
B<process_docs.pl>
[B<--sourcedir>=I<dir>]
B<--destdir>=I<dir>
B<--type>=B<man>|B<html>
[B<--remove>]
[B<--dry-run>|B<-n>]
[B<--debug>|B<-D>]
=head1 DESCRIPTION
This script looks for .pod files in the subdirectories 'apps', 'crypto'
and 'ssl' under the given source directory.
The OpenSSL configuration data file F<configdata.pm> I<must> reside in
the current directory, I<or> perl must have the directory it resides in
in its inclusion array. For the latter variant, a call like this would
work:
perl -I../foo util/process_docs.pl {options ...}
=head1 OPTIONS
=over 4
=item B<--sourcedir>=I<dir>
Top directory where the source files are found.
=item B<--destdir>=I<dir>
Top directory where the resulting files should end up
=item B<--type>=B<man>|B<html>
Type of output to produce. Currently supported are man pages and HTML files.
=item B<--remove>
Instead of writing the files, remove them.
=item B<--dry-run>|B<-n>
Do not perform any file writing, directory creation or file removal.
=item B<--debug>|B<-D>
Print extra debugging output.
=back
=head1 COPYRIGHT
Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
https://www.openssl.org/source/license.html
=cut
| GaloisInc/hacrypto | src/C/openssl/openssl-1.1.0b/util/process_docs.pl | Perl | bsd-3-clause | 8,123 |
% ----------------------------------------------------------------------
% BEGIN LICENSE BLOCK
% Version: CMPL 1.1
%
% The contents of this file are subject to the Cisco-style Mozilla Public
% License Version 1.1 (the "License"); you may not use this file except
% in compliance with the License. You may obtain a copy of the License
% at www.eclipse-clp.org/license.
%
% Software distributed under the License is distributed on an "AS IS"
% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
% the License for the specific language governing rights and limitations
% under the License.
%
% The Original Code is The ECLiPSe Constraint Logic Programming System.
% The Initial Developer of the Original Code is Cisco Systems, Inc.
% Portions created by the Initial Developer are
% Copyright (C) 1994-2006 Cisco Systems, Inc. All Rights Reserved.
%
% Contributor(s): ECRC GmbH
% Contributor(s): IC-Parc, Imperal College London
%
% END LICENSE BLOCK
%
% System: ECLiPSe Constraint Logic Programming System
% Version: $Id: par_util.pl,v 1.1 2008/06/30 17:43:48 jschimpf Exp $
% ----------------------------------------------------------------------
%
% ECLiPSe PROLOG LIBRARY MODULE
%
% IDENTIFICATION: par_util.pl
%
% AUTHOR: Joachim Schimpf
%
% CONTENTS:
% par_member(?Element, +List)
% par_delete(?Element, +List, ?Rest)
% par_between(+From, +To, ?I)
% par_maplist(+Pred, +ListIn, ?ListOut)
%
% statistics_par
% statistics_par_reset
%
% Goal1 & Goal2
%
% DESCRIPTION: Parallel versions of various predicates.
% The semantics of par_member/2, par_maplist/3
% and &/2 is not exactly the same as the
% corresponding sequential version.
%
:- module(par_util).
:- pragma(nodebug).
:- comment(summary, "Parallel versions of various predicates").
:- comment(author, "Joachim Schimpf, ECRC Munich").
:- comment(copyright, "Cisco Systems, Inc").
:- comment(date, "$Date: 2008/06/30 17:43:48 $").
:- comment(par_member/2, [template:"par_member(?Element, +List)",
summary:"Parallel version of member/2",
desc:html("Parallel version of member/2, i.e. selects elements from
the given list in parallel. Note that it cannot work backwards
and generate lists like member/2 can, the list must be a proper list.")]).
:- comment(par_delete/3, [template:"par_delete(?Element, ?List, ?Rest)",
summary:"Parallel version of delete/3"]).
:- comment(par_between/3, [template:"par_between(+From, +To, ?I)",
summary:"Generates integers between From and To in parallel",
see_also:[fork/2, between/3, between/4]]).
:- comment(par_maplist/3, [template:"par_maplist(+Pred, +In, ?Out)",
summary:"Parallel version of maplist/3",
desc:html("Parallel version of maplist/3. The semantics is not
exactly the same as maplist/3: It does not work backwards and it
does not cope with aliasing between the In and the Out list, since
it is implemented on top of findall/3. There will only be a
performance gain if the mapping predicate does enough computation
to make the overhead pay off."),
see_also:[maplist/3]]).
:- comment((&)/2, [template:"Goal1 & Goal2",
summary:"Parallel AND operator implemented on top of OR-parallelism",
desc:html("Parallel AND operator implemented on top of
OR-parallelism. This will only pay off for sufficiently
coarse-grained computations in Goal1 and Goal2.")]).
:- export
(&)/2,
par_between/3,
par_delete/3,
par_maplist/3,
par_member/2,
statistics_par/0,
statistics_par_reset/0.
:- export op(950, xfy, (&)).
:- import worker_statistics/2, worker_statistics_reset/1 from sepia_kernel.
% Parallel member(?, +List), it can't generate lists!
% The list is converted to a term arr/N so that fork/2 can hand each
% parallel branch an index I; each branch then selects one element with
% arg/3.  The index is reversed (I1 is N1-I) so that solutions are
% produced in list order.
par_member(X, List) :-
	List = [_|_],
	Arr =.. [arr|List],
	functor(Arr, arr, N),
	N1 is N+1,
	fork(N, I),
	I1 is N1-I,
	arg(I1, Arr, X).

% Parallel version of delete/3: the clauses of par_delete/3 are explored
% in (OR-)parallel due to the parallel/1 declaration.
:- parallel par_delete/3.
par_delete(A, [A|C], C).
par_delete(A, [B|C], [B|D]) :-
	par_delete(A, C, D).

% par_between(+From, +To, ?X): enumerate the integers From..To, one per
% parallel branch; fails immediately when the range is empty (N =< 0).
par_between(From, To, X) :-
	To1 is To+1,
	N is To1-From,
	N > 0,
	fork(N, I),
	X is To1-I.
% Goal1 & Goal2: parallel AND implemented on top of OR-parallelism.
% Both conjuncts run as alternative branches of parand/4 inside findall/3;
% the conjunction succeeds when the collected bag contains a solution for
% both the a- tag (Goal1) and the b- tag (Goal2), unifying the bindings
% back into the original goals.
:- tool((&)/2, '&_body'/3).
'&_body'(Goal1, Goal2, Module) :-
	findall(Sol, parand(Sol, Goal1, Goal2, Module), Bag),
	member(a-Goal1, Bag),
	member(b-Goal2, Bag).

% The two clauses are explored in (OR-)parallel; each runs one conjunct
% in the caller's module.
:- parallel parand/4.
:- mode parand(-,+,+,+).
parand(a-Goal1, Goal1, _, Module) :- call(Goal1)@Module.
parand(b-Goal2, _, Goal2, Module) :- call(Goal2)@Module.

% par_maplist(+Pred, +In, ?Out): apply Pred to every element of In in
% parallel.  Results are collected via findall/3 as Index-Value pairs,
% sorted back into list order (descending index, since fork indices are
% reversed), then stripped of their keys.
:- tool(par_maplist/3, par_maplist_body/4).
par_maplist_body(Pred, In, Out, Module) :-
	findall(Sol, map_elements(Pred, In, Sol, Module), Out0),
	sort(1, >=, Out0, Out1),
	strip_key(Out1, Out).

% One parallel branch per input element: build the goal Pred(Xin, Xout),
% select element I1 of the input array, and commit to the first solution
% only (the if-then-else acts like once/1).
map_elements(Pred, In, I-Xout, Module) :-
	Pred =.. PL,
	append(PL, [Xin, Xout], NewPred),
	Call =.. NewPred,
	InArr =.. [in|In],
	functor(InArr, in, N),
	N1 is N+1,
	fork(N, I),
	I1 is N1-I,
	arg(I1, InArr, Xin),
	( call(Call)@Module, true -> true ).

% strip_key(+Pairs, -Values): drop the Key- prefix of each Key-Value pair.
strip_key([], []).
strip_key([_-X|Xs], [X|Ys]) :- strip_key(Xs, Ys).
% Parallel statistics
% statistics_par: print one line of engine/scheduler counters for every
% worker (alive and sleeping).  Iteration is driven by backtracking over
% the worker ids and terminated by fail; the final unit clause makes the
% predicate always succeed.
statistics_par :-
	writeln(" Wrkr Jobs Prun Published Copy Copied Idling Working Copying Scheduling"),
	writeln(" ID # # cpts alts # bytes ms ms ms ms\n"),
	get_flag(workerids, _Host:AliveIds+SleepIds),
	(member(Wid, AliveIds) ; member(Wid, SleepIds)),
	worker_statistics(Wid, Data),
	arg(1, Data, Jobs),
	arg(2, Data, Prunes),
	arg(4, Data, CopyFromCnt),
	arg(5, Data, CopyFromBytes),
	arg(8, Data, _Publish),
	arg(9, Data, PubChpts),
	arg(10, Data, PubAlts),
	% Aggregate the raw timers into idle/work/copy/scheduling buckets.
	IdleMs is arg(14, Data) + arg(23, Data),
	WorkMs is arg(15, Data),
	CopyMs is arg(17, Data) + arg(18, Data) + arg(22, Data),
	SchedMs is arg(16, Data) + arg(19, Data) + arg(20, Data) + arg(21, Data),
	printf("%5d%5d%5d%5d%5d%5d%12d%8.0f%8.0f%8.0f%8.0f\n",
	    [Wid, Jobs, Prunes, PubChpts, PubAlts, CopyFromCnt,
	    CopyFromBytes, IdleMs, WorkMs, CopyMs, SchedMs]),
	fail.
statistics_par.

% statistics_par_reset: clear the counters of every worker; the member/2
% disjunction plus fail drives the reset over all workers, and the final
% unit clause makes the predicate succeed.
statistics_par_reset :-
	get_flag(workerids, _Host:AliveIds+SleepIds),
	(member(Wid, AliveIds) ; member(Wid, SleepIds)),
	worker_statistics_reset(Wid),
	fail.
statistics_par_reset.
| daleooo/barrelfish | usr/skb/eclipse_kernel/lib/par_util.pl | Perl | mit | 5,961 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
# The name this swash is to be known by, with the format of the mappings in
# the main body of the table, and what all code points missing from this file
# map to.
$utf8::SwashInfo{'ToJg'}{'format'} = 's'; # string
$utf8::SwashInfo{'ToJg'}{'missing'} = 'No_Joining_Group';
return <<'END';
0620 Yeh
0622 0623 Alef
0624 Waw
0625 Alef
0626 Yeh
0627 Alef
0628 Beh
0629 Teh_Marbuta
062A 062B Beh
062C 062E Hah
062F 0630 Dal
0631 0632 Reh
0633 0634 Seen
0635 0636 Sad
0637 0638 Tah
0639 063A Ain
063B 063C Gaf
063D 063F Farsi_Yeh
0641 Feh
0642 Qaf
0643 Kaf
0644 Lam
0645 Meem
0646 Noon
0647 Heh
0648 Waw
0649 064A Yeh
066E Beh
066F Qaf
0671 0673 Alef
0675 Alef
0676 0677 Waw
0678 Yeh
0679 0680 Beh
0681 0687 Hah
0688 0690 Dal
0691 0699 Reh
069A 069C Seen
069D 069E Sad
069F Tah
06A0 Ain
06A1 06A6 Feh
06A7 06A8 Qaf
06A9 Gaf
06AA Swash_Kaf
06AB Gaf
06AC 06AE Kaf
06AF 06B4 Gaf
06B5 06B8 Lam
06B9 06BC Noon
06BD Nya
06BE Knotted_Heh
06BF Hah
06C0 Teh_Marbuta
06C1 06C2 Heh_Goal
06C3 Teh_Marbuta_Goal
06C4 06CB Waw
06CC Farsi_Yeh
06CD Yeh_With_Tail
06CE Farsi_Yeh
06CF Waw
06D0 06D1 Yeh
06D2 06D3 Yeh_Barree
06D5 Teh_Marbuta
06EE Dal
06EF Reh
06FA Seen
06FB Sad
06FC Ain
06FF Knotted_Heh
0710 Alaph
0712 Beth
0713 0714 Gamal
0715 0716 Dalath_Rish
0717 He
0718 Syriac_Waw
0719 Zain
071A Heth
071B 071C Teth
071D Yudh
071E Yudh_He
071F Kaph
0720 Lamadh
0721 Mim
0722 Nun
0723 Semkath
0724 Final_Semkath
0725 E
0726 Pe
0727 Reversed_Pe
0728 Sadhe
0729 Qaph
072A Dalath_Rish
072B Shin
072C Taw
072D Beth
072E Gamal
072F Dalath_Rish
074D Zhain
074E Khaph
074F Fe
0750 0756 Beh
0757 0758 Hah
0759 075A Dal
075B Reh
075C Seen
075D 075F Ain
0760 0761 Feh
0762 0764 Gaf
0765 0766 Meem
0767 0769 Noon
076A Lam
076B 076C Reh
076D Seen
076E 076F Hah
0770 Seen
0771 Reh
0772 Hah
0773 0774 Alef
0775 0776 Farsi_Yeh
0777 Yeh
0778 0779 Waw
077A 077B Burushaski_Yeh_Barree
077C Hah
077D 077E Seen
077F Kaf
08A0 Beh
08A2 Hah
08A3 Tah
08A4 Feh
08A5 Qaf
08A6 Lam
08A7 Meem
08A8 08A9 Yeh
08AA Reh
08AB Waw
08AC Rohingya_Yeh
END
| liuyangning/WX_web | xampp/perl/lib/unicore/To/Jg.pl | Perl | mit | 2,479 |
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# Version 2.1.
#
# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
# Opteron 240 CPU] plus all the bells-n-whistles from 32-bit version
# [you'll notice a lot of resemblance], such as compressed S-boxes
# in little-endian byte order, prefetch of these tables in CBC mode,
# as well as avoiding L1 cache aliasing between stack frame and key
# schedule and already mentioned tables, compressed Td4...
#
# Performance in number of cycles per processed byte for 128-bit key:
#
# ECB encrypt ECB decrypt CBC large chunk
# AMD64 33 43 13.0
# EM64T 38 56 18.6(*)
# Core 2 30 42 14.5(*)
# Atom 65 86 32.1(*)
#
# (*) with hyper-threading off
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;
$verticalspin=1; # unlike 32-bit version $verticalspin performs
# ~15% better on both AMD and Intel cores
$speed_limit=512; # see aes-586.pl for details
$code=".text\n";
$s0="%eax";
$s1="%ebx";
$s2="%ecx";
$s3="%edx";
$acc0="%esi"; $mask80="%rsi";
$acc1="%edi"; $maskfe="%rdi";
$acc2="%ebp"; $mask1b="%rbp";
$inp="%r8";
$out="%r9";
$t0="%r10d";
$t1="%r11d";
$t2="%r12d";
$rnds="%r13d";
$sbox="%r14";
$key="%r15";
# hi: rewrite a legacy GPR name to its high-byte alias, e.g. "%eax" -> "%ah".
# Only %[er]{a,b,c,d}x have a high-byte form; other names pass through.
# Idiom fix: use $1 instead of the deprecated \1 in the s/// replacement.
sub hi() { my $r=shift; $r =~ s/%[er]([a-d])x/%${1}h/; $r; }
# lo: rewrite a GPR name to its low-byte alias:
#   %[er]{a..d}x -> %{a..d}l;  %[er]{si,di} -> %{si,di}l;  %rN[d] -> %rNb.
# Idiom fix: use ${1} instead of the deprecated \1 in replacements.
sub lo() { my $r=shift; $r =~ s/%[er]([a-d])x/%${1}l/;
			$r =~ s/%[er]([sd]i)/%${1}l/;
			$r =~ s/%(r[0-9]+)[d]?/%${1}b/; $r; }
# LO: map a 64-bit register name to its 32-bit form:
#   lettered %r.. -> %e.. (e.g. %rax -> %eax);  numbered %rN -> %rNd.
# Idiom fix: use ${1} instead of the deprecated \1 in replacements.
sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e${1}/;
			$r =~ s/%r([0-9]+)/%r${1}d/; $r; }
# _data_word: append each 32-bit argument to the global $code buffer as a
# ".long" line with the value written twice (duplicated table words).
sub _data_word()
{
	while (defined(my $word = shift)) {
		$code .= sprintf(".long\t0x%08x,0x%08x\n", $word, $word);
	}
}
# data_word: append all arguments to the global $code buffer as a single
# comma-separated ".long" directive terminated by a newline.
sub data_word()
{
	my $last = pop(@_);
	$code .= ".long\t" . join("", map { sprintf("0x%08x,", $_) } @_);
	$code .= sprintf("0x%08x\n", $last);
}
# data_byte: append all arguments to the global $code buffer as a single
# comma-separated ".byte" directive; every value is masked to 8 bits.
sub data_byte()
{
	my $last = pop(@_);
	$code .= ".byte\t" . join("", map { sprintf("0x%02x,", $_ & 0xff) } @_);
	$code .= sprintf("0x%02x\n", $last & 0xff);
}
sub encvert()
{ my $t3="%r8d"; # zaps $inp!
$code.=<<___;
# favor 3-way issue Opteron pipeline...
movzb `&lo("$s0")`,$acc0
movzb `&lo("$s1")`,$acc1
movzb `&lo("$s2")`,$acc2
mov 0($sbox,$acc0,8),$t0
mov 0($sbox,$acc1,8),$t1
mov 0($sbox,$acc2,8),$t2
movzb `&hi("$s1")`,$acc0
movzb `&hi("$s2")`,$acc1
movzb `&lo("$s3")`,$acc2
xor 3($sbox,$acc0,8),$t0
xor 3($sbox,$acc1,8),$t1
mov 0($sbox,$acc2,8),$t3
movzb `&hi("$s3")`,$acc0
shr \$16,$s2
movzb `&hi("$s0")`,$acc2
xor 3($sbox,$acc0,8),$t2
shr \$16,$s3
xor 3($sbox,$acc2,8),$t3
shr \$16,$s1
lea 16($key),$key
shr \$16,$s0
movzb `&lo("$s2")`,$acc0
movzb `&lo("$s3")`,$acc1
movzb `&lo("$s0")`,$acc2
xor 2($sbox,$acc0,8),$t0
xor 2($sbox,$acc1,8),$t1
xor 2($sbox,$acc2,8),$t2
movzb `&hi("$s3")`,$acc0
movzb `&hi("$s0")`,$acc1
movzb `&lo("$s1")`,$acc2
xor 1($sbox,$acc0,8),$t0
xor 1($sbox,$acc1,8),$t1
xor 2($sbox,$acc2,8),$t3
mov 12($key),$s3
movzb `&hi("$s1")`,$acc1
movzb `&hi("$s2")`,$acc2
mov 0($key),$s0
xor 1($sbox,$acc1,8),$t2
xor 1($sbox,$acc2,8),$t3
mov 4($key),$s1
mov 8($key),$s2
xor $t0,$s0
xor $t1,$s1
xor $t2,$s2
xor $t3,$s3
___
}
sub enclastvert()
{ my $t3="%r8d"; # zaps $inp!
$code.=<<___;
movzb `&lo("$s0")`,$acc0
movzb `&lo("$s1")`,$acc1
movzb `&lo("$s2")`,$acc2
movzb 2($sbox,$acc0,8),$t0
movzb 2($sbox,$acc1,8),$t1
movzb 2($sbox,$acc2,8),$t2
movzb `&lo("$s3")`,$acc0
movzb `&hi("$s1")`,$acc1
movzb `&hi("$s2")`,$acc2
movzb 2($sbox,$acc0,8),$t3
mov 0($sbox,$acc1,8),$acc1 #$t0
mov 0($sbox,$acc2,8),$acc2 #$t1
and \$0x0000ff00,$acc1
and \$0x0000ff00,$acc2
xor $acc1,$t0
xor $acc2,$t1
shr \$16,$s2
movzb `&hi("$s3")`,$acc0
movzb `&hi("$s0")`,$acc1
shr \$16,$s3
mov 0($sbox,$acc0,8),$acc0 #$t2
mov 0($sbox,$acc1,8),$acc1 #$t3
and \$0x0000ff00,$acc0
and \$0x0000ff00,$acc1
shr \$16,$s1
xor $acc0,$t2
xor $acc1,$t3
shr \$16,$s0
movzb `&lo("$s2")`,$acc0
movzb `&lo("$s3")`,$acc1
movzb `&lo("$s0")`,$acc2
mov 0($sbox,$acc0,8),$acc0 #$t0
mov 0($sbox,$acc1,8),$acc1 #$t1
mov 0($sbox,$acc2,8),$acc2 #$t2
and \$0x00ff0000,$acc0
and \$0x00ff0000,$acc1
and \$0x00ff0000,$acc2
xor $acc0,$t0
xor $acc1,$t1
xor $acc2,$t2
movzb `&lo("$s1")`,$acc0
movzb `&hi("$s3")`,$acc1
movzb `&hi("$s0")`,$acc2
mov 0($sbox,$acc0,8),$acc0 #$t3
mov 2($sbox,$acc1,8),$acc1 #$t0
mov 2($sbox,$acc2,8),$acc2 #$t1
and \$0x00ff0000,$acc0
and \$0xff000000,$acc1
and \$0xff000000,$acc2
xor $acc0,$t3
xor $acc1,$t0
xor $acc2,$t1
movzb `&hi("$s1")`,$acc0
movzb `&hi("$s2")`,$acc1
mov 16+12($key),$s3
mov 2($sbox,$acc0,8),$acc0 #$t2
mov 2($sbox,$acc1,8),$acc1 #$t3
mov 16+0($key),$s0
and \$0xff000000,$acc0
and \$0xff000000,$acc1
xor $acc0,$t2
xor $acc1,$t3
mov 16+4($key),$s1
mov 16+8($key),$s2
xor $t0,$s0
xor $t1,$s1
xor $t2,$s2
xor $t3,$s3
___
}
sub encstep()
{ my ($i,@s) = @_;
my $tmp0=$acc0;
my $tmp1=$acc1;
my $tmp2=$acc2;
my $out=($t0,$t1,$t2,$s[0])[$i];
if ($i==3) {
$tmp0=$s[1];
$tmp1=$s[2];
$tmp2=$s[3];
}
$code.=" movzb ".&lo($s[0]).",$out\n";
$code.=" mov $s[2],$tmp1\n" if ($i!=3);
$code.=" lea 16($key),$key\n" if ($i==0);
$code.=" movzb ".&hi($s[1]).",$tmp0\n";
$code.=" mov 0($sbox,$out,8),$out\n";
$code.=" shr \$16,$tmp1\n";
$code.=" mov $s[3],$tmp2\n" if ($i!=3);
$code.=" xor 3($sbox,$tmp0,8),$out\n";
$code.=" movzb ".&lo($tmp1).",$tmp1\n";
$code.=" shr \$24,$tmp2\n";
$code.=" xor 4*$i($key),$out\n";
$code.=" xor 2($sbox,$tmp1,8),$out\n";
$code.=" xor 1($sbox,$tmp2,8),$out\n";
$code.=" mov $t0,$s[1]\n" if ($i==3);
$code.=" mov $t1,$s[2]\n" if ($i==3);
$code.=" mov $t2,$s[3]\n" if ($i==3);
$code.="\n";
}
sub enclast()
{ my ($i,@s)=@_;
my $tmp0=$acc0;
my $tmp1=$acc1;
my $tmp2=$acc2;
my $out=($t0,$t1,$t2,$s[0])[$i];
if ($i==3) {
$tmp0=$s[1];
$tmp1=$s[2];
$tmp2=$s[3];
}
$code.=" movzb ".&lo($s[0]).",$out\n";
$code.=" mov $s[2],$tmp1\n" if ($i!=3);
$code.=" mov 2($sbox,$out,8),$out\n";
$code.=" shr \$16,$tmp1\n";
$code.=" mov $s[3],$tmp2\n" if ($i!=3);
$code.=" and \$0x000000ff,$out\n";
$code.=" movzb ".&hi($s[1]).",$tmp0\n";
$code.=" movzb ".&lo($tmp1).",$tmp1\n";
$code.=" shr \$24,$tmp2\n";
$code.=" mov 0($sbox,$tmp0,8),$tmp0\n";
$code.=" mov 0($sbox,$tmp1,8),$tmp1\n";
$code.=" mov 2($sbox,$tmp2,8),$tmp2\n";
$code.=" and \$0x0000ff00,$tmp0\n";
$code.=" and \$0x00ff0000,$tmp1\n";
$code.=" and \$0xff000000,$tmp2\n";
$code.=" xor $tmp0,$out\n";
$code.=" mov $t0,$s[1]\n" if ($i==3);
$code.=" xor $tmp1,$out\n";
$code.=" mov $t1,$s[2]\n" if ($i==3);
$code.=" xor $tmp2,$out\n";
$code.=" mov $t2,$s[3]\n" if ($i==3);
$code.="\n";
}
$code.=<<___;
.type _x86_64_AES_encrypt,\@abi-omnipotent
.align 16
_x86_64_AES_encrypt:
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
mov 240($key),$rnds # load key->rounds
sub \$1,$rnds
jmp .Lenc_loop
.align 16
.Lenc_loop:
___
if ($verticalspin) { &encvert(); }
else { &encstep(0,$s0,$s1,$s2,$s3);
&encstep(1,$s1,$s2,$s3,$s0);
&encstep(2,$s2,$s3,$s0,$s1);
&encstep(3,$s3,$s0,$s1,$s2);
}
$code.=<<___;
sub \$1,$rnds
jnz .Lenc_loop
___
if ($verticalspin) { &enclastvert(); }
else { &enclast(0,$s0,$s1,$s2,$s3);
&enclast(1,$s1,$s2,$s3,$s0);
&enclast(2,$s2,$s3,$s0,$s1);
&enclast(3,$s3,$s0,$s1,$s2);
$code.=<<___;
xor 16+0($key),$s0 # xor with key
xor 16+4($key),$s1
xor 16+8($key),$s2
xor 16+12($key),$s3
___
}
$code.=<<___;
.byte 0xf3,0xc3 # rep ret
.size _x86_64_AES_encrypt,.-_x86_64_AES_encrypt
___
# it's possible to implement this by shifting tN by 8, filling least
# significant byte with byte load and finally bswap-ing at the end,
# but such partial register load kills Core 2...
sub enccompactvert()
{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
$code.=<<___;
movzb `&lo("$s0")`,$t0
movzb `&lo("$s1")`,$t1
movzb `&lo("$s2")`,$t2
movzb `&lo("$s3")`,$t3
movzb `&hi("$s1")`,$acc0
movzb `&hi("$s2")`,$acc1
shr \$16,$s2
movzb `&hi("$s3")`,$acc2
movzb ($sbox,$t0,1),$t0
movzb ($sbox,$t1,1),$t1
movzb ($sbox,$t2,1),$t2
movzb ($sbox,$t3,1),$t3
movzb ($sbox,$acc0,1),$t4 #$t0
movzb `&hi("$s0")`,$acc0
movzb ($sbox,$acc1,1),$t5 #$t1
movzb `&lo("$s2")`,$acc1
movzb ($sbox,$acc2,1),$acc2 #$t2
movzb ($sbox,$acc0,1),$acc0 #$t3
shl \$8,$t4
shr \$16,$s3
shl \$8,$t5
xor $t4,$t0
shr \$16,$s0
movzb `&lo("$s3")`,$t4
shr \$16,$s1
xor $t5,$t1
shl \$8,$acc2
movzb `&lo("$s0")`,$t5
movzb ($sbox,$acc1,1),$acc1 #$t0
xor $acc2,$t2
shl \$8,$acc0
movzb `&lo("$s1")`,$acc2
shl \$16,$acc1
xor $acc0,$t3
movzb ($sbox,$t4,1),$t4 #$t1
movzb `&hi("$s3")`,$acc0
movzb ($sbox,$t5,1),$t5 #$t2
xor $acc1,$t0
shr \$8,$s2
movzb `&hi("$s0")`,$acc1
shl \$16,$t4
shr \$8,$s1
shl \$16,$t5
xor $t4,$t1
movzb ($sbox,$acc2,1),$acc2 #$t3
movzb ($sbox,$acc0,1),$acc0 #$t0
movzb ($sbox,$acc1,1),$acc1 #$t1
movzb ($sbox,$s2,1),$s3 #$t3
movzb ($sbox,$s1,1),$s2 #$t2
shl \$16,$acc2
xor $t5,$t2
shl \$24,$acc0
xor $acc2,$t3
shl \$24,$acc1
xor $acc0,$t0
shl \$24,$s3
xor $acc1,$t1
shl \$24,$s2
mov $t0,$s0
mov $t1,$s1
xor $t2,$s2
xor $t3,$s3
___
}
sub enctransform_ref()
{ my $sn = shift;
my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
$code.=<<___;
mov $sn,$acc
and \$0x80808080,$acc
mov $acc,$tmp
shr \$7,$tmp
lea ($sn,$sn),$r2
sub $tmp,$acc
and \$0xfefefefe,$r2
and \$0x1b1b1b1b,$acc
mov $sn,$tmp
xor $acc,$r2
xor $r2,$sn
rol \$24,$sn
xor $r2,$sn
ror \$16,$tmp
xor $tmp,$sn
ror \$8,$tmp
xor $tmp,$sn
___
}
# unlike decrypt case it does not pay off to parallelize enctransform
sub enctransform()
{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
$code.=<<___;
mov \$0x80808080,$t0
mov \$0x80808080,$t1
and $s0,$t0
and $s1,$t1
mov $t0,$acc0
mov $t1,$acc1
shr \$7,$t0
lea ($s0,$s0),$r20
shr \$7,$t1
lea ($s1,$s1),$r21
sub $t0,$acc0
sub $t1,$acc1
and \$0xfefefefe,$r20
and \$0xfefefefe,$r21
and \$0x1b1b1b1b,$acc0
and \$0x1b1b1b1b,$acc1
mov $s0,$t0
mov $s1,$t1
xor $acc0,$r20
xor $acc1,$r21
xor $r20,$s0
xor $r21,$s1
mov \$0x80808080,$t2
rol \$24,$s0
mov \$0x80808080,$t3
rol \$24,$s1
and $s2,$t2
and $s3,$t3
xor $r20,$s0
xor $r21,$s1
mov $t2,$acc0
ror \$16,$t0
mov $t3,$acc1
ror \$16,$t1
lea ($s2,$s2),$r20
shr \$7,$t2
xor $t0,$s0
shr \$7,$t3
xor $t1,$s1
ror \$8,$t0
lea ($s3,$s3),$r21
ror \$8,$t1
sub $t2,$acc0
sub $t3,$acc1
xor $t0,$s0
xor $t1,$s1
and \$0xfefefefe,$r20
and \$0xfefefefe,$r21
and \$0x1b1b1b1b,$acc0
and \$0x1b1b1b1b,$acc1
mov $s2,$t2
mov $s3,$t3
xor $acc0,$r20
xor $acc1,$r21
ror \$16,$t2
xor $r20,$s2
ror \$16,$t3
xor $r21,$s3
rol \$24,$s2
mov 0($sbox),$acc0 # prefetch Te4
rol \$24,$s3
xor $r20,$s2
mov 64($sbox),$acc1
xor $r21,$s3
mov 128($sbox),$r20
xor $t2,$s2
ror \$8,$t2
xor $t3,$s3
ror \$8,$t3
xor $t2,$s2
mov 192($sbox),$r21
xor $t3,$s3
___
}
$code.=<<___;
.type _x86_64_AES_encrypt_compact,\@abi-omnipotent
.align 16
_x86_64_AES_encrypt_compact:
lea 128($sbox),$inp # size optimization
mov 0-128($inp),$acc1 # prefetch Te4
mov 32-128($inp),$acc2
mov 64-128($inp),$t0
mov 96-128($inp),$t1
mov 128-128($inp),$acc1
mov 160-128($inp),$acc2
mov 192-128($inp),$t0
mov 224-128($inp),$t1
jmp .Lenc_loop_compact
.align 16
.Lenc_loop_compact:
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
lea 16($key),$key
___
&enccompactvert();
$code.=<<___;
cmp 16(%rsp),$key
je .Lenc_compact_done
___
&enctransform();
$code.=<<___;
jmp .Lenc_loop_compact
.align 16
.Lenc_compact_done:
xor 0($key),$s0
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
.byte 0xf3,0xc3 # rep ret
.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
___
# void asm_AES_encrypt (const void *inp,void *out,const AES_KEY *key);
#
# Public single-block encryption entry point. Saves the six callee-saved
# registers, builds a 64-byte-aligned 32-byte frame positioned so it cannot
# overlap the key schedule (the 0x3c0 masking below), saves out/%rsp in the
# frame, loads the 16-byte input vector, picks a Te4 copy that cannot alias
# the stack frame modulo 0x400 (see the four Te4 copies in the data section),
# calls _x86_64_AES_encrypt_compact and stores the result.
$code.=<<___;
.align 16
.globl asm_AES_encrypt
.type asm_AES_encrypt,\@function,3
.hidden asm_AES_encrypt
asm_AES_encrypt:
mov %rsp,%rax
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
# allocate frame "above" key schedule
lea -63(%rdx),%rcx # %rdx is key argument
and \$-64,%rsp
sub %rsp,%rcx
neg %rcx
and \$0x3c0,%rcx
sub %rcx,%rsp
sub \$32,%rsp
mov %rsi,16(%rsp) # save out
mov %rax,24(%rsp) # save original stack pointer
.Lenc_prologue:
mov %rdx,$key
mov 240($key),$rnds # load rounds
mov 0(%rdi),$s0 # load input vector
mov 4(%rdi),$s1
mov 8(%rdi),$s2
mov 12(%rdi),$s3
shl \$4,$rnds
lea ($key,$rnds),%rbp
mov $key,(%rsp) # key schedule
mov %rbp,8(%rsp) # end of key schedule
# pick Te4 copy which can't "overlap" with stack frame or key schedule
lea .LAES_Te+2048(%rip),$sbox
lea 768(%rsp),%rbp
sub $sbox,%rbp
and \$0x300,%rbp
lea ($sbox,%rbp),$sbox
call _x86_64_AES_encrypt_compact
mov 16(%rsp),$out # restore out
mov 24(%rsp),%rsi # restore saved stack pointer
mov $s0,0($out) # write output vector
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
mov -48(%rsi),%r15
mov -40(%rsi),%r14
mov -32(%rsi),%r13
mov -24(%rsi),%r12
mov -16(%rsi),%rbp
mov -8(%rsi),%rbx
lea (%rsi),%rsp
.Lenc_epilogue:
ret
.size asm_AES_encrypt,.-asm_AES_encrypt
___
#------------------------------------------------------------------#
# decvert(): emit one full table-driven AES decryption round in "vertical"
# form -- all four output words are accumulated in parallel ($t0..$t3) from
# byte extractions of the state words $s0..$s3, using scale-8 lookups at
# displacements 0/1/2/3 into the table at $sbox, and finishing with the
# next-round-key load/xor (the lea 16($key) advance is interleaved mid-round).
sub decvert()
{ my $t3="%r8d"; # zaps $inp!
# $t3 reuses the register otherwise carrying $inp, hence the warning above;
# callers must not rely on $inp surviving this round body.
$code.=<<___;
# favor 3-way issue Opteron pipeline...
movzb `&lo("$s0")`,$acc0
movzb `&lo("$s1")`,$acc1
movzb `&lo("$s2")`,$acc2
mov 0($sbox,$acc0,8),$t0
mov 0($sbox,$acc1,8),$t1
mov 0($sbox,$acc2,8),$t2
movzb `&hi("$s3")`,$acc0
movzb `&hi("$s0")`,$acc1
movzb `&lo("$s3")`,$acc2
xor 3($sbox,$acc0,8),$t0
xor 3($sbox,$acc1,8),$t1
mov 0($sbox,$acc2,8),$t3
movzb `&hi("$s1")`,$acc0
shr \$16,$s0
movzb `&hi("$s2")`,$acc2
xor 3($sbox,$acc0,8),$t2
shr \$16,$s3
xor 3($sbox,$acc2,8),$t3
shr \$16,$s1
lea 16($key),$key
shr \$16,$s2
movzb `&lo("$s2")`,$acc0
movzb `&lo("$s3")`,$acc1
movzb `&lo("$s0")`,$acc2
xor 2($sbox,$acc0,8),$t0
xor 2($sbox,$acc1,8),$t1
xor 2($sbox,$acc2,8),$t2
movzb `&hi("$s1")`,$acc0
movzb `&hi("$s2")`,$acc1
movzb `&lo("$s1")`,$acc2
xor 1($sbox,$acc0,8),$t0
xor 1($sbox,$acc1,8),$t1
xor 2($sbox,$acc2,8),$t3
movzb `&hi("$s3")`,$acc0
mov 12($key),$s3
movzb `&hi("$s0")`,$acc2
xor 1($sbox,$acc0,8),$t2
mov 0($key),$s0
xor 1($sbox,$acc2,8),$t3
xor $t0,$s0
mov 4($key),$s1
mov 8($key),$s2
xor $t2,$s2
xor $t1,$s1
xor $t3,$s3
___
}
# declastvert(): emit the final ("last round") AES decryption step in
# vertical form. Uses the byte S-box copy at $sbox+2048 (the lea below),
# one movzb per state byte, shifted into position with shl 0/8/16/24 and
# folded into $t0..$t3, then xors in the last round key at 16($key) and
# restores $sbox before returning to the caller's table base.
sub declastvert()
{ my $t3="%r8d"; # zaps $inp!
# $t3 reuses the $inp register, as in decvert().
$code.=<<___;
lea 2048($sbox),$sbox # size optimization
movzb `&lo("$s0")`,$acc0
movzb `&lo("$s1")`,$acc1
movzb `&lo("$s2")`,$acc2
movzb ($sbox,$acc0,1),$t0
movzb ($sbox,$acc1,1),$t1
movzb ($sbox,$acc2,1),$t2
movzb `&lo("$s3")`,$acc0
movzb `&hi("$s3")`,$acc1
movzb `&hi("$s0")`,$acc2
movzb ($sbox,$acc0,1),$t3
movzb ($sbox,$acc1,1),$acc1 #$t0
movzb ($sbox,$acc2,1),$acc2 #$t1
shl \$8,$acc1
shl \$8,$acc2
xor $acc1,$t0
xor $acc2,$t1
shr \$16,$s3
movzb `&hi("$s1")`,$acc0
movzb `&hi("$s2")`,$acc1
shr \$16,$s0
movzb ($sbox,$acc0,1),$acc0 #$t2
movzb ($sbox,$acc1,1),$acc1 #$t3
shl \$8,$acc0
shl \$8,$acc1
shr \$16,$s1
xor $acc0,$t2
xor $acc1,$t3
shr \$16,$s2
movzb `&lo("$s2")`,$acc0
movzb `&lo("$s3")`,$acc1
movzb `&lo("$s0")`,$acc2
movzb ($sbox,$acc0,1),$acc0 #$t0
movzb ($sbox,$acc1,1),$acc1 #$t1
movzb ($sbox,$acc2,1),$acc2 #$t2
shl \$16,$acc0
shl \$16,$acc1
shl \$16,$acc2
xor $acc0,$t0
xor $acc1,$t1
xor $acc2,$t2
movzb `&lo("$s1")`,$acc0
movzb `&hi("$s1")`,$acc1
movzb `&hi("$s2")`,$acc2
movzb ($sbox,$acc0,1),$acc0 #$t3
movzb ($sbox,$acc1,1),$acc1 #$t0
movzb ($sbox,$acc2,1),$acc2 #$t1
shl \$16,$acc0
shl \$24,$acc1
shl \$24,$acc2
xor $acc0,$t3
xor $acc1,$t0
xor $acc2,$t1
movzb `&hi("$s3")`,$acc0
movzb `&hi("$s0")`,$acc1
mov 16+12($key),$s3
movzb ($sbox,$acc0,1),$acc0 #$t2
movzb ($sbox,$acc1,1),$acc1 #$t3
mov 16+0($key),$s0
shl \$24,$acc0
shl \$24,$acc1
xor $acc0,$t2
xor $acc1,$t3
mov 16+4($key),$s1
mov 16+8($key),$s2
lea -2048($sbox),$sbox
xor $t0,$s0
xor $t1,$s1
xor $t2,$s2
xor $t3,$s3
___
}
# decstep(i, s0..s3): emit one quarter of a "horizontal" (non-vertical-spin)
# AES decryption round -- the table lookups producing output word i. Words
# 0..2 accumulate into the scratch registers $t0..$t2; on the final quarter
# (i==3) the state registers themselves double as scratch and the freshly
# computed words are rotated back into $s[1..3].
sub decstep()
{ my ($i,@s) = @_;
# Destination register for this output word.
my $out=($t0,$t1,$t2,$s[0])[$i];
# For i==3 the remaining state words are consumed in place; for i<3 they are
# first copied into the $acc scratch registers (the conditional mov lines).
my ($tmp0,$tmp1,$tmp2) = ($i==3) ? ($s[1],$s[2],$s[3]) : ($acc0,$acc1,$acc2);
my @asm;
push @asm, " mov $s[0],$out\n" if ($i!=3);
push @asm, " mov $s[2],$tmp1\n" if ($i!=3);
push @asm, " and \$0xFF,$out\n";
push @asm, " mov 0($sbox,$out,8),$out\n";
push @asm, " shr \$16,$tmp1\n";
push @asm, " mov $s[3],$tmp2\n" if ($i!=3);
push @asm, " movzb ".&hi($s[1]).",$tmp0\n";
push @asm, " and \$0xFF,$tmp1\n";
push @asm, " shr \$24,$tmp2\n";
push @asm, " xor 3($sbox,$tmp0,8),$out\n";
push @asm, " xor 2($sbox,$tmp1,8),$out\n";
push @asm, " xor 1($sbox,$tmp2,8),$out\n";
# On the last quarter, move the accumulated words back into the state.
push @asm, " mov $t2,$s[1]\n" if ($i==3);
push @asm, " mov $t1,$s[2]\n" if ($i==3);
push @asm, " mov $t0,$s[3]\n" if ($i==3);
push @asm, "\n";
$code .= join '', @asm;
}
# declast(i, s0..s3): emit one quarter of the final AES decryption round for
# output word i, using byte lookups into the S-box copy at $sbox+2048 instead
# of the word tables, with explicit shl 8/16/24 positioning. Register
# shuffling mirrors decstep(): i<3 uses $acc scratch, i==3 works in place and
# rotates $t0..$t2 back into the state.
sub declast()
{ my ($i,@s)=@_;
# Destination register for this output word.
my $out=($t0,$t1,$t2,$s[0])[$i];
# Scratch registers: in-place state words on the last quarter, $acc otherwise.
my ($tmp0,$tmp1,$tmp2) = ($i==3) ? ($s[1],$s[2],$s[3]) : ($acc0,$acc1,$acc2);
my @asm;
push @asm, " mov $s[0],$out\n" if ($i!=3);
push @asm, " mov $s[2],$tmp1\n" if ($i!=3);
push @asm, " and \$0xFF,$out\n";
push @asm, " movzb 2048($sbox,$out,1),$out\n";
push @asm, " shr \$16,$tmp1\n";
push @asm, " mov $s[3],$tmp2\n" if ($i!=3);
push @asm, " movzb ".&hi($s[1]).",$tmp0\n";
push @asm, " and \$0xFF,$tmp1\n";
push @asm, " shr \$24,$tmp2\n";
push @asm, " movzb 2048($sbox,$tmp0,1),$tmp0\n";
push @asm, " movzb 2048($sbox,$tmp1,1),$tmp1\n";
push @asm, " movzb 2048($sbox,$tmp2,1),$tmp2\n";
push @asm, " shl \$8,$tmp0\n";
push @asm, " shl \$16,$tmp1\n";
push @asm, " shl \$24,$tmp2\n";
push @asm, " xor $tmp0,$out\n";
push @asm, " mov $t2,$s[1]\n" if ($i==3);
push @asm, " xor $tmp1,$out\n";
push @asm, " mov $t1,$s[2]\n" if ($i==3);
push @asm, " xor $tmp2,$out\n";
push @asm, " mov $t0,$s[3]\n" if ($i==3);
push @asm, "\n";
$code .= join '', @asm;
}
# _x86_64_AES_decrypt: full-table decryption inner routine. Whitens the
# state with the first round key, then runs $rnds-1 rounds followed by a
# Td4-based last round. Round bodies come in two flavors selected by the
# file-level $verticalspin flag.
$code.=<<___;
.type _x86_64_AES_decrypt,\@abi-omnipotent
.align 16
_x86_64_AES_decrypt:
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
mov 240($key),$rnds # load key->rounds
sub \$1,$rnds
jmp .Ldec_loop
.align 16
.Ldec_loop:
___
# Round body: either one "vertical" round (decvert, which also folds in the
# round-key xor and key advance), or four word-sliced quarter-rounds
# (decstep) followed by an explicit key advance and xor.
if ($verticalspin) { &decvert(); }
else { &decstep(0,$s0,$s3,$s2,$s1);
&decstep(1,$s1,$s0,$s3,$s2);
&decstep(2,$s2,$s1,$s0,$s3);
&decstep(3,$s3,$s2,$s1,$s0);
$code.=<<___;
lea 16($key),$key
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
___
}
$code.=<<___;
sub \$1,$rnds
jnz .Ldec_loop
___
# Last round: declastvert folds in the final key itself; the declast path
# needs the explicit 16+N($key) whitening xors emitted below.
if ($verticalspin) { &declastvert(); }
else { &declast(0,$s0,$s3,$s2,$s1);
&declast(1,$s1,$s0,$s3,$s2);
&declast(2,$s2,$s1,$s0,$s3);
&declast(3,$s3,$s2,$s1,$s0);
$code.=<<___;
xor 16+0($key),$s0 # xor with key
xor 16+4($key),$s1
xor 16+8($key),$s2
xor 16+12($key),$s3
___
}
$code.=<<___;
.byte 0xf3,0xc3 # rep ret
.size _x86_64_AES_decrypt,.-_x86_64_AES_decrypt
___
# deccompactvert(): emit the S-box substitution half of one compact
# (cache-timing-hardened, byte-table) decryption round. Every state byte is
# looked up individually in the 256-byte table at $sbox and shifted into
# place (shl 8/16/24); results land in $s0..$s3. The linear InvMixColumns
# part is emitted separately by dectransform(). Needs three extra scratch
# registers $t3..$t5 beyond the usual $t0..$t2.
sub deccompactvert()
{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
$code.=<<___;
movzb `&lo("$s0")`,$t0
movzb `&lo("$s1")`,$t1
movzb `&lo("$s2")`,$t2
movzb `&lo("$s3")`,$t3
movzb `&hi("$s3")`,$acc0
movzb `&hi("$s0")`,$acc1
shr \$16,$s3
movzb `&hi("$s1")`,$acc2
movzb ($sbox,$t0,1),$t0
movzb ($sbox,$t1,1),$t1
movzb ($sbox,$t2,1),$t2
movzb ($sbox,$t3,1),$t3
movzb ($sbox,$acc0,1),$t4 #$t0
movzb `&hi("$s2")`,$acc0
movzb ($sbox,$acc1,1),$t5 #$t1
movzb ($sbox,$acc2,1),$acc2 #$t2
movzb ($sbox,$acc0,1),$acc0 #$t3
shr \$16,$s2
shl \$8,$t5
shl \$8,$t4
movzb `&lo("$s2")`,$acc1
shr \$16,$s0
xor $t4,$t0
shr \$16,$s1
movzb `&lo("$s3")`,$t4
shl \$8,$acc2
xor $t5,$t1
shl \$8,$acc0
movzb `&lo("$s0")`,$t5
movzb ($sbox,$acc1,1),$acc1 #$t0
xor $acc2,$t2
movzb `&lo("$s1")`,$acc2
shl \$16,$acc1
xor $acc0,$t3
movzb ($sbox,$t4,1),$t4 #$t1
movzb `&hi("$s1")`,$acc0
movzb ($sbox,$acc2,1),$acc2 #$t3
xor $acc1,$t0
movzb ($sbox,$t5,1),$t5 #$t2
movzb `&hi("$s2")`,$acc1
shl \$16,$acc2
shl \$16,$t4
shl \$16,$t5
xor $acc2,$t3
movzb `&hi("$s3")`,$acc2
xor $t4,$t1
shr \$8,$s0
xor $t5,$t2
movzb ($sbox,$acc0,1),$acc0 #$t0
movzb ($sbox,$acc1,1),$s1 #$t1
movzb ($sbox,$acc2,1),$s2 #$t2
movzb ($sbox,$s0,1),$s3 #$t3
mov $t0,$s0
shl \$24,$acc0
shl \$24,$s1
shl \$24,$s2
xor $acc0,$s0
shl \$24,$s3
xor $t1,$s1
xor $t2,$s2
xor $t3,$s3
___
}
# parallelized version! input is pair of 64-bit values: %rax=s1.s0
# and %rcx=s3.s2, output is four 32-bit values in %eax=s0, %ebx=s1,
# %ecx=s2 and %edx=s3.
#
# dectransform($prefetch): emit the InvMixColumns half of a compact
# decryption round, computed branch-free on two packed 64-bit register pairs
# at once. tp2/tp4/tp8 are successive GF(2^8) doublings ("xtime") of tp1,
# built with the lea-double / mask / conditional-0x1b-reduction sequence
# using $mask80, $maskfe and $mask1b (presumably the 0x8080../0xfefe../
# 0x1b1b.. byte masks seen literally in deckey_ref() -- loaded by callers).
# The rol 8/16/24 of the 32-bit halves implement the row rotations. When
# $prefetch is true, table-touching loads are interleaved to warm $sbox.
sub dectransform()
{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
my $prefetch = shift;
$code.=<<___;
mov $mask80,$tp40
mov $mask80,$tp48
and $tp10,$tp40
and $tp18,$tp48
mov $tp40,$acc0
mov $tp48,$acc8
shr \$7,$tp40
lea ($tp10,$tp10),$tp20
shr \$7,$tp48
lea ($tp18,$tp18),$tp28
sub $tp40,$acc0
sub $tp48,$acc8
and $maskfe,$tp20
and $maskfe,$tp28
and $mask1b,$acc0
and $mask1b,$acc8
xor $acc0,$tp20
xor $acc8,$tp28
mov $mask80,$tp80
mov $mask80,$tp88
and $tp20,$tp80
and $tp28,$tp88
mov $tp80,$acc0
mov $tp88,$acc8
shr \$7,$tp80
lea ($tp20,$tp20),$tp40
shr \$7,$tp88
lea ($tp28,$tp28),$tp48
sub $tp80,$acc0
sub $tp88,$acc8
and $maskfe,$tp40
and $maskfe,$tp48
and $mask1b,$acc0
and $mask1b,$acc8
xor $acc0,$tp40
xor $acc8,$tp48
mov $mask80,$tp80
mov $mask80,$tp88
and $tp40,$tp80
and $tp48,$tp88
mov $tp80,$acc0
mov $tp88,$acc8
shr \$7,$tp80
xor $tp10,$tp20 # tp2^=tp1
shr \$7,$tp88
xor $tp18,$tp28 # tp2^=tp1
sub $tp80,$acc0
sub $tp88,$acc8
lea ($tp40,$tp40),$tp80
lea ($tp48,$tp48),$tp88
xor $tp10,$tp40 # tp4^=tp1
xor $tp18,$tp48 # tp4^=tp1
and $maskfe,$tp80
and $maskfe,$tp88
and $mask1b,$acc0
and $mask1b,$acc8
xor $acc0,$tp80
xor $acc8,$tp88
xor $tp80,$tp10 # tp1^=tp8
xor $tp88,$tp18 # tp1^=tp8
xor $tp80,$tp20 # tp2^tp1^=tp8
xor $tp88,$tp28 # tp2^tp1^=tp8
mov $tp10,$acc0
mov $tp18,$acc8
xor $tp80,$tp40 # tp4^tp1^=tp8
shr \$32,$acc0
xor $tp88,$tp48 # tp4^tp1^=tp8
shr \$32,$acc8
xor $tp20,$tp80 # tp8^=tp8^tp2^tp1=tp2^tp1
rol \$8,`&LO("$tp10")` # ROTATE(tp1^tp8,8)
xor $tp28,$tp88 # tp8^=tp8^tp2^tp1=tp2^tp1
rol \$8,`&LO("$tp18")` # ROTATE(tp1^tp8,8)
xor $tp40,$tp80 # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
rol \$8,`&LO("$acc0")` # ROTATE(tp1^tp8,8)
xor $tp48,$tp88 # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
rol \$8,`&LO("$acc8")` # ROTATE(tp1^tp8,8)
xor `&LO("$tp80")`,`&LO("$tp10")`
shr \$32,$tp80
xor `&LO("$tp88")`,`&LO("$tp18")`
shr \$32,$tp88
xor `&LO("$tp80")`,`&LO("$acc0")`
xor `&LO("$tp88")`,`&LO("$acc8")`
mov $tp20,$tp80
rol \$24,`&LO("$tp20")` # ROTATE(tp2^tp1^tp8,24)
mov $tp28,$tp88
rol \$24,`&LO("$tp28")` # ROTATE(tp2^tp1^tp8,24)
shr \$32,$tp80
xor `&LO("$tp20")`,`&LO("$tp10")`
shr \$32,$tp88
xor `&LO("$tp28")`,`&LO("$tp18")`
rol \$24,`&LO("$tp80")` # ROTATE(tp2^tp1^tp8,24)
mov $tp40,$tp20
rol \$24,`&LO("$tp88")` # ROTATE(tp2^tp1^tp8,24)
mov $tp48,$tp28
shr \$32,$tp20
xor `&LO("$tp80")`,`&LO("$acc0")`
shr \$32,$tp28
xor `&LO("$tp88")`,`&LO("$acc8")`
`"mov 0($sbox),$mask80" if ($prefetch)`
rol \$16,`&LO("$tp40")` # ROTATE(tp4^tp1^tp8,16)
`"mov 64($sbox),$maskfe" if ($prefetch)`
rol \$16,`&LO("$tp48")` # ROTATE(tp4^tp1^tp8,16)
`"mov 128($sbox),$mask1b" if ($prefetch)`
rol \$16,`&LO("$tp20")` # ROTATE(tp4^tp1^tp8,16)
`"mov 192($sbox),$tp80" if ($prefetch)`
xor `&LO("$tp40")`,`&LO("$tp10")`
rol \$16,`&LO("$tp28")` # ROTATE(tp4^tp1^tp8,16)
xor `&LO("$tp48")`,`&LO("$tp18")`
`"mov 256($sbox),$tp88" if ($prefetch)`
xor `&LO("$tp20")`,`&LO("$acc0")`
xor `&LO("$tp28")`,`&LO("$acc8")`
___
}
# _x86_64_AES_decrypt_compact: compact-table decryption inner routine.
# Prefetches Td4, loops {key-xor; deccompactvert; repack; dectransform}
# until $key reaches the end-of-schedule pointer (16(%rsp) = caller's
# 8(%rsp) slot, offset by the call's return address), then applies the last
# round-key whitening.
$code.=<<___;
.type _x86_64_AES_decrypt_compact,\@abi-omnipotent
.align 16
_x86_64_AES_decrypt_compact:
lea 128($sbox),$inp # size optimization
mov 0-128($inp),$acc1 # prefetch Td4
mov 32-128($inp),$acc2
mov 64-128($inp),$t0
mov 96-128($inp),$t1
mov 128-128($inp),$acc1
mov 160-128($inp),$acc2
mov 192-128($inp),$t0
mov 224-128($inp),$t1
jmp .Ldec_loop_compact
.align 16
.Ldec_loop_compact:
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
lea 16($key),$key
___
# Emit the byte-wise Td4 substitution for one round (results in $s0..$s3).
&deccompactvert();
# Not done yet: load the GF(2^8) masks and repack the four 32-bit state
# words into the two 64-bit pairs %rax/%rcx that dectransform() expects.
$code.=<<___;
cmp 16(%rsp),$key
je .Ldec_compact_done
mov 256+0($sbox),$mask80
shl \$32,%rbx
shl \$32,%rdx
mov 256+8($sbox),$maskfe
or %rbx,%rax
or %rdx,%rcx
mov 256+16($sbox),$mask1b
___
# Emit InvMixColumns (with table-prefetch interleaving enabled).
&dectransform(1);
$code.=<<___;
jmp .Ldec_loop_compact
.align 16
.Ldec_compact_done:
xor 0($key),$s0
xor 4($key),$s1
xor 8($key),$s2
xor 12($key),$s3
.byte 0xf3,0xc3 # rep ret
.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
___
# void asm_AES_decrypt (const void *inp,void *out,const AES_KEY *key);
#
# Public single-block decryption entry point; mirrors asm_AES_encrypt but
# points $sbox into .LAES_Td and applies an extra %rbp>>3 adjustment (the
# "magic constants" note below) to land on the Td4 copy layout.
$code.=<<___;
.align 16
.globl asm_AES_decrypt
.type asm_AES_decrypt,\@function,3
.hidden asm_AES_decrypt
asm_AES_decrypt:
mov %rsp,%rax
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
# allocate frame "above" key schedule
lea -63(%rdx),%rcx # %rdx is key argument
and \$-64,%rsp
sub %rsp,%rcx
neg %rcx
and \$0x3c0,%rcx
sub %rcx,%rsp
sub \$32,%rsp
mov %rsi,16(%rsp) # save out
mov %rax,24(%rsp) # save original stack pointer
.Ldec_prologue:
mov %rdx,$key
mov 240($key),$rnds # load rounds
mov 0(%rdi),$s0 # load input vector
mov 4(%rdi),$s1
mov 8(%rdi),$s2
mov 12(%rdi),$s3
shl \$4,$rnds
lea ($key,$rnds),%rbp
mov $key,(%rsp) # key schedule
mov %rbp,8(%rsp) # end of key schedule
# pick Td4 copy which can't "overlap" with stack frame or key schedule
lea .LAES_Td+2048(%rip),$sbox
lea 768(%rsp),%rbp
sub $sbox,%rbp
and \$0x300,%rbp
lea ($sbox,%rbp),$sbox
shr \$3,%rbp # recall "magic" constants!
add %rbp,$sbox
call _x86_64_AES_decrypt_compact
mov 16(%rsp),$out # restore out
mov 24(%rsp),%rsi # restore saved stack pointer
mov $s0,0($out) # write output vector
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
mov -48(%rsi),%r15
mov -40(%rsi),%r14
mov -32(%rsi),%r13
mov -24(%rsi),%r12
mov -16(%rsi),%rbp
mov -8(%rsi),%rbx
lea (%rsi),%rsp
.Ldec_epilogue:
ret
.size asm_AES_decrypt,.-asm_AES_decrypt
___
#------------------------------------------------------------------#
# enckey(): emit the core key-expansion step -- compute the next round-key
# word in %eax from the previous word in %edx: four Te4 byte lookups via
# -128(%rbp,...) (so %rbp points 128 bytes into the S-box copy), shifted so
# the combination performs RotWord+SubWord, then xor of the round constant
# indexed by %rcx at 1024-128(%rbp,%rcx,4). Clobbers %esi and %ebx.
sub enckey()
{
$code.=<<___;
movz %dl,%esi # rk[i]>>0
movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[i]>>8
shl \$24,%ebx
xor %ebx,%eax
movzb -128(%rbp,%rsi),%ebx
shr \$16,%edx
movz %dl,%esi # rk[i]>>16
xor %ebx,%eax
movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[i]>>24
shl \$8,%ebx
xor %ebx,%eax
movzb -128(%rbp,%rsi),%ebx
shl \$16,%ebx
xor %ebx,%eax
xor 1024-128(%rbp,%rcx,4),%eax # rcon
___
}
# int asm_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key)
#
# Thin public wrapper (saves registers, reserves 8 bytes) around the worker
# _x86_64_AES_set_encrypt_key, which validates the two pointers, prefetches
# Te4, and dispatches on bits (128/192/256) to the .L{10,12,14}rounds
# expansion loops below; returns 0 on success, -1 on NULL pointer, -2 on an
# unsupported bit count.
$code.=<<___;
.align 16
.globl asm_AES_set_encrypt_key
.type asm_AES_set_encrypt_key,\@function,3
asm_AES_set_encrypt_key:
push %rbx
push %rbp
push %r12 # redundant, but allows to share
push %r13 # exception handler...
push %r14
push %r15
sub \$8,%rsp
.Lenc_key_prologue:
call _x86_64_AES_set_encrypt_key
mov 40(%rsp),%rbp
mov 48(%rsp),%rbx
add \$56,%rsp
.Lenc_key_epilogue:
ret
.size asm_AES_set_encrypt_key,.-asm_AES_set_encrypt_key
.type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
.align 16
_x86_64_AES_set_encrypt_key:
mov %esi,%ecx # %ecx=bits
mov %rdi,%rsi # %rsi=userKey
mov %rdx,%rdi # %rdi=key
test \$-1,%rsi
jz .Lbadpointer
test \$-1,%rdi
jz .Lbadpointer
lea .LAES_Te(%rip),%rbp
lea 2048+128(%rbp),%rbp
# prefetch Te4
mov 0-128(%rbp),%eax
mov 32-128(%rbp),%ebx
mov 64-128(%rbp),%r8d
mov 96-128(%rbp),%edx
mov 128-128(%rbp),%eax
mov 160-128(%rbp),%ebx
mov 192-128(%rbp),%r8d
mov 224-128(%rbp),%edx
cmp \$128,%ecx
je .L10rounds
cmp \$192,%ecx
je .L12rounds
cmp \$256,%ecx
je .L14rounds
mov \$-2,%rax # invalid number of bits
jmp .Lexit
.L10rounds:
mov 0(%rsi),%rax # copy first 4 dwords
mov 8(%rsi),%rdx
mov %rax,0(%rdi)
mov %rdx,8(%rdi)
shr \$32,%rdx
xor %ecx,%ecx
jmp .L10shortcut
.align 4
.L10loop:
mov 0(%rdi),%eax # rk[0]
mov 12(%rdi),%edx # rk[3]
.L10shortcut:
___
# Next-word computation shared by all three key sizes (%edx -> %eax).
&enckey ();
# Tail of the 128-bit loop: derive rk[4..7] by chained xors, 10 iterations
# total, then store the round count; followed by the head of the 192-bit
# (.L12rounds) expansion loop.
$code.=<<___;
mov %eax,16(%rdi) # rk[4]
xor 4(%rdi),%eax
mov %eax,20(%rdi) # rk[5]
xor 8(%rdi),%eax
mov %eax,24(%rdi) # rk[6]
xor 12(%rdi),%eax
mov %eax,28(%rdi) # rk[7]
add \$1,%ecx
lea 16(%rdi),%rdi
cmp \$10,%ecx
jl .L10loop
movl \$10,80(%rdi) # setup number of rounds
xor %rax,%rax
jmp .Lexit
.L12rounds:
mov 0(%rsi),%rax # copy first 6 dwords
mov 8(%rsi),%rbx
mov 16(%rsi),%rdx
mov %rax,0(%rdi)
mov %rbx,8(%rdi)
mov %rdx,16(%rdi)
shr \$32,%rdx
xor %ecx,%ecx
jmp .L12shortcut
.align 4
.L12loop:
mov 0(%rdi),%eax # rk[0]
mov 20(%rdi),%edx # rk[5]
.L12shortcut:
___
# Same next-word step as the 128-bit path.
&enckey ();
# Tail of the 192-bit loop: 8 iterations, the last one (%ecx==7) stops after
# rk[9]; then the head of the 256-bit (.L14rounds) expansion loop.
$code.=<<___;
mov %eax,24(%rdi) # rk[6]
xor 4(%rdi),%eax
mov %eax,28(%rdi) # rk[7]
xor 8(%rdi),%eax
mov %eax,32(%rdi) # rk[8]
xor 12(%rdi),%eax
mov %eax,36(%rdi) # rk[9]
cmp \$7,%ecx
je .L12break
add \$1,%ecx
xor 16(%rdi),%eax
mov %eax,40(%rdi) # rk[10]
xor 20(%rdi),%eax
mov %eax,44(%rdi) # rk[11]
lea 24(%rdi),%rdi
jmp .L12loop
.L12break:
movl \$12,72(%rdi) # setup number of rounds
xor %rax,%rax
jmp .Lexit
.L14rounds:
mov 0(%rsi),%rax # copy first 8 dwords
mov 8(%rsi),%rbx
mov 16(%rsi),%rcx
mov 24(%rsi),%rdx
mov %rax,0(%rdi)
mov %rbx,8(%rdi)
mov %rcx,16(%rdi)
mov %rdx,24(%rdi)
shr \$32,%rdx
xor %ecx,%ecx
jmp .L14shortcut
.align 4
.L14loop:
mov 0(%rdi),%eax # rk[0]
mov 28(%rdi),%edx # rk[4]
.L14shortcut:
___
# Same next-word step as the other key sizes.
&enckey ();
# Tail of the 256-bit loop: after rk[8..11], every non-final iteration also
# runs an inlined SubWord-only step (no rotate, no rcon -- the AES-256
# "extra" step) to derive rk[12..15]; then the shared exit paths.
$code.=<<___;
mov %eax,32(%rdi) # rk[8]
xor 4(%rdi),%eax
mov %eax,36(%rdi) # rk[9]
xor 8(%rdi),%eax
mov %eax,40(%rdi) # rk[10]
xor 12(%rdi),%eax
mov %eax,44(%rdi) # rk[11]
cmp \$6,%ecx
je .L14break
add \$1,%ecx
mov %eax,%edx
mov 16(%rdi),%eax # rk[4]
movz %dl,%esi # rk[11]>>0
movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[11]>>8
xor %ebx,%eax
movzb -128(%rbp,%rsi),%ebx
shr \$16,%edx
shl \$8,%ebx
movz %dl,%esi # rk[11]>>16
xor %ebx,%eax
movzb -128(%rbp,%rsi),%ebx
movz %dh,%esi # rk[11]>>24
shl \$16,%ebx
xor %ebx,%eax
movzb -128(%rbp,%rsi),%ebx
shl \$24,%ebx
xor %ebx,%eax
mov %eax,48(%rdi) # rk[12]
xor 20(%rdi),%eax
mov %eax,52(%rdi) # rk[13]
xor 24(%rdi),%eax
mov %eax,56(%rdi) # rk[14]
xor 28(%rdi),%eax
mov %eax,60(%rdi) # rk[15]
lea 32(%rdi),%rdi
jmp .L14loop
.L14break:
movl \$14,48(%rdi) # setup number of rounds
xor %rax,%rax
jmp .Lexit
.Lbadpointer:
mov \$-1,%rax
.Lexit:
.byte 0xf3,0xc3 # rep ret
.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
___
# deckey_ref($i, $ptr, $te, $td): emit a scalar (one 32-bit word at a time)
# InvMixColumns of the key-schedule word at $i($ptr), using the same
# tp2/tp4/tp8 xtime technique as dectransform() but with literal masks.
# Reference/fallback variant -- not referenced in the visible portion of
# this file, where asm_AES_set_decrypt_key uses dectransform() instead.
sub deckey_ref()
{ my ($i,$ptr,$te,$td) = @_;
my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
$code.=<<___;
mov $i($ptr),$tp1
mov $tp1,$acc
and \$0x80808080,$acc
mov $acc,$tp4
shr \$7,$tp4
lea 0($tp1,$tp1),$tp2
sub $tp4,$acc
and \$0xfefefefe,$tp2
and \$0x1b1b1b1b,$acc
xor $tp2,$acc
mov $acc,$tp2
and \$0x80808080,$acc
mov $acc,$tp8
shr \$7,$tp8
lea 0($tp2,$tp2),$tp4
sub $tp8,$acc
and \$0xfefefefe,$tp4
and \$0x1b1b1b1b,$acc
xor $tp1,$tp2 # tp2^tp1
xor $tp4,$acc
mov $acc,$tp4
and \$0x80808080,$acc
mov $acc,$tp8
shr \$7,$tp8
sub $tp8,$acc
lea 0($tp4,$tp4),$tp8
xor $tp1,$tp4 # tp4^tp1
and \$0xfefefefe,$tp8
and \$0x1b1b1b1b,$acc
xor $acc,$tp8
xor $tp8,$tp1 # tp1^tp8
rol \$8,$tp1 # ROTATE(tp1^tp8,8)
xor $tp8,$tp2 # tp2^tp1^tp8
xor $tp8,$tp4 # tp4^tp1^tp8
xor $tp2,$tp8
xor $tp4,$tp8 # tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
xor $tp8,$tp1
rol \$24,$tp2 # ROTATE(tp2^tp1^tp8,24)
xor $tp2,$tp1
rol \$16,$tp4 # ROTATE(tp4^tp1^tp8,16)
xor $tp4,$tp1
mov $tp1,$i($ptr)
___
}
# int asm_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key)
#
# Builds the decryption key schedule: runs the encryption key setup, then
# reverses the schedule 16 bytes at a time (.Linvert) and applies
# InvMixColumns to every round key except the first and last (.Lpermute),
# reusing dectransform() with the masks loaded from the constant pool at
# .LAES_Te+2048+1024+40.
$code.=<<___;
.align 16
.globl asm_AES_set_decrypt_key
.type asm_AES_set_decrypt_key,\@function,3
asm_AES_set_decrypt_key:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
push %rdx # save key schedule
.Ldec_key_prologue:
call _x86_64_AES_set_encrypt_key
mov (%rsp),%r8 # restore key schedule
cmp \$0,%eax
jne .Labort
mov 240(%r8),%r14d # pull number of rounds
xor %rdi,%rdi
lea (%rdi,%r14d,4),%rcx
mov %r8,%rsi
lea (%r8,%rcx,4),%rdi # pointer to last chunk
.align 4
.Linvert:
mov 0(%rsi),%rax
mov 8(%rsi),%rbx
mov 0(%rdi),%rcx
mov 8(%rdi),%rdx
mov %rax,0(%rdi)
mov %rbx,8(%rdi)
mov %rcx,0(%rsi)
mov %rdx,8(%rsi)
lea 16(%rsi),%rsi
lea -16(%rdi),%rdi
cmp %rsi,%rdi
jne .Linvert
lea .LAES_Te+2048+1024(%rip),%rax # rcon
mov 40(%rax),$mask80
mov 48(%rax),$maskfe
mov 56(%rax),$mask1b
mov %r8,$key
sub \$1,%r14d
.align 4
.Lpermute:
lea 16($key),$key
mov 0($key),%rax
mov 8($key),%rcx
___
# InvMixColumns on the round key just loaded into the %rax/%rcx pair
# (dectransform's documented input/output registers; no prefetch here).
&dectransform ();
$code.=<<___;
mov %eax,0($key)
mov %ebx,4($key)
mov %ecx,8($key)
mov %edx,12($key)
sub \$1,%r14d
jnz .Lpermute
xor %rax,%rax
.Labort:
mov 8(%rsp),%r15
mov 16(%rsp),%r14
mov 24(%rsp),%r13
mov 32(%rsp),%r12
mov 40(%rsp),%rbp
mov 48(%rsp),%rbx
add \$56,%rsp
.Ldec_key_epilogue:
ret
.size asm_AES_set_decrypt_key,.-asm_AES_set_decrypt_key
___
# void asm_AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
# size_t length, const AES_KEY *key,
# unsigned char *ivp, const int enc);
{
# Stack-frame layout for the CBC routine; these lexicals are interpolated
# into the heredoc below as symbolic names for the frame slots.
# -8(%rsp) return address
my $keyp="0(%rsp)"; # one to pass as $key
my $keyend="8(%rsp)"; # &(keyp->rd_key[4*keyp->rounds])
my $_rsp="16(%rsp)"; # saved %rsp
my $_inp="24(%rsp)"; # copy of 1st parameter, inp
my $_out="32(%rsp)"; # copy of 2nd parameter, out
my $_len="40(%rsp)"; # copy of 3rd parameter, length
my $_key="48(%rsp)"; # copy of 4th parameter, key
my $_ivp="56(%rsp)"; # copy of 5th parameter, ivp
my $ivec="64(%rsp)"; # ivec[16] working copy of the IV
my $aes_key="80(%rsp)"; # optional on-stack copy of aes_key
my $mark="80+240(%rsp)"; # copy of aes_key->rounds (0 = no copy made)
# asm_AES_cbc_encrypt: CBC-mode bulk entry point. Dispatches between a fast
# path (length >= $speed_limit, a multiple of 16, and OPENSSL_ia32cap_P bit
# 28 clear -- presumably the hyper-threading flag; TODO confirm) that uses
# the full Te/Td tables with an optional on-stack key-schedule copy, and a
# slow path built on the compact (cache-timing-hardened) routines, which
# also handles the non-multiple-of-16 encrypt tail.
$code.=<<___;
.align 16
.globl asm_AES_cbc_encrypt
.type asm_AES_cbc_encrypt,\@function,6
.extern OPENSSL_ia32cap_P
.hidden asm_AES_cbc_encrypt
asm_AES_cbc_encrypt:
cmp \$0,%rdx # check length
je .Lcbc_epilogue
pushfq
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
.Lcbc_prologue:
cld
mov %r9d,%r9d # clear upper half of enc
lea .LAES_Te(%rip),$sbox
lea .LAES_Td(%rip),%r10
cmp \$0,%r9
cmoveq %r10,$sbox
mov OPENSSL_ia32cap_P(%rip),%r10d
cmp \$$speed_limit,%rdx
jb .Lcbc_slow_prologue
test \$15,%rdx
jnz .Lcbc_slow_prologue
bt \$28,%r10d
jc .Lcbc_slow_prologue
# allocate aligned stack frame...
lea -88-248(%rsp),$key
and \$-64,$key
# ... and make sure it doesn't alias with AES_T[ed] modulo 4096
mov $sbox,%r10
lea 2304($sbox),%r11
mov $key,%r12
and \$0xFFF,%r10 # s = $sbox&0xfff
and \$0xFFF,%r11 # e = ($sbox+2048)&0xfff
and \$0xFFF,%r12 # p = %rsp&0xfff
cmp %r11,%r12 # if (p=>e) %rsp =- (p-e);
jb .Lcbc_te_break_out
sub %r11,%r12
sub %r12,$key
jmp .Lcbc_te_ok
.Lcbc_te_break_out: # else %rsp -= (p-s)&0xfff + framesz
sub %r10,%r12
and \$0xFFF,%r12
add \$320,%r12
sub %r12,$key
.align 4
.Lcbc_te_ok:
xchg %rsp,$key
#add \$8,%rsp # reserve for return address!
mov $key,$_rsp # save %rsp
.Lcbc_fast_body:
mov %rdi,$_inp # save copy of inp
mov %rsi,$_out # save copy of out
mov %rdx,$_len # save copy of len
mov %rcx,$_key # save copy of key
mov %r8,$_ivp # save copy of ivp
movl \$0,$mark # copy of aes_key->rounds = 0;
mov %r8,%rbp # rearrange input arguments
mov %r9,%rbx
mov %rsi,$out
mov %rdi,$inp
mov %rcx,$key
mov 240($key),%eax # key->rounds
# do we copy key schedule to stack?
mov $key,%r10
sub $sbox,%r10
and \$0xfff,%r10
cmp \$2304,%r10
jb .Lcbc_do_ecopy
cmp \$4096-248,%r10
jb .Lcbc_skip_ecopy
.align 4
.Lcbc_do_ecopy:
mov $key,%rsi
lea $aes_key,%rdi
lea $aes_key,$key
mov \$240/8,%ecx
.long 0x90A548F3 # rep movsq
mov %eax,(%rdi) # copy aes_key->rounds
.Lcbc_skip_ecopy:
mov $key,$keyp # save key pointer
mov \$18,%ecx
.align 4
.Lcbc_prefetch_te:
mov 0($sbox),%r10
mov 32($sbox),%r11
mov 64($sbox),%r12
mov 96($sbox),%r13
lea 128($sbox),$sbox
sub \$1,%ecx
jnz .Lcbc_prefetch_te
lea -2304($sbox),$sbox
cmp \$0,%rbx
je .LFAST_DECRYPT
#----------------------------- ENCRYPT -----------------------------#
mov 0(%rbp),$s0 # load iv
mov 4(%rbp),$s1
mov 8(%rbp),$s2
mov 12(%rbp),$s3
.align 4
.Lcbc_fast_enc_loop:
xor 0($inp),$s0
xor 4($inp),$s1
xor 8($inp),$s2
xor 12($inp),$s3
mov $keyp,$key # restore key
mov $inp,$_inp # if ($verticalspin) save inp
call _x86_64_AES_encrypt
mov $_inp,$inp # if ($verticalspin) restore inp
mov $_len,%r10
mov $s0,0($out)
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
lea 16($inp),$inp
lea 16($out),$out
sub \$16,%r10
test \$-16,%r10
mov %r10,$_len
jnz .Lcbc_fast_enc_loop
mov $_ivp,%rbp # restore ivp
mov $s0,0(%rbp) # save ivec
mov $s1,4(%rbp)
mov $s2,8(%rbp)
mov $s3,12(%rbp)
jmp .Lcbc_fast_cleanup
#----------------------------- DECRYPT -----------------------------#
.align 16
.LFAST_DECRYPT:
cmp $inp,$out
je .Lcbc_fast_dec_in_place
mov %rbp,$ivec
.align 4
.Lcbc_fast_dec_loop:
mov 0($inp),$s0 # read input
mov 4($inp),$s1
mov 8($inp),$s2
mov 12($inp),$s3
mov $keyp,$key # restore key
mov $inp,$_inp # if ($verticalspin) save inp
call _x86_64_AES_decrypt
mov $ivec,%rbp # load ivp
mov $_inp,$inp # if ($verticalspin) restore inp
mov $_len,%r10 # load len
xor 0(%rbp),$s0 # xor iv
xor 4(%rbp),$s1
xor 8(%rbp),$s2
xor 12(%rbp),$s3
mov $inp,%rbp # current input, next iv
sub \$16,%r10
mov %r10,$_len # update len
mov %rbp,$ivec # update ivp
mov $s0,0($out) # write output
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
lea 16($inp),$inp
lea 16($out),$out
jnz .Lcbc_fast_dec_loop
mov $_ivp,%r12 # load user ivp
mov 0(%rbp),%r10 # load iv
mov 8(%rbp),%r11
mov %r10,0(%r12) # copy back to user
mov %r11,8(%r12)
jmp .Lcbc_fast_cleanup
.align 16
.Lcbc_fast_dec_in_place:
mov 0(%rbp),%r10 # copy iv to stack
mov 8(%rbp),%r11
mov %r10,0+$ivec
mov %r11,8+$ivec
.align 4
.Lcbc_fast_dec_in_place_loop:
mov 0($inp),$s0 # load input
mov 4($inp),$s1
mov 8($inp),$s2
mov 12($inp),$s3
mov $keyp,$key # restore key
mov $inp,$_inp # if ($verticalspin) save inp
call _x86_64_AES_decrypt
mov $_inp,$inp # if ($verticalspin) restore inp
mov $_len,%r10
xor 0+$ivec,$s0
xor 4+$ivec,$s1
xor 8+$ivec,$s2
xor 12+$ivec,$s3
mov 0($inp),%r11 # load input
mov 8($inp),%r12
sub \$16,%r10
jz .Lcbc_fast_dec_in_place_done
mov %r11,0+$ivec # copy input to iv
mov %r12,8+$ivec
mov $s0,0($out) # save output [zaps input]
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
lea 16($inp),$inp
lea 16($out),$out
mov %r10,$_len
jmp .Lcbc_fast_dec_in_place_loop
.Lcbc_fast_dec_in_place_done:
mov $_ivp,%rdi
mov %r11,0(%rdi) # copy iv back to user
mov %r12,8(%rdi)
mov $s0,0($out) # save output [zaps input]
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
.align 4
.Lcbc_fast_cleanup:
cmpl \$0,$mark # was the key schedule copied?
lea $aes_key,%rdi
je .Lcbc_exit
mov \$240/8,%ecx
xor %rax,%rax
.long 0x90AB48F3 # rep stosq
jmp .Lcbc_exit
#--------------------------- SLOW ROUTINE ---------------------------#
.align 16
.Lcbc_slow_prologue:
# allocate aligned stack frame...
lea -88(%rsp),%rbp
and \$-64,%rbp
# ... just "above" key schedule
lea -88-63(%rcx),%r10
sub %rbp,%r10
neg %r10
and \$0x3c0,%r10
sub %r10,%rbp
xchg %rsp,%rbp
#add \$8,%rsp # reserve for return address!
mov %rbp,$_rsp # save %rsp
.Lcbc_slow_body:
#mov %rdi,$_inp # save copy of inp
#mov %rsi,$_out # save copy of out
#mov %rdx,$_len # save copy of len
#mov %rcx,$_key # save copy of key
mov %r8,$_ivp # save copy of ivp
mov %r8,%rbp # rearrange input arguments
mov %r9,%rbx
mov %rsi,$out
mov %rdi,$inp
mov %rcx,$key
mov %rdx,%r10
mov 240($key),%eax
mov $key,$keyp # save key pointer
shl \$4,%eax
lea ($key,%rax),%rax
mov %rax,$keyend
# pick Te4 copy which can't "overlap" with stack frame or key scdedule
lea 2048($sbox),$sbox
lea 768-8(%rsp),%rax
sub $sbox,%rax
and \$0x300,%rax
lea ($sbox,%rax),$sbox
cmp \$0,%rbx
je .LSLOW_DECRYPT
#--------------------------- SLOW ENCRYPT ---------------------------#
test \$-16,%r10 # check upon length
mov 0(%rbp),$s0 # load iv
mov 4(%rbp),$s1
mov 8(%rbp),$s2
mov 12(%rbp),$s3
jz .Lcbc_slow_enc_tail # short input...
.align 4
.Lcbc_slow_enc_loop:
xor 0($inp),$s0
xor 4($inp),$s1
xor 8($inp),$s2
xor 12($inp),$s3
mov $keyp,$key # restore key
mov $inp,$_inp # save inp
mov $out,$_out # save out
mov %r10,$_len # save len
call _x86_64_AES_encrypt_compact
mov $_inp,$inp # restore inp
mov $_out,$out # restore out
mov $_len,%r10 # restore len
mov $s0,0($out)
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
lea 16($inp),$inp
lea 16($out),$out
sub \$16,%r10
test \$-16,%r10
jnz .Lcbc_slow_enc_loop
test \$15,%r10
jnz .Lcbc_slow_enc_tail
mov $_ivp,%rbp # restore ivp
mov $s0,0(%rbp) # save ivec
mov $s1,4(%rbp)
mov $s2,8(%rbp)
mov $s3,12(%rbp)
jmp .Lcbc_exit
.align 4
.Lcbc_slow_enc_tail:
mov %rax,%r11
mov %rcx,%r12
mov %r10,%rcx
mov $inp,%rsi
mov $out,%rdi
.long 0x9066A4F3 # rep movsb
mov \$16,%rcx # zero tail
sub %r10,%rcx
xor %rax,%rax
.long 0x9066AAF3 # rep stosb
mov $out,$inp # this is not a mistake!
mov \$16,%r10 # len=16
mov %r11,%rax
mov %r12,%rcx
jmp .Lcbc_slow_enc_loop # one more spin...
#--------------------------- SLOW DECRYPT ---------------------------#
.align 16
.LSLOW_DECRYPT:
shr \$3,%rax
add %rax,$sbox # recall "magic" constants!
mov 0(%rbp),%r11 # copy iv to stack
mov 8(%rbp),%r12
mov %r11,0+$ivec
mov %r12,8+$ivec
.align 4
.Lcbc_slow_dec_loop:
mov 0($inp),$s0 # load input
mov 4($inp),$s1
mov 8($inp),$s2
mov 12($inp),$s3
mov $keyp,$key # restore key
mov $inp,$_inp # save inp
mov $out,$_out # save out
mov %r10,$_len # save len
call _x86_64_AES_decrypt_compact
mov $_inp,$inp # restore inp
mov $_out,$out # restore out
mov $_len,%r10
xor 0+$ivec,$s0
xor 4+$ivec,$s1
xor 8+$ivec,$s2
xor 12+$ivec,$s3
mov 0($inp),%r11 # load input
mov 8($inp),%r12
sub \$16,%r10
jc .Lcbc_slow_dec_partial
jz .Lcbc_slow_dec_done
mov %r11,0+$ivec # copy input to iv
mov %r12,8+$ivec
mov $s0,0($out) # save output [can zap input]
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
lea 16($inp),$inp
lea 16($out),$out
jmp .Lcbc_slow_dec_loop
.Lcbc_slow_dec_done:
mov $_ivp,%rdi
mov %r11,0(%rdi) # copy iv back to user
mov %r12,8(%rdi)
mov $s0,0($out) # save output [can zap input]
mov $s1,4($out)
mov $s2,8($out)
mov $s3,12($out)
jmp .Lcbc_exit
.align 4
.Lcbc_slow_dec_partial:
mov $_ivp,%rdi
mov %r11,0(%rdi) # copy iv back to user
mov %r12,8(%rdi)
mov $s0,0+$ivec # save output to stack
mov $s1,4+$ivec
mov $s2,8+$ivec
mov $s3,12+$ivec
mov $out,%rdi
lea $ivec,%rsi
lea 16(%r10),%rcx
.long 0x9066A4F3 # rep movsb
jmp .Lcbc_exit
.align 16
.Lcbc_exit:
mov $_rsp,%rsi
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lcbc_popfq:
popfq
.Lcbc_epilogue:
ret
.size asm_AES_cbc_encrypt,.-asm_AES_cbc_encrypt
___
}
# Constant data: the .LAES_Te table referenced by the routines above.
$code.=<<___;
.align 64
.LAES_Te:
___
# Te0 entries (standard AES forward table words). Emitted via _data_word,
# which is defined earlier in this file; lookups above use scale-8
# addressing with byte displacements 0..3, so _data_word presumably lays
# each word out to occupy 8 bytes -- see its definition to confirm.
&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
#Te4 # four copies of Te4 to choose from to avoid L1 aliasing
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
# Second Te4 copy begins here (continues beyond this chunk).
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
#rcon: AES key-schedule round constants, followed by byte-replicated
# mask constants (0x80…, 0xfe…, 0x1b… patterns) consumed by the
# generated code's GF(2^8) arithmetic.
$code.=<<___;
.long 0x00000001, 0x00000002, 0x00000004, 0x00000008
.long 0x00000010, 0x00000020, 0x00000040, 0x00000080
.long 0x0000001b, 0x00000036, 0x80808080, 0x80808080
.long 0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
___
# Start of the decryption lookup table (Td), aligned to a cache line.
$code.=<<___;
.align 64
.LAES_Td:
___
&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
#Td4: # four copies of Td4 to choose from to avoid L1 aliasing
&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
___
&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
___
&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
___
&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
.asciz "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align 64
___
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
# Win64-only structured-exception-handling (SEH) unwind handlers.
# The four variables below name the registers carrying the handler's
# arguments under the Win64 calling convention (rcx, rdx, r8, r9).
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type block_se_handler,\@abi-omnipotent
.align 16
block_se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_block_prologue
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_block_prologue
mov 24(%rax),%rax # pull saved real stack pointer
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
mov -40(%rax),%r14
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
.Lin_block_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
jmp .Lcommon_seh_exit
.size block_se_handler,.-block_se_handler
.type key_se_handler,\@abi-omnipotent
.align 16
key_se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HandlerData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_key_prologue
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_key_prologue
lea 56(%rax),%rax
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
mov -40(%rax),%r14
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
.Lin_key_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
jmp .Lcommon_seh_exit
.size key_se_handler,.-key_se_handler
.type cbc_se_handler,\@abi-omnipotent
.align 16
cbc_se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
lea .Lcbc_prologue(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_prologue
jb .Lin_cbc_prologue
lea .Lcbc_fast_body(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_fast_body
jb .Lin_cbc_frame_setup
lea .Lcbc_slow_prologue(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_slow_prologue
jb .Lin_cbc_body
lea .Lcbc_slow_body(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lcbc_slow_body
jb .Lin_cbc_frame_setup
.Lin_cbc_body:
mov 152($context),%rax # pull context->Rsp
lea .Lcbc_epilogue(%rip),%r10
cmp %r10,%rbx # context->Rip>=.Lcbc_epilogue
jae .Lin_cbc_prologue
lea 8(%rax),%rax
lea .Lcbc_popfq(%rip),%r10
cmp %r10,%rbx # context->Rip>=.Lcbc_popfq
jae .Lin_cbc_prologue
mov `16-8`(%rax),%rax # biased $_rsp
lea 56(%rax),%rax
.Lin_cbc_frame_setup:
mov -16(%rax),%rbx
mov -24(%rax),%rbp
mov -32(%rax),%r12
mov -40(%rax),%r13
mov -48(%rax),%r14
mov -56(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
.Lin_cbc_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
.Lcommon_seh_exit:
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$`1232/8`,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
mov $disp,%rsi
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
add \$64,%rsp
popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
pop %rdi
pop %rsi
ret
.size cbc_se_handler,.-cbc_se_handler
.section .pdata
.align 4
.rva .LSEH_begin_asm_AES_encrypt
.rva .LSEH_end_asm_AES_encrypt
.rva .LSEH_info_asm_AES_encrypt
.rva .LSEH_begin_asm_AES_decrypt
.rva .LSEH_end_asm_AES_decrypt
.rva .LSEH_info_asm_AES_decrypt
.rva .LSEH_begin_asm_AES_set_encrypt_key
.rva .LSEH_end_asm_AES_set_encrypt_key
.rva .LSEH_info_asm_AES_set_encrypt_key
.rva .LSEH_begin_asm_AES_set_decrypt_key
.rva .LSEH_end_asm_AES_set_decrypt_key
.rva .LSEH_info_asm_AES_set_decrypt_key
.rva .LSEH_begin_asm_AES_cbc_encrypt
.rva .LSEH_end_asm_AES_cbc_encrypt
.rva .LSEH_info_asm_AES_cbc_encrypt
.section .xdata
.align 8
.LSEH_info_asm_AES_encrypt:
.byte 9,0,0,0
.rva block_se_handler
.rva .Lenc_prologue,.Lenc_epilogue # HandlerData[]
.LSEH_info_asm_AES_decrypt:
.byte 9,0,0,0
.rva block_se_handler
.rva .Ldec_prologue,.Ldec_epilogue # HandlerData[]
.LSEH_info_asm_AES_set_encrypt_key:
.byte 9,0,0,0
.rva key_se_handler
.rva .Lenc_key_prologue,.Lenc_key_epilogue # HandlerData[]
.LSEH_info_asm_AES_set_decrypt_key:
.byte 9,0,0,0
.rva key_se_handler
.rva .Ldec_key_prologue,.Ldec_key_epilogue # HandlerData[]
.LSEH_info_asm_AES_cbc_encrypt:
.byte 9,0,0,0
.rva cbc_se_handler
___
}
# Constant-fold every `...` expression embedded in the generated code
# (e.g. `16-8` stack offsets) by evaluating it in Perl.
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

# STDOUT is buffered; a failed flush at close is the only place a write
# error (e.g. full disk) surfaces, so it must be checked.
close STDOUT or die "error closing STDOUT: $!";
| adityachap/fabric-sdk-node | node_modules/grpc/third_party/boringssl/crypto/aes/asm/aes-x86_64.pl | Perl | apache-2.0 | 74,797 |
#!/usr/local/bin/perl
# Generate the report from a cron job

$no_acl_check++;	# running from cron - skip the usual Webmin ACL check
require './sarg-lib.pl';

# Capture the report output in a temp file so it can be shown on failure.
$temp = &tempname();
($from, $to) = split(/\s+/, $config{'range'});
open(TEMP, ">$temp") || die "Failed to open temp file $temp : $!";
$rv = &generate_report(TEMP, 0, $config{'clear'}, $from, $to);
close(TEMP);
# Read the captured output back in Perl instead of shelling out to `cat`,
# which depended on an external command and on $temp being shell-safe.
$out = "";
if (open(TEMPOUT, "<$temp")) {
	local $/;	# slurp mode
	$out = <TEMPOUT>;
	close(TEMPOUT);
	}
unlink($temp);
if (!$rv) {
	print STDERR "Failed to generate Sarg report:\n";
	print STDERR $out;
	exit(1);
	}
else {
	exit(0);
	}
| HasClass0/webmin | sarg/generate.pl | Perl | bsd-3-clause | 423 |
use strict;
use warnings;

package Test::Deep::HashEach;

use Test::Deep::Cmp;

# Comparator that checks every value of a hash against one expected value.

sub init
{
	my ($self, $expected) = @_;

	# Remember the single expectation applied to each hash value.
	$self->{val} = $expected;
}

sub descend
{
	my ($self, $got) = @_;

	# Build an expected hash with the same keys as $got, every key mapped
	# to the one stored expectation, then delegate to the normal deep
	# comparison machinery.
	my %exp = map { $_ => $self->{val} } keys %$got;

	return Test::Deep::descend($got, \%exp);
}

1;
| liuyangning/WX_web | xampp/perl/vendor/lib/Test/Deep/HashEach.pm | Perl | mit | 318 |
#!/usr/bin/env perl
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# SHA1 block procedure for MIPS.
# Performance improvement is 30% on unaligned input. The "secret" is
# to deploy lwl/lwr pair to load unaligned input. One could have
# vectorized Xupdate on MIPSIII/IV, but the goal was to code MIPS32-
# compatible subroutine. There is room for minor optimization on
# little-endian platforms...
######################################################################
# There is a number of MIPS ABI in use, O32 and N32/64 are most
# widely used. Then there is a new contender: NUBI. It appears that if
# one picks the latter, it's possible to arrange code in ABI neutral
# manner. Therefore let's stick to NUBI register layout:
#
($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
#
# The return value is placed in $a0. Following coding rules facilitate
# interoperability:
#
# - never ever touch $tp, "thread pointer", former $gp;
# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
# old code];
# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
#
# For reference here is register layout for N32/64 MIPS ABIs:
#
# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
#
$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64

# Pick pointer-arithmetic mnemonics, load/store mnemonics and the
# register-slot size for the requested ABI.  64-bit flavours (and n32,
# on which the doubleword forms incidentally work too) use the
# doubleword instructions with 8-byte slots; plain 32-bit ABIs use the
# word forms with 4-byte slots.
if ($flavour =~ /64|n32/i) {
	($PTR_ADD,$PTR_SUB,$REG_S,$REG_L,$PTR_SLL,$SZREG)
		= ("dadd","dsub","sd","ld","dsll",8);
} else {
	($PTR_ADD,$PTR_SUB,$REG_S,$REG_L,$PTR_SLL,$SZREG)
		= ("add","sub","sw","lw","sll",4);
}
#
# <appro@openssl.org>
#
######################################################################
# Ask the cross-compiler (when one is configured) whether the target is
# little-endian: a compiler for a little-endian MIPS defines MIPSEL, so
# the literal text survives preprocessing only when it is NOT defined.
$big_endian=(`echo MIPSEL | $ENV{CC} -E -`=~/MIPSEL/)?1:0 if ($ENV{CC});
for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); }
# Redirect STDOUT to the output file only when one was actually given on
# the command line; previously a failed reopen (including the no-argument
# case) was ignored and all generated code was silently lost.
open STDOUT,">$output" or die "can't open $output: $!" if defined $output;

# Fall back to probing the build host's byte order at run time.
if (!defined($big_endian))
            {   $big_endian=(unpack('L',pack('N',1))==1);   }

# offsets of the Most and Least Significant Bytes
$MSB=$big_endian?0:3;
$LSB=3&~$MSB;
# Register-name strings used when emitting the assembly below.
@X=map("\$$_",(8..23)); # a4-a7,s0-s11 - the 16-word message schedule
$ctx=$a0; # 1st argument: context pointer (hash state is loaded from it)
$inp=$a1; # 2nd argument: input data pointer
$num=$a2; # 3rd argument: number of 64-byte blocks
# SHA-1 working variables a..e.
$A="\$1";
$B="\$2";
$C="\$3";
$D="\$7";
$E="\$24"; @V=($A,$B,$C,$D,$E);
# Scratch registers.
$t0="\$25";
$t1=$num; # $num is offloaded to stack
$t2="\$30"; # fp
$K="\$31"; # ra
# Emit one of rounds 0..14.  On little-endian targets the current input
# word is byte-swapped first (SHA-1 input is big-endian by definition);
# the next word $j is then fetched with an unaligned lwl/lwr pair while
# the round function runs.  F(b,c,d) is computed as ((c^d)&b)^d, which
# equals the standard Ch function (b&c)|(~b&d); b is rotated left by 30
# via the sll/srl/or triple.
sub BODY_00_14 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
# Byte swap of word $i is only needed on little-endian hosts.
$code.=<<___ if (!$big_endian);
srl $t0,@X[$i],24 # byte swap($i)
srl $t1,@X[$i],8
andi $t2,@X[$i],0xFF00
sll @X[$i],@X[$i],24
andi $t1,0xFF00
sll $t2,$t2,8
or @X[$i],$t0
or $t1,$t2
or @X[$i],$t1
___
$code.=<<___;
lwl @X[$j],$j*4+$MSB($inp)
sll $t0,$a,5 # $i
addu $e,$K
lwr @X[$j],$j*4+$LSB($inp)
srl $t1,$a,27
addu $e,$t0
xor $t0,$c,$d
addu $e,$t1
sll $t2,$b,30
and $t0,$b
srl $b,$b,2
xor $t0,$d
addu $e,@X[$i]
or $b,$t2
addu $e,$t0
___
}
# Emit one of rounds 15..19.  Same Ch-style round function as rounds
# 0..14 (((c^d)&b)^d), but the next schedule word is now produced by the
# Xupdate recurrence: X[j] = rol(X[j-16]^X[j-14]^X[j-8]^X[j-3], 1),
# computed in place over the 16-entry circular @X buffer.  The byte swap
# of the last loaded word only happens once, at $i==15.
sub BODY_15_19 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if (!$big_endian && $i==15);
srl $t0,@X[$i],24 # byte swap($i)
srl $t1,@X[$i],8
andi $t2,@X[$i],0xFF00
sll @X[$i],@X[$i],24
andi $t1,0xFF00
sll $t2,$t2,8
or @X[$i],$t0
or @X[$i],$t1
or @X[$i],$t2
___
$code.=<<___;
xor @X[$j%16],@X[($j+2)%16]
sll $t0,$a,5 # $i
addu $e,$K
srl $t1,$a,27
addu $e,$t0
xor @X[$j%16],@X[($j+8)%16]
xor $t0,$c,$d
addu $e,$t1
xor @X[$j%16],@X[($j+13)%16]
sll $t2,$b,30
and $t0,$b
srl $t1,@X[$j%16],31
addu @X[$j%16],@X[$j%16]
srl $b,$b,2
xor $t0,$d
or @X[$j%16],$t1
addu $e,@X[$i%16]
or $b,$t2
addu $e,$t0
___
}
# Emit one of rounds 20..39 (and, reused with a different $K, 60..79).
# Round function is the parity function b^c^d.  For all but the final
# round the Xupdate recurrence runs interleaved; at $i==79 the schedule
# is no longer needed, so the hash state is preloaded from $ctx into
# @X[0..4] instead, ready for the final accumulation after the loop.
sub BODY_20_39 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if ($i<79);
xor @X[$j%16],@X[($j+2)%16]
sll $t0,$a,5 # $i
addu $e,$K
srl $t1,$a,27
addu $e,$t0
xor @X[$j%16],@X[($j+8)%16]
xor $t0,$c,$d
addu $e,$t1
xor @X[$j%16],@X[($j+13)%16]
sll $t2,$b,30
xor $t0,$b
srl $t1,@X[$j%16],31
addu @X[$j%16],@X[$j%16]
srl $b,$b,2
addu $e,@X[$i%16]
or @X[$j%16],$t1
or $b,$t2
addu $e,$t0
___
$code.=<<___ if ($i==79);
lw @X[0],0($ctx)
sll $t0,$a,5 # $i
addu $e,$K
lw @X[1],4($ctx)
srl $t1,$a,27
addu $e,$t0
lw @X[2],8($ctx)
xor $t0,$c,$d
addu $e,$t1
lw @X[3],12($ctx)
sll $t2,$b,30
xor $t0,$b
lw @X[4],16($ctx)
srl $b,$b,2
addu $e,@X[$i%16]
or $b,$t2
addu $e,$t0
___
}
# Emit one of rounds 40..59.  Round function is the majority function
# Maj(b,c,d), computed here as (c&d) + ((c^d)&b) folded into the two
# additions to $e; the Xupdate recurrence runs interleaved as in the
# other round bodies.
sub BODY_40_59 {
my ($i,$a,$b,$c,$d,$e)=@_;
my $j=$i+1;
$code.=<<___ if ($i<79);
xor @X[$j%16],@X[($j+2)%16]
sll $t0,$a,5 # $i
addu $e,$K
srl $t1,$a,27
addu $e,$t0
xor @X[$j%16],@X[($j+8)%16]
and $t0,$c,$d
addu $e,$t1
xor @X[$j%16],@X[($j+13)%16]
sll $t2,$b,30
addu $e,$t0
srl $t1,@X[$j%16],31
xor $t0,$c,$d
addu @X[$j%16],@X[$j%16]
and $t0,$b
srl $b,$b,2
or @X[$j%16],$t1
addu $e,@X[$i%16]
or $b,$t2
addu $e,$t0
___
}
$FRAMESIZE=16; # large enough to accommodate NUBI saved registers
# NUBI flavours save additional callee-saved registers in the frame.
$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc0fff008 : 0xc0ff0000;
# Function header and prologue of sha1_block_data_order.
$code=<<___;
#ifdef OPENSSL_FIPSCANISTER
# include <openssl/fipssyms.h>
#endif
.text
.set noat
.set noreorder
.align 5
.globl sha1_block_data_order
.ent sha1_block_data_order
sha1_block_data_order:
.frame $sp,$FRAMESIZE*$SZREG,$ra
.mask $SAVED_REGS_MASK,-$SZREG
.set noreorder
$PTR_SUB $sp,$FRAMESIZE*$SZREG
$REG_S $ra,($FRAMESIZE-1)*$SZREG($sp)
$REG_S $fp,($FRAMESIZE-2)*$SZREG($sp)
$REG_S $s11,($FRAMESIZE-3)*$SZREG($sp)
$REG_S $s10,($FRAMESIZE-4)*$SZREG($sp)
$REG_S $s9,($FRAMESIZE-5)*$SZREG($sp)
$REG_S $s8,($FRAMESIZE-6)*$SZREG($sp)
$REG_S $s7,($FRAMESIZE-7)*$SZREG($sp)
$REG_S $s6,($FRAMESIZE-8)*$SZREG($sp)
$REG_S $s5,($FRAMESIZE-9)*$SZREG($sp)
$REG_S $s4,($FRAMESIZE-10)*$SZREG($sp)
___
$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
$REG_S $s3,($FRAMESIZE-11)*$SZREG($sp)
$REG_S $s2,($FRAMESIZE-12)*$SZREG($sp)
$REG_S $s1,($FRAMESIZE-13)*$SZREG($sp)
$REG_S $s0,($FRAMESIZE-14)*$SZREG($sp)
$REG_S $gp,($FRAMESIZE-15)*$SZREG($sp)
___
$code.=<<___;
$PTR_SLL $num,6
$PTR_ADD $num,$inp
$REG_S $num,0($sp)
lw $A,0($ctx)
lw $B,4($ctx)
lw $C,8($ctx)
lw $D,12($ctx)
b .Loop
lw $E,16($ctx)
.align 4
.Loop:
.set reorder
lwl @X[0],$MSB($inp)
lui $K,0x5a82
lwr @X[0],$LSB($inp)
ori $K,0x7999 # K_00_19
___
for ($i=0;$i<15;$i++) { &BODY_00_14($i,@V); unshift(@V,pop(@V)); }
for (;$i<20;$i++) { &BODY_15_19($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
lui $K,0x6ed9
ori $K,0xeba1 # K_20_39
___
for (;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
lui $K,0x8f1b
ori $K,0xbcdc # K_40_59
___
for (;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
lui $K,0xca62
ori $K,0xc1d6 # K_60_79
___
for (;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
$PTR_ADD $inp,64
$REG_L $num,0($sp)
addu $A,$X[0]
addu $B,$X[1]
sw $A,0($ctx)
addu $C,$X[2]
addu $D,$X[3]
sw $B,4($ctx)
addu $E,$X[4]
sw $C,8($ctx)
sw $D,12($ctx)
sw $E,16($ctx)
.set noreorder
bne $inp,$num,.Loop
nop
.set noreorder
$REG_L $ra,($FRAMESIZE-1)*$SZREG($sp)
$REG_L $fp,($FRAMESIZE-2)*$SZREG($sp)
$REG_L $s11,($FRAMESIZE-3)*$SZREG($sp)
$REG_L $s10,($FRAMESIZE-4)*$SZREG($sp)
$REG_L $s9,($FRAMESIZE-5)*$SZREG($sp)
$REG_L $s8,($FRAMESIZE-6)*$SZREG($sp)
$REG_L $s7,($FRAMESIZE-7)*$SZREG($sp)
$REG_L $s6,($FRAMESIZE-8)*$SZREG($sp)
$REG_L $s5,($FRAMESIZE-9)*$SZREG($sp)
$REG_L $s4,($FRAMESIZE-10)*$SZREG($sp)
___
$code.=<<___ if ($flavour =~ /nubi/i);
$REG_L $s3,($FRAMESIZE-11)*$SZREG($sp)
$REG_L $s2,($FRAMESIZE-12)*$SZREG($sp)
$REG_L $s1,($FRAMESIZE-13)*$SZREG($sp)
$REG_L $s0,($FRAMESIZE-14)*$SZREG($sp)
$REG_L $gp,($FRAMESIZE-15)*$SZREG($sp)
___
$code.=<<___;
jr $ra
$PTR_ADD $sp,$FRAMESIZE*$SZREG
.end sha1_block_data_order
.rdata
.asciiz "SHA1 for MIPS, CRYPTOGAMS by <appro\@openssl.org>"
___
print $code;
close STDOUT;
| domenicosolazzo/philocademy | venv/src/node-v0.10.36/deps/openssl/openssl/crypto/sha/asm/sha1-mips.pl | Perl | mit | 8,278 |
package parallel_blast;
#used to run a blast search on multiple forks
use strict;
use warnings;
use List::MoreUtils qw(any);
use Data::Dumper;
# Split a blast search across LSF jobs: one job per contig of the query
# fasta, then reassemble the per-contig results into the requested output.
#
# Arguments (hashref): command   => full blast command line, must contain
#                                   -query <file> and -out <file>
#                      num_forks => requested parallelism (validated only;
#                                   submission is currently per-contig)
# Dies if the command cannot be parsed.  No meaningful return value.
sub parallel_blast {
    my ($args_href) = @_;

    my $command   = $args_href->{command};
    my $num_forks = $args_href->{num_forks};

    # Parse the query and output paths back out of the command line:
    # everything after -query / -out up to the next whitespace.
    my ($file) = defined $command ? $command =~ m/-query\s([^\s]+)/ : ();
    my ($out)  = defined $command ? $command =~ m/-out\s([^\s]+)/  : ();
    print($file) if defined $file;

    if ( any { !defined } $num_forks, $command, $file, $out ) {
        die "Could not parallelize blast.\n";
    }

    # Directory portion of the output path.  As before, this needs at least
    # two slashes and breaks if the trailing '/' is left off.
    my ($prefix) = $out =~ m{(/.*/)};

    my $contigs = get_contigs($file);

    # FIXED: an unfinished contig-binning fragment here did not compile
    # (missing semicolons, bare "if" without parentheses, "keys $contig").
    # bsub_blast() submits one job per contig, so $num_forks is currently
    # only validated above; restore binning here if per-fork grouping is
    # ever needed.

    my $num_files = bsub_blast($contigs, $command, $prefix);
    wait_all_jobs();

    # Concatenate the numbered per-contig results into the output file.  If
    # it becomes important to keep results in input order, the contigs could
    # be read in as an array instead of a hash.
    open my $OUT, ">", $out or die "Could not write $out: $!\n";
    for my $i ( 1 .. $num_files ) {
        my $part = "$prefix/blst_temp/$i.txt";
        open my $IN, "<", $part or die "Could not read $part: $!\n";
        while ( readline $IN ) { print $OUT $_ }
        close $IN;
    }
    close $OUT or die "Could not close $out: $!\n";

    # cleanup of the scratch directory created by bsub_blast()
    system("rm -rf $prefix/blst_temp/");
}
#minimalistic sub to turn a fasta into a hash
sub get_contigs {
my ($file) = @_;
open my $IN, "<", $file;
my %contigs;
my $header;
while (my $line = readline $IN) {
chomp $line;
if ( $line =~ m/\A>/ ) {
$header = $line;
}
else {
$contigs{$header} .= $line;
}
}
return \%contigs;
}
# Write each contig to its own numbered fasta under $prefix/blst_temp and
# submit one LSF job per contig running the blast command against it.
# Returns the number of files/jobs created; the caller reassembles
# $prefix/blst_temp/<n>.txt in that numbering.
sub bsub_blast {
    my ($contigs, $command, $prefix) = @_;

    my $temp_dir = "$prefix/blst_temp";
    ( -d $temp_dir ) or mkdir $temp_dir or die "Could not create $temp_dir: $!\n";

    my $id = 1;
    foreach my $contig ( keys %{$contigs} ) {
        my $temp_fas    = "$temp_dir/$id.fasta";
        my $temp_result = "$temp_dir/$id.txt";

        # Single-contig fasta for this job.
        open my $OUT, ">", $temp_fas or die "Could not write $temp_fas: $!\n";
        print $OUT $contig . "\n" . $contigs->{$contig} . "\n";
        close $OUT;

        my $sub_command = substitute_command($command, $temp_fas, $temp_result);

        # One core per job, shared stdout/stderr logs, and a common job name
        # so wait_all_jobs() can poll the whole batch at once.
        my $bsub = join " ", ( "bsub",
                               "-q week",
                               "-o $prefix/multi-out.bjobs",
                               "-e $prefix/multi-err.bjobs",
                               "-n 1",
                               "-J parallelBLAST",
                               "$sub_command"
                             );
        # FIXED: this submission was commented out, so no jobs were ever
        # launched even though wait_all_jobs() polled for them.
        system($bsub) == 0 or die "Could not use bsub: \n $bsub\n";
        $id++;
    }

    # Number of files (and jobs) created.
    return $id - 1;
}
# Block until no LSF job named "parallelBLAST" remains: poll bjobs every
# 30 seconds and treat non-empty output as "still running".
sub wait_all_jobs {
my $is_running = 1;
#checks every 30s to see if the any job is still running based on the file size of the bjobs output
#might fail w/o write permissions
# NOTE(review): the polling file is written into the current working
# directory, and '?' is a shell glob character — "rm ACTIVE_JOBS?" below
# can match other single-suffix files if any exist.  Consider a safer
# temp-file name; verify before changing, since the name is also the
# redirect target above.
while ( $is_running ) {
sleep(30);
system("bjobs -J parallelBLAST > ACTIVE_JOBS?");
$is_running = -s "ACTIVE_JOBS?";
}
system("rm ACTIVE_JOBS?");
}
# Return a copy of $command with its -query and -out arguments replaced by
# the per-contig file and result path.  The original values are matched
# literally (\Q...\E) because file paths routinely contain regex
# metacharacters such as '.'; previously they were interpolated unescaped.
# If -query or -out is absent, that substitution is skipped.
sub substitute_command {
    my ($command, $sub_file, $sub_out) = @_;

    # Pull the original argument values back out of the command line.
    my ($query) = $command =~ m/-query\s([^\s]+)/;
    my ($out)   = $command =~ m/-out\s([^\s]+)/;

    # Replace out first, then query — same order as the original code.
    $command =~ s/\Q$out\E/$sub_out/    if defined $out;
    $command =~ s/\Q$query\E/$sub_file/ if defined $query;

    return $command;
}
1;
| hunter-cameron/Bioinformatics | perl/parallel_blast.pm | Perl | mit | 3,964 |
# $Id: Tag.pm,v 1.17 2008/03/03 16:55:04 asc Exp $
use strict;
package Net::Delicious::Tag;
use base qw (Net::Delicious::Object);
$Net::Delicious::Tag::VERSION = '1.13';
=head1 NAME
Net::Delicious::Tag - OOP for del.icio.us tag thingies
=head1 SYNOPSIS
use Net::Delicious;
my $del = Net::Delicious->new({...});
foreach my $tag ($del->tags()) {
# $tag is a Net::Delicious::Tag
# object.
print "$tag\n";
}
=head1 DESCRIPTION
OOP for del.icio.us tag thingies.
=head1 NOTES
=over 4
=item *
This package overrides the perl builtin I<stringify> operator and returns the value of the object's I<tag> method.
=item *
It isn't really expected that you will instantiate these
objects outside of I<Net::Delicious> itself.
=back
=cut
# Stringification overload: interpolating a Net::Delicious::Tag object in a
# string yields the value of its tag() method (as promised in the NOTES POD).
use overload q("") => sub { shift->tag() };
=head1 PACKAGE METHODS
=cut
=head2 __PACKAGE__->new(\%args)
Returns a I<Net::Delicious::Tag> object. Woot!
=cut
# Defined in Net::Delicious::Object
=head1 OBJECT METHODS
=cut
=head2 $obj->count()
Returns an int.
=cut
# Defined in Net::Delicious::Object
=head2 $obj->tag()
Returns an string.
=cut
# Defined in Net::Delicious::Object
=head2 $obj->as_hashref()
Return the object as a hash ref safe for serializing and re-blessing.
=cut
# Defined in Net::Delicious::Object
=head1 VERSION
1.13
=head1 DATE
$Date: 2008/03/03 16:55:04 $
=head1 AUTHOR
Aaron Straup Cope <ascope@cpan.org>
=head1 SEE ALSO
L<Net::Delicious>
=head1 LICENSE
Copyright (c) 2004-2008 Aaron Straup Cope. All rights reserved.
This is free software, you may use it and distribute it under the
same terms as Perl itself.
=cut
return 1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Net/Delicious/Tag.pm | Perl | mit | 1,647 |
#!/usr/bin/perl
# Generate the subtrees displayed by the given tree for the given taxa sets.
# Input: two files supplied on command line: first contains one newick tree, second contains k rows, where each row is a tab delim list of
# taxon names sampled from the original newick tree (ie, a subsets file).
# Output: k newick trees for the displayed subtrees
# USAGE: displaysub.pl treefile subsetsfile
###################
# Main script body: read one newick tree from the first file and the taxa
# subsets from the second, then print the displayed subtree for each subset.
# NOTE(review): this script runs without strict/warnings, uses bareword
# filehandles and 2-arg open, and does not check open() for failure.
open FH, "<$ARGV[0]";
$nwk = <FH>;
close FH;
open FH, "<$ARGV[1]";
while (<FH>)
{
push @taxaSets, {map { $_ => 1 } split}; # array of the taxa sets (as hashes).
}
close FH;
$numTaxaSets = @taxaSets;
for $ts (0..$numTaxaSets-1)
{
# The tree is re-parsed for every subset because pruning is destructive.
my $Tree = treeInit($nwk);
# @taxa is assigned but apparently unused below — presumably leftover; verify.
@taxa = keys %{$taxaSets[$ts]};
displayedSubtree($Tree,$taxaSets[$ts]); # pass ref to tree data structure and a ref to the taxa array
writeNewick($Tree->{ROOT});
}
sub displayedSubtree
{
	# Restrict the tree (in place) to the taxa present in %$taxaRef by
	# pruning every other leaf and collapsing the degree-1 nodes that result.
	my ($treeRef, $keepTaxa) = @_;
	recursePrune($treeRef->{ROOT}, $keepTaxa);
	# NOTE: NLEAVES counts on the remaining nodes are stale after pruning
	# (the original code carried the same caveat).
	return;
}
# Destructively prune the subtree at $node, keeping only leaves whose NAME
# appears in %$taxaRef and collapsing any internal node left with a single
# child.  Returns 1 if the parent should keep this node, 0 to drop it.
sub recursePrune
{
# return 1 if the parent should KEEP this node as its child; 0 if the parent should delete this node as its child
my ($node,$taxaRef)=@_;
# Leaves: kept iff their taxon name is in the requested set.
if (isLeaf($node))
{
if (exists $taxaRef->{$node->{NAME}})
{return 1}
else
{return 0}
}
# Internal node: recurse first, remembering which children survive.
my @keptChildren=();
my $nDesc=scalar @{$node->{DESC}};
my $nKept=0;
foreach my $desc (@{$node->{DESC}})
{
if (recursePrune($desc,$taxaRef)==1)
{
push @keptChildren,$desc;
++$nKept;
}
}
if ($nKept == $nDesc) # keep all descendants, no changes, so just return to parent signalling all's well
{
return 1;
}
else
{
if ($nKept >= 2)
{
$node->{DESC} = [@keptChildren];
return 1;
}
elsif ($nKept==1) # we'll just make this node have the same information as its one descendant--effectively deleting the deg 1 node
{
# NOTE(review): the grandchildren's ANC pointers still reference the
# removed child after this copy; nothing downstream appears to read
# ANC on pruned trees, but verify before relying on it.
my $onlyChild=$keptChildren[0];
$node->{NAME}=$onlyChild->{NAME};
$node->{DESC}=[@{$onlyChild->{DESC}}];
return 1;
}
else # nKept = 0
{
return 0; # don't have to worry about the {DESC} data structure, we'll get rid of the node when we return anyway...
}
}
}
####################################################################
# Parse one newick string into the tree structure used throughout this
# script.  Returns a hashref with ROOT (node), LEAVES (array of leaf nodes),
# LEAFH (name => leaf node) and NLEAVES.  Dies on parse errors or if the
# tree is not binary.  Uses the file-level globals $name, @tokens and
# $tokens_ix shared with parseCheck()/next_tok()/make_group().
sub treeInit
{
my ($newick)=@_;
my ($root,%treeH);
$name = '[\w\d\_\.]+|\'.*?\''; # notice the non-greedy match needed in the '...' format
# Tokenize into names, commas, parens and the terminating semicolon.
@tokens = ($newick=~/($name|\,|\(|\)|\;)/g);
$tokens_ix=0;
parseCheck();
$tokens_ix=0;
#print "@tokens\n";
my $tok = next_tok();
if ($tok =~ /\(/)
{
$root = make_group();
}
else
{die "First token is incorrect: $tok\n";}
# Assign node IDs and per-node leaf counts, then build the leaf lookup.
initIDTree(0,$root);
initNLEAVES($root);
my @tt = descLeafNodes($root);
my %leafH = makeLeafHash(@tt);
#foreach (keys %leafH) {print "$_\n"}
$treeH{ROOT}=$root;
$treeH{LEAVES}=[@tt];
$treeH{LEAFH}={%leafH};
$treeH{NLEAVES}= $root->{NLEAVES};
die ("Fatal error: tree is NOT binary\n") if (!isBinaryTree($root));
return \%treeH;
}
# Number every node in the subtree at $root in preorder, starting from
# $startIX.  Uses the global counter $gIX shared with recurseIndexTree().
sub initIDTree
{
my ($startIX,$root)=@_;
$gIX=$startIX;
recurseIndexTree($root);
}
# Preorder walk assigning $gIX (global, incremented per node) to each ID.
# NOTE: $child is not declared with my — the script runs without strict.
sub recurseIndexTree
{
my ($nodeRef)=@_;
$nodeRef->{ID}=$gIX++;
for $child (@{$nodeRef->{DESC}})
{ recurseIndexTree($child); }
return ;
}
# Fill in NLEAVES (number of leaves at or below each node) bottom-up and
# return the count for the subtree rooted at $nodeRef.
sub initNLEAVES
{
my ($nodeRef)=@_;
my $sum;
if (isLeaf($nodeRef)) {$sum=1} else {$sum=0};
for $child (@{$nodeRef->{DESC}})
{ $sum += initNLEAVES($child); }
$nodeRef->{NLEAVES}=$sum;
return $sum;
}
####################################################################
sub makeLeafHash
{
	# Build a NAME => leaf-node lookup from the given leaf hashrefs;
	# returned as a flat hash (list), matching the caller's expectations.
	my %byName;
	for my $leaf (@_) {
		$byName{ $leaf->{NAME} } = $leaf;
	}
	return %byName;
}
####################################################################
# Return the NAMEs of all leaf nodes in the subtree rooted at the given node.
# FIXED: the argument was read as @_[0] (a one-element array slice) instead
# of the scalar element $_[0], which works only by accident and triggers a
# "better written as $_[0]" warning.
sub descLeafNames
{
return map { $_->{NAME} } descLeafNodes( $_[0] );
}
# Collect all leaf nodes below $root via the global accumulator @taxaT
# (cleared here, filled by recursePush) and return them as a list.
sub descLeafNodes
{
my ($root)=@_;
undef @taxaT;
recursePush($root);
return @taxaT;
}
# Depth-first helper for descLeafNodes(): push every leaf onto the global
# @taxaT in traversal order.
sub recursePush
{
my ($nodeRef)=@_;
if (isLeaf($nodeRef))
{push @taxaT, $nodeRef;}
for $child (@{$nodeRef->{DESC}})
{ recursePush($child); }
return ;
}
####################################################################
# Recursive-descent parse of one parenthesized newick group: consumes tokens
# (via next_tok and the global token stream) until the matching ')', building
# and returning the group's node with its children attached.
sub make_group
{
my $rootRef=nodeNew("");
while (my $tok = next_tok())
{
if ($tok =~ /$name/)
{
my $nodeRef = nodeNew($tok);
addChild($rootRef,$nodeRef);
}
elsif ($tok =~ /\(/)
{
# Nested group: recurse.  NOTE(review): $nodeRef here is missing 'my',
# so it leaks as a global — harmless under no-strict but worth fixing.
$nodeRef = make_group();
addChild($rootRef,$nodeRef);
}
elsif ($tok =~ /\)/)
{
return $rootRef;
}
elsif ($tok =~ /,/)
{
next;
}
}
}
# **********************************************************
# Return the next token from the global @tokens stream, or 0 when exhausted.
# NOTE(review): the '>=' comparison means the final token (index $#tokens,
# normally the trailing ';') is never returned — this appears intentional
# for the parser, but verify before reusing elsewhere.
sub next_tok
{
if ($tokens_ix >= $#tokens) {return 0}
return ($tokens[$tokens_ix++])
}
# **********************************************************
sub nodeNew
{
	# Construct a fresh, unattached tree node for the given taxon name.
	my ($taxonName) = @_;
	my %node = (
		ID      => -1,           # assigned later by initIDTree()
		NAME    => $taxonName,
		DESC    => [],           # child node refs
		ANC     => -1,           # parent ref; -1 while unattached
		NLEAVES => -1,           # filled in by initNLEAVES()
	);
	return \%node;
}
# **********************************************************
sub addChild
{
	# Attach $childRef under $parentRef, wiring the child's back-pointer too.
	my ($parentRef, $childRef) = @_;
	$childRef->{ANC} = $parentRef;
	push @{ $parentRef->{DESC} }, $childRef;
}
# **********************************************************
# Return 1 iff every internal node in the subtree at $nodeRef has exactly
# two children (leaves are trivially fine), else 0.
sub isBinaryTree
{
my ($nodeRef)=@_;
if (isLeaf($nodeRef)) {return 1};
if (scalar @{$nodeRef->{DESC}} != 2) {return 0};
for $child (@{$nodeRef->{DESC}})
{ if (!isBinaryTree($child)) {return 0}; }
return 1 ;
}
# **********************************************************
# Debugging dump: print ID, leaf count and name for every node in preorder,
# plus the LCA1/LCA2 cross-links when they have been set on internal nodes.
sub recursePrint
{
my ($nodeRef)=@_;
print "$nodeRef->{ID}: num leaves:$nodeRef->{NLEAVES}:taxon name = $nodeRef->{NAME}\n";
if (!isLeaf($nodeRef) && exists $nodeRef->{LCA1})
{
print "\tTree 1 invLCA: $nodeRef->{LCA1}{ID}\n";
print "\tTree 2 invLCA: $nodeRef->{LCA2}{ID}\n";
}
for $child (@{$nodeRef->{DESC}})
{ recursePrint($child); }
return ;
}
# **********************************************************
sub isLeaf
{
	# A node is a leaf exactly when it has no descendants.
	my ($node) = @_;
	return @{ $node->{DESC} } ? 0 : 1;
}
sub isRoot
{
	# The root is the only node whose ANC field still holds the sentinel -1
	# (attached nodes hold a parent hashref, compared here numerically by
	# address, exactly as the original did).
	my ($node) = @_;
	return $node->{ANC} == -1 ? 1 : 0;
}
#**********************************************************
# Sanity-check the global @tokens stream: count parens, names and commas
# and die if the parentheses are unbalanced.  The counters ($left, $right,
# $nTax, $commas) are undeclared globals — the script runs without strict.
sub parseCheck
{
for $tok (@tokens)
{
if ($tok =~ /\(/) {$left++};
if ($tok =~ /\)/) {$right++};
if ($tok =~ /$name/) {$nTax++};
if ($tok =~ /\,/) {$commas++};
}
#print "ntaxa=$nTax,left=$left, right=$right,commas=$commas\n";
die "Unmatched parens in newick string\n" if ($left != $right);
}
######################
sub replaceChild
{
	# Swap one child pointer of $node for another; children are identified
	# by reference address (numeric ==), matching the original behavior.
	my ($node, $oldChild, $newChild) = @_;
	for my $slot ( @{ $node->{DESC} } ) {
		$slot = $newChild if $slot == $oldChild;
	}
}
##########################
# Deep-copy the subtree at $node: each node is duplicated via copyNode()
# (NAME and NLEAVES only) and re-linked with addChild(), so IDs start fresh.
sub copyTree
{
my ($node)=@_;
my $newNode = copyNode($node);
foreach my $desc (@{$node->{DESC}})
{
my $newChild = copyTree($desc);
addChild($newNode,$newChild);
}
return $newNode;
}
# Duplicate a single node's NAME and NLEAVES into a fresh node; ID, ANC and
# DESC are deliberately left at their nodeNew() defaults.
sub copyNode
# make new node with info copied from old: copies name and nleaves, but not ID number,anc or descs;
{
my ($old)=@_;
my $new = nodeNew($old->{NAME});
$new->{NLEAVES}=$old->{NLEAVES};
return $new;
}
# Create an unnamed node, hang $ndesc beneath it, attach it under $nanc,
# and return the new intermediate node.
sub insertNode
{
my ($ndesc,$nanc)=@_;
my $ninsert = nodeNew("");
addChild($ninsert,$ndesc);
addChild($nanc,$ninsert);
return $ninsert;
}
#############################################
# Return 1 iff node $A is a proper ancestor of node $B (a node is not its
# own ancestor): walk B's ANC chain up to the root comparing by address.
sub isA_anc_B
{
my ($A,$B)=@_;
if ($A==$B) {return 0};
#print "In isanc: $A->{ID},$B->{ID}\n";
my ($n)=$B;
#print "ID:$n->{ID}\n";
while (!isRoot($n))
{
$n = $n->{ANC};
#print "ID:$n->{ID}\n";
if ($n == $A) {return 1}
}
return 0;
}
#############################################
# Print the subtree at $node in newick format to STDOUT; when called on the
# root, a terminating ";\n" is appended.
sub writeNewick
{
my ($node)=@_;
if (isLeaf($node))
{ print $node->{NAME} }
else
{
print "(";
for my $ix (0..$#{$node->{DESC}})
{
writeNewick($node->{DESC}[$ix]);
if ($ix < $#{$node->{DESC}}) { print "," };
}
print ")";
}
if (isRoot($node)) {print ";\n"}
return;
}
| zwickl/terraphy | examples/perlScripts/displaysub.pl | Perl | mit | 7,417 |
package Bc125At::ProgressBar;
# Copyright (c) 2013, Rikus Goodell.
#
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
=head1 CONSTRUCTION
Bc125At::ProgressBar->new( ... )
=head1 OPTIONS
=over 3
=item * value - startign value (defaults to 0)
=item * max - value at end of progress bar (defaults to 100)
=item * redisplay - redisplay interval (defaults to 5)
=item * callback - subroutine to call on progress bar updates (optional, defaults to nothing)
=back
=cut
# Construct a progress bar.  Options (see POD above): value (start, 0),
# max (end of bar, 100), redisplay (update interval, 5), callback (optional
# coderef invoked on each display with (value, max)).
sub new {
    my ($package, @args) = @_;
    my $self = {
        value     => 0,
        max       => 100,
        redisplay => 5,
        @args
    };
    # FIXED: two-argument bless so that subclasses calling
    # Subclass->new(...) get a Subclass object; one-argument bless always
    # blessed into this package.
    return bless $self, $package;
}
sub more {
    my ($self, $newvalue) = @_;
    # Either jump straight to the supplied value or advance by one.
    $self->{value} = defined $newvalue ? $newvalue : $self->{value} + 1;
    # Redraw at the start, at every redisplay interval, and at completion.
    my $due = $self->{value} == 0
        || $self->{value} % $self->{redisplay} == 0
        || $self->{value} == $self->{max};
    $self->display() if $due;
}
# Redraw the bar on STDOUT: a 50-character track with "value / max" after
# it, then carriage-return back and overdraw the filled portion with '#'.
# A newline is emitted once the bar is complete, and the optional callback
# is invoked with (value, max) on every redraw.
sub display {
my $self = shift;
# Autoflush so the in-place \r redraws appear immediately (localized).
local $| = 1;
my $frac = $self->{value} / $self->{max};
print "\r|" . ("-" x 50) . "|" . " $self->{value} / $self->{max}" . (" " x 10) . "\r" . "|" . ("#" x (50 * $frac));
print "\n" if $self->{value} >= $self->{max};
$self->{callback}->(@$self{qw(value max)}) if $self->{callback};
}
1;
| rikus--/bc125at-perl | lib/Bc125At/ProgressBar.pm | Perl | mit | 2,339 |
package BATCHJOB::IMPORT::REWRITES;
use strict;
use URI::Escape::XS;
use lib "/backend/lib";
use DBINFO;
use DOMAIN::TOOLS;
use ZTOOLKIT;
use SEARCH;
# Batch-import URL rewrites into DOMAINS_URL_MAP for a merchant ($bj is the
# batch-job object supplying username/MID/partition).  Each row supplies a
# domain + path and either an explicit target URL, explicit keywords, or a
# URL to extract keywords from; keyword rows are resolved to a product,
# search, or homepage URL via SEARCH::search.
#
# NOTE(review): a sub literally named "import" is invoked by Perl's `use`
# with the class name as first argument — callers presumably `require` this
# module or call &BATCHJOB::IMPORT::REWRITES::import directly; verify.
sub import {
my ($bj,$fieldsref,$lineref,$optionsref) = @_;
my ($USERNAME,$MID,$LUSERNAME,$PRT) = ($bj->username(),$bj->mid(),$bj->lusername(),$bj->prt());
use Data::Dumper;
print STDERR Dumper($fieldsref,$optionsref);
#print STDERR "$USERNAME: \n";
#print STDERR Dumper($fieldsref);
#print STDERR Dumper($lineref);
my $linecount = 0;
# An explicit PRT option overrides the batch job's partition.
if (defined $optionsref->{'PRT'}) {
$PRT = int($optionsref->{'PRT'});
}
# my $metaref = $bj->meta(); print Dumper($metaref);
my $udbh = &DBINFO::db_user_connect($USERNAME);
# Destructive mode: wipe all existing URL maps for this partition's domains
# before importing.  NOTE(review): SQL is built by string interpolation;
# $MID/$PRT come from the job object and DOMAIN values via DBINFO::makeset,
# but confirm those are properly quoted upstream.
if ($optionsref->{'REWRITE_DESTRUCTIVE'}==1) {
$bj->slog("Destroying existing URL maps for partition $PRT");
my $pstmt = "/* BATCHJOB::IMPORT::REWRITES::import */ select DOMAIN from DOMAINS where MID=$MID /* $USERNAME:$PRT */ and PRT=$PRT";
my $ref = $udbh->selectall_arrayref($pstmt);
my @DOMAINS = ();
foreach my $x (@{$ref}) { push @DOMAINS, $x->[0]; }
$pstmt = "delete from DOMAINS_URL_MAP where MID=$MID /* $USERNAME */ and DOMAIN in ".&DBINFO::makeset($udbh,\@DOMAINS);
print STDERR $pstmt."\n";
$udbh->do($pstmt);
}
my $rows_count = scalar(@{$lineref});
my $rows_done = 0;
# my ($LU) = LUSER->new($USERNAME,$LUSERNAME);
foreach my $line ( @{$lineref} ) {
# Zip the field names onto this row's values, keyed by uppercased name
# (field names look like '%DOMAIN', '%PATH', '%TARGETURL', ...).
my %DATA = ();
my $pos = 0; # $pos keeps track of which field in the @DATA array we are on.
foreach my $destfield (@{$fieldsref}) {
$DATA{ uc($fieldsref->[$pos]) } = $line->[$pos];
$pos++; # move to the next field that we should parse
}
my $TARGET_URL = $DATA{'%TARGETURL'};
my $KEYWORDS = $DATA{'%KEYWORDS'};
if ((defined $DATA{'%KEYWORDS'}) && ($DATA{'%KEYWORDS'} ne '')) {
$KEYWORDS = $DATA{'%KEYWORDS'};
}
# No explicit keywords: derive them from the %EXTRACT_KEYWORDS URL by
# splitting its path on '/' and '-' and adding any query-string values.
if ((defined $DATA{'%EXTRACT_KEYWORDS'}) && ($DATA{'%EXTRACT_KEYWORDS'} ne '') && ($KEYWORDS eq '')) {
my $E = $DATA{'%EXTRACT_KEYWORDS'};
my @KW =();
if ($E =~ /^(.*?)\?(.*?)$/) {
## url: /asdf.html?k1=v1&k2=v2
## look and see if we've got a url with parameters on it.. then extract the v1 v2 as keywords from k1=v1&k2=v2
my ($url,$params) = ($1,$2);
$url =~ s/\.[A-Za-z]{3,4}$//s; # remove .html,.htm,.etc
@KW = split(/[\/\-]+/,$url);
my $kvs = &ZTOOLKIT::parseparams($params);
foreach my $k (keys %{$kvs}) {
next if ($kvs->{$k} eq '');
push @KW, $kvs->{$k};
}
}
elsif ($E =~ /^(.*?)$/) {
## /something-else.html
$E =~ s/\.[A-Za-z]{3,4}$//s; # remove .html,.htm,.etc
@KW = split(/[\/\-]+/,$E);
}
# print Dumper($E,\@KW);
$KEYWORDS = join(' ',@KW);
}
# No explicit target: resolve keywords via search — 0 hits => homepage,
# 1 hit => product page, many hits => search results page.
if (($TARGET_URL eq '') && ($KEYWORDS ne '')) {
my ($pids) = &SEARCH::search($USERNAME,
'PRT'=>$PRT,
'KEYWORDS'=>$KEYWORDS,
'CATALOG'=>$optionsref->{'CATALOG'},
);
print 'PIDS: '.Dumper($pids);
if ((not defined $pids) || (scalar(@{$pids})==0)) {
## zero results goes to homepage
$TARGET_URL = '/';
}
elsif (scalar(@{$pids})==1) {
## single results
$TARGET_URL = '/product/'.$pids->[0].'?keywords='.URI::Escape::XS::uri_escape($DATA{'%KEYWORDS'});
}
else {
$TARGET_URL = '/search.cgis?'.&ZTOOLKIT::buildparams({
'catalog'=>$optionsref->{'CATALOG'},
'keywords'=>$DATA{'%KEYWORDS'}
});
}
print STDERR "TARGET_URL: $TARGET_URL (KEYWORDS: $KEYWORDS)\n";
}
# Schema of the destination table, for reference:
#+-----------+--------------+------+-----+---------------------+----------------+
#| Field | Type | Null | Key | Default | Extra |
#+-----------+--------------+------+-----+---------------------+----------------+
#| ID | int(11) | NO | PRI | NULL | auto_increment |
#| USERNAME | varchar(20) | NO | | NULL | |
#| MID | int(11) | NO | MUL | 0 | |
#| DOMAIN | varchar(50) | NO | | NULL | |
#| PATH | varchar(100) | NO | | NULL | |
#| TARGETURL | varchar(200) | NO | | NULL | |
#| CREATED | datetime | YES | | 0000-00-00 00:00:00 | |
#+-----------+--------------+------+-----+---------------------+----------------+
#7 rows in set (0.00 sec)
# Upsert keyed on (MID, DOMAIN, PATH).
&DBINFO::insert($udbh,'DOMAINS_URL_MAP',{
'USERNAME'=>$USERNAME,
'MID'=>$MID,
'DOMAIN'=>$DATA{'%DOMAIN'},
'PATH'=>$DATA{'%PATH'},
'TARGETURL'=>$TARGET_URL,
'*CREATED'=>'now()',
},update=>1,key=>['MID','DOMAIN','PATH']);
# Progress feedback every 5 rows, plus a per-row log line.
if (($rows_done++%5)==0) {
$bj->progress($rows_done,$rows_count,"Updated Maps");
}
$bj->slog("Update $rows_done: $DATA{'%DOMAIN'} $DATA{'%PATH'}");
}
&DBINFO::db_user_close();
};
1;
__DATA__
1;
| CommerceRack/backend | lib/BATCHJOB/IMPORT/REWRITES.pm | Perl | mit | 4,819 |
package #
Date::Manip::Offset::off112;
# Copyright (c) 2008-2015 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Wed Nov 25 11:44:43 EST 2015
# Data version: tzdata2015g
# Code version: tzcode2015g
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our ($VERSION);
$VERSION='6.52';
END { undef $VERSION; }
our ($Offset,%Offset);
END {
undef $Offset;
undef %Offset;
}
# Generated data (see header: regenerating from tzdata overwrites this file).
# $Offset is the UTC offset string this module represents; %Offset maps an
# offset-variant index to the zone names using it in this tzdata build.
$Offset = '+05:31:40';
%Offset = (
0 => [
'asia/novosibirsk',
],
);
1;
| jkb78/extrajnm | local/lib/perl5/Date/Manip/Offset/off112.pm | Perl | mit | 855 |
package EnsEMBL::Web::Document::HTML::Null;

### "Null" page component: inherits the standard set of HTML component
### classes but deliberately renders nothing.

use strict;

use EnsEMBL::Web::Document::HTML;
use EnsEMBL::Web::Document::HTML::MastHead;
use EnsEMBL::Web::Document::HTML::SearchBox;
use EnsEMBL::Web::Document::HTML::Content;
use EnsEMBL::Web::Document::HTML::Copyright;
use EnsEMBL::Web::Document::HTML::Menu;
use EnsEMBL::Web::Document::HTML::Release;
use EnsEMBL::Web::Document::HTML::HelpLink;
use EnsEMBL::Web::Document::HTML::Title;
use EnsEMBL::Web::Document::HTML::Stylesheet;
use EnsEMBL::Web::Document::HTML::Javascript;
#use EnsEMBL::Web::Document::HTML::RSS;
#use EnsEMBL::Web::Document::HTML::Metax;

use vars qw(@ISA);

# FIXED: RSS and Metax were listed in @ISA even though their 'use' lines
# above are commented out; inheriting from never-loaded packages contributes
# nothing to method resolution and masks errors.  Re-add both the 'use'
# line and the @ISA entry together if those components are re-enabled.
@ISA = qw(EnsEMBL::Web::Document::HTML
          EnsEMBL::Web::Document::HTML::MastHead
          EnsEMBL::Web::Document::HTML::SearchBox
          EnsEMBL::Web::Document::HTML::Content
          EnsEMBL::Web::Document::HTML::Copyright
          EnsEMBL::Web::Document::HTML::Menu
          EnsEMBL::Web::Document::HTML::Release
          EnsEMBL::Web::Document::HTML::HelpLink
          EnsEMBL::Web::Document::HTML::Title
          EnsEMBL::Web::Document::HTML::Stylesheet
          EnsEMBL::Web::Document::HTML::Javascript
         );

# Intentionally produces no output.
sub render {
}

1;
| warelab/gramene-ensembl | maize/modules/EnsEMBL/Web/Document/HTML/Null.pm | Perl | mit | 1,295 |
package IO::All::STDIO;
use strict;
use warnings;
use IO::All -base;
use IO::File;
# Declares this IO::All subtype as 'stdio' (const comes from IO::All -base).
const type => 'stdio';
# Re-bless an IO::All object into this class and run its initialization.
sub stdio {
my $self = shift;
bless $self, __PACKAGE__;
return $self->_init;
}
# Attach to the process's standard input (read mode).
sub stdin {
my $self = shift;
$self->open('<');
return $self;
}
# Attach to the process's standard output (write mode).
sub stdout {
my $self = shift;
$self->open('>');
return $self;
}
# Attach to the process's standard error (write mode).
sub stderr {
my $self = shift;
$self->open_stderr;
return $self;
}
# Open STDIN or STDOUT (chosen by mode: '>' => STDOUT, anything else =>
# STDIN) by fdopen()ing the underlying file descriptor.
# NOTE(review): the fdopen() result is not checked here, unlike
# open_stderr() below — failures are silent; verify before relying on it.
sub open {
my $self = shift;
$self->is_open(1);
my $mode = shift || $self->mode || '<';
my $fileno = $mode eq '>'
? fileno(STDOUT)
: fileno(STDIN);
$self->io_handle(IO::File->new);
$self->io_handle->fdopen($fileno, $mode);
$self->set_binmode;
}
# Open STDERR for writing; the trailing ternary makes this return $self on
# success and 0 on fdopen failure (callers such as stderr() ignore it).
sub open_stderr {
my $self = shift;
$self->is_open(1);
$self->io_handle(IO::File->new);
$self->io_handle->fdopen(fileno(STDERR), '>') ? $self : 0;
}
# XXX Add overload support
=head1 NAME
IO::All::STDIO - STDIO Support for IO::All
=head1 SYNOPSIS
See L<IO::All>.
=head1 DESCRIPTION
=head1 AUTHOR
Ingy döt Net <ingy@cpan.org>
=head1 COPYRIGHT
Copyright (c) 2006. Ingy döt Net. All rights reserved.
Copyright (c) 2004. Brian Ingerson. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
See http://www.perl.com/perl/misc/Artistic.html
=cut
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/IO/All/STDIO.pm | Perl | mit | 1,370 |
:- use_module(xsd2json).
:- use_module(library(http/json)).
% opts_spec(-OptsSpec)
%
% Command-line specification consumed by library(optparse)'s
% opt_arguments/3 and opt_help/2: one entry per option giving its name,
% type, default, short/long flags and help text.
opts_spec([
[
opt(space),
type(atom),
default(remove),
shortflags([ s ]),
longflags([ 'whitespace' ]),
help([
'whitespace handling, one of',
' remove : clean whitespace [default]',
' preserve : keep whitespace'
])
],
[
opt(indentation),
type(integer),
default(2),
shortflags([ i ]),
longflags([ 'indentation' ]),
help([
'set indentation level',
'use 0 for single-line output'
])
],
[
opt(trace),
type(boolean),
default(false),
shortflags([ t ]),
longflags([ 'trace' ]),
help([
'run with activated tracing'
])
],
[
opt(version),
type(boolean),
default(false),
shortflags([ v ]),
longflags([ 'version' ]),
help([
'show xsd2json version information'
])
],
[
opt(debuginfo),
type(boolean),
default(false),
longflags([ 'debug-info' ]),
help([
'show information usually needed for debugging'
])
],
[
opt(help),
type(boolean),
default(false),
shortflags([ h ]),
longflags([ 'help' ]),
help([
'display this help'
])
]
]).
% main
%
% CLI entry point: parse the command line against opts_spec/1, then
% dispatch to the appropriate main/2 clause.
main :-
opts_spec(OptsSpec),
opt_arguments(OptsSpec,Opts,PositionalArgs),
main(Opts,PositionalArgs).
% main(+Opts, +PositionalArgs)
%
% One clause per CLI mode; each terminal clause halts the process.
% --version: print the xsd2json version and exit successfully.
main(Opts,_PositionalArgs) :-
memberchk(version(true),Opts), !,
xsd2json:version(Version),
writeln(Version),
halt(0).
% --debug-info: print version info for xsd2json and the toolchain.
main(Opts,_PositionalArgs) :-
memberchk(debuginfo(true),Opts), !,
xsd2json:version(Version),
writeln('# xsd2json'),
writeln(Version),
writeln('# SWI-Prolog'),
shell('swipl --version'),
writeln('# npm'),
shell('npm -version'),
writeln('# node'),
writeln('# node'),
halt(0).
% --help, or no input file given: print usage generated from opts_spec/1.
main(Opts,PositionalArgs) :-
(
memberchk(help(true),Opts)
;
PositionalArgs = []
), !,
opts_spec(OptsSpec),
opt_help(OptsSpec,Help),
writeln('USAGE: xsd2json [options] <path>'), nl,
writeln('convert a XSD file into equivalent JSON schema'), nl,
writeln('Options:'),
writeln(Help),
halt(0).
% Normal operation: convert Filename to JSON Schema and print it.  The
% two-element PositionalArgs case tolerates an extra leading CLI argument.
main(Opts,PositionalArgs) :-
memberchk(trace(Trace),Opts),
(
PositionalArgs = [Filename]
;
PositionalArgs = [_CLI, Filename]
),
% Optionally enable CHR tracing before conversion.
(
Trace = true,
chr_leash(none),
chr_trace
;
Trace = false
),
xsd2json(Filename,Opts,JSON),
% output
memberchk(indentation(Indentation),Opts),
json_write(user_output,JSON,[width(1),step(Indentation)]), nl,
% finished
halt(0).
% Fallback: any failure above exits with a non-zero status.
main(_,_) :- halt(1).
| fnogatz/xsd2json | lib-pl/cli.pl | Perl | mit | 2,500 |
package IFTTT::CMS::IFTTT;
use strict;
use warnings;
# CMS handler: redirect the current user to the 'view' screen for their own
# author record ($app is the MT-style application object; presumably a
# Movable Type CMS app — verify against the plugin registration).
sub profile {
my $app = shift;
my $user = $app->user;
$app->redirect(
$app->uri(
mode => 'view',
args => {
_type => 'author',
id => $user->id,
},
)
);
}
1;
| masiuchi/mt-plugin-ifttt | plugins/IFTTT/lib/IFTTT/CMS/IFTTT.pm | Perl | mit | 316 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
ensembl-analysis/scripts/sequence_dump.pl
=head1 SYNOPSIS
This script will dump the sequence of all the seq_regions in a particular
coordinate system
=head1 DESCRIPTION
The script can dump into individual fasta files for each seq_region or one
file containing all the sequences. The format is fasta as standard but others
can be specified. The sequence can also be masked for repeats in either
normal uppercase Ns or with softmasking
=head1 OPTIONS
-dbhost host name for database (gets put as host= in locator)
-dbport For RDBs, what port to connect to (port= in locator)
-dbname For RDBs, what name to connect to (dbname= in locator)
-dbuser For RDBs, what username to connect as (user= in locator)
-dbpass For RDBs, what password to use (pass= in locator)
-species Species name/alias. Only required for multispecies dna DBs
-multi_species Boolean for multi-species DBs
-species_id Species ID in the database. Only required for multispecies DBs
-coord_system_name the name of the coordinate system you want to dump
-coord_system_version the version of the coordinate system you want to dump
-output_dir the directory to dump the files too
-toplevel to indicate you want to dump the top level seq regions
-sequence_level to indicate you want to dump the sequence level seq
regions
-nonref to indicate you want available non-reference regions
-include_duplicates Returns duplicate regions. In order to get non-PAR regions of
chrY padded with N's it needs to be turned off (default)
-padded_nonref Returns all non-reference sequences padded with N's to match the corresponding full length
of the reference chromosome for the given non-reference sequence. Only works if -include_duplicates is disabled.
-padded_human_Chr_Y Returns full length human chrY with non-PAR regions padded with N's.
Needed for the FuncGen pipeline. Only works if -include_duplicates is disabled
-human_female Creates a second output file that ommits chrY. Needs -onefile or -filename
-format Deprecated. Following suggestion of the Ensembl core team, Bio::SeqIO has been replaced with
Bio::EnsEMBL::Utils::IO::FASTASerializer. Therefore the output is limited to FASTA.
-header [default | basic | funcgen | rnaseq]
default: chromosome:GRCh37:11:1:135006516:1 chromosome 11
basic: chromosome:GRCh37:11:1:135006516:1
funcgen: 18 dna:chromosome chromosome:GRCh37:18:1:78077248:1
rnaseq: 18
-extension the file extension you want to give the dumped files, by
default this is fa
-mask to indicate you want the sequence repeatmasked
-mask_repeat, which logic name of repeats you want masked. This can
appear on the command line several times. If it doesnt but -mask
does all repeats will be masked
-softmask to indicate you want to softmask the repeats (ie lower case
rather than upper case Ns)
-onefile to indicate you want all the sequences in one file
-help this will print out the docs
=head1 EXAMPLES
perl sequence_dump.pl -dbhost myhost -dbuser myuser -dbpass mypass -dbname
mydatabase -dbport 3306 -coord_system_name chromosome
-output_dir /path/to/output
this will dump each chromosome in a separate file in the output directory
perl sequence_dump.pl -dbhost myhost -dbuser myuser -dbpass mypass -dbname
mydatabase -dbport 3306 -coord_system_name contig -onefile
-output_dir /path/to/output
this will dump the sequence of all the contigs into one file in the output
directory
perl sequence_dump.pl -dbhost myhost -dbuser myuser -dbpass mypass -dbname
mydatabase -dbport 3306 -coord_system_name chromosome
-output_dir /path/to/output -mask -mask_repeat RepeatMask -mask_repeat
Dust -softmask
this will dump each chromosome sequence softmasked for RepeatMasker and
Dust repeats into to separate files
=cut
use warnings ;
use strict;
use Getopt::Long qw(:config no_ignore_case);
use Data::Dumper;
use File::Spec;
use Cwd;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::SeqIO;
use Bio::EnsEMBL::PaddedSlice;
use Bio::EnsEMBL::Utils::IO::FASTASerializer;
my $host = '';
my $port = '3306';
my $dbname = '';
my $dbuser = '';
my $dbpass = '';
my $serializer = 'Bio::EnsEMBL::Utils::IO::FASTASerializer';
my $species_id = 1;
my $header = 'default';
my $format = 'fasta';
my $softmask = 0;
my $extension = 'fa';
my $output_dir = getcwd();
my $species;
my $filename;
my $multi_species;
my $coord_system_name;
my $coord_system_version;
my $single_file;
my $top_level;
my $seq_level;
my $non_ref;
my $padded_human_Chr_Y;
my $human_female;
my $include_duplicates;
my @logic_names;
my $mask;
my $help;
my $padded_nonref;
GetOptions( 'dbhost|host|h:s' => \$host,
'dbport|port|P:n' => \$port,
'dbname|db|D:s' => \$dbname,
'dbuser|user|u:s' => \$dbuser,
'dbpass|pass|p:s' => \$dbpass,
'species=s' => \$species,
'multi_species' => \$multi_species,
'species_id=i' => \$species_id,
'header:s' => \$header,
'coord_system_name|cs_name:s' => \$coord_system_name,
'coord_system_version|cs_version:s' => \$coord_system_version,
'output_dir:s' => \$output_dir,
'extension:s' => \$extension,
'toplevel!' => \$top_level,
'seqlevel!' => \$seq_level,
'nonref!' => \$non_ref,
'padded_nonref!' => \$padded_nonref,
'padded_human_Chr_Y!' => \$padded_human_Chr_Y,
'human_female!' => \$human_female,
'include_duplicates!' => \$include_duplicates,
'format!' => \$format,
'mask!' => \$mask,
'mask_repeat:s@' => \@logic_names,
'softmask!' => \$softmask,
'onefile!' => \$single_file,
'filename=s' => \$filename,
'help!' => \$help,
) or ( $help = 1 );
if ($help) {
    # Display the POD at the top of this script and exit
    exec('perldoc', $0);
}
############################# Sanity tests ################################
# Database connection parameters are mandatory
if(!$host || !$dbname || !$dbuser){
    my $message =
        "Need -dbhost '$host' -dbuser '$dbuser' and -dbname '$dbname' to run. ".
        'Use -help for more detailed documentation.';
    throw($message);
}
# One of the three ways of selecting a coordinate system must be given
if(!$coord_system_name && !$top_level && !$seq_level){
    my $message =
        'Must specify either -coord_system_name, -toplevel, or -seqlevel to run. '.
        'Use -help for more detailed documentation.';
    throw($message);
}
# -toplevel and -seqlevel are mutually exclusive shortcuts
if($top_level && $seq_level){
    my $message =
        'Cannot specify both -toplevel and -seqlevel must be one or the other. '.
        'Use -help for more detailed documentation.';
    throw($message);
}
# Unless writing to an explicit file, the output directory must exist
if(! $filename && (!$output_dir || ! -e $output_dir)){
    my $message =
        "Cannot dump sequence into '$output_dir' it does not exist ".
        'Use -help for more detailed documentation.';
    throw($message);
}
# The padded chrY logic relies on the unique (non-duplicated) Y slices
if($include_duplicates && $padded_human_Chr_Y){
    my $message =
        'Retrieving padded human ChrY only works if duplicates are excluded. '.
        'Run again either not using -include_duplicates or not using '.
        '-padded_human_Chr_Y';
    throw($message);
}
# The female dump is only written alongside a single-file dump
if($human_female && ( (!$filename) && (!$single_file) ) ){
    my $message =
        'When using -human_female, you need to pass a filename or use -onefile';
    throw($message);
}
# -human_female is human specific. Also guard against -species being left
# undefined, which previously triggered an "uninitialized value" warning here.
if($human_female && (!defined $species || $species ne 'homo_sapiens') ){
    my $species_display = defined $species ? $species : '';
    my $message =
        "-human_female only works for homo_sapiens, not '$species_display'";
    throw($message);
}
# Only FASTA output is supported since the move to FASTASerializer
if($format !~ /^fasta$/i ){
    my $message =
        'Following advice from the core team, the output method has been changed '.
        'from Bio::SeqIO to Bio::EnsEMBL::Utils::IO::FASTASerializer. ' .
        'Output is therefore in FASTA format only at the moment. ' .
        'FASTASerializer handles Slices more efficiently memory-wise and also ' .
        'allows changing the FASTA-header. ';
    throw($message);
}
############################# Global settings ###############################
if($top_level){
    # -toplevel / -seqlevel are shortcuts for the special coordinate system
    # names understood by the SliceAdaptor
    $coord_system_name = 'toplevel';
}
if($seq_level){
    $coord_system_name = 'seqlevel';
}
# Connect to the core database the sequences will be dumped from
my $db = Bio::EnsEMBL::DBSQL::DBAdaptor->new
  (
   -dbname => $dbname,
   -host => $host,
   -user => $dbuser,
   -port => $port,
   -pass => $dbpass,
   -species => $species,
   -multispecies_db => $multi_species,
   -species_id => $species_id
  );
################################################################################
# All sequences go into a single file
# If a filename was passed, use that one, otherwise build it
################################################################################
my ($singleSerializer, $fh_singleFile) = (undef, undef);
if($single_file || $filename){
    # Default filename is derived from the coordinate system, e.g. "chromosome.fa"
    $filename ||= File::Spec->catfile($output_dir , "$coord_system_name.$extension");
    open($fh_singleFile, '>', $filename) or die
        "Cannot open stream to $filename: $!";
    $singleSerializer = $serializer->new($fh_singleFile);
}
################################################################################
################################################################################
# If using a single file, removing chrY can be very memory intensive as the
# resulting file can be 3GB+ (release 66). An option has been added that
# simultaneously writes a female FASTA file by simply omitting chrY.
# If the filename is given and contains male, it is replaced by female
#   homo_sapiens_male_GRCh37_66_37_unmasked.fasta ->
#   homo_sapiens_female_GRCh37_66_37_unmasked.fasta
################################################################################
my ($singleSerializer_female, $fh_singleFile_female) = (undef, undef);
if($human_female) {
    my $tmpName;
    if($filename){
        # Derive the female filename from the male one where possible
        $tmpName = $filename;
        if($tmpName =~ /male/){$tmpName =~ s/male/female/}
        else{$tmpName .= '.female'}
    }
    else{
        $tmpName = File::Spec->catfile($output_dir, "$coord_system_name.female.$extension");
    }
    open($fh_singleFile_female, '>', $tmpName) or die
        "Cant open stream to $tmpName: $!";
    $singleSerializer_female = $serializer->new($fh_singleFile_female);
}
################################################################################
################################################################################
# get slice adaptor
# fetch slices where args are eg:
#   ('toplevel','GRCh37',1,undef,undef)
# which will fetch toplevel chromosomes and the unique regions of haplotypes
# and Y, but no Locus Reference Genomic (LRG) sequences
###############################################################################
my $sa = $db->get_SliceAdaptor;
# NOTE(review): the final undef argument presumably controls LRG inclusion
# (see the comment above) -- confirm against SliceAdaptor::fetch_all
my $slices =
  $sa->fetch_all(
    $coord_system_name,
    $coord_system_version,
    $non_ref,
    $include_duplicates,
    undef
  );
################################################################################
################################################################################
# Dispatch table
# Used for creating project-specific headers
# -default:
# chromosome:GRCh37:11:1:135006516:1 chromosome 11
# -basic:
# chromosome:GRCh37:11:1:135006516:1
# -funcgen
# 18 dna:chromosome chromosome:GRCh37:18:1:78077248:1
# -rnaseq
# 18
################################################################################
my $dispatch = {
    # default: full slice name plus coord system name and region name
    default => sub {
        my ($slice) = @_;
        return sprintf(
            '%s %s %s',
            $slice->name(),
            $slice->coord_system_name(),
            $slice->seq_region_name(),
        );
    },
    # basic: just the slice name
    basic => sub {
        my ($slice) = @_;
        return $slice->name();
    },
    # funcgen: e.g. "18 dna:chromosome chromosome:GRCh37:18:1:78077248:1"
    funcgen => sub {
        my ($slice) = @_;
        return sprintf (
            '%s %s %s',
            $slice->seq_region_name(),
            $slice->moltype().':'.$slice->coord_system_name(),
            $slice->name(),
        );
    },
    # rnaseq: just the seq region name, e.g. "18"
    rnaseq => sub {
        my ($slice) = @_;
        return sprintf (
            '%s',
            $slice->seq_region_name(),
        );
    },
};
# Sanity check for header passed
if(not exists $dispatch->{$header}){
    my $message =
        "'$header' not defined. Please adjust dispatch table.\nHeaders currently ".
        "available: " . join (', ', keys (%{$dispatch}));
    throw($message);
}
################################################################################
################################################################################
# Process slices
################################################################################
SLICE:
foreach my $slice(@$slices){
    # Compliance with header format used in previous version of this script
    $singleSerializer->header_function($dispatch->{$header}) if($filename);
    # For FuncGen pipeline, print a PAR-padded chrY
    my ($padded_header, $padded_slice) = (undef, undef);
    if($padded_human_Chr_Y && $slice->name() =~ /^chromosome:GRCh\d\d:Y/ ){
        ($padded_header, $padded_slice) = _build_complete_PAR_padded_chrY($slice);
        # A false header means this Y slice should be skipped entirely
        next SLICE if (!$padded_header);
    }
    # Optionally repeat-mask the slice (hard or soft masking)
    if($mask){
        $slice = $slice->get_repeatmasked_seq(\@logic_names, $softmask);
    }
    # Pad non-reference slices with N's and rewrite the FASTA header so its
    # start/end span the whole padded region
    if ($padded_nonref and (!($slice->is_reference()))) {
        $padded_slice = Bio::EnsEMBL::PaddedSlice->new($slice);
        $padded_header = sub {
            my ($padded_slice) = @_;
            my $original = $padded_slice->name();
            my @header = split(/:/, $original);
            $header[3] = 1;
            $header[4] = $padded_slice->length();
            my $tmp = join(q{:}, @header);
            my $newHeader = "$tmp";
            return ($newHeader);
        };
    }
    # printing output
    # An existing filename at this stage implies that output goes
    # into a single file
    if ($filename) {
        if ($padded_header && $padded_slice) {
            _print_padded($singleSerializer,$padded_header,$padded_slice);
        } else {
            $singleSerializer->print_Seq($slice);
            # Write female file, if demanded (chrY is omitted)
            if( $human_female && $slice->name() !~ /^chromosome:GRCh\d\d:Y/){
                $singleSerializer_female->print_Seq($slice);
            }
        }
    }
    # Write into separate files
    else {
        my $name = File::Spec->catfile($output_dir, $slice->seq_region_name.'.'.$extension);
        print "Multi: $name\n";
        open(my $fh, '>', $name) or die "Cant open stream to $name: $!";
        my $multiSerializer = $serializer->new($fh);
        if ($padded_header && $padded_slice) {
            _print_padded($multiSerializer,$padded_header,$padded_slice);
        } else {
            $multiSerializer->header_function($dispatch->{$header});
            $multiSerializer->print_Seq($slice);
        }
        # Check the close so buffered write errors are not silently lost
        close($fh) or die "Cannot close $name: $!";
    }
}
# Close output handles, checking for deferred write errors. The female
# handle was previously never closed, which risked losing buffered output.
close($fh_singleFile) or die "Cannot close single-file output: $!" if($fh_singleFile);
close($fh_singleFile_female) or die "Cannot close female output: $!" if($fh_singleFile_female);
print "Finished\n";
=head2 _print_padded
Arg [1] : Bio::EnsEMBL::Utils::IO::FASTASerializer
Arg [2] : Modified header line
Arg [3] : Bio::EnsEMBL::Slice
Example : _print_padded($singleSerializer, $y_header, $y_slice)
Description: Replaces the original FASTA-header from the slice with
modified one. After writing the sequence, the original
header is restored.
Returntype : none
Exceptions : none
Caller : main method
Status : at risk
=cut
sub _print_padded {
    my ($serializer, $header_fn, $slice) = @_;
    # Remember the serializer's current header callback so it can be
    # reinstated once the padded slice has been written out.
    my $previous_header_fn = $serializer->header_function();
    $serializer->header_function($header_fn);
    $serializer->print_Seq($slice);
    $serializer->header_function($previous_header_fn);
}
=head2 _build_complete_PAR_padded_chrY
Arg [1] : Bio::EnsEMBL::Slice
Example : ($y_header, $y_slice) = _build_complete_PAR_padded_chrY($slice);
Description: This method was solely written to create a human chrY version that
has non-PAR regions padded with N's. Therefore the script has to
be run using the -padded_human_Chr_Y option.
Returntype : none
Exceptions : none
Caller : main method
Status : at risk
=cut
sub _build_complete_PAR_padded_chrY {
    my ($slice) = @_;
    # $species is the script-level -species argument
    if ($species ne 'homo_sapiens'){
        my $message = "Only tested for homo_sapiens, not for '$species'";
        throw($message);
    }
    # As this method is highly specific, test that we get what we expect
    # The 1st 10,000bp are an accepted guess for the telomeric region of chrY
    # The second slice contains non-PAR regions
    # Returning 0 tells the caller to skip these slices entirely
    if ( ($slice->name() eq 'chromosome:GRCh38:Y:1:10000:1') or ($slice->name() eq 'chromosome:GRCh38:Y:57217416:57227415:1') ){return 0}
    elsif ($slice->name() eq 'chromosome:GRCh38:Y:2781480:56887902:1'){
        print STDERR "Chromosome Y will have padded PAR regions\n";
        # Apply repeat masking here, mirroring what the main loop does
        if($mask){
            $slice = $slice->get_repeatmasked_seq(\@logic_names, $softmask);
        }
        my $y_slice = Bio::EnsEMBL::PaddedSlice->new($slice);
        # Header callback: rewrite start/end so the header covers the whole
        # padded chromosome (start 1 to padded length)
        my $y_header = sub {
            my ($slice) = @_;
            my $original = $slice->name();
            my @header = split(/:/, $original);
            $header[3] = 1;
            $header[4] = $slice->length();
            my $tmp = join(q{:}, @header);
            #my $newHeader = "Y dna:chromosome $tmp";
            my $newHeader = "$tmp";
            return ($newHeader);
        };
        return($y_header, $y_slice);
    }
    else {
        # Any other Y slice means we are not on GRCh38 as expected
        my $message =
            'This method has been specifically written for the Ensembl FuncGen '.
            'pipeline. The slice it expect are specific for GRCh38. Compliance '.
            'with any other assembly has not been tested. The header found is '.
            "not expected: '".$slice->name()."'";
        throw($message);
    }
}
| james-monkeyshines/ensembl-analysis | scripts/sequence_dump.pl | Perl | apache-2.0 | 18,409 |
package Paws::Route53Domains::ContactDetail;
  # Data object describing a Route 53 Domains contact. All attributes are
  # read-only strings except ExtraParams; see the POD below for details.
  use Moose;
  has AddressLine1 => (is => 'ro', isa => 'Str');
  has AddressLine2 => (is => 'ro', isa => 'Str');
  has City => (is => 'ro', isa => 'Str');
  has ContactType => (is => 'ro', isa => 'Str');
  has CountryCode => (is => 'ro', isa => 'Str');
  has Email => (is => 'ro', isa => 'Str');
  # Extra name/value parameters required by certain top-level domains
  has ExtraParams => (is => 'ro', isa => 'ArrayRef[Paws::Route53Domains::ExtraParam]');
  has Fax => (is => 'ro', isa => 'Str');
  has FirstName => (is => 'ro', isa => 'Str');
  has LastName => (is => 'ro', isa => 'Str');
  has OrganizationName => (is => 'ro', isa => 'Str');
  has PhoneNumber => (is => 'ro', isa => 'Str');
  has State => (is => 'ro', isa => 'Str');
  has ZipCode => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Route53Domains::ContactDetail
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::Route53Domains::ContactDetail object:
$service_obj->Method(Att1 => { AddressLine1 => $value, ..., ZipCode => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::Route53Domains::ContactDetail object:
$result = $service_obj->Method(...);
$result->Att1->AddressLine1
=head1 DESCRIPTION
ContactDetail includes the following elements.
=head1 ATTRIBUTES
=head2 AddressLine1 => Str
First line of the contact's address.
=head2 AddressLine2 => Str
Second line of contact's address, if any.
=head2 City => Str
The city of the contact's address.
=head2 ContactType => Str
Indicates whether the contact is a person, company, association, or
public organization. If you choose an option other than C<PERSON>, you
must enter an organization name, and you can't enable privacy
protection for the contact.
=head2 CountryCode => Str
Code for the country of the contact's address.
=head2 Email => Str
Email address of the contact.
=head2 ExtraParams => ArrayRef[L<Paws::Route53Domains::ExtraParam>]
A list of name-value pairs for parameters required by certain top-level
domains.
=head2 Fax => Str
Fax number of the contact.
Constraints: Phone number must be specified in the format "+[country
dialing code].[number including any area code]". For example, a US
phone number might appear as C<"+1.1234567890">.
=head2 FirstName => Str
First name of contact.
=head2 LastName => Str
Last name of contact.
=head2 OrganizationName => Str
Name of the organization for contact types other than C<PERSON>.
=head2 PhoneNumber => Str
The phone number of the contact.
Constraints: Phone number must be specified in the format "+[country
dialing code].[number including any area code]". For example, a US
phone number might appear as C<"+1.1234567890">.
=head2 State => Str
The state or province of the contact's city.
=head2 ZipCode => Str
The zip or postal code of the contact's address.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::Route53Domains>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Route53Domains/ContactDetail.pm | Perl | apache-2.0 | 3,496 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::FeedItemSetLinkService;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseService);

# Issues a GET against the named resource and deserializes the response
# into a FeedItemSetLink resource object.
sub get {
  my ($self, $request_body) = @_;
  return $self->SUPER::call(
    'GET',
    'v9/{+resourceName}',
    $request_body,
    'Google::Ads::GoogleAds::V9::Resources::FeedItemSetLink'
  );
}

# POSTs the mutate request to the feedItemSetLinks:mutate endpoint and
# deserializes the response into a MutateFeedItemSetLinksResponse.
sub mutate {
  my ($self, $request_body) = @_;
  return $self->SUPER::call(
    'POST',
    'v9/customers/{+customerId}/feedItemSetLinks:mutate',
    $request_body,
    'Google::Ads::GoogleAds::V9::Services::FeedItemSetLinkService::MutateFeedItemSetLinksResponse'
  );
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/FeedItemSetLinkService.pm | Perl | apache-2.0 | 1,433 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Services::CustomerService::MutateCustomerRequest;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructor: copies the recognised request fields out of the supplied
# hashref, drops any fields the caller did not supply, and blesses the result.
sub new {
  my ($class, $args) = @_;

  # Map each supported field name straight from the caller-supplied args.
  my $self = {
    map { $_ => $args->{$_} }
      qw(customerId operation responseContentType validateOnly)
  };

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Services/CustomerService/MutateCustomerRequest.pm | Perl | apache-2.0 | 1,216 |
#############################################################
# This file was automatically generated on 2015-07-28. #
# #
# Bindings Version 2.1.5 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
=pod
=encoding utf8
=head1 NAME
Tinkerforge::BrickletAnalogOutV2 - Generates configurable DC voltage between 0V and 12V
=cut
package Tinkerforge::BrickletAnalogOutV2;
use strict;
use warnings;
use Carp;
use threads;
use threads::shared;
use parent 'Tinkerforge::Device';
use Tinkerforge::IPConnection;
use Tinkerforge::Error;
=head1 CONSTANTS
=over
=item DEVICE_IDENTIFIER
This constant is used to identify a Analog Out Bricklet 2.0.
The get_identity() subroutine and the CALLBACK_ENUMERATE callback of the
IP Connection have a device_identifier parameter to specify the Brick's or
Bricklet's type.
=cut
use constant DEVICE_IDENTIFIER => 256;
=item DEVICE_DISPLAY_NAME
This constant represents the display name of a Analog Out Bricklet 2.0.
=cut
use constant DEVICE_DISPLAY_NAME => 'Analog Out Bricklet 2.0';
=item FUNCTION_SET_OUTPUT_VOLTAGE
This constant is used with the get_response_expected(), set_response_expected()
and set_response_expected_all() subroutines.
=cut
use constant FUNCTION_SET_OUTPUT_VOLTAGE => 1;
=item FUNCTION_GET_OUTPUT_VOLTAGE
This constant is used with the get_response_expected(), set_response_expected()
and set_response_expected_all() subroutines.
=cut
use constant FUNCTION_GET_OUTPUT_VOLTAGE => 2;
=item FUNCTION_GET_INPUT_VOLTAGE
This constant is used with the get_response_expected(), set_response_expected()
and set_response_expected_all() subroutines.
=cut
use constant FUNCTION_GET_INPUT_VOLTAGE => 3;
=item FUNCTION_GET_IDENTITY
This constant is used with the get_response_expected(), set_response_expected()
and set_response_expected_all() subroutines.
=cut
use constant FUNCTION_GET_IDENTITY => 255;
=back
=head1 FUNCTIONS
=over
=item new()
Creates an object with the unique device ID *uid* and adds it to
the IP Connection *ipcon*.
=cut
sub new
{
    my ($class, $uid, $ipcon) = @_;
    # NOTE(review): [2, 0, 0] is presumably the expected API version for
    # these generated bindings -- confirm against Tinkerforge::Device::_new
    my $self = Tinkerforge::Device->_new($uid, $ipcon, [2, 0, 0]);

    # Per the constant names: the setter defaults to fire-and-forget,
    # while every getter always expects a response packet
    $self->{response_expected}->{&FUNCTION_SET_OUTPUT_VOLTAGE} = Tinkerforge::Device->_RESPONSE_EXPECTED_FALSE;
    $self->{response_expected}->{&FUNCTION_GET_OUTPUT_VOLTAGE} = Tinkerforge::Device->_RESPONSE_EXPECTED_ALWAYS_TRUE;
    $self->{response_expected}->{&FUNCTION_GET_INPUT_VOLTAGE} = Tinkerforge::Device->_RESPONSE_EXPECTED_ALWAYS_TRUE;
    $self->{response_expected}->{&FUNCTION_GET_IDENTITY} = Tinkerforge::Device->_RESPONSE_EXPECTED_ALWAYS_TRUE;

    bless($self, $class);

    return $self;
}
=item set_output_voltage()
Sets the voltage in mV. The possible range is 0V to 12V (0-12000).
=cut
# Sends the output voltage (in mV, 0-12000) to the device.
sub set_output_voltage
{
    my ($self, $millivolts) = @_;
    return $self->_send_request(&FUNCTION_SET_OUTPUT_VOLTAGE, [$millivolts], 'S', '');
}
=item get_output_voltage()
Returns the voltage as set by :func:`SetOutputVoltage`.
=cut
# Requests the currently configured output voltage from the device.
sub get_output_voltage
{
    my $self = shift;
    return $self->_send_request(&FUNCTION_GET_OUTPUT_VOLTAGE, [], '', 'S');
}
=item get_input_voltage()
Returns the input voltage in mV.
=cut
# Requests the input voltage (mV) from the device.
sub get_input_voltage
{
    my $self = shift;
    return $self->_send_request(&FUNCTION_GET_INPUT_VOLTAGE, [], '', 'S');
}
=item get_identity()
Returns the UID, the UID where the Bricklet is connected to,
the position, the hardware and firmware version as well as the
device identifier.
The position can be 'a', 'b', 'c' or 'd'.
The device identifier numbers can be found :ref:`here <device_identifier>`.
|device_identifier_constant|
=cut
# Requests UID, connected UID, position, hardware/firmware versions and
# the device identifier; response is unpacked as 'Z8 Z8 a C3 C3 S'.
sub get_identity
{
    my $self = shift;
    return $self->_send_request(&FUNCTION_GET_IDENTITY, [], '', 'Z8 Z8 a C3 C3 S');
}
=back
=cut
1;
| IT-K/SR-Monitoring | lib/Tinkerforge/BrickletAnalogOutV2.pm | Perl | apache-2.0 | 4,070 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Component::Tools::VEP;
### Parent class for all VEP components
use strict;
use warnings;
use parent qw(EnsEMBL::Web::Component::Tools);
sub job_statistics {
  ## Parses the VEP stats file into a hashref of section => { key => value },
  ## plus a 'sort' entry remembering the order keys appeared in per section
  my $self = shift;

  my $stats_content = $self->object->result_files->{'stats_file'}->content;

  my $stats = {};
  my $section;

  foreach my $line (split /\n/, $stats_content) {
    if ($line =~ m/^\[(.+?)\]$/) {
      # A "[Section]" line starts a new section
      $section = $1;
    } elsif ($line =~ m/\w+/) {
      # Data lines are tab-separated key/value pairs
      my ($key, $value) = split /\t/, $line;
      $stats->{$section}{$key} = $value;
      push @{$stats->{'sort'}{$section}}, $key;
    }
  }

  return $stats;
}
1;
| muffato/public-plugins | tools/modules/EnsEMBL/Web/Component/Tools/VEP.pm | Perl | apache-2.0 | 1,346 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Resources::Ad;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Constructor: copies every supported Ad field verbatim from the supplied
# args hashref, drops any fields the caller did not set, and blesses the
# result into this class.
sub new {
  my ($class, $args) = @_;

  # Full list of fields this resource supports.
  my @fields = qw(
    addedByGoogleAds appAd appEngagementAd appPreRegistrationAd callAd
    devicePreference displayUploadAd displayUrl expandedDynamicSearchAd
    expandedTextAd finalAppUrls finalMobileUrls finalUrlSuffix finalUrls
    gmailAd hotelAd id imageAd legacyAppInstallAd legacyResponsiveDisplayAd
    localAd name resourceName responsiveDisplayAd responsiveSearchAd
    shoppingComparisonListingAd shoppingProductAd shoppingSmartAd
    smartCampaignAd systemManagedResourceSource textAd trackingUrlTemplate
    type urlCollections urlCustomParameters videoAd videoResponsiveAd
  );

  my $self = {map { $_ => $args->{$_} } @fields};

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  bless $self, $class;
  return $self;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Resources/Ad.pm | Perl | apache-2.0 | 3,185 |
#!/usr/bin/perl
use strict;
use File::Basename;
use XML::Simple;
my $program_name = File::Basename::basename $0;
my $release = "";
my $outdir = "";
my $scratch_dir = "";
my $pnor_data_dir = "";
my $pnor_filename = "";
my $payload = "";
my $bootkernel = "";
my $hb_image_dir = "";
my $xml_layout_file = "";
my $targeting_binary_filename = "";
my $targeting_RO_binary_filename = "";
my $targeting_RW_binary_filename = "";
my $sbec_binary_filename = "";
my $sbe_binary_filename = "";
my $wink_binary_filename = "";
my $occ_binary_filename = "";
my $openpower_version_filename = "";
my $wofdata_binary_filename = "";
my $memddata_binary_filename = "";
my $hdat_binary_filename = "";
my $ocmbfw_binary_filename = "";
while (@ARGV > 0){
$_ = $ARGV[0];
chomp($_);
$_ = &trim_string($_);
if (/^-h$/i || /^-help$/i || /^--help$/i){
#print help content
usage();
exit 0;
}
elsif (/^-release/i){
$release = $ARGV[1] or die "Bad command line arg given: expecting a release input.\n";
shift;
}
elsif (/^-scratch_dir/i){
$scratch_dir = $ARGV[1] or die "Bad command line arg given: expecting a scratch dir path.\n";
shift;
}
elsif (/^-outdir/i){
$outdir = $ARGV[1] or die "Bad command line arg given: expecting a directory for output data.\n";
shift;
}
elsif (/^-pnor_data_dir/i){
$pnor_data_dir = $ARGV[1] or die "Bad command line arg given: expecting a directory containing pnor data.\n";
shift;
}
elsif (/^-pnor_filename/i){
$pnor_filename = $ARGV[1] or die "Bad command line arg given: expecting a pnor filename.\n";
shift;
}
elsif (/^-hb_image_dir/i){
$hb_image_dir = $ARGV[1] or die "Bad command line arg given: expecting an hb image dir path.\n";
shift;
}
elsif (/^-xml_layout_file/i){
$xml_layout_file = $ARGV[1] or die "Bad command line arg given: expecting an xml layout file.\n";
shift;
}
elsif (/^-payload/i){
$payload = $ARGV[1] or die "Bad command line arg given: expecting a filepath to payload binary file.\n";
shift;
}
elsif (/^-bootkernel/i){
$bootkernel = $ARGV[1] or die "Bad command line arg given: expecting a filepath to bootloader kernel image.\n";
shift;
}
elsif (/^-targeting_binary_filename/i){
$targeting_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting a targeting binary filename.\n";
shift;
}
elsif (/^-targeting_RO_binary_filename/i){
$targeting_RO_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting a targeting RW binary filename.\n";
shift;
}
elsif (/^-targeting_RW_binary_filename/i){
$targeting_RW_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting a targeting RW binary filename.\n";
shift;
}
elsif (/^-sbe_binary_filename/i){
$sbe_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an sbe binary filename.\n";
shift;
}
elsif (/^-sbec_binary_filename/i){
$sbec_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an sbec binary filename.\n";
shift;
}
elsif (/^-wink_binary_filename/i){
$wink_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an wink binary filename.\n";
shift;
}
elsif (/^-occ_binary_filename/i){
$occ_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an occ binary filename.\n";
shift;
}
elsif (/^-openpower_version_filename/i){
$openpower_version_filename = $ARGV[1] or die "Bad command line arg given: expecting openpower version filename.\n";
shift;
}
elsif (/^-wofdata_binary_filename/i){
$wofdata_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting a wofdata binary filename.\n";
shift;
}
elsif (/^-memddata_binary_filename/i){
$memddata_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting a memddata binary filename.\n";
shift;
}
elsif (/^-hdat_binary_filename/i){
$hdat_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an hdat binary filename.\n";
shift;
}
elsif (/^-ocmbfw_binary_filename/i){
$ocmbfw_binary_filename = $ARGV[1] or die "Bad command line arg given: expecting an ocmbfw binary filename.\n";
shift;
}
else {
print "Unrecognized command line arg: $_ \n";
print "To view all the options and help text run \'$program_name -h\' \n";
exit 1;
}
shift;
}
if ($outdir eq "") {
die "-outdir <path_to_directory_for_output_files> is a required command line variable. Please run again with this parameter.\n";
}
if ($release eq "") {
die "-release <p8 or p9> is a required command line variable. Please run again with this parameter.\n";
}
# Verify that pnor_layout file exists and parse it
if ( ! -e "$xml_layout_file")
{
die "$xml_layout_file does not exist\n";
}
my $xs = new XML::Simple(keyattr=>[], forcearray => 1);
my $parsed_pnor_layout = $xs->XMLin($xml_layout_file);
print "release = $release\n";
print "scratch_dir = $scratch_dir\n";
print "pnor_data_dir = $pnor_data_dir\n";
my $build_pnor_command = "$hb_image_dir/buildpnor.pl";
$build_pnor_command .= " --pnorOutBin $pnor_filename --pnorLayout $xml_layout_file --editedLayoutLocation $hb_image_dir";
# Process HBD section and possibly HBD_RW section
if (checkForPnorPartition("HBD_RW", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_HBD $scratch_dir/$targeting_RO_binary_filename";
$build_pnor_command .= " --binFile_HBD_RW $scratch_dir/$targeting_RW_binary_filename";
}
else
{
$build_pnor_command .= " --binFile_HBD $scratch_dir/$targeting_binary_filename";
}
$build_pnor_command .= " --binFile_SBE $scratch_dir/$sbe_binary_filename";
$build_pnor_command .= " --binFile_HBB $scratch_dir/hostboot.header.bin.ecc";
$build_pnor_command .= " --binFile_HBI $scratch_dir/hostboot_extended.header.bin.ecc";
$build_pnor_command .= " --binFile_HBRT $scratch_dir/hostboot_runtime.header.bin.ecc";
$build_pnor_command .= " --binFile_HBEL $scratch_dir/hbel.bin.ecc";
$build_pnor_command .= " --binFile_GUARD $scratch_dir/guard.bin.ecc";
$build_pnor_command .= " --binFile_PAYLOAD $payload";
$build_pnor_command .= " --binFile_BOOTKERNEL $bootkernel";
$build_pnor_command .= " --binFile_NVRAM $scratch_dir/nvram.bin";
$build_pnor_command .= " --binFile_ATTR_TMP $scratch_dir/attr_tmp.bin.ecc";
$build_pnor_command .= " --binFile_OCC $occ_binary_filename.ecc";
$build_pnor_command .= " --binFile_ATTR_PERM $scratch_dir/attr_perm.bin.ecc";
$build_pnor_command .= " --binFile_FIRDATA $scratch_dir/firdata.bin.ecc";
$build_pnor_command .= " --binFile_CAPP $scratch_dir/cappucode.bin.ecc";
$build_pnor_command .= " --binFile_SECBOOT $scratch_dir/secboot.bin.ecc";
$build_pnor_command .= " --binFile_VERSION $scratch_dir/openpower_pnor_version.bin";
$build_pnor_command .= " --binFile_IMA_CATALOG $scratch_dir/ima_catalog.bin.ecc";
# These are optional sections not tied to a specific processor family type
if (checkForPnorPartition("DJVPD", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_DJVPD $scratch_dir/djvpd_fill.bin.ecc";
}
if (checkForPnorPartition("CVPD", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_CVPD $scratch_dir/cvpd.bin.ecc";
}
if (checkForPnorPartition("MVPD", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_MVPD $scratch_dir/mvpd_fill.bin.ecc";
}
if (checkForPnorPartition("EECACHE", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_EECACHE $scratch_dir/eecache_fill.bin.ecc";
}
if (checkForPnorPartition("OCMBFW", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_OCMBFW $ocmbfw_binary_filename";
}
# Add sections based on processor family type
if ($release eq "p9"){
$build_pnor_command .= " --binFile_WOFDATA $wofdata_binary_filename" if -e $wofdata_binary_filename;
if (checkForPnorPartition("MEMD", $parsed_pnor_layout))
{
$build_pnor_command .= " --binFile_MEMD $memddata_binary_filename" if -e $memddata_binary_filename;
}
$build_pnor_command .= " --binFile_HDAT $hdat_binary_filename" if -e $hdat_binary_filename;
}
if ($release eq "p8"){
$build_pnor_command .= " --binFile_SBEC $scratch_dir/$sbec_binary_filename";
$build_pnor_command .= " --binFile_WINK $scratch_dir/$wink_binary_filename";
} else {
$build_pnor_command .= " --binFile_SBKT $scratch_dir/SBKT.bin";
$build_pnor_command .= " --binFile_HCODE $scratch_dir/$wink_binary_filename";
$build_pnor_command .= " --binFile_HBBL $scratch_dir/hbbl.bin.ecc";
$build_pnor_command .= " --binFile_RINGOVD $scratch_dir/ringOvd.bin";
$build_pnor_command .= " --binFile_HB_VOLATILE $scratch_dir/guard.bin.ecc";
}
$build_pnor_command .= " --fpartCmd \"fpart\"";
$build_pnor_command .= " --fcpCmd \"fcp\"";
print "###############################";
run_command("$build_pnor_command");
#END MAIN
#-------------------------------------------------------------------------
# Print this script's usage text.
# NOTE(review): the here-doc body is empty, so this currently prints nothing —
# the usage text appears to have been lost. The bare ';' after the terminator
# is a harmless empty statement.
sub usage {
print <<"ENDUSAGE";
ENDUSAGE
;
}
# Stub for config-file parsing — currently a no-op.
# NOTE(review): confirm whether this is intentionally empty or unfinished.
sub parse_config_file {
}
#trim_string takes one string as input, trims leading and trailing whitespace
# before returning that string
# trim_string: return a copy of the given string with leading and trailing
# whitespace (including newlines/tabs) removed. The argument itself is not
# modified.
sub trim_string {
    my ($str) = @_;
    for ($str) {
        s/\A\s+//;    # strip leading whitespace
        s/\s+\z//;    # strip trailing whitespace
    }
    return $str;
}
# run_command: echo the command, run it via system(), and die if it exits
# nonzero. Returns the raw system() status (0 on success).
sub run_command {
    my ($command) = @_;
    print "$command\n";
    my $status = system($command);
    die "Error running command: $command. Nonzero return code of ($status) returned.\n"
        if $status != 0;
    return $status;
}
# Function to check if a partition exists in the PNOR XML Layout File
# checkForPnorPartition: return 1 if a partition whose eyeCatch string equals
# $section exists in the parsed PNOR XML layout, 0 otherwise.
# $parsed_layout is the XML::Simple-style hash with a {section} array, each
# entry carrying its eyeCatch as a one-element array.
sub checkForPnorPartition {
    my ($section, $parsed_layout) = @_;
    for my $sectionEl (@{ $parsed_layout->{section} }) {
        return 1 if $sectionEl->{eyeCatch}[0] eq $section;
    }
    return 0;
}
| open-power/pnor | create_pnor_image.pl | Perl | apache-2.0 | 10,320 |
# SOAP::WSDL-generated element class for the CampaignCriterionService "query"
# request message (AWQL). Generated code (see POD: "Generated by SOAP::WSDL")
# — regenerate rather than hand-edit.
package Google::Ads::AdWords::v201809::CampaignCriterionService::query;
use strict;
use warnings;
{ # BLOCK to scope variables
# XML namespace this element belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' }
__PACKAGE__->__set_name('query');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
SOAP::WSDL::XSD::Typelib::Element
Google::Ads::SOAP::Typelib::ComplexType
);
# This element carries no XML attributes, so no attribute class is attached.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Single 'query' property (xsd:string); :ATTR generates the get_query accessor.
my %query_of :ATTR(:get<query>);
__PACKAGE__->_factory(
[ qw( query
) ],
{
'query' => \%query_of,
},
{
'query' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
},
{
'query' => 'query',
}
);
} # end BLOCK
} # end of BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::CampaignCriterionService::query
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
query from the namespace https://adwords.google.com/api/adwords/cm/v201809.
Returns the list of campaign criteria that match the query. @param query The SQL-like AWQL query string. @return A list of campaign criteria. @throws ApiException if problems occur while parsing the query or fetching campaign criteria.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * query
$element->set_query($data);
$element->get_query();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201809::CampaignCriterionService::query->new($data);
Constructor. The following data structure may be passed to new():
{
query => $some_value, # string
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/CampaignCriterionService/query.pm | Perl | apache-2.0 | 1,956 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2018] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
Bio::EnsEMBL::Analysis::Tools::CdnaUpdateTranscriptFilter
=head1 SYNOPSIS
my $filter = new Bio::EnsEMBL::Analysis::Tools::CdnaUpdateTranscriptFilter
new->(
-best_in_genome => 1,
-reject_processed_pseudos => 1,
-coverage => 80,
-percent_id => 90,
-verbosity => 0,
);
my @filtered_results = @{$filter->filter_results(\@results)};
=head1 DESCRIPTION
This is a module used for filtering Exonerate transcripts
=cut
package Bio::EnsEMBL::Analysis::Tools::CdnaUpdateTranscriptFilter;
use strict;
use warnings;
use Bio::EnsEMBL::Root;
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use vars qw (@ISA);
@ISA = qw(Bio::EnsEMBL::Root);
=head2 new
Returntype: Bio::EnsEMBL::Analysis::Tools::CdnaUpdateTranscriptFilter
Exceptions: none
Example :
=cut
# Constructor. Accepts -COVERAGE, -PERCENT_ID, -BEST_IN_GENOME,
# -REJECT_PROCESSED_PSEUDOS and -VERBOSITY (see POD above); any option not
# supplied keeps its accessor default.
sub new {
    my ($class, @args) = @_;
    my $self = $class->SUPER::new(@args);

    # Raise the Ensembl exception verbosity threshold to WARNING.
    # (Fixed: was called as &verbose(...) — the leading ampersand is legacy
    # call syntax that bypasses prototypes and is discouraged.)
    verbose('WARNING');

    my ($min_coverage,
        $min_percent,
        $best_in_genome,
        $rpp,
        $verbosity) =
            rearrange([
                'COVERAGE',
                'PERCENT_ID',
                'BEST_IN_GENOME',
                'REJECT_PROCESSED_PSEUDOS',
                'VERBOSITY',], @args);

    ######################
    #SETTING THE DEFAULTS#
    ######################
    $self->min_coverage($min_coverage)     if defined $min_coverage;
    $self->min_percent($min_percent)       if defined $min_percent;
    $self->best_in_genome($best_in_genome) if defined $best_in_genome;
    $self->reject_processed_pseudos($rpp)  if defined $rpp;
    $self->verbosity($verbosity)           if defined $verbosity;

    return $self;
}
#filter methods
=head2 filter_results
Arg [1] : Bio::EnsEMBL::Analysis::Tools::DefaultExonerateFilter
Arg [2] : arrayref of Trancripts
Function : filter the given Transcripts in the tried and trusted manner
Returntype: arrayref
Exceptions: throws if passed nothing or not an arrayref
Example :
=cut
# Pass 1 gathers per-hit statistics (coverage, percent_id, exon count,
# splicing, longest intron) grouped by evidence id; pass 2 sorts each group by
# coverage / exon count / percent_id and accepts or rejects every hit against
# the configured thresholds, pushing the keepers onto @good_matches.
# NOTE(review): %selected_matches, $best_has_been_seen, $label and $strand are
# declared/computed below but never used afterwards — dead code candidates.
sub filter_results {
my ($self, $transcripts) = @_;
# results are Bio::EnsEMBL::Transcripts with exons and supp_features
my @good_matches;
my %matches;
my $printing = $self->verbosity;
TRAN:
foreach my $transcript (@$transcripts ){
my $coverage = $self->_get_transcript_coverage($transcript);
my $percent_id = $self->_get_transcript_percent_id($transcript);
#identify the longest intron
my @exons = @{$transcript->get_all_Exons()};
my $num_exons = scalar @exons;
my @sorted_exons = sort {$a->start <=> $b->start} @exons;
my $max_intron = 0;
my $short_intron = 0; #flag to check that not all introns are long
for my $a (0 .. $#sorted_exons - 1){
my $intron_len = ($sorted_exons[$a+1]->start - $sorted_exons[$a]->end) - 1;
if ($intron_len > $max_intron){
$max_intron = $intron_len;
}
# any intron under 250 kb counts as "short" for the all-long-introns check
if ($intron_len < 250000){
$short_intron = 1;
}
}
my $id = $self->_get_transcript_evidence_id($transcript);
push @{$matches{$id}}, {
transcript => $transcript,
coverage => $coverage,
percent_id => $percent_id,
num_exons => $num_exons,
is_spliced => $self->_transcript_is_spliced($transcript),
max_intron => $max_intron,
short_intron => $short_intron,
};
}
my %matches_sorted_by_coverage;
my %selected_matches;
QUERY:
foreach my $query_id ( keys( %matches ) ){
# best hit first: coverage, then exon count, then percent_id (all descending)
@{$matches_sorted_by_coverage{$query_id}} =
sort { $b->{coverage} <=> $a->{coverage} or
$b->{num_exons} <=> $a->{num_exons} or
$b->{percent_id} <=> $a->{percent_id} } @{$matches{$query_id}};
my $max_coverage;
my $perc_id_of_best;
my $count = 0;
my $splices_elsewhere = 0;
my $best_has_been_seen = 0;
#print STDERR "####################\n";
#print STDERR "Matches for $query_id:\n";
TRANSCRIPT:
foreach my $hit ( @{$matches_sorted_by_coverage{$query_id}} ){
my $coverage = $hit->{coverage};
my $percent_id = $hit->{percent_id};
my $is_spliced = $hit->{is_spliced};
unless ($max_coverage){
$max_coverage = $coverage;
}
unless ( $perc_id_of_best ){
$perc_id_of_best = $percent_id;
}
#sd3
#single exon genes (ie mouse olfactory receptors) being thrown out in favour of
#low percentage id multi-exon genes of equivalent coverage
#this loop checks for high quality matches in multi-exon hits before the splices_elsewhere flag is set
#so if have good coverage & percentage id:
if ( (($coverage >= $self->min_coverage && $percent_id >= $self->min_percent)
#or have high coverage & a slightly lower percentage id
|| ($coverage >= (1 + 5/100) * $self->min_coverage &&
$percent_id >= (1 - 3/100) * $self->min_percent))
&& $is_spliced){
$splices_elsewhere = 1;
last;
}
}
# second sweep: decide accept/reject for every hit of this evidence id
foreach my $hit ( @{$matches_sorted_by_coverage{$query_id}} ){
$count++;
my ($accept, $label);
my $transcript = $hit->{transcript};
my $strand = $transcript->strand;
my $coverage = $hit->{coverage};
my $percent_id = $hit->{percent_id};
my $is_spliced = $hit->{is_spliced};
my $max_intron = $hit->{max_intron};
my $short_intron = $hit->{short_intron};
my $num_exons = $hit->{num_exons};
if ( $count == 1 ){
$label = 'best_match';
} elsif ( $count > 1 &&
$splices_elsewhere &&
! $is_spliced) {
$label = 'potential_processed_pseudogene';
} else{
$label = $count;
}
# if ( $count == 1 && $is_spliced ){ #old way of doing it
# $splices_elsewhere = 1;
# }
if ( $self->best_in_genome ){
# we keep the hit with the best coverage...
if ($coverage == $max_coverage &&
# as long as it has coverage/percent_id above limits or...
(($coverage >= $self->min_coverage &&
$percent_id >= $self->min_percent)
||
# ...if coverage is significantly greater than the
# specified minimum, then we are willing to accept
# hits that have a percent_id just below the specified
# minimum
($coverage >= (1 + 5/100) * $self->min_coverage &&
$percent_id >= (1 - 3/100) * $self->min_percent))) {
if ( $self->reject_processed_pseudos
&& $count > 1
&& $splices_elsewhere
&& ! $is_spliced) {
$accept = 'NO';
if ($printing){
print "rpp $query_id\n";
}
}
elsif (($short_intron == 0) && ($num_exons > 1)){
#all long introns
$accept = 'NO';
if ($printing){
print "only long introns $query_id\n";
}
}
#Only accept long intron hits with very high coverage and percent_id
elsif($max_intron > 250000 ){
if (($coverage >= 98) && ($percent_id >= 98)){
$accept = 'YES';
push( @good_matches, $transcript);
#print "accept: intron $max_intron coverage $coverage \%id $percent_id $query_id\n";
#find out which introns are long - ie are they the first and last introns?
# my @exons = @{$transcript->get_all_Exons()};
# my @sorted_exons = sort {$a->start <=> $b->start} @exons;
# my $num_introns = scalar @exons - 1;
#
# for my $a (0 .. $#sorted_exons - 1){
# my $intron_len = ($sorted_exons[$a+1]->start - $sorted_exons[$a]->end) - 1;
# if ($intron_len > 250000){
# print "intron position ".($a + 1)." out of $num_introns\n";
# }
# }
}else{
$accept = 'NO';
if ($printing){
print "reject: intron $max_intron coverage $coverage \%id $percent_id $query_id\n";
}
}
}else{
$accept = 'YES';
push( @good_matches, $transcript);
}
}
else{
$accept = 'NO';
if ($printing){
print "max_coverage $max_coverage coverage $coverage \%id $percent_id $query_id\n";
}
}
}
else{
# we keep anything which is within the 2% of the best score...
if ($coverage >= (0.98 * $max_coverage) &&
# as long as it has coverage/percent_id above limits or...
(($coverage >= $self->min_coverage &&
$percent_id >= $self->min_percent)
||
# ...if coverage is significantly greater than the
# specified minimum, then we are willing to accept
# hits that have a percent_id just below the specified
# minimum
($coverage >= (1 + 5/100) * $self->min_coverage &&
$percent_id >= (1 - 3/100) * $self->min_percent))) {
############################################################
# non-best matches are kept only if they are not unspliced with the
# best match being spliced - otherwise they could be processed pseudogenes
if ( $self->reject_processed_pseudos &&
$count > 1 &&
$splices_elsewhere &&
! $is_spliced) {
$accept = 'NO';
}
else{
$accept = 'YES';
push( @good_matches, $transcript);
}
}
else{
$accept = 'NO';
}
}
}
}
return \@good_matches;
}
############################################################
# _get_transcript_coverage: coverage for a transcript, preferring the
# hcoverage of its transcript-level supporting feature; falls back to the
# score of the first exon's supporting feature when no transcript-level
# evidence (or no hcoverage) is available.
sub _get_transcript_coverage {
    my ($self, $tran) = @_;
    my ($evi) = @{ $tran->get_all_supporting_features };
    if ($evi && defined $evi->hcoverage) {
        return $evi->hcoverage;
    }
    my ($exon_evi) = @{ $tran->get_all_Exons->[0]->get_all_supporting_features };
    return $exon_evi->score;
}
############################################################
# _get_transcript_percent_id: percent identity of the transcript's supporting
# evidence, falling back to the first exon's supporting feature when there is
# no transcript-level evidence.
sub _get_transcript_percent_id {
    my ($self, $tran) = @_;
    my ($sf) = @{ $tran->get_all_supporting_features };
    ($sf) = @{ $tran->get_all_Exons->[0]->get_all_supporting_features }
        unless $sf;
    return $sf->percent_id;
}
############################################################
# _get_transcript_evidence_id: hit sequence name (hseqname) of the
# transcript's supporting evidence, falling back to the first exon's
# supporting feature when no transcript-level evidence exists.
sub _get_transcript_evidence_id {
    my ($self, $tran) = @_;
    my ($sf) = @{ $tran->get_all_supporting_features };
    ($sf) = @{ $tran->get_all_Exons->[0]->get_all_supporting_features }
        unless $sf;
    return $sf->hseqname;
}
############################################################
# _transcript_is_spliced: 1 if the transcript has at least one real intron,
# 0 otherwise. Introns of 9 bp or less are treated as frameshift gaps, not
# genuine splicing.
sub _transcript_is_spliced {
    my ($self, $tran) = @_;
    my @exons = sort { $a->start <=> $b->start } @{ $tran->get_all_Exons };
    foreach my $i (0 .. $#exons - 1) {
        my $gap = $exons[$i + 1]->start - $exons[$i]->end - 1;
        return 1 if $gap > 9;
    }
    return 0;
}
############################################################
# Simple get/set accessors for the filter configuration. Setters store into
# the object hash; getters fall back to undef for the two thresholds and to
# 0 for the three flags.
sub min_coverage {
    my ($self, @args) = @_;
    $self->{'_min_coverage'} = $args[0] if @args;
    return exists $self->{'_min_coverage'} ? $self->{'_min_coverage'} : undef;
}

sub min_percent {
    my ($self, @args) = @_;
    $self->{'_min_percent'} = $args[0] if @args;
    return exists $self->{'_min_percent'} ? $self->{'_min_percent'} : undef;
}

sub best_in_genome {
    my ($self, @args) = @_;
    $self->{'_best_in_genome'} = $args[0] if @args;
    return exists $self->{'_best_in_genome'} ? $self->{'_best_in_genome'} : 0;
}

sub reject_processed_pseudos {
    my ($self, @args) = @_;
    $self->{'_reject_processed_pseudos'} = $args[0] if @args;
    return exists $self->{'_reject_processed_pseudos'} ? $self->{'_reject_processed_pseudos'} : 0;
}

sub verbosity {
    my ($self, @args) = @_;
    $self->{'_verbosity'} = $args[0] if @args;
    return exists $self->{'_verbosity'} ? $self->{'_verbosity'} : 0;
}
1;
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Tools/CdnaUpdateTranscriptFilter.pm | Perl | apache-2.0 | 12,429 |
# Paws (Perl AWS SDK) response class for IAM CreateServiceSpecificCredential
# (auto-generated; see the POD below).
package Paws::IAM::CreateServiceSpecificCredentialResponse;
use Moose;
# The newly created credential; per the POD below, this is the only time the
# password in it is available.
has ServiceSpecificCredential => (is => 'ro', isa => 'Paws::IAM::ServiceSpecificCredential');
# NOTE(review): presumably the AWS request id of the API call — not
# documented in the POD; confirm against the Paws response conventions.
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::IAM::CreateServiceSpecificCredentialResponse
=head1 ATTRIBUTES
=head2 ServiceSpecificCredential => L<Paws::IAM::ServiceSpecificCredential>
A structure that contains information about the newly created
service-specific credential.
This is the only time that the password for this credential set is
available. It cannot be recovered later. Instead, you will have to
reset the password with ResetServiceSpecificCredential.
=head2 _request_id => Str
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/IAM/CreateServiceSpecificCredentialResponse.pm | Perl | apache-2.0 | 742 |
#!/software/bin/perl
use warnings;
use strict;
use Bio::EnsEMBL::Compara::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Compara::Graph::NewickParser;
use Getopt::Long;
#
# Script to take a full species tree and a set of required species taken from a database
# and prune it to leave only the required species.
#
# Command-line options (see usage() below).
my $help;
my $url;
my $tree_file;
my $output_taxon_file;
my $output_tree_file;

GetOptions('help'                     => \$help,
           'url=s'                    => \$url,
           'tree_file=s'              => \$tree_file,
           'taxon_output_filename=s'  => \$output_taxon_file,
           'njtree_output_filename=s' => \$output_tree_file,
          );

if ($help) { usage(); }

# Fixed: direct method-call syntax instead of indirect object notation
# ("new Class(...)"), which Perl can misparse.
my $compara_dba = Bio::EnsEMBL::Compara::DBSQL::DBAdaptor->new(-url => $url)
    or die "Must define a url";

# Write the full species tree (njtree format) if requested.
if (defined $output_taxon_file) {
    my $species_tree = $compara_dba->get_SpeciesTreeAdaptor()->create_species_tree();
    # Fixed: three-arg open with a lexical filehandle (was a bareword handle
    # with two-arg open); write errors are caught at close.
    open my $taxon_fh, '>', $output_taxon_file or die "$!";
    print {$taxon_fh} $species_tree->newick_format('njtree');
    close $taxon_fh or die "$!";
}

# Prune the supplied newick tree down to the species in the database.
if (defined $output_tree_file) {
    # Fixed: slurp the tree file directly instead of shelling out to `cat`
    # (avoids the shell and fails loudly if the file is missing).
    my $newick = do {
        open my $tree_fh, '<', $tree_file or die "$!";
        local $/;
        <$tree_fh>;
    };
    my $blength_tree =
        Bio::EnsEMBL::Compara::Graph::NewickParser::parse_newick_into_tree($newick);
    my $pruned_tree = $compara_dba->get_SpeciesTreeAdaptor()->prune_tree($blength_tree);
    open my $tree_out_fh, '>', $output_tree_file or die "$!";
    print {$tree_out_fh} $pruned_tree->newick_simple_format() . "\n";
    close $tree_out_fh or die "$!";
}
# Print the usage text to STDERR and exit with a failure status.
sub usage {
    print STDERR <<'END_USAGE';
Specifically used in the LowCoverageGenomeAlignment pipeline
prune_tree.pl [options]
 -help : print this help
 -url <url> : connect to compara at url and use 
 -tree_file <file> : read in full newick tree from file
 -taxon_output_filename <file> : filename to write taxon_ids to
 -njtree_output_filename <file> : filename to write pruned treee to
END_USAGE
    exit(1);
}
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-compara/scripts/pipeline/prune_tree.pl | Perl | apache-2.0 | 1,872 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated Google Ads API request class for
# ReachPlanService.GenerateProductMixIdeas (v10).
package Google::Ads::GoogleAds::V10::Services::ReachPlanService::GenerateProductMixIdeasRequest;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Constructor: copies the recognised request fields from $args.
# NOTE(review): budgetMicros presumably follows the Ads API convention of
# millionths of the account currency unit — confirm against the API reference.
sub new {
my ($class, $args) = @_;
my $self = {
budgetMicros => $args->{budgetMicros},
currencyCode => $args->{currencyCode},
customerId => $args->{customerId},
plannableLocationId => $args->{plannableLocationId},
preferences => $args->{preferences}};
# Delete the unassigned fields in this object for a more concise JSON payload
remove_unassigned_fields($self, $args);
bless $self, $class;
return $self;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/ReachPlanService/GenerateProductMixIdeasRequest.pm | Perl | apache-2.0 | 1,279 |
# VMOMI (vSphere SOAP API) type class for the CreateTaskAction data object.
package VMOMI::CreateTaskAction;
use parent 'VMOMI::Action';
use strict;
use warnings;
# Ancestor chain reported to the VMOMI (de)serialisation machinery.
our @class_ancestors = (
'Action',
'DynamicData',
);
# Per-member metadata: [name, type, numeric flags]; the meaning of the flags
# is defined by the VMOMI serialiser, not visible here.
# NOTE(review): taskTypeId's type is undef — confirm this matches the
# generator's convention for untyped/string members.
our @class_members = (
['taskTypeId', undef, 0, ],
['cancelable', 'boolean', 0, ],
);
sub get_class_ancestors {
return @class_ancestors;
}
# Own members appended after the inherited ones.
sub get_class_members {
my $class = shift;
my @super_members = $class->SUPER::get_class_members();
return (@super_members, @class_members);
}
1;
| stumpr/p5-vmomi | lib/VMOMI/CreateTaskAction.pm | Perl | apache-2.0 | 464 |
#!/usr/bin/env perl
# Dump every Route53 hosted zone's resource record sets as pretty-printed
# JSON, one document per zone.
#
# Fixed: the first line was corrupted to "V#!/usr/bin/env perl" — the stray
# leading "V" broke both the shebang and Perl compilation.
use strict;
use warnings;
use utf8;
use WebService::Amazon::Route53;
use JSON;

# NOTE(review): KEY_ID / SECRET_KEY are placeholders; substitute real
# credentials (or load them from the environment) before running.
my $client = WebService::Amazon::Route53->new(id  => 'KEY_ID',
                                              key => 'SECRET_KEY');

# NOTE(review): only the first page (up to 100 zones) is fetched; follow the
# response's pagination marker if more zones may exist.
my $response = $client->list_hosted_zones(max_items => 100);

my $zones = $response->{'hosted_zones'} || [];
for my $zone (@$zones) {
    my $zonedetails = $client->list_resource_record_sets(zone_id => $zone->{'id'});
    # Fixed: use the JSON module that is actually loaded ("use JSON") rather
    # than JSON::XS, which is only available when JSON's XS backend happens
    # to be installed.
    print JSON->new->utf8->pretty->encode($zonedetails);
}
| swelljoe/route53-export-json | route53-export-json.pl | Perl | apache-2.0 | 533 |
#!/usr/bin/env perl
# Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script will look through all listed databases for
# database names which match a given regexp.
# This version should be ok if one or more of the given database
# servers are not available.
use strict;
use warnings;
use DBI;
# seconds before we give up on database
my $timeout = 5;
# host:port:user:password:dbname locators for every server to scan
# (password and dbname fields deliberately left empty).
my @all_locators = qw {
genebuild1:3306:ensro::
genebuild2:3306:ensro::
genebuild3:3306:ensro::
genebuild4:3306:ensro::
genebuild5:3306:ensro::
genebuild6:3306:ensro::
genebuild6:3307:ensro::
compara1:3306:ensro::
compara2:3306:ensro::
compara3:3306:ensro::
mart1:3306:ensro::
mart2:3306:ensro::
ens-genomics1:3306:ensro::
ens-genomics2:3306:ensro::
ens-research:3306:ensro::
ens-research:3309:ensro::
ensdb-1-11:3319:ensro::
ensdb-1-11:3317:ensro::
ens-staging:3306:ensro::
ens-livemirror:3306:ensro::
};
# first CLI argument: regexp to match database names against (or "-list")
my $pattern=shift;
# any remaining argument switches on size reporting
my $size = 0;
# NOTE(review): %sizes is declared (and reset per server below) but never
# populated or read — dead code.
my %sizes;
if( @ARGV ) {
$size = 1;
}
if( !$pattern ) {
printf( "You need to supply a regexp as argument.\n" );
printf( "Use -list if you want to see which databases are configured.\n" );
printf( "If you have any parameters after the regexp, the program will print\n" );
printf( " database sizes as well.\n" );
printf( "\nExample: search_dbs.pl \"^stabenau\" -sizes | sort -knr3\n" );
exit;
}
my $list = 0;
if( $pattern eq "-list" ) {
$list = 1;
}
for my $loc ( @all_locators ) {
my @elems = split( ":", $loc );
my @dbnames = ();
%sizes = ();
my $dsn = sprintf( "DBI:mysql:host=%s;port=%s", $elems[0], $elems[1] );
# alarm-based timeout so one unreachable server cannot hang the whole scan
$SIG{ALRM} = sub{die("timeout");};
eval {
alarm $timeout;
my $db = DBI->connect( $dsn, $elems[2], $elems[3], { RaiseError => 1 } );
my $res = $db->selectall_arrayref( "show databases" );
@dbnames = map { $_->[0] } @$res;
$db->disconnect();
alarm 0;
};
if( !@dbnames ) {
print STDERR "$loc NOT OK\n";
} else {
if( $size ) {
for my $dbname ( @dbnames ) {
if( $dbname =~ /$pattern/ ) {
eval {
alarm $timeout;
my $db = DBI->connect( $dsn, $elems[2], $elems[3], { RaiseError => 1, PrintError=> 0 } );
$db->do( "use $dbname" );
my $t_status = $db->selectall_arrayref( "show table status" );
# NOTE(review): inner $size (a byte total summed from SHOW TABLE STATUS
# columns 6 and 8 — presumably data_length and index_length; confirm
# against the server's column order) shadows the outer $size flag.
my $size = 0;
map { $size += $_->[6]; $size += $_->[8] } @$t_status;
print "$loc $dbname $size\n";
$db->disconnect();
alarm 0;
};
if( $@ ) {
print( "Problem on $loc $dbname.\n ", $@ );
}
}
}
} else {
if( $list ) {
print STDERR "$loc ok\n";
} else {
for my $dbname ( @dbnames ) {
if( $dbname =~ /$pattern/ ) {
print "$loc $dbname\n";
}
}
}
}
}
}
| willmclaren/ensembl | misc-scripts/db/search_dbs.pl | Perl | apache-2.0 | 3,428 |
# VMOMI (vSphere SOAP API) type class for ArrayOfDvsProfile — a sequence of
# DvsProfile objects.
package VMOMI::ArrayOfDvsProfile;
use parent 'VMOMI::ComplexType';
use strict;
use warnings;
# No ancestors beyond the ComplexType base class.
our @class_ancestors = ( );
# Per-member metadata: [name, type, numeric flags]; the flags' meaning is
# defined by the VMOMI serialiser, not visible here.
our @class_members = (
['DvsProfile', 'DvsProfile', 1, 1],
);
sub get_class_ancestors {
return @class_ancestors;
}
# Own members appended after the inherited ones.
sub get_class_members {
my $class = shift;
my @super_members = $class->SUPER::get_class_members();
return (@super_members, @class_members);
}
1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfDvsProfile.pm | Perl | apache-2.0 | 408 |
% Ciao Prolog module (from Ciao's pending-bugs test suite); the module
% interface is left fully open via module(_,_,_).
:- module(_,_,_).
:-use_module(dirs_manual).
% NOTE(review): dirs_manual is pulled in both as a regular module and as a
% compilation module — confirm both are intended.
:-load_compilation_module(dirs_manual).
:-use_module(lpdoclib(autodoc)).
% main/0: prints the atom 'hola'.
main :-
display(hola).
| leuschel/ecce | www/CiaoDE/ciao/bugs/Pending/modules/lcm2.pl | Perl | apache-2.0 | 146 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::DBSQL::AssemblyExceptionFeatureAdaptor
=head1 SYNOPSIS
my $assembly_exception_feature_adaptor =
$database_adaptor->get_AssemblyExceptionFeatureAdaptor();
@assembly_exception_features =
$assembly_exception_feature_adaptor->fetch_all_by_Slice($slice);
=head1 DESCRIPTION
Assembly Exception Feature Adaptor - database access for assembly
exception features.
=head1 METHODS
=cut
package Bio::EnsEMBL::DBSQL::AssemblyExceptionFeatureAdaptor;
use strict;
use warnings;
# NOTE(review): uninitialized warnings are disabled module-wide — presumably
# because fetched rows can contain NULLs; confirm this is still needed.
no warnings qw(uninitialized);
use Bio::EnsEMBL::DBSQL::BaseAdaptor;
use Bio::EnsEMBL::DBSQL::BaseFeatureAdaptor;
use Bio::EnsEMBL::AssemblyExceptionFeature;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Utils::Cache;
our @ISA = qw(Bio::EnsEMBL::DBSQL::BaseAdaptor);
# set the number of slices you'd like to cache
our $ASSEMBLY_EXCEPTION_FEATURE_CACHE_SIZE = 100;
=head2 new
Arg [1] : list of args @args
Superclass constructor arguments
Example : none
Description: Constructor which just initializes internal cache structures
Returntype : Bio::EnsEMBL::DBSQL::AssemblyExceptionFeatureAdaptor
Exceptions : none
Caller : implementing subclass constructors
Status : Stable
=cut
# Constructor: delegates to the superclass, then attaches a size-bounded LRU
# cache (Bio::EnsEMBL::Utils::Cache) used by fetch_all_by_Slice().
sub new {
    my ($caller, @args) = @_;
    my $class = ref($caller) || $caller;
    my $self  = $class->SUPER::new(@args);

    # LRU cache of per-slice results, bounded by
    # $ASSEMBLY_EXCEPTION_FEATURE_CACHE_SIZE entries.
    tie my %slice_cache, 'Bio::EnsEMBL::Utils::Cache',
        $ASSEMBLY_EXCEPTION_FEATURE_CACHE_SIZE;
    $self->{'_aexc_slice_cache'} = \%slice_cache;

    return $self;
}
=head2 fetch_all
Arg [1] : none
Example : my @axfs = @{$axfa->fetch_all()};
Description: Retrieves all assembly exception features which are in the
database and builds internal caches of the features.
Returntype : reference to list of Bio::EnsEMBL::AssemblyExceptionFeatures
Exceptions : none
Caller : fetch_by_dbID, fetch_by_Slice
Status : Stable
=cut
# Loads every assembly_exception row for this species once, builds TWO
# reciprocal AssemblyExceptionFeatures per row (exception side and "REF"
# side), and caches everything in _aexc_cache / _aexc_dbID_cache.
sub fetch_all {
my $self = shift;
# this is the "global" cache for all assembly exception features in the db
if(defined($self->{'_aexc_cache'})) {
return $self->{'_aexc_cache'};
}
my $statement = qq(
SELECT ae.assembly_exception_id,
ae.seq_region_id,
ae.seq_region_start,
ae.seq_region_end,
ae.exc_type,
ae.exc_seq_region_id,
ae.exc_seq_region_start,
ae.exc_seq_region_end,
ae.ori
FROM assembly_exception ae,
coord_system cs,
seq_region sr
WHERE cs.species_id = ?
AND sr.coord_system_id = cs.coord_system_id
AND sr.seq_region_id = ae.seq_region_id);
my $sth = $self->prepare($statement);
$sth->bind_param( 1, $self->species_id(), SQL_INTEGER );
$sth->execute();
my ($ax_id, $sr_id, $sr_start, $sr_end,
$x_type, $x_sr_id, $x_sr_start, $x_sr_end, $ori);
$sth->bind_columns(\$ax_id, \$sr_id, \$sr_start, \$sr_end,
\$x_type, \$x_sr_id, \$x_sr_start, \$x_sr_end, \$ori);
my @features;
my $sa = $self->db()->get_SliceAdaptor();
$self->{'_aexc_dbID_cache'} = {};
while($sth->fetch()) {
my $slice = $sa->fetch_by_seq_region_id($sr_id);
my $x_slice = $sa->fetch_by_seq_region_id($x_sr_id);
# each row creates TWO features, each of which has alternate_slice
# pointing to the "other" one
my $a = Bio::EnsEMBL::AssemblyExceptionFeature->new
('-dbID' => $ax_id,
'-start' => $sr_start,
'-end' => $sr_end,
'-strand' => 1,
'-adaptor' => $self,
'-slice' => $slice,
'-alternate_slice' => $x_slice->sub_Slice($x_sr_start, $x_sr_end),
'-type' => $x_type);
push @features, $a;
# only the first (non-REF) feature of the pair is indexed by dbID
$self->{'_aexc_dbID_cache'}->{$ax_id} = $a;
push @features, Bio::EnsEMBL::AssemblyExceptionFeature->new
('-dbID' => $ax_id,
'-start' => $x_sr_start,
'-end' => $x_sr_end,
'-strand' => 1,
'-adaptor' => $self,
'-slice' => $x_slice,
'-alternate_slice' => $slice->sub_Slice($sr_start, $sr_end),
'-type' => "$x_type REF" );
}
$sth->finish();
$self->{'_aexc_cache'} = \@features;
return \@features;
}
=head2 fetch_by_dbID
Arg [1] : int $dbID
Example : my $axf = $axfa->fetch_by_dbID(3);
Description: Retrieves a single assembly exception feature via its internal
identifier. Note that this only retrieves one of the two
assembly exception features which are represented by a single
row in the assembly_exception table.
Returntype : Bio::EnsEMBL::AssemblyExceptionFeature
Exceptions : none
Caller : general
Status : Stable
=cut
# Look up a single AssemblyExceptionFeature by its internal identifier.
# Only the non-"REF" member of each feature pair is indexed (see fetch_all).
sub fetch_by_dbID {
    my ($self, $dbID) = @_;
    # The dbID index is built as a side effect of fetch_all(); trigger a
    # full load the first time anything is requested.
    $self->fetch_all() unless exists $self->{'_aexc_dbID_cache'};
    return $self->{'_aexc_dbID_cache'}->{$dbID};
}
=head2 fetch_all_by_Slice
Arg [1] : Bio::EnsEMBL::Slice $slice
Example : my @axfs = @{$axfa->fetch_all_by_Slice($slice)};
Description: Retrieves all assembly exception features which overlap the
provided slice. The returned features will be in coordinate
system of the slice.
Returntype : reference to list of Bio::EnsEMBL::AssemblyException features
Exceptions : none
Caller : Feature::get_all_alt_locations, general
Status : Stable
=cut
# Returns all assembly exception features overlapping $slice, remapped into
# the slice's coordinate system, with results memoised per slice name in the
# LRU cache created by new().
sub fetch_all_by_Slice {
my $self = shift;
my $slice = shift;
my $key= uc($slice->name());
# return features from the slice cache if present
if(exists($self->{'_aexc_slice_cache'}->{$key})) {
return $self->{'_aexc_slice_cache'}->{$key};
}
my $all_features = $self->fetch_all();
my $mcc = $self->db()->get_MetaCoordContainer();
my $css = $mcc->fetch_all_CoordSystems_by_feature_type('assembly_exception');
my @features;
my $ma = $self->db()->get_AssemblyMapperAdaptor();
# try every coord system that stores assembly exceptions; no mapper is
# needed when it matches the slice's own coord system
foreach my $cs (@$css) {
my $mapper;
if($cs->equals($slice->coord_system)) {
$mapper = undef;
} else {
$mapper = $ma->fetch_by_CoordSystems($cs,$slice->coord_system());
}
push @features, @{ $self->_remap($all_features, $mapper, $slice) };
}
$self->{'_aexc_slice_cache'}->{$key} = \@features;
return \@features;
}
#
# Given a list of features checks if they are in the correct coord system
# by looking at the first features slice. If they are not then they are
# converted and placed on the slice.
#
# Note that this is a re-implementation of a method with the same name in
# BaseFeatureAdaptor, and in contrast to the latter which maps features in
# place, this method returns a remapped copy of each feature. The reason for
# this is to get around conflicts with caching.
#
# Returns remapped COPIES of the given features on $slice (unlike the
# BaseFeatureAdaptor version, which remaps in place) to avoid corrupting the
# shared caches. Features that map to a gap, a different seq region, or
# outside the slice are dropped. The circular-chromosome branches mirror the
# coordinate arithmetic used elsewhere in the Ensembl core adaptors.
sub _remap {
my ($self, $features, $mapper, $slice) = @_;
# check if any remapping is actually needed
if(@$features && (!$features->[0]->isa('Bio::EnsEMBL::Feature') ||
$features->[0]->slice == $slice)) {
return $features;
}
# remapping has not been done, we have to do our own conversion from
# to slice coords
my @out;
my $slice_start = $slice->start();
my $slice_end = $slice->end();
my $slice_strand = $slice->strand();
my $slice_cs = $slice->coord_system();
my ($seq_region, $start, $end, $strand, $seq_region_name);
my $slice_seq_region = $slice->seq_region_name();
foreach my $f (@$features) {
# since feats were obtained in contig coords, attached seq is a contig
my $fslice = $f->slice();
if(!$fslice) {
throw("Feature does not have attached slice.\n");
}
my $fseq_region = $fslice->seq_region_name();
my $fcs = $fslice->coord_system();
if(!$slice_cs->equals($fcs)) {
# slice of feature in different coord system, mapping required
($seq_region, $start, $end, $strand) =
$mapper->fastmap($fseq_region,$f->start(),$f->end(),$f->strand(),$fcs);
# undefined start means gap
next if(!defined $start);
my $slice_adaptor = $self->db()->get_SliceAdaptor();
$seq_region_name = $slice_adaptor->fetch_by_seq_region_id($seq_region)->seq_region_name;
} else {
$start = $f->start();
$end = $f->end();
$strand = $f->strand();
$seq_region_name = $f->slice->seq_region_name();
}
# maps to region outside desired area
next if ($start > $slice_end) || ($end < $slice_start) ||
($slice_seq_region ne $seq_region_name);
# create new copies of successfully mapped features with shifted start,
# end and strand
my ($new_start, $new_end);
my $seq_region_len = $slice->seq_region_length();
if ($slice_strand == 1) { # Positive strand
$new_start = $start - $slice_start + 1;
$new_end = $end - $slice_start + 1;
if ($slice->is_circular()) {
# Handle circular chromosomes.
if ($new_start > $new_end) {
# Looking at a feature overlapping the chromosome origin.
if ($new_end > $slice_start) {
# Looking at the region in the beginning of the chromosome.
$new_start -= $seq_region_len;
}
if ($new_end < 0) {
$new_end += $seq_region_len;
}
} else {
if ( $slice_start > $slice_end && $new_end < 0) {
# Looking at the region overlapping the chromosome
# origin and a feature which is at the beginning of the
# chromosome.
$new_start += $seq_region_len;
$new_end += $seq_region_len;
}
}
} ## end if ($slice->is_circular...)
} else { # Negative strand
$new_start = $slice_end - $end + 1;
$new_end = $slice_end - $start + 1;
if ($slice->is_circular()) {
if ($slice_start > $slice_end) {
# slice spans origin or replication
if ($start >= $slice_start) {
$new_end += $seq_region_len;
$new_start += $seq_region_len if $end > $slice_start;
} elsif ($start <= $slice_end) {
# do nothing
} elsif ($end >= $slice_start) {
$new_start += $seq_region_len;
$new_end += $seq_region_len;
} elsif ($end <= $slice_end) {
$new_end += $seq_region_len if $new_end < 0;
} elsif ($start > $end) {
$new_end += $seq_region_len;
} else {
}
} else {
if ($start <= $slice_end and $end >= $slice_start) {
# do nothing
} elsif ($start > $end) {
if ($start <= $slice_end) {
$new_start -= $seq_region_len;
} elsif ($end >= $slice_start) {
$new_end += $seq_region_len;
} else {
}
}
}
}
} ## end else [ if ($slice_strand...)]
push @out, Bio::EnsEMBL::AssemblyExceptionFeature->new(
'-dbID' => $f->dbID,
'-start' => $new_start,
'-end' => $new_end,
'-strand' => $strand * $slice_strand,
'-adaptor' => $self,
'-slice' => $slice,
'-alternate_slice' => $f->alternate_slice,
'-type' => $f->type,
);
} # end foreach assembly exception
return \@out;
}
=head2 store
Arg[1] : Bio::EnsEMBL::AssemblyException $asx
Arg[2] : Bio::EnsEMBL::AssemblyException $asx2
Example : $asx = Bio::EnsEMBL::AssemblyExceptionFeature->new(...)
$asx2 = Bio::EnsEMBL::AssemblyExceptionFeature->new(...)
$asx_seq_region_id = $asx_adaptor->store($asx);
Description: This stores an assembly exception feature in the
assembly_exception table and returns the assembly_exception_id.
Needs 2 features: one pointing to the Assembly_exception, and the
other pointing to the region in the reference that is being mapped to.
Will check that start, end and type are defined, and that the alternate
slice is present as well.
Returntype : int
Exceptions : throw if assembly exception not defined (needs start, end,
type and alternate_slice) or if $asx is not a Bio::EnsEMBL::AssemblyException
Caller: general
Status: Stable
=cut
sub store{
# Store a linked pair of AssemblyExceptionFeatures ($asx on the exception
# region, $asx2 on the reference region it maps to) as one row of the
# assembly_exception table, then update the adaptor's caches and return
# the new assembly_exception_id.
my $self = shift;
my $asx = shift;
my $asx2 = shift;
if (! $asx->isa('Bio::EnsEMBL::AssemblyExceptionFeature')){
# NOTE(review): "a Ensembl assemlby" is a typo in the message text;
# left unchanged because callers/tests may match the exact string.
throw("$asx is not a Ensembl assemlby exception -- not stored");
}
#if already present, return ID in the database
my $db = $self->db();
if ($asx->is_stored($db)){
return $asx->dbID();
}
#do some checkings for the object
#at the moment, the orientation is always 1
if (! $asx->start || ! $asx->end ){
throw("Assembly exception does not have coordinates");
}
if ($asx->type !~ /PAR|HAP|PATCH_NOVEL|PATCH_FIX/){
throw("Only types of assembly exception features valid are PAR, HAP, PATCH_FIX or PATCH_NOVEL");
}
if ( !($asx->alternate_slice->isa('Bio::EnsEMBL::Slice')) ){
throw("Alternate slice should be a Bio::EnsEMBL::Slice");
}
#now check the other Assembly exception feature, the one pointing to the REF
# region
if (!$asx2->isa('Bio::EnsEMBL::AssemblyExceptionFeature')){
throw("$asx2 is not a Ensembl assemlby exception -- not stored");
}
if (! $asx2->start || ! $asx2->end ){
throw("Assembly exception does not have coordinates");
}
if ($asx2->type !~ /HAP REF|PAR REF|PATCH_NOVEL REF|PATCH_FIX REF/){
throw("$asx2 should have type of assembly exception features HAP REF, PAR REF, PATCH_FIX REF or PATCH_NOVEL REF");
}
if (! ($asx2->alternate_slice->isa('Bio::EnsEMBL::Slice')) ){
throw("Alternate slice should be a Bio::EnsEMBL::Slice");
}
#finally check that both features are pointing to each other slice
if ($asx->slice != $asx2->alternate_slice || $asx->alternate_slice != $asx2->slice){
throw("Slice and alternate slice in both features are not pointing to each other");
}
#prepare the SQL
# ori (orientation) is hard-coded to 1 -- see the comment above.
my $asx_sql = q{
INSERT INTO assembly_exception( seq_region_id, seq_region_start,
seq_region_end,
exc_type, exc_seq_region_id,
exc_seq_region_start, exc_seq_region_end,
ori)
VALUES (?, ?, ?, ?, ?, ?, ?, 1)
};
my $asx_st = $self->prepare($asx_sql);
my $asx_id = undef;
my $asx_seq_region_id;
my $asx2_seq_region_id;
# Keep the caller's objects: _pre_store may return transferred copies,
# but dbID/adaptor must be set on the originals.
my $original = $asx;
my $original2 = $asx2;
#check all feature information
($asx, $asx_seq_region_id) = $self->_pre_store($asx);
($asx2, $asx2_seq_region_id) = $self->_pre_store($asx2);
#and store it
$asx_st->bind_param(1, $asx_seq_region_id, SQL_INTEGER);
$asx_st->bind_param(2, $asx->start(), SQL_INTEGER);
$asx_st->bind_param(3, $asx->end(), SQL_INTEGER);
$asx_st->bind_param(4, $asx->type(), SQL_VARCHAR);
$asx_st->bind_param(5, $asx2_seq_region_id, SQL_INTEGER);
$asx_st->bind_param(6, $asx2->start(), SQL_INTEGER);
$asx_st->bind_param(7, $asx2->end(), SQL_INTEGER);
$asx_st->execute();
$asx_id = $self->last_insert_id('assembly_exception_id', undef, 'assembly_exception');
#finally, update the dbID and adaptor of the asx and asx2
# NOTE: both features share the same dbID (one table row).
$original->adaptor($self);
$original->dbID($asx_id);
$original2->adaptor($self);
$original2->dbID($asx_id);
#and finally update dbID cache with new assembly exception
$self->{'_aexc_dbID_cache'}->{$asx_id} = $original;
#and update the other caches as well
push @{$self->{'_aexc_slice_cache'}->{uc($asx->slice->name)}},$original, $original2;
push @{$self->{'_aexc_cache'}}, $original, $original2;
return $asx_id;
}
#
# Helper function containing some common feature storing functionality
#
# Given a Feature this will return a copy (or the same feature if no changes
# to the feature are needed) of the feature which is relative to the start
# of the seq_region it is on. The seq_region_id of the seq_region it is on
# is also returned.
#
# This method will also ensure that the database knows which coordinate
# systems that this feature is stored in.
# Since this adaptor doesn't inherit from BaseFeatureAdaptor, we need to copy
# the code
#
sub _pre_store {
# Validate a feature, project it onto a whole-seq_region slice if needed,
# register the coord system in meta_coord, and return
# ($possibly_transferred_feature, $seq_region_id). Throws on any
# validation failure. (Copied from BaseFeatureAdaptor -- see note above.)
my $self = shift;
my $feature = shift;
if(!ref($feature) || !$feature->isa('Bio::EnsEMBL::Feature')) {
throw('Expected Feature argument.');
}
$self->_check_start_end_strand($feature->start(),$feature->end(),
$feature->strand());
my $db = $self->db();
my $slice_adaptor = $db->get_SliceAdaptor();
my $slice = $feature->slice();
if(!ref($slice) || !($slice->isa('Bio::EnsEMBL::Slice') or $slice->isa('Bio::EnsEMBL::LRGSlice')) ) {
throw('Feature must be attached to Slice to be stored.');
}
# make sure feature coords are relative to start of entire seq_region
if($slice->start != 1 || $slice->strand != 1) {
#move feature onto a slice of the entire seq_region
$slice = $slice_adaptor->fetch_by_region($slice->coord_system->name(),
$slice->seq_region_name(),
undef, #start
undef, #end
undef, #strand
$slice->coord_system->version());
$feature = $feature->transfer($slice);
if(!$feature) {
throw('Could not transfer Feature to slice of ' .
'entire seq_region prior to storing');
}
}
# Ensure this type of feature is known to be stored in this coord system.
my $cs = $slice->coord_system;
my $mcc = $db->get_MetaCoordContainer();
$mcc->add_feature_type($cs, 'assembly_exception', $feature->length);
my $seq_region_id = $slice_adaptor->get_seq_region_id($slice);
if(!$seq_region_id) {
throw('Feature is associated with seq_region which is not in this DB.');
}
return ($feature, $seq_region_id);
}
#
# helper function used to validate start/end/strand and
# hstart/hend/hstrand etc.
#
sub _check_start_end_strand {
    # Validate feature coordinates before storing: start and end must be
    # integers with start <= end, and strand must be -1, 0 or 1.
    # Throws on the first violation; returns 1 when everything is valid.
    my ($self, $start, $end, $strand) = @_;

    # Coordinates must be integral.
    throw("Invalid Feature start [$start]. Must be integer.")
        if int($start) != $start;
    throw("Invalid Feature end [$end]. Must be integer.")
        if int($end) != $end;

    # Strand is restricted to the three canonical values.
    throw("Invalid Feature strand [$strand]. Must be -1, 0 or 1.")
        if int($strand) != $strand || $strand < -1 || $strand > 1;

    # A feature may not end before it starts.
    throw("Invalid Feature start/end [$start/$end]. Start must be less " .
          "than or equal to end.")
        if $end < $start;

    return 1;
}
=head2 remove
Arg [1] : $asx Bio::EnsEMBL::AssemblyExceptionFeature
Example : $asx_adaptor->remove($asx);
Description: This removes an assembly exception feature from the database.
Returntype : none
Exceptions : thrown if $asx arg does not implement dbID(), or if
$asx->dbID is not a true value
Caller : general
Status : Stable
=cut
#again, this method is generic in BaseFeatureAdaptor, but since this class
#is not inheriting, need to copy&paste
sub remove {
    # Delete a stored AssemblyExceptionFeature's row from the
    # assembly_exception table, purge it from the adaptor's caches and
    # detach it (undef dbID/adaptor) from the database.
    my ($self, $feature) = @_;

    # The argument must be an AssemblyExceptionFeature already stored here.
    if (!$feature || !ref($feature) || !$feature->isa('Bio::EnsEMBL::AssemblyExceptionFeature')) {
        throw('AssemblyExceptionFeature argument is required');
    }
    if (!$feature->is_stored($self->db)) {
        throw("This feature is not stored in this database");
    }

    my $asx_id = $feature->dbID();
    my $key    = uc($feature->slice->name);

    # Remove the database row.
    my $sth = $self->prepare("DELETE FROM assembly_exception WHERE assembly_exception_id = ?");
    $sth->bind_param(1, $feature->dbID, SQL_INTEGER);
    $sth->execute();

    # Drop the feature from the dbID cache ...
    delete $self->{'_aexc_dbID_cache'}->{$asx_id};

    # ... and filter it out of the per-slice and global caches.
    $self->{'_aexc_slice_cache'}->{$key} =
        [ grep { $_->dbID != $asx_id } @{ $self->{'_aexc_slice_cache'}->{$key} || [] } ];
    $self->{'_aexc_cache'} =
        [ grep { $_->dbID != $asx_id } @{ $self->{'_aexc_cache'} || [] } ];

    # Detach the now-deleted feature from the database.
    $feature->dbID(undef);
    $feature->adaptor(undef);

    return;
}
1;
| danstaines/ensembl | modules/Bio/EnsEMBL/DBSQL/AssemblyExceptionFeatureAdaptor.pm | Perl | apache-2.0 | 21,599 |
%###############################################################
%############# MODULE: UPPER-BOUND CONSTRUCTION ################
%###############################################################
:- module(upperBound, [upperBound/7]).
:- use_module(library(lists)).
:- use_module(self_check_rul).
:- use_module(prePostCon,[isType/1,isRULprog/1,isDefinition/1,isOrdered/1]).
:- use_module(auxil,[newName/2, wellDefined/3,errorMsg/2,orderSubGoals/2,removeClause/4,prepareTriple/10]).
:- use_module(subType,[subType/3]).
:- use_module(gensym2,[seed/1]).
%##############################################################
% (PUBLIC) UPPERBOUND: ========================================
% upperBound takes the names of two regular types together with
% the representation of a program defining them and it computes
% a regular type which is a upperbound of both the input types.
% The generated upperBound type will get a new name also genera-
% ted by this procedure. In order to generate the new name, the
% procedure needs some number index as input. Based on the input
% index, the names of the new predicates (defining the new type)
% are computed by index incrementation. The result can be found
% in the output index. The seven parameters of the procedure are
% used as: (+ + - - + + -). The newly generated type must be an
% upper type of each of the two input types. As we were dealing
% with pairs of type names in the sub-typing procedures, so are
% we dealing with triples of type names in this procedure here.
% UpperBoundType occurs twice in the upperType-procedure call
% because of an equal-value requirement with respect to an im-
% plicitly underlying comparison. ATTENTION: input program must
% be in RUL! Please note the similarity between this procedure
% and the interSection procedure in the interSection module!
% The ReferenceTriples are required for comparison. [18.9.2001:
% NEW]: Check subtype properties in advance! ==================
% Self-check contracts for upperBound/7 (see self_check_rul):
% pre-condition  -- both inputs are valid type names, the program is a
%                   valid RUL program and the input index is a number;
% post-condition -- the computed upper bound is a valid type name and
%                   the generated definitions form a valid RUL program.
:- initialization(assert_pre(upperBound:upperBound(In1,In2,_,_,In3,In4,_Dummy),
(prePostCon:isType(In1),
prePostCon:isType(In2),
prePostCon:isRULprog(In3),
number(In4)))).
:- initialization(assert_post(upperBound:upperBound(_,_,Out1,Out2,_,_,_Dummy),
(prePostCon:isType(Out1),
prePostCon:isRULprog(Out2)))).
%--------------------------------------------------------------
% upperBound(+Type1, +Type2, -UpperBoundName, -NewProcDefs,
%            +RULprogram, +IndexIn, -IndexOut)
% Clauses 1-2: 'any' is the top of the type lattice, so it absorbs
% every other type and no new definitions are required.
upperBound(any,_,any,[],_,Num,Num) :- !.
upperBound(_,any,any,[],_,Num,Num) :- !.
% Clauses 3-4: if one input type is already a subtype of the other,
% the supertype itself is the upper bound -- nothing new to generate.
upperBound(SubType,SuperType,SuperType,[],Prog,Num,Num) :-
subType(SubType, SuperType, Prog),
!.
upperBound(SuperType,SubType,SuperType,[],Prog,Num,Num) :-
subType(SubType, SuperType, Prog),
!.
% General case: invent a fresh type name (prefix "ub_" + generated name),
% build its defining procedure from both input definitions (upperType),
% and then generate definitions for all auxiliary triples (make_UB_def).
upperBound(TypeName1, TypeName2, UpBdName,
[proc(UpBdName/1,UpBdDef)|ProcDefs],
InputProgram, InputIndex, OutputIndex) :-
orderSubGoals(InputProgram, RULprogram),
wellDefined(TypeName1, TypeDef1, RULprogram),
wellDefined(TypeName2, TypeDef2, RULprogram),
newName(InputIndex, NewName),
name(NewName, NameCode),
name(ub_, FlagCode), /* Flag indicating upperBound */
append(FlagCode, NameCode, IdentCode),
name(UpBdName, IdentCode),
NextIndex is InputIndex + 1,
upperType(TypeDef1, TypeDef2, UpBdDef,
UpBdName, TypeName1, TypeName2,
UpBdName, InputTriples, ReferenceTriples,
NextIndex, NewIndex, RULprogram),
make_UB_def(ReferenceTriples, TypeName1, TypeName2,
UpBdName, InputTriples, ProcDefs,
NewIndex, OutputIndex, RULprogram),
!.
% Catch-all: report the offending pair of type names and fail.
upperBound(X,Y,_,_,_,_,_) :-
errorMsg(upperBound,(X,Y)),
!,
fail.
%------------------------------------------------------------
% make_UB_def(+ReferenceTriples, +Type1, +Type2, +UpperName,
%             +InputTriples, -ProcDefs, +IndexIn, -IndexOut, +RULprogram)
% Drives the generation of procedure definitions for all pending
% (t1,t2,t) triples; eachUpperBound may create further triples, which
% are processed recursively until the work list is empty.
make_UB_def([], _,_,_,_, [], FinalIndex, FinalIndex, _) :- !.
make_UB_def(ReferenceTriples, TypeName1, TypeName2, UpperName,
InputTriples, ProcDefs, IndexInput, IndexOutput,
RULprogram) :-
eachUpperBound(ReferenceTriples, FirstDefs, IntermediateTriples,
TypeName1, TypeName2, UpperName, InputTriples,
IndexInput, IntermediateIndex, RULprogram),
make_UB_def(IntermediateTriples, TypeName1, TypeName2,
UpperName, InputTriples, RestDefs,
IntermediateIndex, IndexOutput, RULprogram),
append(FirstDefs, RestDefs, ProcDefs).
% UPPERTYPE: ==================================================
% Checks if three type definitions for (t1, t2, t) fullfil the
% upper type condition such that t > t1 and t > t2. There's not
% much to do in case one of these definitions is found empty []
% (Recursion base case). In the non-trivial case we must treat
% the type definitions clause by clause (which is done calling
% the removeClause procedure) and then further check the remai-
% ning clause bodies. =========================================
% upperType(+Def1, +Def2, -UpperDef, +UpperName, +Type1, +Type2,
%           +UpperBound, +InputTriples, -ReferenceTriples,
%           +IndexIn, -IndexOut, +RULprogram)
% Base case: both definitions exhausted.
upperType([], [], [], _,_,_,_,_,_, FinalIndex, FinalIndex, _) :- !.
% Def1 exhausted: every remaining clause of Def2 is copied verbatim
% under the new upper-bound predicate name.
upperType([], [(Predicate:-SameGoals)|OtherClauses],
[(UpperPred:-SameGoals)|UpperClauses],
UpperName, Type1, Type2, UpperBound,
InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram) :-
/* PRECONDITION */
isOrdered((Predicate:-SameGoals)),
!,
Predicate =.. [_, SameFunctor],
UpperPred =.. [UpperName, SameFunctor],
upperType([], OtherClauses, UpperClauses, UpperName,
Type1, Type2, UpperBound, InputTriples,
ReferenceTriples, IndexInput, IndexOutput,
RULprogram).
% Symmetric case: Def2 exhausted, copy the rest of Def1.
upperType([(Predicate:-SameGoals)|OtherClauses], [],
[(UpperPred:-SameGoals)|UpperClauses],
UpperName, Type1, Type2, UpperBound,
InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram) :-
/* PRECONDITION */
isOrdered((Predicate:-SameGoals)),
!,
Predicate =.. [_, SameFunctor],
UpperPred =.. [UpperName, SameFunctor],
upperType(OtherClauses, [], UpperClauses, UpperName,
Type1, Type2, UpperBound, InputTriples,
ReferenceTriples, IndexInput, IndexOutput,
RULprogram).
% A clause of Def1 has a head-matching clause in Def2 (removeClause
% extracts it): merge the two bodies goal by goal via upperBoundBody.
upperType([(Pred1:-Goals1)|Clauses1], Clauses2,
[(UPred:-UGoals)|UClauses], UName, Type1, Type2,
UType, InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram) :-
/* PRECONDITION */
isOrdered((Pred1:-Goals1)),
allOrdered(Clauses2),
removeClause(Pred1, TheRemovedGoals,
Clauses2, ResultClauses2),
!,
Pred1 =.. [_, SameFunctor],
UPred =.. [UName, SameFunctor],
upperBoundBody(Goals1, TheRemovedGoals, UGoals, Type1,
Type2, UType, InputTriples, ReferenceTriples,
IndexInput, IntermediateIndex, RULprogram),
upperType(Clauses1, ResultClauses2, UClauses, UName,
Type1, Type2, UType, InputTriples, ReferenceTriples,
IntermediateIndex, IndexOutput, RULprogram).
% A clause of Def1 with no matching head in Def2: copy it verbatim.
upperType([(Pred1:-SameGoals)|Clauses1], Clauses2,
[(UPred:-SameGoals)|UClauses], UName, Type1,
Type2, UType, InputTriples, ReferenceTriples,
IndexInput, IndexOutput, RULprogram) :-
/* PRECONDITION */
isOrdered((Pred1:-SameGoals)),
allOrdered(Clauses2),
Pred1 =.. [_, SameFunctor],
UPred =.. [UName, SameFunctor],
upperType(Clauses1, Clauses2, UClauses, UName, Type1,
Type2, UType, InputTriples, ReferenceTriples,
IndexInput, IndexOutput, RULprogram).
% UPPERBOUNDBODY: ===============================================
% upperBoundBody supports the upperType-procedure in decomposing
% the terms under consideration. Eventually we have to check all
% atoms of a multigoal in order to construct the upper type. The
% (.,.)-operator used for this dexomposition is SICSTUS built-in.
% No wonder that the structure of this procedure is very similar
% to the procedure subTypePairs of above! (One could have merged
% this procedure with the upperBoundAtom procedure, similar to
% what I did with the makePairs procedure, but I did not do it
% because this procedure here is already too big and heavy.)
% ==============================================================
% upperBoundBody(+Body1, +Body2, -UpperBody, +Type1, +Type2, +UType,
%                +InputTriples, -ReferenceTriples, +IndexIn, -IndexOut,
%                +RULprogram)
% Walks two clause bodies in lockstep and combines them atom by atom.
upperBoundBody(true, true, true, _,_,_,_,_,
FinalIndex, FinalIndex, _) :- !.
% Conjunction case: combine the leading atoms, then recurse on the rest.
upperBoundBody((Atom1,Goals1), (Atom2,Goals2), (AtomU,GoalsU),
Type1, Type2, UType, InputTriples, ReferenceTriples,
IndexInput,IndexOutput,RULprogram) :-
%user:debug_print('UB-BODY: '),
%user:debug_print((Atom1,Goals1)),
%user:debug_print((Atom2,Goals2)),
%user:debug_nl,
!,
upperBoundAtom(Atom1, Atom2, AtomU, Type1, Type2, UType,
InputTriples, ReferenceTriples, IndexInput,
IntermediateIndex, RULprogram),
upperBoundBody(Goals1, Goals2, GoalsU, Type1, Type2,
UType, InputTriples, ReferenceTriples,
IntermediateIndex, IndexOutput, RULprogram).
%user:debug_print((AtomU,GoalsU)),
%user:debug_nl
% Single-atom case: both bodies consist of one remaining atom.
upperBoundBody(Atom1, Atom2, AtomU, Type1, Type2, UType,
InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram) :-
upperBoundAtom(Atom1, Atom2, AtomU, Type1, Type2, UType,
InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram).
% EACHUPPERBOUND: ===============================================
% "eachUpperBound" supports the "makeTriples"-procedure. It goes
% through the triple lists and makes sure that all triple in the
% list have the property {t > t1 AND t > t2}, where (t1,t2,t) is
% in the list and ">" is the uppertype relation on the correspon-
% ding types in the input program representation. By the way new
% indices are generated too. Because of far reaching call depen-
% dencies, we have to carry on loads of formal parameters here!
% If everything goes OK, then UpperType and UpperBound have THE
% SAME VALUE, as specified in the upperBound-procedure above.
% ==============================================================
% eachUpperBound(+ReferenceTriples, -ProcDefs, -IntermediateTriples,
%                +Type1, +Type2, +UType, +InputTriples,
%                +IndexIn, -IndexOut, +RULprogram)
% For every (t1,t2,t) triple on the work list, generate the defining
% procedure for t from the definitions of t1 and t2; upperType may add
% new triples to IntermediateTriples for later processing.
eachUpperBound([], [], _,_,_,_,_, FinalIndex, FinalIndex, _) :- !.
eachUpperBound([(TypeName1,TypeName2,UTypeName)|ReferenceTriples],
[proc(UTypeName/1,UTypeDef)|ProcDefs],
IntermediateTriples, Type1, Type2, UType,
InputTriples, IndexInput, IndexOutput, RULprogram) :-
wellDefined(TypeName1, TypeDef1, RULprogram),
wellDefined(TypeName2, TypeDef2, RULprogram),
NextIndex is IndexInput + 1,
!,
upperType(TypeDef1, TypeDef2, UTypeDef, UTypeName, Type1,
Type2, UType, InputTriples, IntermediateTriples,
NextIndex, NewIndex, RULprogram),
eachUpperBound(ReferenceTriples, ProcDefs, IntermediateTriples,
Type1, Type2, UType, InputTriples, NewIndex,
IndexOutput, RULprogram).
% UPPERBOUNDATOM: ===============================================
% upperBoundAtom supports the upperBoundBody-procedure. We check
% that we have reached the level of atoms in our type decomposi-
% tion process. We are done where we discover that the functors
% of Pred1, Pred2 and UpperPred are the same, and if their next
% upper bound is also well defined. For this purpose we have to
% check the super type relation holds which can be of course al-
% so expressed in terms of sub type. This is done by the next-
% UpperBound procedure. =========================================
% upperBoundAtom(+Atom1, +Atom2, -UpperAtom, +Type1, +Type2, +UType,
%                +InputTriples, -ReferenceTriples, +IndexIn, -IndexOut,
%                +RULprogram)
% Both atoms must wrap the same argument term; the upper-bound atom
% applies the type name computed by nextUpperBound to that argument.
upperBoundAtom(Pred1, Pred2, UPred, Type1, Type2, UType,
InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram) :-
Pred1 =.. [Name1, SameFunctor],
Pred2 =.. [Name2, SameFunctor],
nextUpperBound(Name1, Name2, UName, Type1, Type2, UType,
InputTriples, ReferenceTriples, IndexInput,
IndexOutput, RULprogram),
UPred =.. [UName, SameFunctor].
% NEXTUPPERBOUND: ===============================================
% nextUpperBound supports the upperBoundAtom-procedure. To check
% if a triple (t1, t2, t) is OK we must prove that t is an upper
% type of t1 as well as of t2. This means viceversa that both t1
% and t2 must be subtypes of t such that we can reuse "subType"
% as defined above. If two of the inputs (t1,t1,t) are the same,
% there is no need to create a new type-name triple. If all the
% three names are different, we have to prepare the generation
% of a new triple for the triple list. =========================
% nextUpperBound(+Name1, +Name2, -UpperName, +Type1, +Type2, +UType,
%                +InputTriples, -ReferenceTriples, +IndexIn, -IndexOut,
%                +RULprogram)
% If one name is a subtype of the other, reuse the supertype directly;
% otherwise prepare a new (Name1,Name2,UpperName) triple for the
% triple work list via prepareTriple.
nextUpperBound(SubType, SuperType, SuperType, _,_,_,_,_,
FinalIndex, FinalIndex, RULprogram) :-
subType(SubType, SuperType, RULprogram),
!.
nextUpperBound(SuperType, SubType, SuperType, _,_,_,_,_,
FinalIndex, FinalIndex, RULprogram) :-
subType(SubType, SuperType, RULprogram),
!.
nextUpperBound(TypeName1, TypeName2, UpperName, Type1,
Type2, UType, InputTriples, ReferenceTriples,
IndexInput, IndexOutput, _) :-
prepareTriple(TypeName1, TypeName2, UpperName, Type1,
Type2, UType, InputTriples, ReferenceTriples,
IndexInput, IndexOutput).
%----------------------------------------------------------
% allOrdered(+Clauses): succeeds iff prePostCon:isOrdered/1 holds for
% every clause in the list.
allOrdered([]).
allOrdered([Clause|MoreClauses]) :-
    isOrdered(Clause),
    allOrdered(MoreClauses).
%###############################################################
%############################### END ###########################
%###############################################################
| leuschel/ecce | ecce_source/rul/upperBound.pl | Perl | apache-2.0 | 13,181 |
package Sisimai::MTA::Postfix;
use parent 'Sisimai::MTA';
use feature ':5.10';
use strict;
use warnings;
# Postfix manual - bounce(5) - http://www.postfix.org/bounce.5.html
# Cheap pre-match patterns applied to the bounce mail's From/Subject
# headers before the body is parsed.
my $Re0 = {
'from' => qr/ [(]Mail Delivery System[)]\z/,
'subject' => qr/\AUndelivered Mail Returned to Sender\z/,
};
# Markers that delimit the sections of the bounce message body:
#   begin  -> first line of the human-readable delivery-status text,
#   rfc822 -> start of the embedded original message,
#   endof  -> internal end-of-message sentinel.
my $Re1 = {
'begin' => qr{\A(?>
[ ]+The[ ](?:
Postfix[ ](?:
program\z # The Postfix program
|on[ ].+[ ]program\z # The Postfix on <os name> program
)
|\w+[ ]Postfix[ ]program\z # The <name> Postfix program
|mail[ \t]system\z # The mail system
|\w+[ \t]program\z # The <custmized-name> program
)
|This[ ]is[ ]the[ ](?:
Postfix[ ]program # This is the Postfix program
|\w+[ ]Postfix[ ]program # This is the <name> Postfix program
|\w+[ ]program # This is the <customized-name> Postfix program
|mail[ ]system[ ]at[ ]host # This is the mail system at host <hostname>.
)
)
}x,
'rfc822' => qr!\AContent-Type:[ \t]*(?:message/rfc822|text/rfc822-headers)\z!x,
'endof' => qr/\A__END_OF_EMAIL_MESSAGE__\z/,
};
# Bit flags used by scan() to track which body section the cursor is in.
my $Indicators = __PACKAGE__->INDICATORS;
# Human-readable name of the MTA this parser understands.
sub description { return 'Postfix' }
# Agent name recorded in each parsed delivery-status entry.
sub smtpagent { return 'Postfix' }
# Expose the From/Subject pre-match patterns to Sisimai::Message.
sub pattern { return $Re0 }
sub scan {
# Parse bounce messages from Postfix
# @param [Hash] mhead Message header of a bounce email
# @options mhead [String] from From header
# @options mhead [String] date Date header
# @options mhead [String] subject Subject header
# @options mhead [Array] received Received headers
# @options mhead [String] others Other required headers
# @param [String] mbody Message body of a bounce email
# @return [Hash, Undef] Bounce data list and message/rfc822 part
# or Undef if it failed to parse or the
# arguments are missing
# @since v4.0.0
my $class = shift;
my $mhead = shift // return undef;
my $mbody = shift // return undef;
# Only mails whose Subject matches $Re0 are treated as Postfix bounces.
return undef unless $mhead->{'subject'} =~ $Re0->{'subject'};
# Working state for the line-by-line parse of the message body.
my $dscontents = [ __PACKAGE__->DELIVERYSTATUS ];
my @hasdivided = split( "\n", $$mbody );
my $rfc822part = ''; # (String) message/rfc822-headers part
my $rfc822list = []; # (Array) Each line in message/rfc822 part string
my $blanklines = 0; # (Integer) The number of blank lines
my $readcursor = 0; # (Integer) Points the current cursor position
my $recipients = 0; # (Integer) The number of 'Final-Recipient' header
my @commandset = (); # (Array) ``in reply to * command'' list
my $connvalues = 0; # (Integer) Flag, 1 if all the value of $connheader have been set
my $connheader = {
'date' => '', # The value of Arrival-Date header
'lhost' => '', # The value of Received-From-MTA header
};
my $anotherset = {}; # Another error information
my $v = undef;
my $p = '';
for my $e ( @hasdivided ) {
# Read each line between $Re1->{'begin'} and $Re1->{'rfc822'}.
unless( $readcursor ) {
# Beginning of the bounce message or delivery status part
if( $e =~ $Re1->{'begin'} ) {
$readcursor |= $Indicators->{'deliverystatus'};
next;
}
}
unless( $readcursor & $Indicators->{'message-rfc822'} ) {
# Beginning of the original message part
if( $e =~ $Re1->{'rfc822'} ) {
$readcursor |= $Indicators->{'message-rfc822'};
next;
}
}
if( $readcursor & $Indicators->{'message-rfc822'} ) {
# After "message/rfc822"
unless( length $e ) {
$blanklines++;
last if $blanklines > 1;
next;
}
push @$rfc822list, $e;
} else {
# Before "message/rfc822"
next unless $readcursor & $Indicators->{'deliverystatus'};
next unless length $e;
if( $connvalues == scalar( keys %$connheader ) ) {
# Reporting-MTA/Arrival-Date are known; the remaining lines are
# per-recipient RFC 3464 fields.
# Final-Recipient: RFC822; userunknown@example.jp
# X-Actual-Recipient: RFC822; kijitora@example.co.jp
# Action: failed
# Status: 5.1.1
# Remote-MTA: DNS; mx.example.jp
# Diagnostic-Code: SMTP; 550 5.1.1 <userunknown@example.jp>... User Unknown
# Last-Attempt-Date: Fri, 14 Feb 2014 12:30:08 -0500
$v = $dscontents->[ -1 ];
if( $e =~ m/\A[Ff]inal-[Rr]ecipient:[ ]*(?:RFC|rfc)822;[ ]*(.+)\z/ ) {
# Final-Recipient: RFC822; userunknown@example.jp
if( length $v->{'recipient'} ) {
# There are multiple recipient addresses in the message body.
push @$dscontents, __PACKAGE__->DELIVERYSTATUS;
$v = $dscontents->[ -1 ];
}
$v->{'recipient'} = $1;
$recipients++;
} elsif( $e =~ m/\A[Xx]-[Aa]ctual-[Rr]ecipient:[ ]*(?:RFC|rfc)822;[ ]*([^ ]+)\z/ ||
$e =~ m/\A[Oo]riginal-[Rr]ecipient:[ ]*(?:RFC|rfc)822;[ ]*([^ ]+)\z/ ) {
# X-Actual-Recipient: RFC822; kijitora@example.co.jp
# Original-Recipient: rfc822;kijitora@example.co.jp
$v->{'alias'} = $1;
} elsif( $e =~ m/\A[Aa]ction:[ ]*(.+)\z/ ) {
# Action: failed
$v->{'action'} = lc $1;
} elsif( $e =~ m/\A[Ss]tatus:[ ]*(\d[.]\d+[.]\d+)/ ) {
# Status: 5.1.1
# Status:5.2.0
# Status: 5.1.0 (permanent failure)
$v->{'status'} = $1;
} elsif( $e =~ m/\A[Rr]emote-MTA:[ ]*(?:DNS|dns);[ ]*(.+)\z/ ) {
# Remote-MTA: DNS; mx.example.jp
$v->{'rhost'} = lc $1;
} elsif( $e =~ m/\A[Ll]ast-[Aa]ttempt-[Dd]ate:[ ]*(.+)\z/ ) {
# Last-Attempt-Date: Fri, 14 Feb 2014 12:30:08 -0500
#
# src/bounce/bounce_notify_util.c:
# 681 #if 0
# 682 if (dsn->time > 0)
# 683 post_mail_fprintf(bounce, "Last-Attempt-Date: %s",
# 684 mail_date(dsn->time));
# 685 #endif
$v->{'date'} = $1;
} else {
if( $e =~ m/\A[Dd]iagnostic-[Cc]ode:[ ]*(.+?);[ ]*(.+)\z/ ) {
# Diagnostic-Code: SMTP; 550 5.1.1 <userunknown@example.jp>... User Unknown
$v->{'spec'} = uc $1;
$v->{'spec'} = 'SMTP' if $v->{'spec'} eq 'X-POSTFIX';
$v->{'diagnosis'} = $2;
} elsif( $p =~ m/\A[Dd]iagnostic-[Cc]ode:[ ]*/ && $e =~ m/\A[ \t]+(.+)\z/ ) {
# Continued line of the value of Diagnostic-Code header
$v->{'diagnosis'} .= ' '.$1;
$e = 'Diagnostic-Code: '.$e;
}
}
} else {
# If you do so, please include this problem report. You can
# delete your own text from the attached returned message.
#
# The mail system
#
# <userunknown@example.co.jp>: host mx.example.co.jp[192.0.2.153] said: 550
# 5.1.1 <userunknown@example.co.jp>... User Unknown (in reply to RCPT TO
# command)
if( $e =~ m/[ \t][(]in reply to ([A-Z]{4}).*/ ) {
# 5.1.1 <userunknown@example.co.jp>... User Unknown (in reply to RCPT TO
push @commandset, $1;
} elsif( $e =~ m/([A-Z]{4})[ \t]*.*command[)]\z/ ) {
# to MAIL command)
push @commandset, $1;
} else {
if( $e =~ m/\A[Rr]eporting-MTA:[ ]*(?:DNS|dns);[ ]*(.+)\z/ ) {
# Reporting-MTA: dns; mx.example.jp
next if $connheader->{'lhost'};
$connheader->{'lhost'} = lc $1;
$connvalues++;
} elsif( $e =~ m/\A[Aa]rrival-[Dd]ate:[ ]*(.+)\z/ ) {
# Arrival-Date: Wed, 29 Apr 2009 16:03:18 +0900
next if $connheader->{'date'};
$connheader->{'date'} = $1;
$connvalues++;
} elsif( $e =~ m/\A(X-Postfix-Sender):[ ]*rfc822;[ ]*(.+)\z/ ) {
# X-Postfix-Sender: rfc822; shironeko@example.org
push @$rfc822list, sprintf( "%s: %s", $1, $2 );
} else {
# Alternative error message and recipient
if( $e =~ m/\A[<]([^ ]+[@][^ ]+)[>] [(]expanded from [<](.+)[>][)]:[ \t]*(.+)\z/ ) {
# <r@example.ne.jp> (expanded from <kijitora@example.org>): user ...
$anotherset->{'recipient'} = $1;
$anotherset->{'alias'} = $2;
$anotherset->{'diagnosis'} = $3;
} elsif( $e =~ m/\A[<]([^ ]+[@][^ ]+)[>]:(.*)\z/ ) {
# <kijitora@exmaple.jp>: ...
$anotherset->{'recipient'} = $1;
$anotherset->{'diagnosis'} = $2;
} else {
# Get error message continued from the previous line
next unless $anotherset->{'diagnosis'};
if( $e =~ m/\A[ \t]{4}(.+)\z/ ) {
# host mx.example.jp said:...
$anotherset->{'diagnosis'} .= ' '.$e;
}
}
}
}
}
} # End of if: rfc822
} continue {
# Save the current line for the next loop
$p = $e;
}
unless( $recipients ) {
# Fallback: set recipient address from error message
if( defined $anotherset->{'recipient'} && length $anotherset->{'recipient'} ) {
# Set recipient address
$dscontents->[-1]->{'recipient'} = $anotherset->{'recipient'};
$recipients++;
}
}
return undef unless $recipients;
require Sisimai::String;
for my $e ( @$dscontents ) {
# Set default values if each value is empty.
map { $e->{ $_ } ||= $connheader->{ $_ } || '' } keys %$connheader;
if( exists $anotherset->{'diagnosis'} && length $anotherset->{'diagnosis'} ) {
# Copy alternative error message
$e->{'diagnosis'} ||= $anotherset->{'diagnosis'};
if( $e->{'diagnosis'} =~ m/\A\d+\z/ ) {
# Override the value of diagnostic code message
$e->{'diagnosis'} = $anotherset->{'diagnosis'};
}
}
$e->{'diagnosis'} = Sisimai::String->sweep( $e->{'diagnosis'} );
$e->{'spec'} ||= 'SMTP' if $e->{'diagnosis'} =~ m/host .+ said:/;
$e->{'command'} = shift @commandset || '';
$e->{'agent'} = __PACKAGE__->smtpagent;
}
# Rebuild the original message headers from the collected rfc822 lines.
$rfc822part = Sisimai::RFC5322->weedout( $rfc822list );
return { 'ds' => $dscontents, 'rfc822' => $$rfc822part };
}
1;
__END__
=encoding utf-8
=head1 NAME
Sisimai::MTA::Postfix - bounce mail parser class for C<Postfix>.
=head1 SYNOPSIS
use Sisimai::MTA::Postfix;
=head1 DESCRIPTION
Sisimai::MTA::Postfix parses a bounce email which was created by C<Postfix>.
Methods in the module are called only from Sisimai::Message.
=head1 CLASS METHODS
=head2 C<B<description()>>
C<description()> returns description string of this module.
print Sisimai::MTA::Postfix->description;
=head2 C<B<smtpagent()>>
C<smtpagent()> returns MTA name.
print Sisimai::MTA::Postfix->smtpagent;
=head2 C<B<scan( I<header data>, I<reference to body string>)>>
C<scan()> method parses a bounced email and returns results as an array reference.
See Sisimai::Message for more details.
=head1 AUTHOR
azumakuniyuki
=head1 COPYRIGHT
Copyright (C) 2014-2016 azumakuniyuki, All rights reserved.
=head1 LICENSE
This software is distributed under The BSD 2-Clause License.
=cut
| jcbf/Sisimai | lib/Sisimai/MTA/Postfix.pm | Perl | bsd-2-clause | 12,811 |
package TestDNS;
use strict;
use warnings;
use 5.010001;
use Test::Nginx::Socket::Lua -Base;
#use JSON::XS;
# DNS record TYPE and CLASS numbers (RFC 1035; AAAA from RFC 3596)
# used when synthesizing replies.
use constant {
TYPE_A => 1,
TYPE_TXT => 16,
TYPE_CNAME => 5,
TYPE_AAAA => 28,
TYPE_SRV => 33,
CLASS_INTERNET => 1,
};
# Forward declarations of the encoding helpers (with prototypes) so
# they can be referenced before their definitions.
sub encode_name ($);
sub encode_ipv4 ($);
sub encode_ipv6 ($);
sub encode_record ($);
sub gen_dns_reply ($$);
sub Test::Base::Filter::dns {
# Test::Base filter: evaluates the block's Perl code and turns the
# resulting hash (or array of hashes) describing a DNS reply into raw
# wire-format packet(s). Optional filter argument 'tcp' or 'udp'
# (default 'udp') selects whether a 2-byte length prefix is added.
my ($self, $code) = @_;
my $args = $self->current_arguments;
#warn "args: $args";
if (defined $args && $args ne 'tcp' && $args ne 'udp') {
die "Invalid argument to the \"dns\" filter: $args\n";
}
my $mode = $args // 'udp';
my $block = $self->current_block;
my $pointer_spec = $block->dns_pointers;
# NOTE(review): @pointers is parsed from --- dns_pointers but never
# used in this sub; also $prev is never advanced along a chain, so
# "a => b => c" stores only pointers[a] -- confirm before relying on it.
my @pointers;
if (defined $pointer_spec) {
my @loops = split /\s*,\s*/, $pointer_spec;
for my $loop (@loops) {
my @nodes = split /\s*=>\s*/, $loop;
my $prev;
for my $n (@nodes) {
if ($n !~ /^\d+$/ || $n == 0) {
die "bad name ID in the --- dns_pointers: $n\n";
}
if (!defined $prev) {
$prev = $n;
next;
}
$pointers[$prev] = $n;
}
}
}
# The block content is Perl source describing the reply; string eval
# is intentional here (Test::Base filter convention).
my $input = eval $code;
if ($@) {
die "failed to evaluate code $code: $@\n";
}
if (!ref $input) {
return $input;
}
if (ref $input eq 'ARRAY') {
# One reply per element.
my @replies;
for my $t (@$input) {
push @replies, gen_dns_reply($t, $mode);
}
return \@replies;
}
if (ref $input eq 'HASH') {
return gen_dns_reply($input, $mode);
}
return $input;
}
sub gen_dns_reply ($$) {
# Build a raw DNS reply packet from a hash spec $t (qname, flags,
# answer/authority/additional records, ...). $mode is 'udp' (bare
# packet) or 'tcp' (prefixed with a 16-bit length).
my ($t, $mode) = @_;
# Collect references to every name field so they can all be converted
# to DNS wire format in place.
my @raw_names;
push @raw_names, \($t->{qname});
my $answers = $t->{answer} // [];
if (!ref $answers) {
$answers = [$answers];
}
my $authority_answers = $t->{authority} // [];
if (!ref $authority_answers) {
$authority_answers = [$authority_answers];
}
my $additional_answers = $t->{additional} // [];
if (!ref $additional_answers) {
$additional_answers = [$additional_answers];
}
for my $ans (@$answers) {
push @raw_names, \($ans->{name});
if (defined $ans->{cname}) {
push @raw_names, \($ans->{cname});
}
}
for my $nsans (@$authority_answers) {
push @raw_names, \($nsans->{name});
if (defined $nsans->{cname}) {
push @raw_names, \($nsans->{cname});
}
}
for my $arans (@$additional_answers) {
push @raw_names, \($arans->{name});
if (defined $arans->{cname}) {
push @raw_names, \($arans->{cname});
}
}
for my $rname (@raw_names) {
$$rname = encode_name($$rname // "");
}
my $qname = $t->{qname};
# Header: ID, flags, then the four section counts.
my $s = '';
my $id = $t->{id} // 0;
$s .= pack("n", $id);
#warn "id: ", length($s), " ", encode_json([$s]);
my $qr = $t->{qr} // 1;
my $opcode = $t->{opcode} // 0;
my $aa = $t->{aa} // 0;
my $tc = $t->{tc} // 0;
my $rd = $t->{rd} // 1;
my $ra = $t->{ra} // 1;
my $ad = $t->{ad} // 0;
my $cd = $t->{cd} // 0;
my $rcode = $t->{rcode} // 0;
# NOTE(review): the AD/CD shift amounts look swapped relative to
# RFC 4035 (AD is header bit 5, CD is bit 4) -- confirm before
# depending on these two flags in tests.
my $flags = ($qr << 15) + ($opcode << 11) + ($aa << 10) + ($tc << 9)
+ ($rd << 8) + ($ra << 7) + ($ad << 4) + ($cd << 5) + $rcode;
#warn sprintf("flags: %b", $flags);
$flags = pack("n", $flags);
$s .= $flags;
#warn "flags: ", length($flags), " ", encode_json([$flags]);
my $qdcount = $t->{qdcount} // 1;
my $ancount = $t->{ancount} // scalar @$answers;
my $nscount = $t->{nscount} // scalar @$authority_answers;
my $arcount = $t->{arcount} // scalar @$additional_answers;
$s .= pack("nnnn", $qdcount, $ancount, $nscount, $arcount);
#warn "qname: ", length($qname), " ", encode_json([$qname]);
# Question section, then the three record sections.
$s .= $qname;
my $qs_type = $t->{qtype} // TYPE_A;
my $qs_class = $t->{qclass} // CLASS_INTERNET;
$s .= pack("nn", $qs_type, $qs_class);
for my $ans (@$answers) {
$s .= encode_record($ans);
}
for my $nsans (@$authority_answers) {
$s .= encode_record($nsans);
}
for my $arans (@$additional_answers) {
$s .= encode_record($arans);
}
if ($mode eq 'tcp') {
return pack("n", length($s)) . $s;
}
return $s;
}
# Encode a dotted-quad IPv4 address into its 4-byte wire form.
# Returns the packed bytes plus the RDLENGTH (always 4).
sub encode_ipv4 ($) {
    my ($dotted) = @_;
    my @octets = split /\./, $dotted;
    my $packed = pack 'C4', @octets;
    return ($packed, 4);
}
# Encode a textual IPv6 address into its 16-byte wire form.
# Handles the "::" zero-compression shorthand. Returns the packed bytes
# plus the RDLENGTH (always 16). Dies on malformed input.
sub encode_ipv6 ($) {
    my $txt = shift;
    # LIMIT -1 keeps trailing empty fields, so addresses that *end* in
    # "::" (e.g. "ff02::", "::") still expand; split's default limit
    # silently dropped them and the final group-count check then died.
    my @groups = split /:/, $txt, -1;
    my $nils = 0;
    my $nonnils = 0;
    for my $g (@groups) {
        if ($g eq '') {
            $nils++;
        } else {
            $nonnils++;
            $g = hex($g);   # $g aliases the array element: converted in place
        }
    }
    my $total = $nils + $nonnils;
    if ($total > 8 ) {
        die "Invalid IPv6 address: too many groups: $total: $txt";
    }
    if ($nils) {
        # Expand the first run of empties ("::") to the missing zero
        # groups; any further empty fields belong to the same run.
        my $found = 0;
        my @new_groups;
        for my $g (@groups) {
            if ($g eq '') {
                if ($found) {
                    next;
                }
                for (1 .. 8 - $nonnils) {
                    push @new_groups, 0;
                }
                $found = 1;
            } else {
                push @new_groups, $g;
            }
        }
        @groups = @new_groups;
    }
    if (@groups != 8) {
        die "Invalid IPv6 address: $txt: @groups\n";
    }
    return pack("nnnnnnnn", @groups), 16;
}
# Convert a dotted domain name into DNS wire format: each label is
# prefixed with its length byte and the whole name is terminated by a
# NUL (the zero-length root label).
sub encode_name ($) {
    my ($domain) = @_;
    $domain =~ s{([^.]+)\.?}{ chr(length $1) . $1 }ge;
    return $domain . "\0";
}
# Serialize one resource-record spec hash into wire format.
# $ans->{name} is expected to be wire-encoded already (gen_dns_reply
# encodes all names, including cname targets, before calling us).
# Convenience keys fill in type/class/rddata/rdlength unless given
# explicitly (explicit values always win, via //=):
#   ipv4 -> A, ipv6 -> AAAA, cname -> CNAME, txt -> TXT, srv -> SRV
sub encode_record ($) {
    my $ans = shift;
    my $name = $ans->{name};
    my $type = $ans->{type};
    my $class = $ans->{class};
    my $ttl = $ans->{ttl};
    my $rdlength = $ans->{rdlength};
    my $rddata = $ans->{rddata};

    my $ipv4 = $ans->{ipv4};
    if (defined $ipv4) {
        my ($data, $len) = encode_ipv4($ipv4);
        $rddata //= $data;
        $rdlength //= $len;
        $type //= TYPE_A;
        $class //= CLASS_INTERNET;
    }

    my $ipv6 = $ans->{ipv6};
    if (defined $ipv6) {
        my ($data, $len) = encode_ipv6($ipv6);
        $rddata //= $data;
        $rdlength //= $len;
        $type //= TYPE_AAAA;
        $class //= CLASS_INTERNET;
    }

    my $cname = $ans->{cname};
    if (defined $cname) {
        # NOTE(review): assumes the cname target is already wire-encoded
        # by the caller, so its encoded length is the RDLENGTH.
        $rddata //= $cname;
        $rdlength //= length $rddata;
        $type //= TYPE_CNAME;
        $class //= CLASS_INTERNET;
    }

    my $txt = $ans->{txt};
    if (defined $txt) {
        # NOTE(review): the text is emitted verbatim, without the
        # length-prefixed character-string framing of RFC 1035 3.3.14 -
        # confirm callers pre-encode it when that matters.
        $rddata //= $txt;
        $rdlength //= length $rddata;
        $type //= TYPE_TXT;
        $class //= CLASS_INTERNET;
    }

    my $srv = $ans->{srv};
    if (defined $srv) {
        $rddata //= pack("nnn", $ans->{priority}, $ans->{weight}, $ans->{port}) . encode_name($srv);
        $rdlength //= length $rddata;
        $type //= TYPE_SRV;
        $class //= CLASS_INTERNET;
    }

    $type //= 0;
    $class //= 0;
    $ttl //= 0;
    # Default to an empty RDATA section instead of packing/concatenating
    # undef (which warned and relied on implicit undef-to-0/"" coercion).
    $rdlength //= 0;
    $rddata //= '';

    return $name . pack("nnNn", $type, $class, $ttl, $rdlength) . $rddata;
}
1
| LomoX-Offical/nginx-openresty-windows | src/lua-resty-dns-0.19/t/TestDNS.pm | Perl | bsd-2-clause | 7,188 |
package R::Css;
# Perl Awesome Stylesheets - pcss или pass
# конструктор правил css
use common::sense;
use R::App;
use R::Css::Type;
use R::Re;
use Exporter 'import';
our @EXPORT = qw/px em pc attr unit deg rgb hsl/;
# Constructor. Accepts a class name or an existing instance (used only
# for its class). Opens the default "main" pool so rules can be added
# immediately.
sub new {
    my ($cls) = @_;
    my $self = bless {
        dom => {}, # named pools: name => {maxim=>..., maximus=>...}
        serf => [], # stack of currently open pools
        register => {}, # generator subs registered per class mask
        classes => {}, # classes already emitted into the css
    }, ref $cls || $cls;
    # open the default rule pool
    $self->prince("main");
    $self
}
################ measurement units
# Set of css units accepted by unit(). Built with map so every unit is a
# hash *key*: the previous plain qw// list had an odd element count and
# was parsed as key/value pairs, so half the units (em, px, mm, s, rem)
# ended up as values and were rejected by unit()'s exists() check.
our %UNIT = map { $_ => 1 } qw/% em ex px pc mm sm s ms rem rex/;
# # в стандартных lineHeight
# sub lh($) {
# my ($val) = @_;
# "${val}em"
# }
# Append the css "em" unit to a number.
sub em($) {
    my ($n) = @_;
    return $n . 'em';
}
# Append the css "px" unit to a number.
sub px($) {
    my ($n) = @_;
    return $n . 'px';
}
# Percent unit (the original comment calls it "pico").
# NOTE(review): the sub name previously appeared to contain a Cyrillic
# "с" homoglyph, which would leave the exported ASCII name `pc`
# (see @EXPORT) undefined at call time - declared here with plain
# ASCII letters.
sub pc($) {
    my ($val) = @_;
    "${val}%"
}
# Append the css "deg" unit to a number.
sub deg {
    my ($angle) = @_;
    return $angle . 'deg';
}
# Wrap an attribute name for use as `content: attr(...)`.
# Returns a reference to the string so downstream code can distinguish
# it from a plain value.
sub attr ($) {
    my ($attribute) = @_;
    my $css = "attr($attribute)";
    return \$css;
}
# Strip the leading (optionally negative, optionally fractional) number
# from a css length and return the remaining unit, dying when the rest
# is not a known unit from %UNIT.
sub unit {
    my ($v) = @_;
    # The fractional alternative must be tried first: with the previous
    # /\d+|\d*\.\d+/ the engine matched only "1" in "1.5em" and then
    # rejected the leftover ".5em" as an unknown unit.
    die "app.css::unit: Ну что сказать? `$v` без числа" if not $v =~ s/^-?(?:\d*\.\d+|\d+)//;
    die "$v не единица измерения css" if !exists $UNIT{$v};
    $v
}
# Build a css color from r, g, b components given as fractions in 0..1
# (plus an optional alpha). Without alpha returns "#rrggbb"; with alpha
# returns "rgba(r,g,b,a)".
# Examples:
#   rgb 1, 0, 0        -> "#ff0000"
#   rgb 0, 0, 1, 0.5   -> "rgba(0,0,255,0.5)"
sub rgb ($$$;$) {
    my ($red, $green, $blue, $alpha) = @_;
    my @chan = map { int($_ * 255) } ($red, $green, $blue);
    return "rgba($chan[0],$chan[1],$chan[2],$alpha)" if defined $alpha;
    return sprintf '#%02x%02x%02x', @chan;
}
# Build a css hsl()/hsla() color from fractions in 0..1:
# hue is scaled to 0..359, saturation and lightness to percentages.
sub hsl ($$$;$) {
    my ($hue, $sat, $light, $alpha) = @_;
    $hue = int($hue * 359);
    my ($s_pct, $l_pct) = map { int($_ * 100) . '%' } ($sat, $light);
    return defined $alpha
        ? "hsla($hue,$s_pct,$l_pct,$alpha)"
        : "hsl($hue,$s_pct,$l_pct)";
}
# изменяет значения
# если
sub unitSize {
my $self = shift;
local $_;
$_ = &R::Css::Type::size for @_;
$self
}
# переводит в градусы из 0..1 -> 0..360
sub unitDeg {
my $self = shift;
local $_;
$_ = &R::Css::Type::deg for @_;
$self
}
################ стек
# Look up each requested css class among the registered generators and
# run the generator the first time a class is seen. A class like "pad4"
# is matched against its mask "pad*"; the extracted digits become the
# generator's arguments. Classes without digits or without a registered
# mask are ignored.
sub reg {
    my $self = shift;
    my $classes = $self->{classes} //= {};
    my $register = $self->{register};
    for my $class (@_) {
        next if exists $classes->{$class};               # already emitted
        my $mask = $class;
        my @args = ();
        next if $mask !~ s/\d+/ push @args, $&; "*"/ge;  # digits -> "*", collected as args
        my $fn = $register->{$mask};
        next if !defined $fn;
        $classes->{$class} = 1;                          # mark as emitted
        $fn->($class, @args); # the generator appends rules to the stylesheet
    }
    $self
}
# Parse html fragment(s) for registered classes and add the matching
# rules to the stylesheet.
sub reg_html {
    my $self = shift;
    # reg_html_classes is a method and needs $self as its first
    # argument; calling it as a plain sub (as before) made it treat the
    # first html string as the object and fail on $self->{register}.
    $self->reg( $self->reg_html_classes(@_) );
    $self
}
# Scan html fragments for class attributes and return every class whose
# digit-mask ("pad4" -> "pad*") has a registered generator.
sub reg_html_classes {
    my $self = shift;
    my $register = $self->{register};
    my @out;
    # Shield the caller from clobbered regex special globals.
    local ($_, $`, $', $&, $1, $2);
    for my $html (@_) {
        # Matches class="a b", class='a b' and the unquoted class=a form.
        while($html =~ m!\sclass=(?:['"]([^'"]+)|([\w-]+))!g) {
            my @re = defined($1)? split(/\s+/, $1): $2;
            push @out, grep {
                my $x = $_;
                $x =~ s/\d+/*/g;            # digits -> mask wildcard
                exists $register->{$x}
            } @re;
        }
    }
    @out
}
# Register (emit) the space-separated class names in $classes.
sub reg_class {
    my ($self, $classes) = @_;
    my @names = split /\s+/, $classes;
    $self->reg(@names);
    return $self;
}
# Getter/setter for generator subs.
# Masks are class names with "*" in place of each numeric part
# (e.g. mask "pad*" matches class "pad4", calling the generator with 4).
# With a sub: store it for $mask and return $self. Without: return the
# generator registered for $mask.
sub register {
    if(@_>2) {
        my ($self, $mask, $sub) = @_;
        $self->{register}{$mask} = $sub;
        $self
    } else {
        my ($self, $mask) = @_;
        $self->{register}{$mask};
    }
}
# Render the stylesheet and write it to $path, but only when the text
# differs from what is already on disk (keeps mtime stable for
# watchers). Dies when the pool stack is unbalanced (missing or extra
# end() calls).
sub save {
    my ($self, $path) = @_;
    die "не хватает end" if @{$self->{serf}} > 1;
    die "лишние end" if @{$self->{serf}} < 1;
    my $f = $app->file->from($path);
    my $s = $self->stringify;
    $f->write($s) if !$f->exists or $s ne $f->read;
    $self->{saveTime} = $f->mtime;
    $self
}
# Drop all accumulated pools and the pool stack; registered generators
# and the emitted-classes cache are deliberately kept.
sub clear {
    my ($self) = @_;
    @{$self}{qw(dom serf)} = ({}, []);
    return $self;
}
# Shallow-copy the stylesheet, but give the copy its own register and
# classes hashes so later registrations do not leak between the two.
sub clone {
    my ($self) = @_;
    my %copy = %$self;
    $copy{register} = { %{ $self->{register} } };
    $copy{classes}  = { %{ $self->{classes} } };
    return bless \%copy, ref $self;
}
# # импортирует (читает и парсит) файл *.css или включает файл *.pl, *.pm, *.pcss или *.pass
# sub require {
# my ($self, $path) = @_;
# my $files = $app->file($path)->glob;
# if($files->length > 1) {
# for $path ($self->files) { $self->include($path) }
# return $self;
# }
# return $self if $self->{INC}{$path};
# $self->{INC}{$path} = 1;
# if($path =~ /\.(?:pl|pm|pass|pcss)$/) { require $path }
# else {
# $files->read
# }
# $self
# }
# Parse css text and merge it into this stylesheet object.
# $path is used only for the error message.
sub parse {
    my $self = shift;
    local ($_, $`, $', $&);
    ($_, my $path) = @_;
    my $prince = 0;   # counter for auto-named @media pools
    my $intro;        # pending @-rule header
    my $the;          # pending selector
    my @set;          # accumulated key/value pairs for the current rule
    my @S;            # parser stack of open scopes: "prince" | "the"
    my $rule;         # true while inside a `key: value` declaration
    # NOTE(review): the (?P<the>...) character class matches only braces
    # and semicolons, which shadows the open/close/end alternatives
    # below; it looks like it was meant to be [^\{\};]+ (selector text).
    # Left as-is pending confirmation.
    # Fixes applied here:
    #  - removed a stray unbalanced ")" after the <value> group, which
    #    made the pattern fail to compile;
    #  - added /c so pos() survives the final failed match (a plain /g
    #    match resets pos() to undef on failure, which made the
    #    "fully consumed?" check below die even on valid input).
    while( /\G
        \@ (?P<media> [\w-]+ ) \s+ (?P<migo> (?:$R::Re::string|[^\{\};])+) |
        (?P<rule> [\w-]+ ) \s*:\s* (?P<value> (?:$R::Re::string|[^\{\};])+) |
        (?P<the> [\{\};]+) |
        (?P<open> \{ ) |
        (?P<close> \} ) |
        (?P<end> ;) |
        (?P<rem> \/\* .* \*\/ ) |
        (?P<space>\s+)
    /xgsmc ) {
        if($+{rem}) { $self->insert($+{rem}) }     # keep comments verbatim
        elsif($+{media}) {
            $intro = "\@$+{media} $+{migo}";
        }
        elsif($+{the}) { $the = $+{the} }
        elsif($+{rule}) { $rule = 1; push @set, $+{rule}, $+{value}; }
        elsif($+{open}) {
            if(defined $intro) {
                # An @-rule block opens a new anonymous pool.
                $self->prince("prince" . $prince++);
                $self->intro($intro);
                $intro = undef;
                push @S, "prince";
            }
            elsif(defined $the) {
                $self->the($the);
                $the = undef;
                push @S, "the";
            }
            else {
                die "нет ни intro, ни the";
            }
        }
        elsif($+{close}) {
            die "стек пуст" if !@S;
            if("the" eq pop @S) {
                # End of a rule body: flush the collected declarations.
                $self->set(@set) if @set;
                @set = ();
            } else {
                $self->end;
            }
        }
        elsif($+{end}) {
            if(defined $intro) {
                # A bodiless at-rule such as `@import ...;`.
                $self->insert($intro . ";");
            }
            elsif(defined $the) {
                die "; после the `$the`";
            }
            elsif($rule) {
                $rule = undef;
            }
            else {
                die "; не после rule";
            }
        }
        elsif($+{space}) {}
        else {
            die "что-то неизвестное было распознано парсером css";
        }
    }
    if( pos() != length $_) {
        # Tokenizer stopped early: report the line number of the failure.
        $_ = substr $_, 0, pos();
        my $lineno = 1;
        $lineno++ while m/\n/g;   # was `while(/\n/)` - an infinite loop
        die "ошибка распознавания кода css: $path:$lineno";
    }
    $self
}
# Render one declaration (key => value) to css text.
# Arrayref values are joined with the key's separator from
# %R::Css::Type::Sep. Key and value are then run through the
# R::Css::Type hooks, each of which may fan one declaration out into
# several keys and/or values (e.g. vendor variants). Returns the
# ";\n"-joined declaration text.
sub xak {
    my ($self, $key, $val) = @_;
    if(ref $val eq "ARRAY") {
        my $sep = $R::Css::Type::Sep{$key} // ", ";
        $val = join $sep, @$val;
    }
    my $norm = $key;
    $self->normalize($norm);   # camelCase -> css-case
    my (@key, @val);
    # Exact key+value hook takes precedence and short-circuits.
    if(my $unit = $R::Css::Type::KeyVal{$key}{$val}) {
        $_ = $val;
        $R::Css::Type::k = $norm;
        return $unit->($self);
    }
    # Per-key value hook (may rewrite/multiply the value).
    if(my $unit = $R::Css::Type::Type{$key}) {
        $_ = $val;
        $R::Css::Type::k = $norm;
        @val = $unit->($self);
    }
    # Per-key key hook (may rename/multiply the property name).
    if(my $onkey = $R::Css::Type::Key{$key}) {
        $_ = $val;
        $R::Css::Type::k = $norm;
        @key = $onkey->($self);
    }
    @key = $norm if !@key;
    @val = $val if !@val;
    my @out;
    for my $k (@key) {
        push @out, "$k: $_" for @val;
    }
    join ";\n", @out;
}
# Render a pool (default: the pool on top of the stack) to css text.
# @keyframes pools are duplicated once per vendor prefix before the
# standard block.
sub stringify {
    my ($self, $prince) = @_;
    local $_;
    my @f;   # collected output fragments
    my @prince = $prince //= $self->serf;
    if($prince->{keyframes}) {
        # Emit @-webkit-keyframes / @-o- / @-moz- / @-ms- variants.
        my $intro = substr $prince->{intro}, 1;   # drop the leading "@"
        for my $vendor (qw/webkit o moz ms/) {
            unshift @prince, {%$prince, intro=>"\@-$vendor-$intro"};
        }
    }
    for $prince (@prince) {
        push @f, "/* $prince->{name} */\n" if $prince->{name};
        push @f, $prince->{intro}, " {\n" if $prince->{intro};
        for my $maxim (@{$prince->{maximus}}) {
            # Plain strings were inserted verbatim (insert/include/rem).
            push(@f, $maxim), next if !ref $maxim;
            # Nested pools recurse.
            push(@f, $self->stringify($maxim)), next if exists $maxim->{maximus};
            # Single-declaration rules are rendered on one line.
            my $sep = @{$maxim->{order}} == 1? "": "\n";
            push @f, $maxim->{the}, " {$sep";
            for my $m (@{$maxim->{order}}) {
                my ($key, $val, $important) = @$m;
                push @f, $self->xak($key, $val);
                push @f, "!important" if $important;
                push @f, $sep? ";\n": "";
            }
            push @f, "}\n";
        }
        push @f, "\n}\n" if $prince->{intro};
    }
    join "", @f;
}
# Convert a camelCase property name to css-case in place: the second
# argument is modified through @_ aliasing, and undef is returned.
sub normalize {
    $_[1] =~ s/([A-Z])/ '-' . lc $1 /ge;
    undef
}
# Return the pool currently on top of the stack (undef when empty).
sub serf {
    my ($self) = @_;
    return $self->{serf}[-1];
}
# With an argument: open a new rule for selector $the in the current
# pool and make it the current rule. Without arguments: return the
# current rule.
sub the {
    if(@_>1) {
        my ($self, $the) = @_;
        my $serf = $self->serf;
        # The rule is reachable by position (maximus), by selector
        # (maxim) and as the current rule (the).
        push @{$serf->{maximus}}, $serf->{maxim}{$the} = $serf->{the} = { the => $the };
        $self
    } else {
        shift->serf->{the};
    }
}
# Open a new pool, optionally registered under $name in {dom}. The new
# pool is nested inside the current pool (if any) and pushed onto the
# stack so subsequent rules land in it.
sub prince {
    my ($self, $name) = @_;
    my $prince = { maxim => {}, maximus => [] };
    $self->{dom}{$name} = $prince, $prince->{name} = $name if defined $name;
    if($self->serf) {
        # Nested pool: drop the current rule and embed the new pool.
        $self->serf->{the} = undef;
        push @{$self->serf->{maximus}}, $prince;
    }
    push @{$self->{serf}}, $prince;
    $self
}
# Close the current pool. Animation frame pools (marked with animedia)
# are closed together with their enclosing keyframes pool.
sub end {
    my ($self) = @_;
    die "end: стек пуст" if @{$self->{serf}} == 0;
    if( pop(@{$self->{serf}})->{animedia} ) { # animedia marks an animation frame
        die "end: стек пуст" if @{$self->{serf}} == 0;
        pop @{$self->{serf}};
    }
    $self
}
# Switch output to the named pool: subsequent rules are written into it
# until a matching end().
sub gosub {
    my ($self, $prince) = @_;
    my $serf = $self->{dom}{$prince};
    die "нет prince($prince)" if !$serf;
    push @{$self->{serf}}, $serf;
    $self
}
# Start a fresh pool ("<name>.copy") that reuses the media query of the
# named pool. (This intentionally shadows Perl's goto builtin inside
# this package; it is only ever called as a method.)
sub goto {
    my ($self, $prince) = @_;
    my $serf = $self->{dom}{$prince};
    die "нет prince($prince)" if !$serf;
    die "нет медия-запроса в prince($prince)" if !$serf->{intro};
    $self->prince("$prince.copy")->intro($serf->{intro})
}
# Terminator for method chains; intentionally does nothing.
sub final {}
# Extend the current rule with the declarations of an existing rule
# (selector $the) taken from pool $prince_name (default "main").
sub like {
    my ($self, $the, $prince_name) = @_;
    $prince_name //= "main";
    my $prince = $self->{dom}{$prince_name};
    # Report the requested name: the old code interpolated $prince
    # *after* the failed lookup, so the message was always empty.
    die "нет неймспейса $prince_name" unless $prince;
    die "не найдено правило" unless exists $prince->{maxim}{$the};
    my $from = $prince->{maxim}{$the};
    # Use a distinct variable for the current rule; `my $the` here used
    # to mask the selector parameter and warn about redeclaration.
    my $target = $self->the;
    push @{$target->{order}}, @{$from->{order}};
    %{$target->{set}} = (%{$target->{set}}, %{$from->{set}});
    $self
}
# Insert the contents of a file (or glob of files) verbatim into the
# current pool.
sub include {
    my ($self, $file) = @_;
    push @{$self->serf->{maximus}}, $app->file->from($file)->glob->reads("\n");
    $self
}
# Insert raw text verbatim.
sub insert {
    my ($self, $insert) = @_;
    push @{$self->serf->{maximus}}, $insert;
    $self
}
# Insert raw text followed by a newline.
sub insertln {
    my ($self, $insert) = @_;
    push @{$self->serf->{maximus}}, $insert . "\n";
    $self
}
# Insert a css comment; embedded "*/" is defused so the comment cannot
# terminate early.
sub rem {
    my ($self, $rem) = @_;
    local ($`, $');
    $rem =~ s!\*/!\* /!g;
    $self->insert("/* " . $rem . " */")
}
# Set the wrapper header line (e.g. an at-rule) for the current pool.
sub intro {
    my ($self, $intro) = @_;
    $self->serf->{intro} = $intro;
    $self
}
# Wrap the current pool in a @media query.
sub media {
    my ($self, $query) = @_;
    #$query = "($query)" if $query !~ /^\(/;
    $self->intro( "\@media $query" );
    $self
}
# Append key => value declarations to the current rule; undef values
# are skipped. Both the lookup hash (set) and the ordered declaration
# list (order) are maintained.
sub set {
    my $self = shift;
    my $the = $self->the;
    my $set = $the->{set} //= {};
    my $order = $the->{order} //= [];
    for(my $i=0; $i<@_; $i+=2) {
        my ($key, $val) = @_[$i, $i+1];
        push @$order, [$key, ($set->{$key} = $val)] if defined $val;
    }
    $self
}
# Overwrite declarations of the current rule in place: an existing key
# keeps its position in the order list (the most recent occurrence is
# updated); a new key is appended.
sub reset {
    my $self = shift;
    my $the = $self->the;
    my $set = $the->{set} //= {};
    my $order = $the->{order} //= [];
    for(my $i=0; $i<@_; $i+=2) {
        my ($key, $val) = @_[$i, $i+1];
        if(exists $set->{$key}) {
            # Walk backwards so the most recent occurrence wins.
            for(my $j=$#$order; $j>=0; $j--) {
                $order->[$j][1] = $val, last if $order->[$j][0] eq $key;
            }
        } else {
            push @$order, [$key, $val] if defined $val;
        }
        $set->{$key} = $val;
    }
    $self
}
# Add to a declaration instead of replacing it: repeated keys collect
# their values into an arrayref (joined later by xak with the key's
# separator).
sub add {
    my $self = shift;
    my $the = $self->the;
    my $set = $the->{set} //= {};
    my $order = $the->{order} //= [];
    for(my $i=0; $i<@_; $i+=2) {
        my ($key, $val) = @_[$i, $i+1];
        next if !defined $val;
        if(exists $set->{$key}) {
            if(ref $set->{$key} eq "ARRAY") { push @{$set->{$key}}, $val; }
            elsif(!defined $set->{$key}) { $self->reset( $key => $val ) }
            else { $self->reset( $key => [$set->{$key}, $val] ) }
        }
        else {
            $self->reset($key => $val)
        }
    }
    $self
}
# возвращает по ключу значение, а если его нет - устанавливает
sub get {
my ($self, $key, $val) = @_;
my $v = $self->the->{set}{$key};
if(!defined $v) {
$self->reset($key => $val) if defined $val;
$val;
}
else {
$v
}
}
# возвращает последний в массиве, используется с add
sub ret {
my ($self, $key, $val) = @_;
my $v = $self->the->{set}{$key};
if(ref $v eq "ARRAY") {
$v->[$#$v];
}
elsif(defined $v) {
$v
}
else {
$self->add($key => $val) if defined $val;
$val
}
}
# устанавливает последний
sub top {
my ($self, $key, $val) = @_;
my $v = $self->the->{set}{$key};
if(ref $v eq "ARRAY") { $v->[$#$v] = $val }
else { $self->reset($key => $val) }
$self->get($key) if $key eq "x";
$self
}
# проверяет на существование ключ в the
sub exists {
my ($self, $key, %opt) = @_;
#if($opt{media})
#if($opt{the})
# должен пробегать по всем селекторам, содержащим это
exists $self->the->{set}{$key}
}
# устанавливает, если ещё не установлено
sub setdefault {
my $self = shift;
for(my $i=0; $i<@_; $i+=2) {
my ($key, $val) = @_[$i, $i+1];
$self->set($key => $val) if !$self->exists($key);
}
$self
}
# всем ключам в текущем the делает important
# если указан номер, то important делается только n ключам
sub important {
my ($self, $n) = @_;
for my $set (reverse @{$self->the->{order}}) {
$set->[2] = 1;
last if --$n == 0;
}
$self
}
################ размеры
# высота строки
sub line {
my ($self, $val) = @_;
$self->set(lineHeight => $val)
}
# size width, height
# можно использовать undef
sub size {
my ($self, $w, $h) = @_;
$self->set(width=>$w, height=>$h);
}
# устанавливает максимальный размер
sub max {
my ($self, $w, $h) = @_;
$self->set(maxWidth=>$w, maxHeight=>$h);
}
# устанавливает минимальный размер
sub min {
my ($self, $w, $h) = @_;
$self->set(minWidth=>$w, minHeight=>$h);
}
# устанавливает смещение верхнего левого угла
sub move {
my ($self, $w, $h) = @_;
$self->set(marginLeft=>$w, marginTop=>$h);
}
# устанавливает смещение нижнего правого угла
sub moveoff {
my ($self, $w, $h) = @_;
$self->set(marginRight=>$w, marginBottom=>$h);
}
# устанавливает смещения верхнего левого угла. Зависит от позиции: relative, absolute, fixed
sub locate {
my ($self, $w, $h) = @_;
$self->set(left=>$w, top=>$h);
}
# устанавливает смещения нижнего правого угла. Зависит от позиции: relative, absolute, fixed
sub locateoff {
my ($self, $w, $h) = @_;
$self->set(right=>$w, bottom=>$h);
}
# устанавливает внешние поля
sub offset {
my ($self, $x, $y, $x2, $y2) = @_;
if(@_==2) {
$self->set(margin => $x);
} elsif(@_==3) {
$self->set(margin => "$y $x");
}
else {
$self->set(margin => "$y $x2 $y2 $x");
}
$self
}
# устанавливает внутренние поля
sub field {
my ($self, $x, $y, $x2, $y2) = @_;
if(@_==2) {
$self->set(padding => $x);
} elsif(@_==3) {
$self->set(padding => "$y $x");
}
else {
$self->set(padding => "$y $x2 $y2 $x");
}
$self
}
# устанавливает поля верхнего левого угла
sub shift {
my ($self, $w, $h) = @_;
$self->set(paddingLeft=>$w, paddingTop=>$h);
}
# устанавливает поля нижнего правого угла
sub unshift {
my ($self, $w, $h) = @_;
$self->set(paddingRight=>$w, paddingBottom=>$h);
}
################ трансформации 2D
# сдвиг
sub xy {
my ($self, $x, $y) = @_;
$x //= 1.1;
$self->unitSize($x, $y);
$x .= ", $y" if defined $y;
$self->add(transform => "translate($x)")
}
# поворот
sub angle {
my ($self, $x) = @_;
$x //= "30deg";
$self->unitDeg($x);
$self->add(transform => "rotate($x)")
}
# масштаб
sub dim {
my ($self, $x, $y) = @_;
$x //= 1.1;
$x .= ", $y" if defined $y;
$self->add(transform => "scale($x)")
}
# скос
sub skew {
my ($self, $x, $y) = @_;
$x //= 1.1;
$y //= $x;
$self->unitDeg($x, $y);
$self->add(transform => "skew($x, $y)")
}
################ трансформации 3D
# смещение translate
sub x {
my ($self, $x) = @_;
return $self if !defined $x;
$self->unitSize($x);
$self->add(transform => "translateX($x)");
$self->setdefault(perspective => '70em');
}
# смещение translate
sub y {
my ($self, $y) = @_;
return $self if !defined $y;
$self->unitSize($y);
$self->add(transform => "translateY($y)");
$self->setdefault(perspective => '70em');
}
# translateZ: shift along the z axis (sets a default perspective so the
# 3D transform is visible).
sub z {
    # A duplicate `my ($self) = @_;` line used to precede this one,
    # masking $self and emitting a redeclaration warning - removed.
    my ($self, $z) = @_;
    return $self if !defined $z;
    $self->unitSize($z);
    $self->add(transform => "translateZ($z)");
    $self->setdefault(perspective => '70em');
}
# поворачивает по координатам
sub spin {
my ($self, $x, $y, $z) = @_;
$self->unitDeg($x)->add(transform => "rotateX($x)") if defined $x;
$self->unitDeg($y)->add(transform => "rotateY($y)") if defined $y;
$self->unitDeg($z)->add(transform => "rotateZ($z)") if defined $z;
$self->setdefault(perspective => '70em') if defined $x or defined $y or defined $z;
$self
}
# масштабирует
sub scale {
my ($self, $x, $y, $z) = @_;
$self->add(transform => "scaleX($x)") if defined $x;
$self->add(transform => "scaleY($y)") if defined $y;
$self->add(transform => "scaleZ($z)") if defined $z;
$self->setdefault(perspective => '70em');
$self
}
# указывает оси вокруг которых поворачивать:
# x: <длина> | <проценты> | left | center | right
# y: <длина> | <проценты> | top | center | bottom
# z: <длина>
sub axis {
my ($self, $x, $y, $z) = @_;
$x //= .5;
$y //= .5 if defined $z;
$z = "${z}em" if Num $z;
$self->unitSize($x, $y);
$self->set(transformOrigin=> defined($z)? "$x $y $z": defined($y)? "$x $y": $x)
}
# задаёт перспективу трансформации
sub perspective {
my ($self, $perspective, $x, $y) = @_;
$perspective = "${perspective}em" if Num $perspective;
$self->set(perspective => $perspective) if defined $perspective;
if(defined $x or defined $y) {
$x //= .5;
$self->unitSize($x, $y);
$self->set(perspectiveOrigin=>"$x $y");
}
$self
}
# Дочерние элементы будут отображаться в 3D-пространстве
sub child3d {
my ($self) = @_;
$self->set(transformStyle => "preserve-3d")
}
# Дочерние элементы лежат в той же плоскости, что и их родитель
sub child2d {
my ($self) = @_;
$self->set(transformStyle => "flat")
}
################ шрифт
# устанавливает шрифт
# sub font {
# my ($self, $size) = @_;
# $self->unitSize($size);
# $self->set(font=>$size);
# $self
# }
# подчёркнутый текст
sub underline {
my ($self, $x) = @_;
$x //= 1;
$self->set(textDecoration=> $x? "underline": "none");
}
# надчёркнутый текст
sub overline {
my ($self, $x) = @_;
$x //= 1;
$self->set(textDecoration=> $x? "overline": "none");
}
# Strike-through text; strike(0) turns the decoration off.
sub strike {
    my ($self, $x) = @_;
    $x //= 1;
    # "line-throught" was a typo: the css keyword is "line-through".
    $self->set(textDecoration=> $x? "line-through": "none");
}
# жирность
sub bold {
my ($self, $bold) = @_;
$bold = 700 if @_==1;
$self->set(fontWeight => $bold);
}
# наклон
sub italic {
my ($self, $italic) = @_;
$italic //= -1;
$self->set(fontStyle => $italic==0? 'normal': $italic < 0? 'italic': 'oblique');
}
# шрифт sans-serif - обычный шрифт без засечек
sub sans {
my ($self) = @_;
$self->set(fontFamily => 'sans-serif');
}
# шрифт serif - обычный шрифт c засечками
sub serif {
my ($self) = @_;
$self->set(fontFamily => 'serif');
}
# шрифт фиксированной ширины
sub monospace {
my ($self) = @_;
$self->set(fontFamily => 'monospace');
}
# шрифт, имитирующий почерк
sub cursive {
my ($self) = @_;
$self->set(fontFamily => 'cursive');
}
# декоративный шрифт, для заглавий и т.д.
sub fantasy {
my ($self) = @_;
$self->set(fontFamily => 'fantasy');
}
################ позиции
# позиционировать абсолютно
sub absolute {
my ($self) = @_;
$self->set(position => 'absolute');
}
# позиционировать фиксированно
sub fixed {
my ($self) = @_;
$self->set(position => 'fixed');
}
# позиционировать относительно
sub relative {
my ($self) = @_;
$self->set(position => 'relative');
}
# позиционировать обычно
sub static {
my ($self) = @_;
$self->set(position => 'static');
}
# делает элемент "плавающим"
sub float {
my ($self, $n) = @_;
$n //= -1;
$self->set(float => $n == 0? 'none': $n<0? 'left': 'right')
}
# контролирует скролл и отображение текста
sub scroll {
my ($self) = @_;
$self->set(overflow=>'auto');
}
################ отображение
# блочный элемент
sub block {
my ($self) = @_;
$self->set(display => 'block');
}
# строчный элемент
sub inline {
my ($self) = @_;
$self->set(display => 'inline');
}
# строчный блок
sub iblock {
my ($self) = @_;
$self->set(display => 'inline-block');
}
# скрытый элемент
sub hide {
my ($self) = @_;
$self->set(display => 'none');
}
# прозрачность
sub opacity {
my ($self, $opacity) = @_;
$self->set(opacity => $opacity // .7);
}
# указатель - курсор
sub pointer {
my ($self, $pointer) = @_;
$self->set(cursor => $pointer // "pointer");
}
################ цвета
# цвета
sub color {
my ($self, $fg, $bg, $br) = @_;
$self->set(color=>$fg, backgroundColor=>$bg);
$self->set(border=>"solid 1px $br") if $br;
$self
}
################ границы
# скругляет углы
sub radius {
my ($self, $x1, $y1, $x2, $y2) = @_;
if(@_ == 1) {
#$x1 = ;
}
$self->set(borderRadius => "$x1 $y1 $x2 $y2");
}
# граница - цвет
sub limit {
my ($self, ) = @_;
$self
}
################ тени
# добавляет тень блоку
sub shadow {
my $self = shift;
my @x;
if($_[0] eq "none") {
@x = "none";
}
elsif($_[0] !~ /^[a-z]/) {
my ($x, $y, $blur, $spread, $color, $in) = @_;
$x //= ".3em";
$y //= ".3em";
$self->unitSize($x, $y);
push @x, $x, $y;
$blur //= ".3em";
$self->unitSize($blur), push @x, $blur;
$self->unitSize($spread), push @x, $spread if $spread;
push @x, $color if $color;
unshift @x, "inset" if $in;
}
else {
my %x = @_;
push @x, $x{x} // ".3em", $x{y} // ".3em";
my $blur = $x{blur} // ".3em";
$self->unitSize($blur);
push @x, $blur;
$self->unitSize($x{stretch}), push @x, $x{stretch} if $x{stretch}; # растягивает тень +- значение
push @x, $x{color} if $x{color};
unshift @x, "inset" if exists $x{in};
}
$self->add(boxShadow => join " ", @x)
}
# добавляет тень текста
sub shade {
my $self = shift;
my @x;
if($_[1] =~ /^\d/) {
my ($x, $y, $blur, $color) = @_;
$x //= "1px";
$y //= "1px";
$blur //= "1px";
$x .= "px" if Num $x;
$y .= "px" if Num $y;
$blur .= "px" if Num $blur;
push @x, $x, $y, $blur;
push @x, $color if $color;
}
else {
my %x = @_;
push @x, $x{x}//"1px", $x{y}//"1px";
$self->unitSize($x{blur}), push @x, $x{blur} if $x{blur};
push @x, $x{color} if $x{color};
}
$self->add(textShadow => join " ", @x)
}
################ рисование градиентов черепашкой (пришло из языка LOGO)
# turtle, tortoise - черепаха, little turtle - черепашка
# angle - угол
# line - пройденный путь: цвет позиция, ...
# x, y - позиция точки на которую стала черепашка
# http://frontender.info/dig-deep-into-css-gradients/
# поворачивает черепашку влево на 0-360: 0-1
sub left {
my ($self, $angle) = @_;
$self->unitDeg($angle);
$self->_add_linear($angle);
$self->the->{pen}{angle} += $angle;
$self
}
# поворачивает черепашку вправо на 0-360: 0-1
sub right {
my ($self, $angle) = @_;
$self->unitDeg($angle);
$self->_add_linear($angle);
$self->the->{pen}{angle} -= $angle;
$self
}
# Flush the accumulated color stops into a linear-gradient background.
# Called by left()/right() before the heading changes; a zero turn
# means the direction is unchanged and nothing needs flushing.
sub _add_linear {
    my ($self, $angle) = @_;
    if($angle != 0 && (my $line = $self->the->{pen}{line}) ) {
        # The turtle's heading is stored in {pen}{angle} by left() and
        # right(); the old code read {angle} at the rule's top level,
        # which was never written and so always yielded 0.
        $self->add("background", "linear-gradient(" . ($self->the->{pen}{angle} // 0) . "deg," . join(", ", @$line) . ") ");
        delete $self->the->{pen}{line};
    }
    $self
}
# идти вперёд на значение. Указывается в стандартных единицах
sub forward {
my ($self, $n) = @_;
$self->unitSize($n);
my $pen = $self->the->{pen};
push @{ $pen->{line} }, ($pen->{color} || "transparent") . " " . $n;
# рассчитаем точку в которой находится перо
#$pen->{x} = sin
#$pen->{y} =
$self
}
# рисует радиальный градиент. Но для этого надо сделать два airward-а, чтобы через эти две точки провести окружность
sub airward {
my ($self) = @_;
$self
}
# устанавливает цвет градиента
# если цвет не указан - поднимает перо (делает цвет прозрачным)
sub pen {
my ($self, $color) = @_;
$self->the->{pen}{color} = $color;
$self
}
################ переходы
# включает переход
sub to {
my ($self, $param, $stand) = @_;
$param //= "all";
$stand //= .4;
local ($`, $', $&);
$param =~ s/[A-Z]/"-" . lc $&/ge;
$self->add(transition => "$param ${stand}s")
}
# продолжительность перехода
sub interval {
my ($self, $stand) = @_;
$stand //= .4;
my @x = split /\s+/, $self->ret("transition", "all .4s");
$x[1] = "${stand}s";
$self->top(transition => join " ", @x)
}
# функция перехода
sub ease {
my ($self, $ease) = @_;
$ease = @_>2? "cubic-bezier(".join(",", @_[1..$#_]).")":
!$ease? "ease":
$ease eq "linear"? $ease:
"ease-$ease";
my @x = split /\s+/, $self->ret("transition", "all .4s");
$x[2] = $ease;
$self->top(transition => join " ", @x)
}
# задержка перехода
sub delay {
my ($self, $delay) = @_;
$delay //= .2;
my @x = split /\s+/, $self->ret("transition", "all .4s");
$x[3] = "${delay}s";
$self->top(transition => join " ", @x)
}
################ анимация
# объявляет анимацию
sub keyframes {
my ($self, $name) = @_;
$self->prince("$name")->intro("\@keyframes \"$name\"");
$self->serf->{keyframes} = 1;
$self->frame(0)
}
# шаг анимации от 0 до 1
sub frame {
my ($self, $step) = @_;
if($self->serf->{animedia}) {
pop @{$self->{serf}};
}
$self->prince->intro($step * 100 . "%");
$self->serf->{animedia} = 1;
$self
}
# Attach a named animation to the current rule.
# Options: speed (duration in seconds, default 1), ease (a timing name,
# or space-separated cubic-bezier control points), delay (seconds),
# count (iterations), direct (animation-direction, default "alternate").
sub animation {
    my ($self, $name, %opt) = @_;
    # animation-name animation-duration animation-timing-function animation-delay animation-iteration-count animation-direction
    my $speed = delete($opt{speed}) // 1;
    my $ease = delete $opt{ease};
    # Fixes: a stray leftover fragment `/\s/? "cubic-bezier(...)":` sat
    # before this line, matching against $_ and discarding its result;
    # the /\s/ test and split also ran against $_ instead of $ease.
    # NOTE(review): unlike ease() above, "linear" becomes "ease-linear"
    # here - confirm whether that is intended.
    $ease = $ease
        ? ($ease =~ /\s/ ? "cubic-bezier(" . join(",", split /\s+/, $ease) . ")" : "ease-$ease")
        : "ease";
    my $delay = delete($opt{delay}) // 0;
    my $count = delete($opt{count}) // 1;
    my $direct = delete($opt{direct}) // "alternate";
    die "неверные опции в анимации: " . join ", ", keys %opt if keys %opt;
    $self->add(animation => "$name ${speed}s $ease ${delay}s $count $direct")
}
# Suspend the animation on this rule.
sub pause {
    my ($self) = @_;
    # The css keyword is "paused", not "pause".
    $self->set(animationPlayState => "paused")
}
# запрещает прерывать анимацию
sub running {
my ($self) = @_;
$self->set(animationPlayState => "running")
}
################ события
# хелпер - инвертирует показатель
sub _hover {
my ($self, $hover) = @_;
$self->serf->{the} = join ",", map { s/$hover\b//? $_: "$_$hover" } split /\s*,\s*/, $self->serf->{the};
$self
}
# добавляет ко всем в the :hover
sub hover {
my ($self) = @_;
$self->_hover(":hover");
}
# добавляет ко всем в the :active
sub active {
my ($self) = @_;
$self->_hover(":active");
}
# добавляет ко всем в the :focus
sub focus {
my ($self) = @_;
$self->_hover(":focus");
}
# добавляет ко всем в the :checked
sub checked {
my ($self) = @_;
$self->_hover(":checked");
}
################ конвертеры
# конвертирует css в R::Css
sub css2r {
my ($self, $css) = @_;
my @st;
my $shl = "";
my $push = sub { push @st, shift; $shl .= "\t" };
my $pop = sub { $shl = substr $shl, 1; pop @st };
my $top = sub { $st[$#st] };
my $replace = sub {
$+{rem}? do { my $r = $+{rem}; $r =~ s/^/# /mg; $r }:
$+{prince}? do { my $o = "${shl}prince('$+{prince}')->\n"; $push->("prince"); $o }:
$+{the}? do { &$pop if &$top eq "the"; my $o = "${shl}the(\"$+{the}\")->set(\n"; $push->("the"); $o }:
$+{end}? do {
my $o="";
if(&$top eq "key") { $o="\",\n"; &$pop }
if(&$top eq "the") { &$pop; "${o}$shl)->" }
elsif(&$top eq "prince") { &$pop; "$o${shl}end->\n" }
elsif(@st == 0) { $o }
else { $o . $& }
}:
$+{end_val}? do { if(&$top eq "key") { &$pop; "\",\n" } else {$&} }:
$+{key}? do { my $s=$shl; $push->("key"); my $key=$+{key}; $key=~s/-(\w)/uc $1/ge; "${s}$key => \"" }:
$+{str}? do { #my $s=$+{str}; $s =~ s/^.(.*).$/$1/; $s
$&
}:
$&;
};
$css =~ s!
(?P<str> "(?:\\"|[^"])+" | '(?:\\'|[^'])+' ) |
/\* (?P<rem>.*?) \*/ |
(?P<prince> \@[^\{\}]*?) \s* \{ |
(?P<the> [^\n]+?) \s* \{ |
(?P<end> \s* \}) |
(?P<key> [\-\w]+ ) \s*:\s* |
(?P<end_val> \s*;)
!
$replace->();
!gexsm;
$css
}
1; | darviarush/rubin-forms | lib/R/Css.pm | Perl | bsd-2-clause | 32,250 |
=pod
This script automates the downloading and setup of
a compatible wxWidgets version for Sherpa development.
Run as follows:
>perl wxsetup.pl
=cut
use strict;
use LWP::Simple;
###########################################################
# Settings
my $sWxWidgetsSourceFilename = 'wxWidgets-2.6.2.tar.bz2';
my $sSourceUrl = 'http://biolpc22.york.ac.uk/pub/2.6.2/wxWidgets-2.6.2.tar.bz2';
my $sSourceTreeRoot = 'wxWidgets-2.6.2';
my $sBuildType = 'gtk2'; # also used as the name of the build directory
###########################################################
# Predeclarations
sub IsWxWidgetsInstalled();
sub DownloadwxWidgetsSource();
sub Configure();
sub Make();
###########################################################
# Main program
# Don't buffer output
local $| = 1;
# Remember the starting directory; `pwd` output ends in a newline.
my $sPath = `pwd`;
$sPath =~ s/[\r\n]+$//go;
print <<EOT;
$0
This script will download, compile and install a suitable version
of the wxWidgets libraries (if they are not already available).
EOT
# Nothing to do when a suitable unicode build is already the default.
my $bIsWxWidgetsInstalled = IsWxWidgetsInstalled();
exit
    if($bIsWxWidgetsInstalled);
# Download the tarball only when it is not already present.
if(!-e $sWxWidgetsSourceFilename)
{
    DownloadwxWidgetsSource();
}
print "Source code tarball is '$sPath/$sWxWidgetsSourceFilename'\n";
# Extract once; tar -k never overwrites an existing tree.
if(!-d $sSourceTreeRoot)
{
    print "Uncompressing wxWidgets source to '$sSourceTreeRoot'\n";
    `tar -xkjf $sWxWidgetsSourceFilename`;
}
print "Source code tree is '$sPath/$sSourceTreeRoot/'\n";
chdir $sSourceTreeRoot;
if(!-d $sBuildType)
{
    mkdir $sBuildType;
}
print "Build directory is '$sPath/$sSourceTreeRoot/$sBuildType/'\n";
chdir $sBuildType;
# Out-of-tree build: configure and compile inside the build directory.
Configure();
Make();
print "Done!\n";
# Subroutines
=pod
Returns true if a wxWidgets version >= 2.6.2 with a unicode build
is installed and is the default wx-config configuration.
=cut
sub IsWxWidgetsInstalled()
{
    # See if wx-config exists on the PATH.
    my $sWxConfig = `which wx-config`;
    return 0
        if($sWxConfig !~ /(\/[a-zA-Z\s_\.]+)+\/wx-config/o);
    my $sWxWidgetsVersions = `wx-config --list`;
    # Pick the default configuration out of the `wx-config --list` output.
    return 0
        if($sWxWidgetsVersions !~ /Default\sconfig\sis\s+([^\r\n]+)/o);
    my $sDefault = $1;
    # The default config name embeds "-unicode" for unicode builds.
    if($sDefault !~ /-unicode/o)
    {
        print <<EOT;
You have wxWidgets installed, but your default set of libraries is
not a unicode build.
EOT
        return 0;
    }
    print <<EOT;
OK, wxWidgets unicode build is installed and is the default for wx-config.
There is nothing to do.
EOT
    return 1;
}
# Fetches the wxWidgets source tarball from the configured URL into the
# current directory (via LWP::Simple::mirror), dying on any HTTP failure.
sub DownloadwxWidgetsSource()
{
	print "Downloading wxWidgets source from '$sSourceUrl'\n";
	my $iStatus = mirror($sSourceUrl, $sWxWidgetsSourceFilename);
	if(is_error($iStatus))
	{
		die("Can't download source file '$sSourceUrl'");
	}
}
# Runs the wxWidgets 'configure' script from inside the build directory
# with the option set this project expects (shared, unicode, debug-enabled).
sub Configure()
{
	print "Running 'configure':\n";
	# --enable-unicode
	my $sCommand = <<EOT;
../configure --with-gtk \\
--enable-shared --enable-unicode \\
--enable-debug --enable-debug_flag --enable-debug_cntxt \\
--enable-exceptions \\
--enable-dynlib --enable-dynamicloader \\
--enable-xpm --enable-cmdline --with-gnomeprint --disable-metafile
EOT
	print "$sCommand\n\n";
	`$sCommand`;
}
# Builds the library and installs it system-wide.  'make install' runs
# through su, so the user is prompted for the root password.
sub Make()
{
print "Running 'make':\n";
# NOTE(review): backticks capture (and discard) the build output, so a
# long compile shows no progress, and the exit status is not checked.
`make`;
print "Running 'make install':\n";
print "Please type in your root password:\n";
`su -c 'make install'`;
}
| joeyates/sherpa | Development/wxsetup.pl | Perl | bsd-3-clause | 3,336 |
=head1 NAME
tconv_ext - tconv extended API
=head1 SYNOPSIS
#include <tconv.h>
tconv_t tconv_open_ext(const char *tocodes, const char *fromcodes, tconv_option_t *tconvOptionp);
void tconv_trace_on(tconv_t tconvp);
void tconv_trace_off(tconv_t tconvp);
void tconv_trace(tconv_t tconvp, const char *fmts, ...);
char *tconv_error_set(tconv_t tconvp, const char *msgs);
char *tconv_error(tconv_t tconvp);
char *tconv_fromcode(tconv_t tconvp);
char *tconv_tocode(tconv_t tconvp);
short tconv_helper(tconv_t tconvp,
void *contextp,
short (*producerp)(void *contextp, char **bufpp, size_t *countlp, short *eofbp),
short (*consumerp)(void *contextp, char *bufp, size_t countl, short eofb, size_t *resultlp)
);
=head1 DESCRIPTION
The tconv extended API provides more entry points to query or control how tconv behaves: tconv is a generic layer on top of iconv(), ICU, etc., so additional semantics are needed.
=head1 METHODS
=head2 tconv_open_ext
tconv_t tconv_open_ext(const char *tocodes, const char *fromcodes, tconv_option_t *tconvOptionp);
typedef void (*tconvTraceCallback_t)(void *userDatavp, const char *msgs);
typedef struct tconv_option {
tconv_charset_t *charsetp;
tconv_convert_t *convertp;
tconvTraceCallback_t traceCallbackp;
void *traceUserDatavp;
const char *fallbacks;
} tconv_option_t;
tconv supports two engine types: one for charset detection, one for character conversion. Each engine has its own option structure:
=over
=item charsetp
Describe charset engine options.
=item convertp
Describe convertion engine options.
=back
Logging is provided through the I<genericLogger> package, and the developer may provide a function pointer with an associated context:
=over
=item traceCallbackp
A function pointer.
=item traceUserDatavp
Function pointer opaque context.
=item fallbacks
Fallback charset when user gave none I<and> the guess failed.
=back
If C<tconvOptionp> is NULL, defaults will apply. Otherwise, if C<charsetp> is NULL charset defaults apply, if C<convertp> is NULL convertion defaults apply, and if C<traceCallbackp> is NULL, no logging is possible.
=head3 charset engine
A charset engine may support three entry points:
typedef void *(*tconv_charset_new_t) (tconv_t tconvp, void *optionp);
typedef char *(*tconv_charset_run_t) (tconv_t tconvp, void *contextp, char *bytep, size_t bytel);
typedef void (*tconv_charset_free_t)(tconv_t tconvp, void *contextp);
All entry points start with a C<tconvp> pointer (that they can use to trigger logging, error setting).
The I<new> is optional, have a pointer to an opaque (from tconv point of view) data area, and return a charset specific opaque context.
If I<new> is not NULL, then I<free> must not be NULL, and will be called with the charset specific context pointer returned by I<new>.
When I<new> is NULL, the charset specific context will be NULL.
The only required entry point is I<run>, with a pointer to bytes, and the number of bytes.
C<charsetp> must point to a structure defined as:
typedef struct tconv_charset {
enum {
TCONV_CHARSET_EXTERNAL = 0,
TCONV_CHARSET_PLUGIN,
TCONV_CHARSET_ICU,
TCONV_CHARSET_CCHARDET,
} charseti;
union {
tconv_charset_external_t external;
tconv_charset_plugin_t plugin;
tconv_charset_ICU_option_t *ICUOptionp;
tconv_charset_cchardet_option_t *cchardetOptionp;
} u;
} tconv_charset_t;
i.e. a charset engine can be of four types:
=over
=item TCONV_CHARSET_EXTERNAL
An I<external> charset engine type is a structure that give explicitly the three entry points described at the beginning of this section, and a pointer to an opaque charset specific option area. It is defined as:
typedef struct tconv_charset_external {
void *optionp;
tconv_charset_new_t tconv_charset_newp;
tconv_charset_run_t tconv_charset_runp;
tconv_charset_free_t tconv_charset_freep;
} tconv_charset_external_t;
=item TCONV_CHARSET_PLUGIN
The charset engine is dynamically loaded. A I<plugin> definition is:
typedef struct tconv_charset_plugin {
void *optionp;
char *news;
char *runs;
char *frees;
char *filenames;
} tconv_charset_plugin_t;
i.e. I<tconv> will use C<filenames> as the path of a shared library and will try to load it. C<optionp> is a pointer to a charset specific option area. I<tconv> will look to the three entry points named C<news>, C<runs> and C<frees>:
=over
=item news
If C<news> is NULL, environment variable C<TCONV_ENV_CHARSET_NEW>, else C<tconv_charset_newp> will be looked at.
=item runs
If C<runs> is NULL, environment variable C<TCONV_ENV_CHARSET_RUN>, else C<tconv_charset_runp> will be looked at.
=item frees
If C<frees> is NULL, environment variable C<TCONV_ENV_CHARSET_FREE>, else C<tconv_charset_freep> will be looked at.
=back
Please note that dynamically load is not always thread-safe, and I<tconv> will not try to adapt to this situation. Therefore, it is up to the caller to make sure that tconv_open_ext() is called within a context that is not affected by an eventual non-thread-safe workflow (e.g. typically within a critical section, or at program startup).
=item TCONV_CHARSET_ICU
ICU built-in, available when I<tconv> has been compiled with ICU. If I<tconv> has not been compiled with such support, C<TCONV_CHARSET_ICU> remain available, but using it will fail.
If C<ICUOptionp> is not NULL, it must be a pointer to a structure defined as:
typedef struct tconv_charset_ICU_option {
int confidencei;
} tconv_charset_ICU_option_t;
where C<confidencei> is the minimum accepted confidence level. If NULL, a default of 10 is used, unless the environment variable C<TCONV_ENV_CHARSET_ICU_CONFIDENCE> is set.
=item TCONV_CHARSET_CCHARDET
cchardet built-in, always available.
If C<cchardetOptionp> is not NULL, it must be a pointer to a structure defined as:
typedef struct tconv_charset_cchardet_option {
float confidencef;
} tconv_charset_cchardet_option_t;
where C<confidencef> is the minimum accepted confidence level. If NULL, a default of 0.4f is used. This can also be set via the environment variable C<TCONV_ENV_CHARSET_CCHARDET_CONFIDENCE>.
=back
=head3 convert engine
A convert engine may support three entry points:
typedef void *(*tconv_convert_new_t) (tconv_t tconvp, const char *tocodes, const char *fromcodes, void *optionp);
typedef size_t (*tconv_convert_run_t) (tconv_t tconvp, void *contextp, char **inbufsp, size_t *inbytesleftlp, char **outbufsp, size_t *outbytesleftlp);
typedef int (*tconv_convert_free_t)(tconv_t tconvp, void *contextp);
All entry points start with a C<tconvp> pointer.
The I<new> is optional, have a pointer to an opaque (from tconv point of view) data area, and return a convert specific opaque context.
If I<new> is not NULL, then I<free> must not be NULL, and will be called with the convert specific context pointer returned by I<new>.
When I<new> is NULL, the convert specific context will be NULL.
The only required entry point is I<run>, with additional parameters that are the iconv() semantics: pointers to
=over
=item a pointer to input bytes
=item number of input bytes
=item a pointer to output bytes
=item number of output bytes
=back
C<convertp> must point to a structure defined as:
typedef struct tconv_convert {
enum {
TCONV_CONVERT_EXTERNAL = 0,
TCONV_CONVERT_PLUGIN,
TCONV_CONVERT_ICU,
TCONV_CONVERT_ICONV
} converti;
union {
tconv_convert_external_t external;
tconv_convert_plugin_t plugin;
tconv_convert_ICU_option_t *ICUOptionp;
tconv_convert_iconv_option_t *iconvOptionp;
} u;
} tconv_convert_t;
i.e. a convert engine can be of four types:
=over
=item TCONV_CONVERT_EXTERNAL
An I<external> convert engine type is a structure that give explicitly the three entry points described above, and a pointer to an opaque convert specific option area. It is defined as:
typedef struct tconv_convert_external {
void *optionp;
tconv_convert_new_t tconv_convert_newp;
tconv_convert_run_t tconv_convert_runp;
tconv_convert_free_t tconv_convert_freep;
} tconv_convert_external_t;
=item TCONV_CONVERT_PLUGIN
The convert engine is dynamically loaded. A I<plugin> definition is:
typedef struct tconv_convert_plugin {
void *optionp;
char *news;
char *runs;
char *frees;
char *filenames;
} tconv_convert_plugin_t;
i.e. I<tconv> will use C<filenames> as the path of a shared library and will try to load it. C<optionp> is a pointer to a convert specific option area. I<tconv> will look to the three entry points named C<news>, C<runs> and C<frees>:
=over
=item news
If C<news> is NULL, environment variable C<TCONV_ENV_CONVERT_NEW>, else C<tconv_convert_newp> will be looked at.
=item runs
If C<runs> is NULL, environment variable C<TCONV_ENV_CONVERT_RUN>, else C<tconv_convert_runp> will be looked at.
=item frees
If C<frees> is NULL, environment variable C<TCONV_ENV_CONVERT_FREE>, else C<tconv_convert_freep> will be looked at.
=back
Same remark about thread-safety as for the charset engine.
=item TCONV_CONVERT_ICU
ICU built-in, available when I<tconv> has been compiled with ICU. If I<tconv> has not been compiled with such support, C<TCONV_CONVERT_ICU> remain available, but using it will fail.
If C<ICUOptionp> is not NULL, it must be a pointer to a structure defined as:
typedef struct tconv_convert_ICU_option {
size_t uCharCapacityl;
short fallbackb;
int signaturei;
} tconv_convert_ICU_option_t;
containing:
=over
=item uCharCapacityl
ICU convertion always go through an UTF-16 internal buffer by design. C<uCharCapacityl> is the number of bytes of this internal intermediary buffer. The default is 4096, unless environment variable C<TCONV_ENV_CONVERT_ICU_UCHARCAPACITY> is set.
=item fallbackb
ICU convertion has an optional fallback mechanism for unknown characters. Default value is a false value, unless C<TCONV_ENV_CONVERT_ICU_FALLBACK> is set.
=item signaturei
A signature may be added or removed on demand. If C<signaturei> is lower than zero, signature is removed. If C<signaturei> is higher than zero, signature is added. Else ICU default will apply. Default is 0, unless C<TCONV_ENV_CONVERT_ICU_SIGNATURE> is set.
=back
=item TCONV_CONVERT_ICONV
iconv built-in, always available. No special option.
=back
=head2 tconv_trace_on
void tconv_trace_on(tconv_t tconvp);
Set tracing. Then any call to tconv_trace() will trigger a call to C<traceCallbackp> given in tconv_open_ext()'s option structure.
=head2 tconv_trace_off
void tconv_trace_off(tconv_t tconvp);
Unset tracing.
=head2 tconv_trace
void tconv_trace(tconv_t tconvp, const char *fmts, ...);
Formats a message string and call C<traceCallbackp> if tracing is on.
=head2 tconv_error_set
char *tconv_error_set(tconv_t tconvp, const char *msgs);
Set a string that should contain a more accurate description of the last error. Any engine should use this when a specific description exists. The default is to use the system's errno description.
=head2 tconv_error
char *tconv_error(tconv_t tconvp);
Get the latest value of specific error string.
=head2 tconv_fromcode
char *tconv_fromcode(tconv_t tconvp);
Get the source codeset.
=head2 tconv_tocode
char *tconv_tocode(tconv_t tconvp);
Get the destination codeset.
=head2 tconv_helper
short tconv_helper(tconv_t tconvp,
void *contextp,
short (*producerp)(void *contextp, char **bufpp, size_t *countlp, short *eofbp),
short (*consumerp)(void *contextp, char *bufp, size_t countl, short eofb, size_t *resultlp)
);
From an end-user point of view, the only important thing is to produce bytes that must be converted and to consume them. The C<tconv_helper> method completely hides all the iconv API subtleties, leaving only the two callbacks that are meaningful for the vast majority of applications. The parameters are:
=over
=item C<tconvp>
=item a producer
=item a consumer
=back
=head1 NOTES
=over
=item tracing
I<tconv> can trace itself, unless I<tconv> has been compiled with -DTCONV_NDEBUG, which is the default. When compiled without -DTCONV_NDEBUG, default tracing level is 0, unless environment variable C<TCONV_ENV_TRACE> is set and the value of the later is a true value.
=item specific error string
I<tconv> internally limit the length of such string to 1024 bytes (including NUL).
=item normalized charset name
A charset name contains only letters in the range [a-z0-9+.:].
=back
=head1 SEE ALSO
L<tconv(3)>, L<genericLogger(3)>
| jddurand/c-tconv | include/tconv/README.pod | Perl | mit | 12,911 |
# Warn the user when the XML::Simple prerequisite cannot be loaded.
my $bHaveXmlSimple = eval { require XML::Simple } && !$@;
unless ($bHaveXmlSimple) {
    print "------XML::Simple is not installed, please see installation instructions----\n";
}
| slac-pinger/pinger | testPrereqs.pl | Perl | cc0-1.0 | 143 |
package #
Date::Manip::Offset::off418;
# (The split "package" line above hides this module from the PAUSE indexer.)
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Fri Nov 21 11:03:46 EST 2014
# Data version: tzdata2014j
# Code version: tzcode2014j
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }
our ($Offset,%Offset);
# Release the package data at interpreter shutdown.
END {
undef $Offset;
undef %Offset;
}
# The UTC offset this module describes.
$Offset = '-10:29:20';
# Map: offset-slot index => list of zones that have used this offset.
%Offset = (
0 => [
'pacific/kiritimati',
],
);
1;
| nriley/Pester | Source/Manip/Offset/off418.pm | Perl | bsd-2-clause | 856 |
#-----------------------------------------------------------
# routes.pl
#
# Some malware is known to create persistent routes
#
# Change History:
# 20100817 - created
#
# Ref:
# http://support.microsoft.com/kb/141383
# http://www.symantec.com/security_response/writeup.jsp?docid=
# 2010-041308-3301-99&tabid=2
#
# copyright 2010 Quantum Analytics Research, LLC
#-----------------------------------------------------------
# RegRipper plugin: lists persistent network routes stored under
# ControlSet00N\Services\Tcpip\Parameters\PersistentRoutes in a System
# hive.  Some malware adds persistent routes here (see header references).
package routes;
use strict;
# Plugin metadata queried by the RegRipper framework via the accessors below.
my %config = (hive => "System",
osmask => 22,
hasShortDescr => 1,
hasDescr => 0,
hasRefs => 0,
version => 20100817);
sub getConfig{return %config}
sub getShortDescr {
return "Get persistent routes";
}
sub getDescr{}
sub getRefs {}
sub getHive {return $config{hive};}
sub getVersion {return $config{version};}
my $VERSION = getVersion();
# Entry point called by the RegRipper host with the path to the hive file.
# Reports one line per persistent route (address, netmask, gateway, metric).
# Parse::Win32Registry and ::logMsg/::rptMsg are provided by the host script.
sub pluginmain {
my $class = shift;
my $hive = shift;
::logMsg("Launching routes v.".$VERSION);
::rptMsg("routes v.".$VERSION); # banner
::rptMsg("(".getHive().") ".getShortDescr()."\n"); # banner
my $reg = Parse::Win32Registry->new($hive);
my $root_key = $reg->get_root_key;
# Code for System file, getting CurrentControlSet
my $current;
my $key_path = 'Select';
my $key;
if ($key = $root_key->get_subkey($key_path)) {
$current = $key->get_value("Current")->get_data();
my $ccs = "ControlSet00".$current;
my $sb_path = $ccs."\\Services\\Tcpip\\Parameters\\PersistentRoutes";
my $sb;
if ($sb = $root_key->get_subkey($sb_path)) {
::rptMsg($sb_path);
::rptMsg("LastWrite: ".gmtime($sb->get_timestamp()));
::rptMsg("");
my @vals = $sb->get_list_of_values();
if (scalar(@vals) > 0) {
# Each value NAME encodes one route as "addr,netmask,gateway,metric".
::rptMsg(sprintf "%-15s %-15s %-15s %-5s","Address","Netmask","Gateway","Metric");
foreach my $v (@vals) {
my ($addr,$netmask,$gateway,$metric) = split(/,/,$v->get_name(),4);
::rptMsg(sprintf "%-15s %-15s %-15s %-5s",$addr,$netmask,$gateway,$metric);
}
}
else {
::rptMsg($sb_path." has no values.");
}
}
else {
::rptMsg($sb_path." not found.");
}
}
else {
::rptMsg($key_path." not found.");
}
}
1;
#! /usr/bin/env perl
#
# Copyright 1999-2010 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
use strict;
use POSIX;
use Test;
# Path of the compiled test program this harness drives.
my $test_exec = './register-refresh-credentials-test';
# A GRAM contact string is mandatory; fail early when it is absent or
# empty.  Checking defined() first avoids an uninitialized-value warning
# (the original compared undef with eq, which still died but was unclean).
if (!defined($ENV{CONTACT_STRING}) || $ENV{CONTACT_STRING} eq "")
{
die "CONTACT_STRING not set";
}
my @tests;
my @todo;
# When VALGRIND is set, wrap the test binary in valgrind, appending any
# extra options from VALGRIND_OPTIONS.
my $valgrind = "";
if (exists $ENV{VALGRIND})
{
$valgrind = "valgrind --log-file=VALGRIND-register_register_refresh_credentials_test.log";
if (exists $ENV{VALGRIND_OPTIONS})
{
$valgrind .= ' ' . $ENV{VALGRIND_OPTIONS};
}
}
# Runs the refresh-credentials test binary against the given GRAM contact
# and records a Test.pm result: 'success' on exit code 0, otherwise the
# accumulated error text (which makes the comparison against 'success' fail).
sub refresh_creds_test
{
    my ($contact) = @_;
    my $errors = "";

    system("$valgrind $test_exec '$contact' >/dev/null");
    my $exit_code = $? >> 8;
    if ($exit_code != 0)
    {
        $errors .= "Test exited with $exit_code. ";
    }

    if ($errors eq "")
    {
        ok('success', 'success');
    }
    else
    {
        ok($errors, 'success');
    }
}
push(@tests, "refresh_creds_test('$ENV{CONTACT_STRING}');");
# Now that the tests are defined, set up the Test to deal with them.
plan tests => scalar(@tests), todo => \@todo;
foreach (@tests)
{
eval "&$_";
}
| globus/globus-toolkit | gram/jobmanager/source/test/client/register-refresh-credentials-test.pl | Perl | apache-2.0 | 1,638 |
% generated: 25 October 1989
% option(s):
%
% (deriv) log10
%
% David H. D. Warren
%
% symbolic derivative of log(log(log(log(log(log(log(log(log(log(x))))))))))
% Entry point for the benchmark harness.
top:-log10.
% Differentiate a ten-deep nested log of x; the resulting expression is
% discarded (only the computation matters for the benchmark).
log10 :- d(log(log(log(log(log(log(log(log(log(log(x)))))))))),x,_).
% d(Expr, Var, D): D is the symbolic derivative of Expr w.r.t. Var.
% One clause per operator; the cuts commit to the first matching structure.
d(U+V,X,DU+DV) :- !,
d(U,X,DU),
d(V,X,DV).
d(U-V,X,DU-DV) :- !,
d(U,X,DU),
d(V,X,DV).
% Product rule.
d(U*V,X,DU*V+U*DV) :- !,
d(U,X,DU),
d(V,X,DV).
% Quotient rule.
d(U/V,X,(DU*V-U*DV)/(^(V,2))) :- !,
d(U,X,DU),
d(V,X,DV).
% Power rule, integer exponents only.
d(^(U,N),X,DU*N*(^(U,N1))) :- !,
integer(N),
N1 is N-1,
d(U,X,DU).
d(-U,X,-DU) :- !,
d(U,X,DU).
d(exp(U),X,exp(U)*DU) :- !,
d(U,X,DU).
d(log(U),X,DU/U) :- !,
d(U,X,DU).
% The differentiation variable itself derives to 1 ...
d(X,X,1) :- !.
% ... and anything else (a constant) derives to 0.
d(_,_,0).
#!/usr/bin/perl
use strict;
use warnings;
package DnD;
my @winIDs;
my @winX;
my @winY;
my @winWidth;
my @winHeight;
my $lastTC = '';
# Find and focus the LinGogi window to drive.
# Accepts a friendly name ('Gogi', 'Desktop', 'TV', 'SmartPhone') and maps
# it onto the real window title before searching with xdotool.
# Records the window id, origin and size at the FRONT of the module-level
# lists (@winIDs, @winX, @winY, @winWidth, @winHeight), so index 0 always
# refers to the most recently grabbed window.
# Dies when the window is partially off-screen or xwininfo output cannot
# be parsed.
sub GrabWindow
{my $winName = shift;
if($winName eq 'Gogi')
{$winName = 'LinGogi'}
elsif($winName eq 'Desktop')
{$winName = 'LinGogi desktop UI \(X11\/SW\)'}
elsif($winName eq 'TV')
{$winName = 'LinGogi desktop UI \(X11\/VEMU2D\)'}
elsif($winName eq 'SmartPhone')
{$winName = 'LinGogi SmartPhone'}
# First title match wins; raise and focus it so synthesized events land.
my $winID = `xdotool search --limit 1 --name '$winName'`;
if($winID)
{system("xdotool windowraise $winID");
system("xdotool windowfocus $winID");}
my $build = `xdotool getwindowname $winID`;
my $winInfo = `xwininfo -id '$winID'`;
# Parse the absolute screen coordinates of the window's top-left corner.
if($winInfo =~ /Absolute upper-left X: (-?[0-9]{1,4})\n.*Absolute upper-left Y: (-?[0-9]{1,4})/)
{print "Window position: $1 : $2 \n";
# A negative coordinate means part of the window is off-screen, which
# would make the later screenshot pixel searches unreliable.
if($1 < 0 or $2 < 0)
{die("Make sure that LinGogi window fits on the screen.\n");}
unshift(@winX,$1);
unshift(@winY,$2);}
else
{die("Can't retreive window position.\n");}
if($winInfo =~ /Width: ([0-9]{1,4})\n.*Height: ([0-9]{1,4})/)
{unshift(@winWidth,$1);
unshift(@winHeight,$2);}
else
{die("Can't retreive window size.\n");}
unshift(@winIDs,$winID);
# Announce the target only for the first grabbed window.
if(int(@winIDs) == 1)
{print "Events will be sent to LinGogi window: \n$build";}
}
sub LoadPage
{if(Alive())
{my $tc = shift;
system("xdotool key F8");
system("xdotool type $tc");
system("xdotool key Return");
if($tc ne $lastTC)
{print "Testing TC ".$tc."\n";
$lastTC = $tc;}
}
}
sub SwitchWindows
{if(Alive())
{system("xdotool key alt+Tab");
@winIDs = reverse(@winIDs);}
WaitSeconds(0.5);}
sub SwitchTabs
{if(Alive())
{system("xdotool key ctrl+Tab");}
WaitSeconds(0.5);}
sub OpenNewTab
{if(Alive())
{system("xdotool key ctrl+t");}
WaitSeconds(0.5);}
sub CloseTab
{if(Alive())
{system("xdotool key ctrl+w");}
WaitSeconds(0.5);}
sub SelectAll
{if(Alive())
{system("xdotool key ctrl+a");}
}
sub TabNav
{my $n = shift;
if(Alive())
{for my $i (1..$n)
{system("xdotool key Tab");
WaitSeconds(0.1);}
}
}
sub ScanPage
{my ($drag,$wait,@abcd) = (0,3,0,0,0,0,0,0,0,0);
if(Alive())
{until($wait == 0 or $drag == 1)
{WaitSeconds(1);
$wait = $wait - 1;
system("scrot --focused tc$wait.bmp");
@abcd = FindColors("tc$wait",140,180,210,180,130,70);
if($abcd[0] != 0 and $abcd[4] != 0)
{$drag = 1;}
else
{if($wait != 0)
{print "Waiting for draggable area and/or dropzone to appear\n";}
else
{print "Can't locate draggable area and/or dropzone\n";}
}
}
}
for my $i (0..7)
{if($i%2 == 0)
{$abcd[$i] = $winX[0] + $abcd[$i];}
else
{$abcd[$i] = $winY[0] + $abcd[$i];}
}
return @abcd;}
sub FindDraggableArea
{my ($drag,$wait,@abcd) = (0,3,0,0,0,0);
if(Alive())
{until($wait == 0 or $drag == 1)
{WaitSeconds(1);
$wait = $wait - 1;
system("scrot --focused tc$wait.bmp");
@abcd = FindColors("tc$wait",140,180,210);
if($abcd[0] != 0)
{$drag = 1;}
else
{if($wait != 0)
{print "Waiting for draggable area to appear\n";}
else
{print "Can't locate draggable area.\n";}
}
}
}
for my $i (0..3)
{if($i%2 == 0)
{$abcd[$i] = $winX[0] + $abcd[$i];}
else
{$abcd[$i] = $winY[0] + $abcd[$i];}
}
return @abcd;}
sub FindDropzone
{my ($drop,$wait,@abcd) = (0,3,0,0,0,0);
if(Alive())
{until($wait == 0 or $drop == 1)
{WaitSeconds(1);
$wait = $wait - 1;
system("scrot --focused tc$wait.bmp");
@abcd = FindColors("tc$wait",180,130,70);
if($abcd[0] != 0)
{$drop = 1;}
else
{if($wait != 0)
{print "Waiting for dropzone to appear\n";}
else
{print "Can't locate dropzone.\n";}
}
}
}
for my $i (0..3)
{if($i%2 == 0)
{$abcd[$i] = $winX[0] + $abcd[$i];}
else
{$abcd[$i] = $winY[0] + $abcd[$i];}
}
return @abcd;}
sub Click
{my ($x,$y,$n) = @_;
if(Alive())
{system("xdotool mousemove --sync $x $y click --repeat $n --delay 50 1");}
}
sub Select
{my ($dragA,$dragB,$dragC,$dragD) = @_;
MouseDrag('select',$dragA,int(($dragB+$dragD)/2),$dragC,int(($dragB+$dragD)/2));}
sub DragAndDrop
{MouseDrag('simple',@_);}
sub DragAndScroll
{MouseDrag('scroll',@_);}
sub CancelDrag
{MouseDrag('cancel',@_,$_[0],$_[1]+120);}
sub DragBetweenWindows
{MouseDrag('crosswindow',@_);}
sub DragBetweenTabs
{MouseDrag('crosstab',@_);}
sub DragToPreviousTab
{MouseDrag('closetab',@_);}
sub DisturbDuringDrag
{MouseDrag('alert',@_);}
sub MouseDrag
{my ($drag,$dragX,$dragY,$dropX,$dropY) = @_;
if(Alive())
{my $dragBy;
my $dragTo;
if($drag ne 'select')
{print "Initiating drag and drop from location ".$dragX.":".$dragY." to location ".$dropX.":".$dropY."\n";}
system("xdotool mousemove --sync $dragX $dragY");
system("xdotool mousedown 1");
if($dropY > $dragY)
{for $dragTo ($dragY+1 .. $dropY)
{system("xdotool mousemove --sync $dragX $dragTo");
if($drag eq 'alert')
{Accept(1);}
}
}
else
{for $dragTo (1 .. $dragY - $dropY)
{$dragBy = $dragY - $dragTo;
system("xdotool mousemove --sync $dragX $dragBy");
if($drag eq 'alert')
{Accept(1);}
}
}
if($drag eq 'crosstab')
{SwitchTabs();}
elsif($drag eq 'closetab')
{CloseTab(); }
if($dropX > $dragX)
{for $dragTo ($dragX+1 .. $dropX)
{system("xdotool mousemove --sync $dragTo $dropY");
if($drag eq 'alert')
{Accept(1);}
}
}
else
{for $dragTo (1 .. $dragX - $dropX)
{$dragBy = $dragX - $dragTo;
system("xdotool mousemove --sync $dragBy $dropY");
if($drag eq 'alert')
{Accept(1);}
}
}
if($drag eq 'scroll')
{WaitSeconds(2);}
system("xdotool mousemove --sync $dropX $dropY");
if($drag eq 'alert')
{Accept(1);}
if($drag eq 'cancel')
{system("xdotool key Escape");}
elsif($drag eq 'bitmap')
{return CheckBitmap()}
system("xdotool mouseup 1");
if($drag eq 'alert')
{Accept(3);}
if($drag eq 'crosswindow')
{SwitchWindows();}
}
else
{if($drag eq 'bitmap')
{return (0);}
}
}
sub DragAround
{my ($dragX,$dragY,$dropX,$dropY) = @_;
if(Alive())
{my $dragBy;
my $dragTo;
print "Initiating drag and drop from location ".$dragX.":".$dragY." to location ".$dropX.":".$dropY."\n";
system("xdotool mousemove --sync $dragX $dragY");
system("xdotool mousedown 1");
for $dragTo (1 .. $dragY-1)
{$dragBy = $dragY - $dragTo;
system("xdotool mousemove --sync $dragX $dragBy");}
for $dragTo (1 .. $dragX)
{$dragBy = $dragX - $dragTo;
system("xdotool mousemove --sync $dragBy 1");}
for $dragTo (1 .. $dropY)
{system("xdotool mousemove --sync 1 $dragTo");}
for $dragTo (1 .. $dropX)
{system("xdotool mousemove --sync $dragTo $dropY");}
system("xdotool mousemove --sync $dropX $dropY");
system("xdotool mouseup 1");}
}
sub DragImage
{my ($dragX,$dragY,$dropX,$dropY) = (@_,$winX[0] + $winWidth[0] - 100,$winY[0] + $winHeight[0] - 100);
if(Alive())
{my $dragTo;
print "Initiating drag and drop from location ".$dragX.":".$dragY." to location ".$dropX.":".$dropY."\n";
system("xdotool mousemove --sync $dragX $dragY");
system("xdotool mousedown 1");
for $dragTo ($dragY+1 .. $dropY)
{system("xdotool mousemove --sync $dragX $dragTo");}
for $dragTo ($dragX+1 .. $dropX)
{system("xdotool mousemove --sync $dragTo $dropY");}
system("xdotool mousemove --sync $dropX $dropY");
system("scrot --focused pass.bmp");
my $pixelData = GetArea($winWidth[0]-100,$winHeight[0]-100,95,95);
system("xdotool mouseup 1");
return CheckBitmap($pixelData);}
else
{return (0); }
}
# Press the space bar $count times, pausing 0.3 s between repeats; used to
# dismiss dialogs raised by the page during a drag.
sub Accept
{my $count = shift;
for my $press (1 .. $count)
{WaitSeconds(0.3) if $press > 1;
system("xdotool key space");}
}
# Poll for the PASS indicator after a test has run.
# Takes up to three screenshots of the focused window (pausing between
# retries) and searches each for the colour bytes (34,139,34) that the
# test pages display on success.
# Returns 1 when the colour is found, 0 after retries are exhausted;
# returns nothing when the driven window is no longer active.
sub Green
{if(Alive())
{WaitSeconds(1);
my $wait = 3;
my @abcd;
until($wait == 0)
{if($wait < 3)
{print "Waiting for test result\n";
WaitSeconds(2);}
$wait = $wait - 1;
# Capture the focused window and look for the green marker.
system("scrot --focused pass$wait.bmp");
@abcd = FindColors("pass$wait",34,139,34);
if($abcd[0] != 0)
{return 1;}
else
{if($wait == 0)
{return 0;}
}
}
}
}
# Validate the drag-feedback bitmap sampled during an image drag.
# $pixelData is the string built by GetArea(): the literal prefix
# "Pixel data" followed by ",R,G,B" byte triples for every sampled pixel.
# Returns (1) for a clean pass, (1, note) when only minor artifacts were
# seen, or (0, reason) when the bitmap is wrong.
sub CheckBitmap
{my $pixelData = shift;
# Collapse each known colour run into a one-letter code, in the same
# order the checks rely on.
my @colorCodes = (
[',0,0,0,0' => 'B'], # black / transparent run
[',34,139,34' => 'F'], # pass-marker green
[',180,130,70' => 'S'], # dropzone colour
[',140,180,210' => 'T'], # draggable-area colour
[',255,255,255' => 'W'], # white
);
foreach my $code (@colorCodes)
{my ($triple, $letter) = @$code;
$pixelData =~ s/\Q$triple\E/$letter/g;}
# Any black or white pixel means transparency was not honoured.
return (0,'transparent areas are not transparent in feedback bitmap')
if $pixelData =~ /[BW]/;
# All three expected colours must be present somewhere.
return (0,'feedback bitmap is incomplete')
unless $pixelData =~ /F/ and $pixelData =~ /S/ and $pixelData =~ /T/;
# Perfect bitmap: nothing but the three expected colours remains.
return (1)
if $pixelData =~ /^Pixel data[TSF]+$/;
# Strip the leftover unrecognised byte values and judge by how much
# recognised content is left.
$pixelData =~ s/,[0-9]{1,3}//g;
return length($pixelData) > 8000
? (1,'ignoring anti-aliasing and/or small rendering artifacts')
: (0,'unexpected colors in feedback bitmap');
}
# Pause for a (possibly fractional) number of seconds; implemented with
# four-argument select() so no extra modules are needed.
sub WaitSeconds
{my $seconds = shift;
select(undef, undef, undef, $seconds);}
# Returns 1 when the window at the front of @winIDs is still the active
# X11 window, 0 otherwise; used as a guard before sending events.
sub Alive
{my $activeID = `xdotool getactivewindow`;
return $activeID == $winIDs[0] ? 1 : 0;}
# Scan a screenshot ("<name>.bmp", 24-bit uncompressed BMP) for up to two
# marker colours and return a bounding box per colour.
# Arguments: base filename (no extension) followed by one or two colour
# triples given as the three byte values in file order (BMP stores pixels
# as B,G,R — assumed; confirm against callers).
# Returns (left, top, right, bottom) per colour in window-relative pixels;
# an all-zero box means the colour was not found.
sub FindColors
{open(TC, "<".(shift).".bmp") or die("Can't load image file.\n");
# Number of colour triples supplied after the filename.
my $n = int(@_)/3;
# Header offset 10 holds the file offset of the pixel array.
seek(TC,10,0);
read(TC,my $data, 4);
my $bitmapStart = unpack('V',$data);
# Offsets 18/22 hold the signed width and height.
seek(TC,4,1);
read(TC,$data, 8);
my ($width,$height) = unpack('ii',$data);
# Skip the remainder of the header up to the pixel data.
seek(TC,8,1);
read(TC,$data, $bitmapStart - 34);
# Row stride: 3 bytes per pixel, padded up to a multiple of 4.
my $dy = $width*3;
if($dy%4 != 0)
{$dy = $dy + 4 - ($dy)%4;}
my @abcd = (0,0,0,0,0,0,0,0);
# Only the first and last supplied triples are searched for.
my @pattern = (pack("C*",$_[0],$_[1],$_[2]),pack("C*",$_[3*$n-3],$_[3*$n-2],$_[3*$n-1]));
# BMP rows are stored bottom-up, so $height-$y converts to screen rows.
# First regex (non-greedy) finds the leftmost/first occurrence, second
# (greedy) the rightmost/last, giving the box corners.
for(my $y = 0; $y < $height+1; $y++)
{read(TC,$data,$dy);
for(my $i=0; $i < $n; $i++)
{if($abcd[4*$i] == 0 and $data =~ /(^(?:...)*?$pattern[$i])/)
{($abcd[4*$i],$abcd[3+4*$i]) = (length($1)/3,$height-$y);}
if($abcd[4*$i] != 0 and $data =~ /(^(?:...)*$pattern[$i])/)
{($abcd[2+4*$i],$abcd[4*$i+1]) = (length($1)/3,$height-$y);}
}
}
close TC;
return splice(@abcd,0,4*($n+1));}
sub GetArea
{open(TC, "<pass.bmp") or die("Can't load image file.\n");
my $area = 'Pixel data';
my ($x,$y,$w,$h) = @_;
seek(TC,10,0);
read(TC,my $data, 4);
my $bitmapStart = unpack('V',$data);
seek(TC,4,1);
read(TC,$data, 8);
my ($width,$height) = unpack('ii',$data);
my $dy = $width*3;
if($dy%4 != 0)
{$dy = $dy + 4 - ($dy)%4;}
seek(TC,($height-$y-$h)*$dy + $x*3 + $bitmapStart - 26,1);
for(my $i = 0; $i < $h; $i++)
{read(TC,$data,$w*3);
$area = join(',',$area,unpack("C*",$data));
seek(TC,$dy-$w*3,1);}
close TC;
return $area;}
1; | frivoal/presto-testo | core/standards/dnd/auto/x11/xdotool/scrot/bmp/DnD.pm | Perl | bsd-3-clause | 10,742 |
########################################################################
# Bio::KBase::ObjectAPI::KBaseBiochem::Cue - This is the moose object corresponding to the Cue object
# Authors: Christopher Henry, Scott Devoid, Paul Frybarger
# Contact email: chenry@mcs.anl.gov
# Development location: Mathematics and Computer Science Division, Argonne National Lab
# Date of module creation: 2012-04-05T03:44:17
########################################################################
use strict;
use Bio::KBase::ObjectAPI::KBaseBiochem::DB::Cue;
package Bio::KBase::ObjectAPI::KBaseBiochem::Cue;
use Moose;
use namespace::autoclean;
# Thin Moose subclass of the auto-generated DB class; hand-written
# constants and methods for Cue objects belong in the sections below.
extends 'Bio::KBase::ObjectAPI::KBaseBiochem::DB::Cue';
# CONSTANTS:
#TODO
# FUNCTIONS:
#TODO
__PACKAGE__->meta->make_immutable;
1;
| kbase/KBaseFBAModeling | lib/Bio/KBase/ObjectAPI/KBaseBiochem/Cue.pm | Perl | mit | 761 |
# LaTeX2HTML 99.2beta6 (1.42)
# Associate labels original text with physical files.
1;
# LaTeX2HTML 99.2beta6 (1.42)
# labels from external_latex_labels array.
1;
| mmusicante/PlacidoConfArtigos | CAISE2011/submitted/Bib/trans/labels.pl | Perl | mit | 170 |
package PPI::Token::Magic;
=pod
=head1 NAME
PPI::Token::Magic - Tokens representing magic variables
=head1 INHERITANCE
PPI::Token::Magic
isa PPI::Token::Symbol
isa PPI::Token
isa PPI::Element
=head1 SYNOPSIS
# When we say magic variables, we mean these...
$1 $2 $3 $4 $5 $6 $7 $8 $9
$_ $& $` $' $+ @+ %+ $* $. $/ $|
$\\ $" $; $% $= $- @- %- $) $#
$~ $^ $: $? $! %! $@ $$ $< $>
$( $0 $[ $] @_ @* $} $, $#+ $#-
$^L $^A $^E $^C $^D $^F $^H
$^I $^M $^N $^O $^P $^R $^S
$^T $^V $^W $^X
=head1 DESCRIPTION
C<PPI::Token::Magic> is a sub-class of L<PPI::Token::Symbol> which
identifies the token as "magic variable", one of the strange and
unusual variables that are connected to "things" behind the scenes.
Some are extremely common, like C<$_>, and others you will quite
probably never encounter in your Perl career.
=head1 METHODS
The class provides no additional methods, beyond those provided by it's
L<PPI::Token::Symbol>, L<PPI::Token> and L<PPI::Element>.
=cut
use strict;
use PPI::Token::Symbol ();
use PPI::Token::Unknown ();
use vars qw{$VERSION @ISA %magic};
BEGIN {
$VERSION = '1.215';
@ISA = 'PPI::Token::Symbol';
# Magic variables taken from perlvar.
# Several things added separately to avoid warnings.
foreach ( qw{
$1 $2 $3 $4 $5 $6 $7 $8 $9
$_ $& $` $' $+ @+ %+ $* $. $/ $|
$\\ $" $; $% $= $- @- %- $)
$~ $^ $: $? $! %! $@ $$ $< $>
$( $0 $[ $] @_ @*
$^L $^A $^E $^C $^D $^F $^H
$^I $^M $^N $^O $^P $^R $^S
$^T $^V $^W $^X %^H
$::|
}, '$}', '$,', '$#', '$#+', '$#-' ) {
$magic{$_} = 1;
}
}
=pod
=begin testing __TOKENIZER_on_char 30
my $document = PPI::Document->new(\<<'END_PERL');
$[; # Magic $[
$$; # Magic $$
%-; # Magic %-
$#-; # Magic $#-
$$foo; # Symbol $foo Dereference of $foo
$^W; # Magic $^W
$^WIDE_SYSTEM_CALLS; # Magic $^WIDE_SYSTEM_CALLS
${^MATCH}; # Magic ${^MATCH}
@{^_Bar}; # Magic @{^_Bar}
${^_Bar}[0]; # Magic @{^_Bar}
%{^_Baz}; # Magic %{^_Baz}
${^_Baz}{burfle}; # Magic %{^_Baz}
$${^MATCH}; # Magic ${^MATCH} Dereference of ${^MATCH}
\${^MATCH}; # Magic ${^MATCH}
END_PERL
isa_ok( $document, 'PPI::Document' );
$document->index_locations();
my $symbols = $document->find( 'PPI::Token::Symbol' );
is( scalar(@$symbols), 14, 'Found 14 symbols' );
my $comments = $document->find( 'PPI::Token::Comment' );
foreach my $token ( @$symbols ) {
my ($hash, $class, $name, $remk) =
split '\s+', $comments->[$token->line_number - 1], 4;
isa_ok( $token, "PPI::Token::$class" );
is( $token->symbol, $name, $remk || "The symbol is $name" );
}
=end testing
=cut
# Tokenizer callback: called once per character while a Magic token is
# being accumulated. Decides whether the next character extends the
# magic variable, re-classifies the token (Symbol, Cast, ArrayIndex,
# Structure), or finalizes it and re-dispatches the character.
sub __TOKENIZER__on_char {
my $t = $_[1];
# $c is the candidate new content
my $c = $t->{token}->{content} . substr( $t->{line}, $t->{line_cursor}, 1 );
# Do a quick first test so we don't have to do more than this one.
# All of the tests below match this one, so it should provide a
# small speed up. This regex should be updated to match the inside
# tests if they are changed.
if ( $c =~ /^ \$ .* [ \w : \$ \{ ] $/x ) {
if ( $c =~ /^(\$(?:\_[\w:]|::))/ or $c =~ /^\$\'[\w]/ ) {
# If and only if we have $'\d, it is not a
# symbol. (this was apparently a conscious choice)
# Note that $::0 on the other hand is legal
if ( $c =~ /^\$\'\d$/ ) {
# In this case, we have a magic plus a digit.
# Save the CURRENT token, and rerun the on_char
return $t->_finalize_token->__TOKENIZER__on_char( $t );
}
# A symbol in the style $_foo or $::foo or $'foo.
# Overwrite the current token
$t->{class} = $t->{token}->set_class('Symbol');
return PPI::Token::Symbol->__TOKENIZER__on_char( $t );
}
if ( $c =~ /^\$\$\w/ ) {
# This is really a scalar dereference. ( $$foo )
# Add the current token as the cast...
$t->{token} = PPI::Token::Cast->new( '$' );
$t->_finalize_token;
# ... and create a new token for the symbol
return $t->_new_token( 'Symbol', '$' );
}
if ( $c eq '$${' ) {
# This _might_ be a dereference of one of the
# control-character symbols.
my $line = substr $t->{line}, $t->{line_cursor} + 1;
if ( $line =~ m/$PPI::Token::Unknown::CURLY_SYMBOL/ ) {
# This is really a dereference. ( $${^_foo} )
# Add the current token as the cast...
$t->{token} = PPI::Token::Cast->new( '$' );
$t->_finalize_token;
# ... and create a new token for the symbol
return $t->_new_token( 'Magic', '$' );
}
}
if ( $c eq '$#$' or $c eq '$#{' ) {
# This is really an index dereferencing cast, although
# it has the same two chars as the magic variable $#.
$t->{class} = $t->{token}->set_class('Cast');
return $t->_finalize_token->__TOKENIZER__on_char( $t );
}
if ( $c =~ /^(\$\#)\w/ ) {
# This is really an array index thingy ( $#array )
$t->{token} = PPI::Token::ArrayIndex->new( "$1" );
return PPI::Token::ArrayIndex->__TOKENIZER__on_char( $t );
}
if ( $c =~ /^\$\^\w+$/o ) {
# It's an escaped char magic... maybe ( like $^M )
my $next = substr( $t->{line}, $t->{line_cursor}+1, 1 ); # Peek ahead
if ($magic{$c} && (!$next || $next !~ /\w/)) {
# Known control-char magic and not followed by another word
# character: consume it into the current token.
$t->{token}->{content} = $c;
$t->{line_cursor}++;
} else {
# Maybe it's a long magic variable like $^WIDE_SYSTEM_CALLS
return 1;
}
}
if ( $c =~ /^\$\#\{/ ) {
# The $# is actually a cast, and { is its block
# Add the current token as the cast...
$t->{token} = PPI::Token::Cast->new( '$#' );
$t->_finalize_token;
# ... and create a new token for the block
return $t->_new_token( 'Structure', '{' );
}
} elsif ($c =~ /^%\^/) {
return 1 if $c eq '%^';
# It's an escaped char magic... maybe ( like %^H )
if ($magic{$c}) {
$t->{token}->{content} = $c;
$t->{line_cursor}++;
} else {
# Back off, treat '%' as an operator
chop $t->{token}->{content};
bless $t->{token}, $t->{class} = 'PPI::Token::Operator';
$t->{line_cursor}--;
}
}
if ( $magic{$c} ) {
# $#+ and $#-
$t->{line_cursor} += length( $c ) - length( $t->{token}->{content} );
$t->{token}->{content} = $c;
} else {
my $line = substr( $t->{line}, $t->{line_cursor} );
if ( $line =~ /($PPI::Token::Unknown::CURLY_SYMBOL)/ ) {
# control character symbol (e.g. ${^MATCH})
$t->{token}->{content} .= $1;
$t->{line_cursor} += length $1;
}
}
# End the current magic token, and recheck
$t->_finalize_token->__TOKENIZER__on_char( $t );
}
# The canonical form of a magic variable is simply its literal content;
# no normalisation is needed (unlike ordinary symbols).
sub canonical {
my ($self) = @_;
return $self->content;
}
1;
=pod
=head1 SUPPORT
See the L<support section|PPI/SUPPORT> in the main module.
=head1 AUTHOR
Adam Kennedy E<lt>adamk@cpan.orgE<gt>
=head1 COPYRIGHT
Copyright 2001 - 2011 Adam Kennedy.
This program is free software; you can redistribute
it and/or modify it under the same terms as Perl itself.
The full text of the license can be found in the
LICENSE file included with this module.
=cut
| liuyangning/WX_web | xampp/perl/vendor/lib/PPI/Token/Magic.pm | Perl | mit | 6,986 |
package DDG::Goodie::NLetterWords;
# ABSTRACT: find words of a certain length
use strict;
use DDG::Goodie;
use Lingua::EN::Numericalize;
# Trigger on queries ending in "word"/"words".
triggers end => "words", "word";
# Results are randomized, so never cache them.
zci is_cached => 0;
primary_example_queries '5 letter words';
secondary_example_queries '12 character word';
description 'find words of a certain length';
name 'NLetterWords';
code_url 'https://github.com/duckduckgo/zeroclickinfo-goodies/blob/master/lib/DDG/Goodie/NLetterWords.pm';
topics 'words_and_games';
category 'language';
attribution github => ['nospampleasemam', 'Dylan Lloyd'],
web => ['http://dylansserver.com/', 'Dylan Lloyd'];
# Handler: parse "N letter word(s)" (N may be spelled out, e.g. "five"),
# collect dictionary words of exactly length N, and answer with up to
# 30 of them chosen at random.
handle query_parts => sub {
# Ensure rand is seeded for each process
srand();
# Convert spelled-out numbers ("five") into digits ("5").
my $numericalized = str2nbr($_);
return unless $numericalized =~ /^(\d{1,50}) (letter|char|character) words?$/;
my $length = $1;
# Slurp the bundled word list (one word per line).
my @allwords = share('words.txt')->slurp;
my @words;
for (@allwords) {
chomp($_);
if (length($_) == $length) { push(@words, $_); }
}
return unless @words;
# Rejection-sample 30 distinct words: a chosen slot is zeroed out so it
# cannot be selected twice.
# NOTE(review): a dictionary entry that is literally "0" would fail the
# truthiness test below forever -- confirm words.txt cannot contain it.
my @randomwords;
if (scalar(@words) > 30) {
while (scalar(@randomwords) < 30) {
my $rand = int(rand(scalar(@words)));
if ($words[$rand]) {
push(@randomwords, $words[$rand]);
$words[$rand] = 0;
}
}
@words = @randomwords;
}
my $output = "Random $length letter words: " . join ', ', @words;
$output .= ".";
return $output;
};
1;
| digit4lfa1l/zeroclickinfo-goodies | lib/DDG/Goodie/NLetterWords.pm | Perl | apache-2.0 | 1,493 |
#!/usr/bin/perl
# Author: Moises Gautier Gomez
# 5th year, Computer Engineering
# Course: Natural Language Software Interfaces
# Assignment 1 - Exercise 4
#
# A tiny ELIZA-style chatbot (Spanish). It echoes back any family
# keywords (madre/padre/hermano/hermana) found in the user's input and
# exits when the user says "adios".
use strict;
use warnings;

# Keyword list kept for reference; the matching below is a regex form
# of exactly these four words.
my @claves = ('madre', 'padre', 'hermano', 'hermana');
my $coma = ",";

print "Bienvenido, ¿de qué tema quiere hablar? \n";

while (<>)
{
chomp;

# Lower-case in-process. The original shelled out to
# `echo $_ | tr [A-Z] [a-z]`, which spawned two processes per line and
# allowed shell injection through unescaped user input.
my $cadena = lc $_;

# Say goodbye and quit when the user mentions "adios".
if ($cadena =~ s/adios//)
{
print "¡Hasta luego! \n";
exit(1);
}

my $claves_generadas = "";

# Split the input line into whitespace-separated tokens.
my @texto = split(/\s+/, $cadena);

foreach my $key (@texto)
{
# Match one of the family keywords. The original used the character
# classes [m|p] and [a|o], which also (wrongly) matched a literal '|'
# (e.g. "|adre"); [mp] and [ao] are what was intended.
if ($key =~ s/([mp]adre)// || $key =~ s/(herman[ao])//)
{
# Record each keyword only once. \Q...\E quotes the captured text so
# it cannot be misread as regex metacharacters.
if ($claves_generadas !~ /\Q$1\E/)
{
$claves_generadas = "$claves_generadas $1$coma";
}
}
}

# At least one keyword found: reflect them back to the user.
if (length($claves_generadas) > 0)
{
$claves_generadas =~ s/,$//; # drop the trailing comma
print "Hábleme más de su".$claves_generadas."\n";
}

# No keyword found: ask the user to keep talking.
if (length($claves_generadas) == 0)
{
print "Muy interesante, sígame contando. \n";
}
}
| MGautier/Programacion | Perl/ISLN - Asignatura/P1/practica-1-4.pl | Perl | mit | 1,816 |
#!/usr/bin/perl
# 2010/Mar/16 @ Zdenek Styblik
# desc: Fix Slackware's package's files
#
# Copyright (c) 2011 Zdenek Styblik <zdenek.styblik@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
use lib "/mnt/search.slackware.eu/perl/";
use Slackware::Search::ConfigParser qw(_getConfig);
use Slackware::Search::SupportLib qw(:T1);
use DBI;
use strict;
use warnings;
use constant CFGFILE => '/mnt/search.slackware.eu/conf/config.pl';
my $cfgParser = 'Slackware::Search::ConfigParser';
my %CFG = $cfgParser->_getConfig(CFGFILE);
# NOTE(review): "%CFG || keys(%CFG)" is redundant -- either test alone
# detects an empty config; kept as-is.
unless (%CFG || keys(%CFG)) {
printf("Parsing of config file has failed.\n");
exit 2;
}
# Connect with manual transaction control (AutoCommit off); the single
# commit happens after all manifests are processed.
my $dbh = DBI->connect($CFG{DB_DSN},
$CFG{DB_USER},
$CFG{DB_PASS},
{
AutoCommit => 0,
RaiseError => 1,
PrintError => 1
}
);
die ("Unable to connect to DB.") unless ($dbh);
### MAIN ###
# Exactly one argument is expected: the Slackware version label,
# e.g. "slackware-13.37", "slackware64-current".
my $numArgs = $#ARGV + 1;
if ($numArgs == 0) {
print "Parameter must be Slackware version.\n";
exit 1;
}
if ($ARGV[0] !~ /^slackware(64)?-([0-9]+\.[0-9]+|current){1}$/i) {
print "Parameter doesn't look like Slackware version to me."
.$ARGV[0]."\n";
exit 1;
}
my $slib = 'Slackware::Search::SupportLib';
$slib->_set_dbHandler($dbh);
# The version must already exist in the DB; this script only fixes it.
my $sverExists = $slib->existsSlackVer($ARGV[0]);
if ($sverExists == 0) {
print "This Slackware version is not in DB.\n";
print "Please use add script.\n";
exit 1;
}
my $idSlackVer = -1;
$idSlackVer = $slib->getSlackVerId($ARGV[0]);
$slib->_set_sverName($ARGV[0]);
### MANIFEST.bz2 ###
# FILELIST.TXT.files.manifests is expected in the current directory;
# column 8 (index 7) of each line holds a file path, of which only
# *.bz2 manifests are processed.
if ( -e "./FILELIST.TXT.files.manifests" ) {
open(FMANS, "./FILELIST.TXT.files.manifests")
or die("Unable to open FILELIST.TXT.files.manifests");
print "Processing manifests. This is going to be a while.\n";
while (my $lineMan = <FMANS>) {
chomp($lineMan);
my @arrLine = split(' ', $lineMan);
unless ($arrLine[7]) {
next;
}
unless ($arrLine[7] =~ /\.bz2$/i) {
next;
}
$slib->processManifestFile($arrLine[7], $idSlackVer, 0);
}
close(FMANS);
}
$dbh->commit;
$dbh->disconnect;
| zstyblik/search.slackware.eu | shell/db-fix-pkgs-files.pl | Perl | mit | 2,972 |
#!/usr/bin/perl
# xls2diseaseGenes.pl -- convert a disease/gene-association Excel
# workbook into a flat gene-list file of the form
#
#   >SheetName
#   gene<TAB>col2<TAB>col3
#
# then regenerate the config options file.
#
# Usage: xls2disease_genes.pl <xls> <date>
#
# Fixes over the original: strict/warnings enabled (the original had
# "use strict" commented out), all variables lexically declared, and
# every open() is the three-argument form with a lexical handle and an
# error check instead of unchecked bareword two-argument opens.
use strict;
use warnings;
use Spreadsheet::ParseExcel;
use Encode qw(encode decode);
use Config::General;

die "xls2disease_genes.pl <xls> <date>\n" if (!(@ARGV));
die "xls2disease_genes.pl <xls> <date>\n" if ( $#ARGV != 1 );

chomp(my $xls  = $ARGV[0]);
chomp(my $date = $ARGV[1]);
my $enc = 'utf-8';

## Read configs
my $conf = Config::General->new("$ENV{'CALLS2XLS'}/calls2xls.cfg");
my %c = $conf->getall;

my $parser = Spreadsheet::ParseExcel->new();
my $workbook = $parser->parse($xls);
if ( !defined $workbook ) {
    die $parser->error(), ".\n";
}

my $calls2xls_root = $ENV{'CALLS2XLS'};

## Re-read the raw config file to expand the comma-separated
## info_dp/info_ao options into nested lookup hashes (Config::General
## does not split them for us); all other keys are plain scalars.
open my $cfg_fh, '<', "$calls2xls_root/calls2xls.cfg"
    or die "Cannot open $calls2xls_root/calls2xls.cfg: $!\n";
while ( my $line = <$cfg_fh> ) {
    next if $line =~ /#/ || $line !~ /\S/;
    my @tmp = split(/\s=\s/, $line);
    s/\s//g for @tmp;    # strip all whitespace from key and value
    if ( $tmp[0] eq 'info_dp' || $tmp[0] eq 'info_ao' ) {
        $c{$tmp[0]}{$_} = 1 for split(/,/, $tmp[1]);
    }
    else {
        $c{$tmp[0]} = $tmp[1];
    }
}
close $cfg_fh;

my $output = $calls2xls_root . "/" . $c{'diseaseGeneAssocPath'} . "/" . "gl." . $date . ".txt";
open my $out_fh, '>', $output
    or die "Cannot open $output for writing: $!\n";

for my $worksheet ( $workbook->worksheets() ) {
    my $name     = $worksheet->get_name();
    my $name_uni = encode($enc, $name);
    # Skip the overview sheet ("Översikt") and the tab-index sheet.
    next if $name_uni =~ /versikt/ || $name_uni =~ /flikar/;
    print {$out_fh} ">$name_uni\n";
    my ( $row_min, $row_max ) = $worksheet->row_range();
    for my $row ( 1 .. $row_max ) {    # row 0 is the header row
        my @genedat;
        for my $col ( 0 .. 2 ) {
            my $cell = $worksheet->get_cell( $row, $col );
            if ($cell) {
                ( my $cv = $cell->value() ) =~ s/\s//g;
                push @genedat, $cv;
            }
            else {
                push @genedat, 'NA';
            }
        }
        my $genestr = join( "\t", @genedat );
        # Skip rows whose gene (first) column is empty.
        print {$out_fh} "$genestr\n" if $genestr !~ /^NA\t/;
    }
}
close $out_fh or die "Cannot close $output: $!\n";

system("mkConfigOptionsFile.pl");
| parlar/calls2xls | bin/xls2diseaseGenes.pl | Perl | mit | 2,054 |
use strict;
use Data::Dumper;
use Carp;
#
# This is a SAS Component
#
=head1 NAME
get_relationship_OntologyForSample
=head1 SYNOPSIS
get_relationship_OntologyForSample [-c N] [-a] [--fields field-list] < ids > table.with.fields.added
=head1 DESCRIPTION
This relationship retrieves the ontology term (PO#, EO# or ENVO#) associated with the sample.
Example:
get_relationship_OntologyForSample -a < ids > table.with.fields.added
would read in a file of ids and add a column for each field in the relationship.
The standard input should be a tab-separated table (i.e., each line
is a tab-separated set of fields). Normally, the last field in each
line would contain the id. If some other column contains the id,
use
-c N
where N is the column (from 1) that contains the id.
This is a pipe command. The input is taken from the standard input, and the
output is to the standard output.
=head1 COMMAND-LINE OPTIONS
Usage: get_relationship_OntologyForSample [arguments] < ids > table.with.fields.added
=over 4
=item -c num
Select the identifier from column num
=item -from field-list
Choose a set of fields from the Ontology
entity to return. Field-list is a comma-separated list of strings. The
following fields are available:
=over 4
=item id
=item id
=item name
=item definition
=item ontologySource
=back
=item -rel field-list
Choose a set of fields from the relationship to return. Field-list is a comma-separated list of
strings. The following fields are available:
=over 4
=item from_link
=item to_link
=back
=item -to field-list
Choose a set of fields from the SampleAnnotation entity to return. Field-list is a comma-separated list of
strings. The following fields are available:
=over 4
=item id
=item annotationDate
=item source_id
=back
=back
=head1 AUTHORS
L<The SEED Project|http://www.theseed.org>
=cut
use Bio::KBase::Utilities::ScriptThing;
use Bio::KBase::CDMI::CDMIClient;
use Getopt::Long;
#Default fields
# NOTE(review): 'id' appears twice in @all_from_fields, mirroring the
# duplicated "=item id" in the POD above -- presumably an artifact of
# the script generator; confirm against the CDMI entity spec.
my @all_from_fields = ( 'id', 'id', 'name', 'definition', 'ontologySource' );
my @all_rel_fields = ( 'from_link', 'to_link', );
my @all_to_fields = ( 'id', 'annotationDate', 'source_id' );
# Lookup sets used by check_fields() to validate user-requested fields.
my %all_from_fields = map { $_ => 1 } @all_from_fields;
my %all_rel_fields = map { $_ => 1 } @all_rel_fields;
my %all_to_fields = map { $_ => 1 } @all_to_fields;
# NOTE(review): defaults use hyphens ('from-link') while the relation
# fields use underscores ('from_link') -- verify this is intentional.
my @default_fields = ('from-link', 'to-link');
my @from_fields;
my @rel_fields;
my @to_fields;
our $usage = <<'END';
Usage: get_relationship_OntologyForSample [arguments] < ids > table.with.fields.added
--show-fields
List the available fields.
-c num
Select the identifier from column num
--from field-list
Choose a set of fields from the Ontology
entity to return. Field-list is a comma-separated list of strings. The
following fields are available:
id
id
name
definition
ontologySource
--rel field-list
Choose a set of fields from the relationship to return. Field-list is a comma-separated list of
strings. The following fields are available:
from_link
to_link
--to field-list
Choose a set of fields from the SampleAnnotation entity to
return. Field-list is a comma-separated list of strings. The following fields are available:
id
annotationDate
source_id
END
# Command-line state: -c column, -a all fields, --from/--rel/--to field
# selections, -i input file name ("-" means STDIN).
my $column;
my $input_file;
my $a;
my $f;
my $r;
my $t;
my $help;
my $show_fields;
my $i = "-";
# Creates the CDMI client and consumes the common options in one call.
my $geO = Bio::KBase::CDMI::CDMIClient->new_get_entity_for_script("c=i" => \$column,
"h" => \$help,
"show-fields" => \$show_fields,
"a" => \$a,
"from=s" => \$f,
"rel=s" => \$r,
"to=s" => \$t,
'i=s' => \$i);
if ($help) {
print $usage;
exit 0;
}
if ($show_fields)
{
print "from fields:\n";
print " $_\n" foreach @all_from_fields;
print "relation fields:\n";
print " $_\n" foreach @all_rel_fields;
print "to fields:\n";
print " $_\n" foreach @all_to_fields;
exit 0;
}
# -a is mutually exclusive with explicit field selections.
if ($a && ($f || $r || $t)) {die $usage};
if ($a) {
@from_fields = @all_from_fields;
@rel_fields = @all_rel_fields;
@to_fields = @all_to_fields;
} elsif ($f || $t || $r) {
# Validate each comma-separated selection; exit if any is unknown.
my $err = 0;
if ($f) {
@from_fields = split(",", $f);
$err += check_fields(\@from_fields, %all_from_fields);
}
if ($r) {
@rel_fields = split(",", $r);
$err += check_fields(\@rel_fields, %all_rel_fields);
}
if ($t) {
@to_fields = split(",", $t);
$err += check_fields(\@to_fields, %all_to_fields);
}
if ($err) {exit 1;}
} else {
@rel_fields = @default_fields;
}
# Choose the input stream: a named file, otherwise STDIN.
my $ih;
if ($input_file)
{
open $ih, "<", $input_file or die "Cannot open input file $input_file: $!";
}
else
{
$ih = \*STDIN;
}
# Main loop: read ids in batches, query the relationship once per
# batch, then append the selected from/rel/to fields to each input line
# whose id produced results.
while (my @tuples = Bio::KBase::Utilities::ScriptThing::GetBatch($ih, undef, $column)) {
my @h = map { $_->[0] } @tuples;
my $h = $geO->get_relationship_OntologyForSample(\@h, \@from_fields, \@rel_fields, \@to_fields);
my %results;
# Each result is a [from, rel, to] triple of hashes; group the selected
# field values by the originating id (rel's from_link).
for my $result (@$h) {
my @from;
my @rel;
my @to;
my $from_id;
my $res = $result->[0];
for my $key (@from_fields) {
push (@from,$res->{$key});
}
$res = $result->[1];
$from_id = $res->{'from_link'};
for my $key (@rel_fields) {
push (@rel,$res->{$key});
}
$res = $result->[2];
for my $key (@to_fields) {
push (@to,$res->{$key});
}
if ($from_id) {
push @{$results{$from_id}}, [@from, @rel, @to];
}
}
# Emit one output line per (input line, result) pair; input lines with
# no results are silently dropped.
for my $tuple (@tuples)
{
my($id, $line) = @$tuple;
my $resultsForId = $results{$id};
if ($resultsForId) {
for my $result (@$resultsForId) {
print join("\t", $line, @$result) . "\n";
}
}
}
}
# Validate a requested field list against the set of known fields.
# Arguments: an array ref of requested names, followed by the flattened
# lookup hash of valid names. Returns 0 when every field is recognised;
# otherwise prints a diagnostic to STDERR and returns 1.
sub check_fields {
my ($fields, %all_fields) = @_;
my @unknown = grep { !$all_fields{$_} } @$fields;
return 0 unless @unknown;
my @valid = keys %all_fields;
print STDERR "get_relationship_OntologyForSample: unknown fields @unknown. Valid fields are @valid\n";
return 1;
}
| kbase/kb_seed | scripts/get_relationship_OntologyForSample.pl | Perl | mit | 6,008 |
#!/usr/bin/perl -w
use strict;
use warnings;
use Bio::SeqIO;
use Bio::Seq;
use IO::String;
# Gene annotations collected by loadFiles(), keyed by locus tag.
my %ann;
# Call without '&' -- "&loadFiles;" would silently pass the caller's
# current @_ to the sub.
loadFiles();
# Emit one FASTA record per annotated locus.
foreach my $locus (keys %ann)
{
my $mRNAPro = $ann{$locus}{"mRNAPro"};
# Sanitise the product name for use in a FASTA header: every character
# outside the listed set becomes '_'.
# NOTE(review): tr/[a-z][A-Z][0-9]/_/c treats the literal '[' and ']'
# as allowed characters too -- probably unintended, but preserved so
# output stays identical.
if ($mRNAPro) { $mRNAPro =~ tr/[a-z][A-Z][0-9]/_/c} else {$mRNAPro = "no_product"};
my $mRNATransID = $ann{$locus}{"mRNATransID"};
my $seq = $ann{$locus}{"seq"};
my $length = length($seq);
# Pad with N until the length is a multiple of 3 (whole codons). The
# original also re-checked "$length % 3" afterwards and died -- that
# branch was unreachable once the loop exits, so it has been removed.
while ($length % 3) { $seq .= "N"; $length = length($seq); };
# Wrap the sequence at 60 characters per line.
$seq =~ s/(.{60})/$1\n/g;
my $start = $ann{$locus}{"start"};
my $end = $ann{$locus}{"end"};
my $id = $ann{$locus}{"id"};
my $desc = $ann{$locus}{"desc"};
print ">$id $id\_$locus\_$mRNAPro\_$start\_$end\n$seq\n";
}
# my $outseq = Bio::SeqIO->new( -fh => \*STDOUT,
# -format => "fasta");
# Scan the current directory for matching <name>.gbff / <name>.fa pairs,
# parse each pair as one GenBank record (annotation + ORIGIN sequence),
# and fill the file-global %ann hash with per-locus data: seq, start,
# end, id, desc, mRNAPro (product) and mRNATransID (transcript_id).
sub loadFiles
{
opendir (DIR, "./") || die "CANT READ DIRECTORY: $!\n";
my @dots = grep { (!/^\./) && -f "./$_" && (/\.fa$/ || /\.gbff$/)} readdir(DIR);
closedir DIR;
# Map basenames to a dummy value so each .fa/.gbff pair is seen once.
my %hash = map { /^(\S+)\.(fa|gbff)$/; $1 => "file"; } @dots;
my $countFile = 0;
my $countSeq = 0;
foreach my $key (sort keys %hash)
{
# print "\t PARSING $key";
$countFile++;
my $countSeqFile = 0;
my $name = $key;
# NOTE(review): shelling out to `cat` interpolates the file name into a
# shell command; safe only as long as directory entries contain no
# shell metacharacters.
my $string = `cat $name.gbff`;
$string .= "\nORIGIN\n";
$string .= `cat $name.fa`;
my $stringfh = new IO::String($string);
my $stream = Bio::SeqIO->new( -fh => $stringfh,
-format => 'GenBank');
while (my $seq = $stream->next_seq())
{
my $sequence = $seq->seq();
my $anumber = $seq->accession_number();
my $display_id = $seq->display_id();
my $subseq = $seq->subseq(5,10);
my $alphabet = $seq->alphabet();
my $primary_id = $seq->primary_id();
my $length = $seq->length();
my $description = $seq->description();
my $strain = "";
# $desc is assembled for debugging only; it is never printed unless the
# commented "print $desc" line below is re-enabled.
my $desc = "";
$desc .= "DISPLAY ID\t" . $display_id . "\n"; # the human read-able id of the sequence
# $desc .= "SUBSEQ \t" . $subseq . "\n"; # part of the sequence as a string
$desc .= "ALPHABET \t" . $alphabet . "\n"; # one of 'dna','rna','protein'
$desc .= "PRIMARY ID\t" . $primary_id . "\n"; # a unique id for this sequence regardless of its display_id or accession number
$desc .= "LENGTH \t" . $length . "\n"; # sequence length
# if ($length % 3) { die "LENGTH $length OF SEQUENCE $anumber ISNT DIVISIBLE BY 3"; };
$desc .= "DESCRIPT \t" . $description . "\n"; # a description of the sequence
# $desc .= "SEQ \t" . $sequence . "\n"; # string of sequence
$desc .= "ACC NUMBER\t" . $anumber . "\n"; # when there, the accession number
# print $desc;
# Walk every feature/tag/value triple; gene or mRNA features carrying a
# locus_tag establish the current locus and store its coordinates.
for my $feat_object ($seq->get_SeqFeatures)
{
my $gene;
my $mRNA_Pro;
my $locus;
my $mRNA_Seq;
my $mRNA_Trans_ID;
my $primary_tag = $feat_object->primary_tag;
# print "primary tag: ", $primary_tag , "\n";
for my $tag ($feat_object->get_all_tags)
{
# print " tag: ", $tag, "\n";
for my $value ($feat_object->get_tag_values($tag))
{
if ((($primary_tag eq "gene") ||($primary_tag eq "mRNA")) && ($tag eq "locus_tag"))
{
$locus = $value;
$ann{$locus}{"seq"} = $feat_object->seq()->seq();
$ann{$locus}{"start"} = $feat_object->start;
$ann{$locus}{"end"} = $feat_object->end;
$ann{$locus}{"id"} = $display_id;
$ann{$locus}{"desc"} = $description;
$countSeq++;
$countSeqFile++;
};
# Once a locus is known, attach mRNA product / transcript_id to it.
if ($locus)
{
if (($primary_tag eq "mRNA") && ($tag eq "product")) { $ann{$locus}{"mRNAPro"} = $value; };
if (($primary_tag eq "mRNA") && ($tag eq "transcript_id")) { $ann{$locus}{"mRNATransID"} = $value; };
# my $gene = $seq->gene2liveseq(-gene_name => "$locus");
# if ($primary_tag eq "mRNA") { $ann{$locus}{"seq"} = $feat_object->seq(); };
}
# print " value: ", $value, "\n";
}
}
}
# foreach my $feat ( $seq->top_SeqFeatures )
# {
# my $locus;
# my $mRNA_Pro;
# if ( $feat->primary_tag eq 'CDS' )
# {
# my $cds_obj = $feat->spliced_seq;
# print ">".$seq->display_id()."\n".$cds_obj->seq."\n";
# }
# }
} # end while seq
# print " > $countSeqFile SEQUENCES\n";
} #end foreach key hash
# print "$countFile FILES LOADED WITH $countSeq SEQUENCES\n";
}
1; | sauloal/perlscripts | Bio/toCheck/gff2fasta.pl | Perl | mit | 5,574 |
:- module(bc_data_config, [
bc_config_get/2, % +Name, -Value
bc_config_set/2, % +Name, +Value
bc_config_set_api/3, % +Actor, +Name, +Value
bc_config_set_list_api/2, % +Actor, +List
bc_config_list/2, % +Actor, -List
bc_config_dict/1 % -Dict
]).
:- use_module(library(debug)).
:- use_module(library(docstore)).
%! bc_config_get(+Name, -Value) is det.
%
% Retrieves the configuration entry.
% When the entry does not exist then
% an error error(no_config(Name)) is thrown.
bc_config_get(Name, Value):-
( ds_find(config, name=Name, [Doc])
-> Value = Doc.value
; throw(error(no_config(Name)))).
%! bc_config_set_api(+Actor, +Name, +Value) is det.
%
% Same as bc_config_set/2 but checks that
% the current API user is an admin.
bc_config_set_api(Actor, Name, Value):-
config_access(Actor),
bc_config_set(Name, Value).
%! bc_config_set_list_api(+Actor, List) is det.
%
% Same as bc_config_set_api/2 but takes
% a list of configuration entries.
bc_config_set_list_api(Actor, List):-
config_access(Actor),
maplist(config_set_dict, List).
% config_set_dict(+Entry) is det.
%
% Helper: stores one config{ name:Name, value:Value } dict.
config_set_dict(Dict):-
bc_config_set(Dict.name, Dict.value).
% config_access(+Actor) is det.
%
% Succeeds for admin actors, otherwise
% throws error(no_access).
config_access(Actor):-
Actor.type = admin, !.
config_access(_):-
throw(error(no_access)).
%! bc_config_set(+Name, +Value) is det.
%
% Sets the configuration value. If the
% value does not exist yet, it is added.
bc_config_set(Name, Value):-
debug(bc_data, 'setting ~w to ~p', [Name, Value]),
( ds_find(config, name=Name, [Doc])
-> put_dict(value, Doc, Value, New),
ds_update(New)
; ds_insert(config{ name: Name, value: Value })).
%! bc_config_list(+Actor, -List) is det.
%
% Retrieves the list of all config
% values. Returned list contains dicts
% `config{ name: Name, value: Value }`.
bc_config_list(Actor, List):-
config_access(Actor),
ds_all(config, List).
%! bc_config_dict(-Dict) is det.
%
% Retrieves dict containing all
% config entries.
bc_config_dict(Dict):-
ds_all(config, List),
build_dict(List, _{}, Dict).
% build_dict(+Entries, +Acc, -Dict) is det.
%
% Folds the list of config docs into a single
% dict mapping each name to its value.
build_dict([Entry|List], Acc, Dict):-
Tmp = Acc.put(Entry.name, Entry.value),
build_dict(List, Tmp, Dict).
build_dict([], Dict, Dict).
| rla/blog-core | prolog/bc/bc_data_config.pl | Perl | mit | 2,226 |
package API::PiData::Pi;
use Mojo::Base "Mojolicious::Controller";
# use YAML::Tiny 'Dump';
use autodie;
use Mojo::JSON;
use YAML::Syck;
=head1 NAME
API::PiData::Pi - Receive data about PI server.
=head1 SYNOPSIS
# Application
package MyApp;
use Mojo::Base 'Mojolicious', -signatures;
use API::PiData::Pi;
# Route
sub startup ($self) {
$self->routes->post('/pidata')->to('PI#set_pi_data');
}
=head1 DESCRIPTION
This is a controller class. Ment to be dispatched to from mojolicious application class in route definiton.
=head1 METHODS
=head2 set_pi_data
Store pi data to file.
And store history in a csv file.
=cut
# Unfinished stub: intended to authorise requests by checking the
# client IP against the configured ISP ranges. Currently performs no
# check and is never called by set_pi_data.
sub _check_client_ip {
# TODO
# get array of Net::IP from config
my $c= shift;
# NOTE(review): this assigns a single (presumably scalar) config value
# to an array -- confirm whether isp_ip_range is meant to be an
# arrayref that should be dereferenced here.
my @isp_ipaddresses_text = $c->config->{isp_ip_range};
# Use Net::IP ip_is_overlap to auth if
}
# Store the validated PI status payload as a JSON snapshot and append a
# timestamped temperature reading to the history CSV, then render 'ok'.
# Invalid input is rejected with a default 400 by the OpenAPI plugin.
# File errors throw via the file-level "use autodie".
sub set_pi_data {
# Do not continue on invalid input and render a default 400
# error document.
my $c = shift->openapi->valid_input or return;
# IP check goes here (see _check_client_ip above)
my $spec = $c->openapi->spec;
# $c->openapi->valid_input copies valid data to validation object,
# and the normal Mojolicious api works as well.
my $body = $c->validation->output->{body};
# NOTE(review): removed the leftover debug line
# "say STDERR Dump $c->config;" -- it dumped the full application
# config (potentially including secrets) to the error log on every
# request.
# Overwrite the current-status JSON snapshot.
my $pi_file = $c->config->{'datadir'} ? $c->config->{'datadir'}.'/pi-status.json' : $ENV{HOME}.'/etc/pi-status.json';
{
open my $fh,'>', $pi_file;
print $fh Mojo::JSON::to_json($body);
close $fh;
}
my $pi_history = $c->config->{'datadir'} ? $c->config->{'datadir'}.'/pi-history.csv' : $ENV{HOME}.'/etc/pi-history.csv';
# Append one "epoch;temperature" line to the history CSV.
{
my $temp = $body->{temp};
$temp =~ s/\D+$//; # strip a trailing unit (e.g. "'C"); /g was a no-op on this anchored pattern
open my $fh,'>>',$pi_history;
printf $fh "%s;%s\n",Mojo::Date->new($body->{a_time})->epoch, $temp;
close $fh;
}
$c->render(openapi => 'ok');
}
1;
| slegga/webserver | lib/API/PiData/Pi.pm | Perl | mit | 1,820 |
#=======================================================================
# ____ ____ _____ _ ____ ___ ____
# | _ \| _ \| ___| _ _ / \ | _ \_ _| |___ \
# | |_) | | | | |_ (_) (_) / _ \ | |_) | | __) |
# | __/| |_| | _| _ _ / ___ \| __/| | / __/
# |_| |____/|_| (_) (_) /_/ \_\_| |___| |_____|
#
# A Perl Module Chain to faciliate the Creation and Modification
# of High-Quality "Portable Document Format (PDF)" Files.
#
#=======================================================================
#
# THIS IS A REUSED PERL MODULE, FOR PROPER LICENCING TERMS SEE BELOW:
#
#
# Copyright Martin Hosken <Martin_Hosken@sil.org>
#
# No warranty or expression of effectiveness, least of all regarding
# anyone's safety, is implied in this software or documentation.
#
# This specific module is licensed under the Perl Artistic License.
#
#
# $Id: Maxp.pm,v 2.0 2005/11/16 02:16:00 areibens Exp $
#
#=======================================================================
package PDF::API2::Basic::TTF::Maxp;
=head1 NAME
PDF::API2::Basic::TTF::Maxp - Maximum Profile table in a font
=head1 DESCRIPTION
A collection of useful instance variables following the TTF standard. Probably
the most used being C<numGlyphs>. Note that this particular value is
foundational and should be kept up to date by the application, it is not updated
by C<update>.
Handles table versions 0.5, 1.0
=head1 INSTANCE VARIABLES
No others beyond those specified in the standard:
numGlyphs
maxPoints
maxContours
maxCompositePoints
maxCompositeContours
maxZones
maxTwilightPoints
maxStorage
maxFunctionDefs
maxInstructionDefs
maxStackElements
maxSizeOfInstructions
maxComponentElements
maxComponentDepth
=head1 METHODS
=cut
use strict;
use vars qw(@ISA %fields @field_info);
use PDF::API2::Basic::TTF::Utils;
@ISA = qw(PDF::API2::Basic::TTF::Table);
# Name/pack-format pairs for every maxp v1.0 field, in file order.
# 'S' is the TTF_Utils code for an unsigned 16-bit value.
@field_info = (
'numGlyphs' => 'S',
'maxPoints' => 'S',
'maxContours' => 'S',
'maxCompositePoints' => 'S',
'maxCompositeContours' => 'S',
'maxZones' => 'S',
'maxTwilightPoints' => 'S',
'maxStorage' => 'S',
'maxFunctionDefs' => 'S',
'maxInstructionDefs' => 'S',
'maxStackElements' => 'S',
'maxSizeOfInstructions' => 'S',
'maxComponentElements' => 'S',
'maxComponentDepth' => 'S');
# Build the package-level %fields descriptor table from @field_info via
# TTF_Init_Fields; $c threads the running byte offset between calls.
# Called lazily from read() the first time the table is parsed.
sub init
{
my ($k, $v, $c, $i);
for ($i = 0; $i < $#field_info; $i += 2)
{
($k, $v, $c) = TTF_Init_Fields($field_info[$i], $c, $field_info[$i + 1]);
next unless defined $k && $k ne "";
$fields{$k} = $v;
}
}
=head2 $t->read
Reads the table into memory
=cut
# Read the maxp table from the font file. A version 0.5 table carries
# only numGlyphs (2 bytes); version 1.0 carries the full 28-byte field
# block decoded through %fields.
sub read
{
my ($self) = @_;
my ($dat);
$self->SUPER::read or return $self;
init unless defined $fields{'numGlyphs'}; # any key would do
# 4-byte fixed-point table version.
$self->{' INFILE'}->read($dat, 4);
$self->{'version'} = TTF_Unpack("f", $dat);
if ($self->{'version'} == 0.5)
{
$self->{' INFILE'}->read($dat, 2);
$self->{'numGlyphs'} = unpack("n", $dat);
} else
{
$self->{' INFILE'}->read($dat, 28);
TTF_Read_Fields($self, $dat, \%fields);
}
$self;
}
=head2 $t->out($fh)
Writes the table to a file either from memory or by copying.
=cut
# Write the table to $fh. If the table was never parsed (' read' unset)
# it is copied through verbatim by the superclass; otherwise the
# version decides whether only numGlyphs or all 28 field bytes follow.
sub out
{
my ($self, $fh) = @_;
return $self->SUPER::out($fh) unless $self->{' read'};
$fh->print(TTF_Pack("f", $self->{'version'}));
if ($self->{'version'} == 0.5)
{ $fh->print(pack("n", $self->{'numGlyphs'})); }
else
{ $fh->print(TTF_Out_Fields($self, \%fields, 28)); }
$self;
}
=head2 $t->update
Calculates all the maximum values for a font based on the glyphs in the font.
Only those fields which require hinting code interpretation are ignored and
left as they were read.
=cut
# Recompute the per-glyph maxima (@name fields) by scanning every glyph
# in the loca table; maxSizeOfInstructions ($m[4]) additionally covers
# the prep and fpgm programs. Hinting-interpreter fields are left as
# read. Returns undef for v0.5 tables or when loca is absent.
sub update
{
my ($self) = @_;
my ($i, $num, @n, @m, $j);
my (@name) = qw(maxPoints maxContours maxCompositePoints maxCompositeContours
maxSizeOfInstructions maxComponentElements maxComponentDepth);
return undef unless ($self->SUPER::update);
return undef if ($self->{'version'} == 0.5); # only got numGlyphs
return undef unless (defined $self->{' PARENT'}{'loca'});
$self->{' PARENT'}{'loca'}->update;
$num = $self->{'numGlyphs'};
for ($i = 0; $i < $num; $i++)
{
my ($g) = $self->{' PARENT'}{'loca'}{'glyphs'}[$i] || next;
# @n holds this glyph's values in the same order as @name.
@n = $g->maxInfo($self->{' PARENT'}{'loca'}{'glyphs'});
# NOTE(review): $m[$j] starts undef, so the first numeric comparison
# warns under "use warnings" (undef compares as 0) -- harmless here.
for ($j = 0; $j <= $#n; $j++)
{ $m[$j] = $n[$j] if $n[$j] > $m[$j]; }
}
foreach ('prep', 'fpgm')
{ $m[4] = length($self->{' PARENT'}{$_}{' dat'})
if (defined $self->{' PARENT'}{$_}
&& length($self->{' PARENT'}{$_}{' dat'}) > $m[4]);
}
for ($j = 0; $j <= $#name; $j++)
{ $self->{$name[$j]} = $m[$j]; }
$self;
}
1;
=head1 BUGS
None known
=head1 AUTHOR
Martin Hosken Martin_Hosken@sil.org. See L<PDF::API2::Basic::TTF::Font> for copyright and
licensing.
=cut
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/PDF/API2/Basic/TTF/Maxp.pm | Perl | mit | 5,228 |
% lt(+A, +B): succeeds when number A is strictly smaller than number B.
lt(A,B):- A < B.
% Background-knowledge typing: both arguments of lt/2 must be numbers.
type_restriction(lt(A,B),[number(A),number(B)]).
% Training examples for learning merge/3 (merging two sorted lists into
% one sorted list).  ex(Goal, '+') marks a positive example,
% ex(Goal, '-') a negative one.
%
% Positive examples: correct merges of two ascending lists.
ex(merge([],[],[]),'+').
ex(merge([],[2],[2]),'+').
ex(merge([],[3,4],[3,4]),'+').
ex(merge([5],[],[5]),'+').
ex(merge([2,3],[],[2,3]),'+').
ex(merge([1],[2],[1,2]),'+').
ex(merge([0,1],[3,4],[0,1,3,4]),'+').
ex(merge([1],[3,4],[1,3,4]),'+').
ex(merge([5],[4],[4,5]),'+').
ex(merge([6,7],[3,4],[3,4,6,7]),'+').
ex(merge([6,7],[4],[4,6,7]),'+').
ex(merge([2,3,4,6],[5,7],[2,3,4,5,6,7]),'+').
ex(merge([43,55,63],[22,33,44,53],[22,33,43,44,53,55,63]),'+').
ex(merge([29,39,49,59],[37,79,99],[29,37,39,49,59,79,99]),'+').
ex(merge([2],[4,7],[2,4,7]),'+').
% Negative examples: wrong merges (extra/missing elements, unsorted or
% duplicated output).
ex(merge([],[],[3]),'-').
ex(merge([],[2],[1,2]),'-').
ex(merge([],[3,4],[4,3]),'-').
ex(merge([5,6],[],[5]),'-').
ex(merge([2,3],[],[1,2,3]),'-').
ex(merge([1],[2],[2,1]),'-').
ex(merge([0,1],[3,4],[0,1,4]),'-').
ex(merge([1],[3,4],[4,1,1,3]),'-').
ex(merge([5],[1,4],[4,1,5]),'-').
ex(merge([6,7],[3,4],[3,6,4,7]),'-').
ex(merge([6,7],[4],[6,7,4]),'-').
% Everything after end_of_file is ignored by the Prolog reader.
end_of_file.
| ?- clear_kb, do_full_kb('examples/ex9.pl').
try:
:- clear_kb, init_kb('examples/ex9.pl').
% file "/tmp_mnt/home/stahl/edl/framework/miles/examples/ex9.pl" consulted.
:- store_clause(merge([],[2],[2]),_,user,28).
% rule added.
:- store_clause(merge([5],[],[5]),_,user,29).
% rule added.
:- store_clause(merge([1],[2],[1,2]),_,user,30).
% rule added.
:- store_clause(merge([0,1],[3,4],[0,1,3,4]),_,user,31).
% rule added.
:- store_clause(merge([1],[3,4],[1,3,4]),_,user,32).
% rule added.
:- store_clause(merge([5],[4],[4,5]),_,user,33).
% rule added.
:- store_clause(merge([6,7],[3,4],[3,4,6,7]),_,user,34).
% rule added.
:- store_clause(merge([6,7],[4],[4,6,7]),_,user,35).
% rule added.
:- flatten_rules.
% yes
:- saturate(30,Xmout1,5).
% yes
% rule 36 created.
:- saturate(31,Xmout1,5).
% yes
% rule 37 created.
:- saturate(33,Xmout1,5).
% yes
% rule 38 created.
:- saturate(34,Xmout1,5).
% yes
% rule 39 created.
:- unflatten_kb.
% yes
:- lgg(36,37,Xmout1).
% yes
% rule 40 created.
:- lgg(38,39,Xmout1).
% yes
% rule 41 created.
:- delete_clause(36).
:- delete_clause(37).
:- delete_clause(38).
:- delete_clause(39).
% selected rules deleted.
:- delete_clause(28).
:- delete_clause(29).
:- delete_clause(30).
:- delete_clause(31).
:- delete_clause(32).
:- delete_clause(33).
:- delete_clause(34).
:- delete_clause(35).
% selected rules deleted.
:- argument_types.
% yes
:- clause_heads.
% yes
:- delete_clause(47).
:- delete_clause(48).
% selected rules deleted.
:- eval_examples.
% yes
:- complete_chk.
% yes
:- correct_chk.
% no
:- fp(Xmout1).
% yes
% resulting rules selected
:- refinement_add_body_literal(40,Xmout1).
% yes
:- eval_examples.
% yes
:- complete_chk.
% yes
:- correct_chk.
% no
:- fp(Xmout1).
% yes
% resulting rules selected
:- refinement_add_body_literal(41,Xmout1).
% yes
:- eval_examples.
% yes
:- correct_chk.
% yes
:- complete_chk.
% yes
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nars/prolog/miles/examples/ex9.pl | Perl | mit | 2,887 |
package rlines;
use strict;
use Exporter;
use Encode;
use vars qw($VERSION @ISA @EXPORT @EXPORT_OK %EXPORT_TAGS);
$VERSION = 0.1;
@ISA = qw(Exporter);
@EXPORT = ();
@EXPORT_OK = qw();
%EXPORT_TAGS = (DEFAULT => [qw()], ALL =>[qw()]);
use FindBin qw($Bin);
use lib "$Bin/../modules";
use RoundNumber;
use Data::Dumper;
### rfarrer@broadinstitute.org
# Run an R script file with Rscript.
# SECURITY FIX: uses the list form of system() so the file name is passed
# directly to Rscript without shell interpretation — spaces or shell
# metacharacters in $R_file previously broke the command (or were executed).
# Still always returns 1 as before; a non-zero exit status is only warned.
sub launch_R {
    my $R_file = $_[0];
    #my $CMD = "cat $R_file | R --vanilla";
    my $status = system('Rscript', $R_file);
    warn "Rscript exited with status $status for $R_file\n" if $status != 0;
    return 1;
}
# Emit the standard R epilogue (close the graphics device, quit R) on the
# given filehandle and close it.  Always returns 1.
sub close_device_print {
    my ($handle) = @_;
    print {$handle} "dev.off()\n", "q()\n\n";
    close $handle;
    return 1;
}
# Write R commands to $fh drawing the x axis for a genome plot spanning
# 0..$xmax: tick positions, tick labels rounded to a readable unit, and
# the axis title.  Relies on roundnumber:: to pick the unit (e.g. "Mb").
# NOTE(review): unshift modifies the caller's arrays in place — confirm
# callers do not reuse them afterwards.
sub print_xlab_and_xaxis_for_windows {
    my ($xmax, $fh) = @_;
    my $tick_count = 9;
    my ($values, $rounded_values, $units) = roundnumber::split_and_round_to_suitable_number_and_display_suitable_unit($xmax, $tick_count);
    # units = "Mb" for example, rounded_values = (400, 800, 1200), values = (4, 8, 12)

    # Prepend the origin tick to both scales.
    unshift @{$values}, '0';
    unshift @{$rounded_values}, '0';

    # Tick positions, quoted and comma separated for R's c(...).
    my $positions = join(', ', map { "\"$_\"" } @{$values});
    print $fh "lablist.top<-as.vector(c($positions))\n";

    # Draw the axis line itself without labels.
    print $fh "axis(1, at=lablist.top, line=0.4, lwd=2, labels=FALSE)\n";

    # Draw the rounded tick labels.
    my $labels = join(', ', map { "\"$_\"" } @{$rounded_values});
    print $fh "axis(1, at=lablist.top, lab=c($labels), line=0.18, lwd=0, cex=1.5)\n";

    # Axis title carrying the chosen unit.
    print $fh "mtext(\"Position in genome ($units)\", outer = TRUE, side=1, line=2.4, cex=2)\n";
    return 1;
}
1;
| rhysf/Synima | modules/write_Rscript.pm | Perl | mit | 1,788 |
package Opcode;
require 5.002;
use vars qw($VERSION $XS_VERSION @ISA @EXPORT_OK);
$VERSION = "1.04";
$XS_VERSION = "1.03";
use strict;
use Carp;
use Exporter ();
use DynaLoader ();
@ISA = qw(Exporter DynaLoader);
BEGIN {
@EXPORT_OK = qw(
opset ops_to_opset
opset_to_ops opset_to_hex invert_opset
empty_opset full_opset
opdesc opcodes opmask define_optag
opmask_add verify_opset opdump
);
}
sub opset (;@);
sub opset_to_hex ($);
sub opdump (;$);
use subs @EXPORT_OK;
bootstrap Opcode $XS_VERSION;
_init_optags();
sub ops_to_opset { opset @_ } # alias for old name
# Render an opset as a low-nibble-first hex string, mainly for debugging.
# Returns "(invalid opset)" rather than dying on a malformed opset.
sub opset_to_hex ($) {
    my ($opset) = @_;
    return "(invalid opset)" unless verify_opset($opset);
    return unpack("h*", $opset);
}
# Print a two-column "opname description" listing of every opcode,
# optionally filtered by a case-insensitive pattern.
# handy utility: perl -MOpcode=opdump -e 'opdump File'
sub opdump (;$) {
    my $filter = shift;
    foreach my $opname (opset_to_ops(full_opset)) {
        my $line = sprintf " %12s %s\n", $opname, opdesc($opname);
        print $line unless defined $filter and $line !~ m/$filter/i;
    }
}
# Parse the POD in the DATA section below and register each "=item :tag"
# section as a named optag.  Only indented lines inside a section are
# treated as opcode names; "--" introduces a trailing comment.  Warns if
# an opcode appears under two tags or under none.
sub _init_optags {
    my(%all, %seen);
    @all{opset_to_ops(full_opset)} = (); # keys only
    local($_);
    # Record-separator tricks: first skip everything up to the "=cut"
    # that ends the public POD, then read the rest in "=..." chunks.
    local($/) = "\n=cut"; # skip to optags definition section
    <DATA>;
    $/ = "\n="; # now read in 'pod section' chunks
    while(<DATA>) {
        next unless m/^item\s+(:\w+)/;
        my $tag = $1;
        # Split into lines, keep only indented lines
        my @lines = grep { m/^\s/ } split(/\n/);
        foreach (@lines) { s/--.*// } # delete comments
        my @ops = map { split ' ' } @lines; # get op words
        foreach(@ops) {
            warn "$tag - $_ already tagged in $seen{$_}\n" if $seen{$_};
            $seen{$_} = $tag;
            delete $all{$_};
        }
        # opset will croak on invalid names
        define_optag($tag, opset(@ops));
    }
    close(DATA);
    warn "Untagged opnames: ".join(' ',keys %all)."\n" if %all;
}
1;
__DATA__
=head1 NAME
Opcode - Disable named opcodes when compiling perl code
=head1 SYNOPSIS
use Opcode;
=head1 DESCRIPTION
Perl code is always compiled into an internal format before execution.
Evaluating perl code (e.g. via "eval" or "do 'file'") causes
the code to be compiled into an internal format and then,
provided there was no error in the compilation, executed.
The internal format is based on many distinct I<opcodes>.
By default no opmask is in effect and any code can be compiled.
The Opcode module allows you to define an I<operator mask> to be in
effect when perl I<next> compiles any code. Attempting to compile code
which contains a masked opcode will cause the compilation to fail
with an error. The code will not be executed.
=head1 NOTE
The Opcode module is not usually used directly. See the ops pragma and
Safe modules for more typical uses.
=head1 WARNING
The authors make B<no warranty>, implied or otherwise, about the
suitability of this software for safety or security purposes.
The authors shall not in any case be liable for special, incidental,
consequential, indirect or other similar damages arising from the use
of this software.
Your mileage will vary. If in any doubt B<do not use it>.
=head1 Operator Names and Operator Lists
The canonical list of operator names is the contents of the array
op_name defined and initialised in file F<opcode.h> of the Perl
source distribution (and installed into the perl library).
Each operator has both a terse name (its opname) and a more verbose or
recognisable descriptive name. The opdesc function can be used to
return a list of descriptions for a list of operators.
Many of the functions and methods listed below take a list of
operators as parameters. Most operator lists can be made up of several
types of element. Each element can be one of
=over 8
=item an operator name (opname)
Operator names are typically small lowercase words like enterloop,
leaveloop, last, next, redo etc. Sometimes they are rather cryptic
like gv2cv, i_ncmp and ftsvtx.
=item an operator tag name (optag)
Operator tags can be used to refer to groups (or sets) of operators.
Tag names always begin with a colon. The Opcode module defines several
optags and the user can define others using the define_optag function.
=item a negated opname or optag
An opname or optag can be prefixed with an exclamation mark, e.g., !mkdir.
Negating an opname or optag means remove the corresponding ops from the
accumulated set of ops at that point.
=item an operator set (opset)
An I<opset> as a binary string of approximately 43 bytes which holds a
set or zero or more operators.
The opset and opset_to_ops functions can be used to convert from
a list of operators to an opset and I<vice versa>.
Wherever a list of operators can be given you can use one or more opsets.
See also Manipulating Opsets below.
=back
=head1 Opcode Functions
The Opcode package contains functions for manipulating operator names
tags and sets. All are available for export by the package.
=over 8
=item opcodes
In a scalar context opcodes returns the number of opcodes in this
version of perl (around 340 for perl5.002).
In a list context it returns a list of all the operator names.
(Not yet implemented, use @names = opset_to_ops(full_opset).)
=item opset (OP, ...)
Returns an opset containing the listed operators.
=item opset_to_ops (OPSET)
Returns a list of operator names corresponding to those operators in
the set.
=item opset_to_hex (OPSET)
Returns a string representation of an opset. Can be handy for debugging.
=item full_opset
Returns an opset which includes all operators.
=item empty_opset
Returns an opset which contains no operators.
=item invert_opset (OPSET)
Returns an opset which is the inverse set of the one supplied.
=item verify_opset (OPSET, ...)
Returns true if the supplied opset looks like a valid opset (is the
right length etc) otherwise it returns false. If an optional second
parameter is true then verify_opset will croak on an invalid opset
instead of returning false.
Most of the other Opcode functions call verify_opset automatically
and will croak if given an invalid opset.
=item define_optag (OPTAG, OPSET)
Define OPTAG as a symbolic name for OPSET. Optag names always start
with a colon C<:>.
The optag name used must not be defined already (define_optag will
croak if it is already defined). Optag names are global to the perl
process and optag definitions cannot be altered or deleted once
defined.
It is strongly recommended that applications using Opcode should use a
leading capital letter on their tag names since lowercase names are
reserved for use by the Opcode module. If using Opcode within a module
you should prefix your tags names with the name of your module to
ensure uniqueness and thus avoid clashes with other modules.
=item opmask_add (OPSET)
Adds the supplied opset to the current opmask. Note that there is
currently I<no> mechanism for unmasking ops once they have been masked.
This is intentional.
=item opmask
Returns an opset corresponding to the current opmask.
=item opdesc (OP, ...)
This takes a list of operator names and returns the corresponding list
of operator descriptions.
=item opdump (PAT)
Dumps to STDOUT a two column list of op names and op descriptions.
If an optional pattern is given then only lines which match the
(case insensitive) pattern will be output.
It's designed to be used as a handy command line utility:
perl -MOpcode=opdump -e opdump
perl -MOpcode=opdump -e 'opdump Eval'
=back
=head1 Manipulating Opsets
Opsets may be manipulated using the perl bit vector operators & (and), | (or),
^ (xor) and ~ (negate/invert).
However you should never rely on the numerical position of any opcode
within the opset. In other words both sides of a bit vector operator
should be opsets returned from Opcode functions.
Also, since the number of opcodes in your current version of perl might
not be an exact multiple of eight, there may be unused bits in the last
byte of an opset. This should not cause any problems (Opcode functions
ignore those extra bits) but it does mean that using the ~ operator
will typically not produce the same 'physical' opset 'string' as the
invert_opset function.
=head1 TO DO (maybe)
$bool = opset_eq($opset1, $opset2) true if opsets are logically equivalent
$yes = opset_can($opset, @ops) true if $opset has all @ops set
@diff = opset_diff($opset1, $opset2) => ('foo', '!bar', ...)
=cut
# the =cut above is used by _init_optags() to get here quickly
=head1 Predefined Opcode Tags
=over 5
=item :base_core
null stub scalar pushmark wantarray const defined undef
rv2sv sassign
rv2av aassign aelem aelemfast aslice av2arylen
rv2hv helem hslice each values keys exists delete
preinc i_preinc predec i_predec postinc i_postinc postdec i_postdec
int hex oct abs pow multiply i_multiply divide i_divide
modulo i_modulo add i_add subtract i_subtract
left_shift right_shift bit_and bit_xor bit_or negate i_negate
not complement
lt i_lt gt i_gt le i_le ge i_ge eq i_eq ne i_ne ncmp i_ncmp
slt sgt sle sge seq sne scmp
substr vec stringify study pos length index rindex ord chr
ucfirst lcfirst uc lc quotemeta trans chop schop chomp schomp
match split qr
list lslice splice push pop shift unshift reverse
cond_expr flip flop andassign orassign and or xor
warn die lineseq nextstate unstack scope enter leave
rv2cv anoncode prototype
entersub leavesub return method -- XXX loops via recursion?
leaveeval -- needed for Safe to operate, is safe without entereval
=item :base_mem
These memory related ops are not included in :base_core because they
can easily be used to implement a resource attack (e.g., consume all
available memory).
concat repeat join range
anonlist anonhash
Note that despite the existence of this optag a memory resource attack
may still be possible using only :base_core ops.
Disabling these ops is a I<very> heavy handed way to attempt to prevent
a memory resource attack. It's probable that a specific memory limit
mechanism will be added to perl in the near future.
=item :base_loop
These loop ops are not included in :base_core because they can easily be
used to implement a resource attack (e.g., consume all available CPU time).
grepstart grepwhile
mapstart mapwhile
enteriter iter
enterloop leaveloop
last next redo
goto
=item :base_io
These ops enable I<filehandle> (rather than filename) based input and
output. These are safe on the assumption that only pre-existing
filehandles are available for use. To create new filehandles other ops
such as open would need to be enabled.
readline rcatline getc read
formline enterwrite leavewrite
print sysread syswrite send recv
eof tell seek sysseek
readdir telldir seekdir rewinddir
=item :base_orig
These are a hotchpotch of opcodes still waiting to be considered
gvsv gv gelem
padsv padav padhv padany
rv2gv refgen srefgen ref
bless -- could be used to change ownership of objects (reblessing)
pushre regcmaybe regcreset regcomp subst substcont
sprintf prtf -- can core dump
crypt
tie untie
dbmopen dbmclose
sselect select
pipe_op sockpair
getppid getpgrp setpgrp getpriority setpriority localtime gmtime
entertry leavetry -- can be used to 'hide' fatal errors
=item :base_math
These ops are not included in :base_core because of the risk of them being
used to generate floating point exceptions (which would have to be caught
using a $SIG{FPE} handler).
atan2 sin cos exp log sqrt
These ops are not included in :base_core because they have an effect
beyond the scope of the compartment.
rand srand
=item :base_thread
These ops are related to multi-threading.
lock threadsv
=item :default
A handy tag name for a I<reasonable> default set of ops. (The current ops
allowed are unstable while development continues. It will change.)
:base_core :base_mem :base_loop :base_io :base_orig :base_thread
If safety matters to you (and why else would you be using the Opcode module?)
then you should not rely on the definition of this, or indeed any other, optag!
=item :filesys_read
stat lstat readlink
ftatime ftblk ftchr ftctime ftdir fteexec fteowned fteread
ftewrite ftfile ftis ftlink ftmtime ftpipe ftrexec ftrowned
ftrread ftsgid ftsize ftsock ftsuid fttty ftzero ftrwrite ftsvtx
fttext ftbinary
fileno
=item :sys_db
ghbyname ghbyaddr ghostent shostent ehostent -- hosts
gnbyname gnbyaddr gnetent snetent enetent -- networks
gpbyname gpbynumber gprotoent sprotoent eprotoent -- protocols
gsbyname gsbyport gservent sservent eservent -- services
gpwnam gpwuid gpwent spwent epwent getlogin -- users
ggrnam ggrgid ggrent sgrent egrent -- groups
=item :browse
A handy tag name for a I<reasonable> default set of ops beyond the
:default optag. Like :default (and indeed all the other optags) its
current definition is unstable while development continues. It will change.
The :browse tag represents the next step beyond :default. It is a
superset of the :default ops and adds :filesys_read the :sys_db.
The intent being that scripts can access more (possibly sensitive)
information about your system but not be able to change it.
:default :filesys_read :sys_db
=item :filesys_open
sysopen open close
umask binmode
open_dir closedir -- other dir ops are in :base_io
=item :filesys_write
link unlink rename symlink truncate
mkdir rmdir
utime chmod chown
fcntl -- not strictly filesys related, but possibly as dangerous?
=item :subprocess
backtick system
fork
wait waitpid
glob -- access to Cshell via <`rm *`>
=item :ownprocess
exec exit kill
time tms -- could be used for timing attacks (paranoid?)
=item :others
This tag holds groups of assorted specialist opcodes that don't warrant
having optags defined for them.
SystemV Interprocess Communications:
msgctl msgget msgrcv msgsnd
semctl semget semop
shmctl shmget shmread shmwrite
=item :still_to_be_decided
chdir
flock ioctl
socket getpeername ssockopt
bind connect listen accept shutdown gsockopt getsockname
sleep alarm -- changes global timer state and signal handling
sort -- assorted problems including core dumps
tied -- can be used to access object implementing a tie
pack unpack -- can be used to create/use memory pointers
entereval -- can be used to hide code from initial compile
require dofile
caller -- get info about calling environment and args
reset
dbstate -- perl -d version of nextstate(ment) opcode
=item :dangerous
This tag is simply a bucket for opcodes that are unlikely to be used via
a tag name but need to be tagged for completeness and documentation.
syscall dump chroot
=back
=head1 SEE ALSO
ops(3) -- perl pragma interface to Opcode module.
Safe(3) -- Opcode and namespace limited execution compartments
=head1 AUTHORS
Originally designed and implemented by Malcolm Beattie,
mbeattie@sable.ox.ac.uk as part of Safe version 1.
Split out from Safe module version 1, named opcode tags and other
changes added by Tim Bunce.
=cut
| wolispace/cow_windows_server | oldcow_on_apache/perl/lib/Opcode.pm | Perl | apache-2.0 | 15,165 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
=head1 NAME
dump_vcf.pl - dumps variations from variation DB into VCF file
=cut
use strict;
use warnings;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Utils::Iterator;
use Bio::EnsEMBL::Utils::Sequence qw(expand);
use Bio::EnsEMBL::Utils::Slice qw(split_Slices);
use Bio::EnsEMBL::Utils::Sequence qw(expand);
use Bio::EnsEMBL::Variation::DBSQL::DBAdaptor;
use Getopt::Long;
use FileHandle;
use List::Util qw(first);
$| = 1;
my $config = {};
GetOptions(
$config,
'help|h',
'output_file|o=s',
'species=s',
'registry|r=s',
'host=s',
'user=s',
'port=i',
'db_version=i',
'population=s',
'individuals=s',
'allele_freq_in_population=s',
'seq_regions=s',
'consequences',
'protein_coding_details',
'ancestral_allele',
'global_maf',
'evidence_values',
'ref_fasta_file=s',
'data_source=s',
) or die "ERROR: Failed to parse command line arguments";
if (defined($config->{help})) {
&usage;
exit(0);
}
die "species argument required, try --help for usage instructions\n" unless $config->{species};
die "Can't fetch for a population and an individual at once"
if $config->{population} && $config->{individuals};
# default to a sensible file name
my $species = $config->{species};
$config->{output_file} ||= "$species.vcf";
my $reg = 'Bio::EnsEMBL::Registry';
if ( defined($config->{host}) && defined($config->{user})) {
$config->{port} ||= 3306;
$reg->load_registry_from_db(-host => $config->{host}, -user => $config->{user}, -port => $config->{port}, -db_version => $config->{db_version});
} else {
if (-e $config->{registry}) {
$reg->load_all($config->{registry});
} else {
die "ERROR: could not read from registry file ".$config->{registry}."\n";
}
}
# connect to DB
my $vdba = $reg->get_DBAdaptor($config->{species},'variation') || usage( "Cannot find variation db for ".$config->{species}." in ".$config->{registry_file} );
my $cdba = $reg->get_DBAdaptor($config->{species},'core') || usage( "Cannot find core db for ".$config->{species}." in ".$config->{registry_file} );
my $sliceAdaptor = $cdba->get_SliceAdaptor;
my $vfAdaptor = $vdba->get_VariationFeatureAdaptor;
my $populationAdaptor = $vdba->get_PopulationAdaptor;
my $alleleAdaptor = $vdba->get_AlleleAdaptor;
my $igtAdaptor = $vdba->get_IndividualGenotypeAdaptor;
my $individuals;
my $population;
if ($config->{allele_freq_in_population}) {
print $config->{allele_freq_in_population}, "\n";
$population = $populationAdaptor->fetch_by_name($config->{allele_freq_in_population});
}
if ($config->{population}) {
$population = $populationAdaptor->fetch_by_name($config->{population});
$individuals = $population->get_all_Individuals();
$config->{sample_data} = 1;
$config->{format} = 'GT';
}
if ($config->{individuals}) {
my $individualAdaptor = $vdba->get_IndividualAdaptor;
if (uc $config->{individuals} eq 'ALL') {
$individuals = $individualAdaptor->fetch_all();
} else {
my @names = split(',', $config->{individuals});
foreach my $name (@names) {
my $individual_objects = $individualAdaptor->fetch_all_by_name($name);
die "More than one individual for name $name." if (scalar @$individual_objects > 1);
push @$individuals, $individual_objects->[0];
}
}
$config->{sample_data} = 1;
$config->{format} = 'GT';
}
if ($config->{sample_data}) {
foreach my $individual (@$individuals) {
$config->{sample}->{$individual->name} = 1;
}
}
my $map_evidence = {
'Multiple_observations' => 'E_MO',
'Frequency' => 'E_Freq',
'HapMap' => 'E_HM',
'1000Genomes' => 'E_1000G',
'Cited' => 'E_C',
};
my $slices;
if ($config->{seq_regions}) {
my @regions = split(',', $config->{seq_regions});
foreach my $region (@regions) {
my $slice = $sliceAdaptor->fetch_by_region('toplevel', $region);
push @$slices, $slice;
}
} else {
# include only non-duplicate regions
$slices = $sliceAdaptor->fetch_all('toplevel', undef, 0, 1);
}
my $max_length = 1e4;
my $overlap = 0;
# ---- main driver ---------------------------------------------------------
# Iteration scratch variables ($vfs appears unused in this section).
my ($slice_pieces, $vf_it, $vfs);
# Open the output VCF and write the meta-information header first.
my $file_handle = FileHandle->new("> ".$config->{output_file});
print_header($config);
foreach my $slice (@$slices) {
    # Split each toplevel slice into pieces of at most $max_length bp so
    # variation features are fetched in bounded batches.
    $slice_pieces = split_Slices([$slice], $max_length, $overlap);
    foreach my $slice_piece (@$slice_pieces) {
        $vf_it = $vfAdaptor->fetch_Iterator_by_Slice($slice_piece);
        while (my $vf = $vf_it->next) {
            next if ($vf->seq_region_start <= $slice_piece->start); # avoid duplicated lines caused by vf overlapping two slice pieces
            my $vcf_line = {};
            $vcf_line->{CHROM} = $vf->seq_region_name;
            $vcf_line->{ID} = $vf->variation_name;
            # Expand compressed allele strings (e.g. "(AC)3") before use.
            my @alleles = split /\//, $vf->allele_string;
            map {expand(\$_)} @alleles;
            $vcf_line->{allele_string} = join('/', @alleles);
            # alleles() also performs allele QC; skip variants that fail.
            next unless alleles($config, $vf, $vcf_line);
            my $source = $vf->source;
            $source .= '_' . $vf->source_version if defined $vf->source_version;
            $vcf_line->{INFO}->{DB} = $source;
            if ($config->{sample_data}) {
                # Skip variants with no genotype in any requested sample.
                next unless (genotypes($vcf_line, $vf));
            }
            if ($config->{consequences} || $config->{protein_coding_details}) {
                consequences($config, $vcf_line, $vf);
            }
            if ($config->{global_maf}) {
                global_maf($vcf_line, $vf);
            }
            if ($config->{allele_freq_in_population}) {
                allele_freq_in_population($vcf_line, $vf);
            }
            if ($config->{ancestral_allele}) {
                ancestral_allele($vcf_line, $vf);
            }
            if ($config->{evidence_values}) {
                evidence_values($vcf_line, $vf);
            }
            print_vcf_line($config, $vcf_line);
        }
    }
}
$file_handle->close();
# Validate the alleles of a variation feature and fill in the POS/REF/ALT
# fields of $vcf_line.  Balanced substitutions are emitted as-is; for
# indels/unbalanced variants the base before the variant is prepended to
# every allele and POS shifted back by one, per VCF convention.
# Returns 1 when the alleles pass QC, 0 otherwise (the caller then skips
# the variant).
sub alleles {
    my $config = shift;
    my $vf = shift;
    my $vcf_line = shift;
    my %allele_lengths;
    my @alleles = split /\//, $vcf_line->{allele_string};
    # quality control alleles: plain bases, "-" (deletion) or "<...>"
    # (symbolic, e.g. structural variants) only
    my $qc = 1;
    foreach my $allele (@alleles) {
        unless ($allele=~/(^[ACTGNacgtn]+$)|(^-$)|(^<.*>$)/) {
            warn "$allele in $vcf_line->{ID} did not pass allele quality control checks";
            $qc = 0;
        }
    }
    if ($qc) {
        # look for imbalance in the allele string
        foreach my $allele(@alleles) {
            $allele =~ s/\-//g;
            $allele_lengths{length($allele)} = 1;
        }
        my $start = $vf->seq_region_start;
        my $end = $vf->seq_region_end;
        # insertions have start = end + 1; normalise to the smaller value
        if ($start > $end) {
            $start = $end;
        }
        # in/del/unbalanced
        if(scalar keys %allele_lengths > 1) {
            # we need the ref base before the variation
            # default to N in case we can't get it
            my $prev_base = 'N';
            my $loc = $start - 1;
            my $seq_region_name = $vcf_line->{CHROM};
            my $slice = $sliceAdaptor->fetch_by_toplevel_location("$seq_region_name:$loc-$loc");
            $prev_base = $slice->seq if defined($slice);
            for my $i(0..$#alleles) {
                $alleles[$i] =~ s/\-//g;
                $alleles[$i] = $prev_base.$alleles[$i];
            }
            $vcf_line->{POS} = $start - 1;
            $vcf_line->{REF} = shift @alleles;
            $vcf_line->{ALT} = join(',', @alleles);
        }
        # balanced sub
        else {
            $vcf_line->{POS} = $start;
            $vcf_line->{REF} = shift @alleles;
            $vcf_line->{ALT} = join(',', @alleles);
        }
    }
    return $qc;
}
# Fill $vcf_line->{sample}{<name>} with a "i/j" genotype (allele indexes
# into the expanded allele string) for each requested individual, or '.'
# when no genotype is stored.  Returns 1 if at least one individual had a
# genotype, 0 otherwise (the caller then skips the variant).
sub genotypes {
    my $vcf_line = shift;
    my $vf = shift;
    my $has_gt = 0;
    foreach my $individual (@$individuals) {
        my $igts = $igtAdaptor->fetch_all_by_Variation($vf->variation, $individual);
        if (scalar @$igts > 0) {
            my $igt = $igts->[0];
            my @alleles = split /\//, $vcf_line->{allele_string};
            # BUG FIX: use defined-or (//), not ||.  first() returns index 0
            # when the genotype allele matches the REF allele, and 0 is false
            # under ||, so REF genotypes were wrongly reported as '.'.
            my $allele1_idx = (first { $alleles[$_] eq $igt->genotype->[0] } 0..$#alleles) // '.';
            my $allele2_idx = (first { $alleles[$_] eq $igt->genotype->[1] } 0..$#alleles) // '.';
            my $genotype = "$allele1_idx/$allele2_idx";
            $vcf_line->{sample}->{$individual->name} = $genotype;
            $has_gt = 1;
        } else {
            $vcf_line->{sample}->{$individual->name} = '.';
        }
    }
    return $has_gt;
}
# Annotate $vcf_line->{INFO} with transcript consequences and, optionally,
# protein-level details (variant peptide, SIFT/PolyPhen predictions) for
# every alternate TranscriptVariationAllele of $vf.  Allele indexes refer
# to positions in the expanded allele string.
sub consequences {
    my $config = shift;
    my $vcf_line = shift;
    my $vf = shift;
    my $tvs = $vf->get_all_TranscriptVariations;
    my @alleles = split /\//, $vcf_line->{allele_string};
    #shift @alleles; # shift off reference allele?
    foreach my $tv (@$tvs) {
        foreach my $tva (@{$tv->get_all_alternate_TranscriptVariationAlleles}) {
            my $allele_idx = first { $alleles[$_] eq $tva->variation_feature_seq } 0..$#alleles;
            if ($config->{consequences}) {
                for my $oc (@{$tva->get_all_OverlapConsequences}) {
                    push @{ $vcf_line->{INFO}->{Variant_effect} ||= [] },
                        join('|',
                            $oc->SO_term,
                            $allele_idx,
                            $oc->feature_SO_term,
                            $tv->transcript_stable_id,);
                }
            }
            if ($config->{protein_coding_details}) {
                if ($tva->pep_allele_string) {
                    push @{ $vcf_line->{INFO}->{variant_peptide} ||= [] },
                        join('|',
                            $allele_idx,
                            $tva->peptide,
                            $tv->transcript_stable_id,);
                    for my $tool (qw(sift polyphen)) {
                        my $pred_meth = $tool.'_prediction';
                        my $score_meth = $tool.'_score';
                        if (my $pred = $tva->$pred_meth) {
                            $pred =~ s/\s/_/g;
                            # BUG FIX: was $tv->transcript_stable_i — a method
                            # typo that died at runtime for any scored variant.
                            push @{ $vcf_line->{INFO}->{$tool."_prediction"} ||= [] },
                                join('|',
                                    $allele_idx,
                                    $pred,
                                    $tva->$score_meth,
                                    $tv->transcript_stable_id,);
                        }
                    }
                }
            }
        }
    }
}
# Add the GMAF INFO field (minor-allele index | frequency | count) when the
# variation carries a global minor allele frequency and the minor allele is
# present in this line's allele string.
sub global_maf {
    my ($vcf_line, $vf) = @_;
    my $variation = $vf->variation;
    return unless defined $variation->minor_allele_frequency;

    my @alleles = split /\//, $vcf_line->{allele_string};
    my $minor_idx = first { $alleles[$_] eq $variation->minor_allele } 0 .. $#alleles;
    return unless defined $minor_idx;

    $vcf_line->{INFO}->{GMAF} = join('|',
        $minor_idx,
        $variation->minor_allele_frequency,
        $variation->minor_allele_count,);
}
# Add the AA (ancestral allele) INFO field when the variation defines one.
sub ancestral_allele {
    my ($vcf_line, $vf) = @_;
    my $aa = $vf->variation->ancestral_allele;
    $vcf_line->{INFO}->{AA} = $aa if defined $aa;
}
# Append the variation's evidence values, mapped through %$map_evidence
# (e.g. 'HapMap' -> 'E_HM'), to the FLAG list of the VCF line.
sub evidence_values {
    my ($vcf_line, $vf) = @_;
    my $evidence = $vf->variation->get_all_evidence_values();
    if (@$evidence) {
        push @{$vcf_line->{FLAG}}, map { $map_evidence->{$_} } @$evidence;
    }
}
# Add the AF INFO field: one frequency per ALT allele (in allele-string
# order) taken from the configured population; alleles without a recorded
# frequency get 0.0.  Does nothing when the population has no allele data.
sub allele_freq_in_population {
    my ($vcf_line, $vf) = @_;
    my @population_alleles = @{$alleleAdaptor->fetch_all_by_Variation($vf->variation, $population)};
    return unless (@population_alleles);

    # Frequency lookup keyed by allele string (last entry wins, as before).
    my %freq_for = map { $_->allele() => $_->frequency() } @population_alleles;

    my @alleles = split /\//, $vcf_line->{allele_string};
    shift @alleles; # shift off reference allele?
    my @af = map { $freq_for{$_} || 0.0 } @alleles;
    $vcf_line->{INFO}->{AF} = join(',', @af);
}
sub print_header {
my $config = shift;
my $version = '4.1';
my ($sec, $min, $hr, $mday, $mon, $year) = localtime;
$year += 1900; # correct the year
$mon++; # correct the month
my $file_date = sprintf "%4d%02d%02d", $year, $mon, $mday;
print $file_handle "##fileformat=VCFv$version\n";
print $file_handle "##fileData=$file_date\n";
if ($config->{ref_fasta_file}) {
print $file_handle "##reference=$config->{ref_fasta_file}\n";
}
if ($config->{data_source}) {
print $file_handle "##source=$config->{data_source}\n";
} else {
my $mca = $cdba->get_MetaContainerAdaptor;
my $schema_version = $mca->get_schema_version;
my $species_name = $mca->get_scientific_name;
$species_name =~ s/ /_/g;
my $url = 'http://e'.$schema_version.'.ensembl.org/'.$species_name;
print $file_handle "##source=ensembl,version=$schema_version,url=$url\n";
}
print $file_handle "##INFO=<ID=DB,Number=1,Type=String,Description=\"Source for variant and if available source version\">\n";
if ($config->{allele_freq_in_population}) {
print $file_handle "##INFO=<ID=AF,Number=A,Type=Float,Description=\"Allele Frequency\">\n";
}
if ($config->{ancestral_allele}) {
print $file_handle "##INFO=<ID=AA,Number=1,Type=String,Description=\"Ancestral Allele\">\n";
}
if ($config->{global_maf}) {
print $file_handle "##INFO=<ID=MAIDX,Number=1,Type=String,Description=\"Minor Allele (starting with 0 for REF allele)\">\n";
print $file_handle "##INFO=<ID=MAF,Number=1,Type=Float,Description=\"Minor Allele Frequency\">\n";
print $file_handle "##INFO=<ID=MAC,Number=1,Type=Integer,Description=\"Minor Allele Count\">\n";
print $file_handle "##INFO=<ID=GMAF,Number=1,Type=ListOfString,Description=\"Global Minor Allele Frequency.\",Format=MAIDX|MAF|MAC>\n";
}
if ($config->{consequences}) {
print $file_handle "##INFO=<ID=SV,Number=1,Type=String,Description=\"Sequence variant\">\n";
print $file_handle "##INFO=<ID=IDX,Number=1,Type=Integer,Description=\"0-based (starting with first reported ALT allele) index value that identifies which variant sequence the effect is being described for.\">\n";
print $file_handle "##INFO=<ID=FT,Number=1,Type=String,Description=\"Feature type that is being affected. This term must be the SO term sequence_feature or one of its children.\">\n";
print $file_handle "##INFO=<ID=FID,Number=.,Type=ListOfString,Description=\"Feature IDs correspond to ID attributes in a GFF3 file that describe the sequence features (for example genes or mRNAs).\">\n";
print $file_handle "##INFO=<ID=VE,Number=.,Type=ListOfString,Description=\"Effect that a sequence alteration has on a sequence feature that overlaps it.\",Format=SV|IDX|FT|FID>\n";
}
if ($config->{protein_coding_details}) {
#variant peptide
print $file_handle "##INFO=<ID=IDX,Number=1,Type=Integer,Description=\"0-based index value that identifies which variant sequence the effect is being described for.\">\n";
print $file_handle "##INFO=<ID=AmAc,Number=1,Type=String,Description=\"Amino acid translated as result of missense variant.\">\n";
print $file_handle "##INFO=<ID=FID,Number=.,Type=ListOfString,Description=\"Feature IDs correspond to ID attributes in a GFF3 file that describe the sequence features (for example genes or mRNAs).\">\n";
print $file_handle "##INFO=<ID=VarPep,Number=.,Type=ListOfString,Description=\"Effect that a sequence alteration has on a sequence feature that overlaps it.\",Format=IDX|AmAc|FID>\n";
#reference peptide
print $file_handle "##INFO=<ID=RefPep,Number=1,Type=String,Description=\"Amino acid translated with reference allele.\">\n";
#sift prediction
print $file_handle "##INFO=<ID=IDX,Number=1,Type=Integer,Description=\"0-based index value that identifies which variant sequence the effect is being described for.\">\n";
print $file_handle "##INFO=<ID=QP,Number=1,Type=String,Description=\"Qualitative prediction (either tolerated or deleterious).\">\n";
print $file_handle "##INFO=<ID=NV,Number=1,Type=String,Description=\"Numerical value which is the normalized probability that the amino acid change is tolerated so scores nearer 0 are more likely to be deleterious.\">\n";
print $file_handle "##INFO=<ID=FID,Number=.,Type=ListOfString,Description=\"Feature IDs correspond to ID attributes in a GFF3 file that describe the sequence features (for example genes or mRNAs).\">\n";
print $file_handle "##INFO=<ID=Sift,Number=.,Type=ListOfString,Description=\"Sift prediction.\",Format=IDX|QP|NV|FID>\n";
#polyphen prediction
print $file_handle "##INFO=<ID=IDX,Number=1,Type=Integer,Description=\"0-based index value that identifies which variant sequence the effect is being described for.\">\n";
print $file_handle "##INFO=<ID=QP,Number=1,Type=String,Description=\"Qualitative prediction (one of probably damaging, possibly damaging, benign or unknown).\">\n";
print $file_handle "##INFO=<ID=NV,Number=1,Type=String,Description=\"Numerical value which is the probability that a substitution is damaging, so values nearer 1 are more confidently predicted to be deleterious.\">\n";
print $file_handle "##INFO=<ID=FID,Number=.,Type=ListOfString,Description=\"Feature IDs correspond to ID attributes in a GFF3 file that describe the sequence features (for example genes or mRNAs).\">\n";
print $file_handle "##INFO=<ID=Polyphen,Number=.,Type=ListOfString,Description=\"Polyphen prediction.\",Format=IDX|QP|NV|FID>\n";
}
if ($config->{evidence_values}) {
print $file_handle "##INFO=<ID=E_MO,Number=0,Type=Flag,Description=\"Multiple_observations. The variant has multiple independent dbSNP submissions, i.e. submissions with a different submitter handles or different discovery samples\">\n";
print $file_handle "##INFO=<ID=E_Freq,Number=0,Type=Flag,Description=\"Frequency. The variant is reported to be polymorphic in at least one sample.\">\n";
print $file_handle "##INFO=<ID=E_HM,Number=0,Type=Flag,Description=\"HapMap. The variant is polymorphic in at least one HapMap panel (human only).\">\n";
print $file_handle "##INFO=<ID=E_1000G,Number=0,Type=Flag,Description=\"1000Genomes. The variant was discovered in the 1000 genomes project (human only).\">\n";
print $file_handle "##INFO=<ID=E_C,Number=0,Type=Flag,Description=\"Cited. dbSNP holds a citation from PubMed for the variant.\">\n";
}
if ($config->{sample_data}) {
print $file_handle "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n";
}
my @header_line = ('#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO');
if ($config->{sample_data}) {
push @header_line, 'FORMAT';
push @header_line, sort keys %{$config->{sample}};
};
print $file_handle join("\t", @header_line), "\n";
}
# Print one VCF data line to the globally opened $file_handle.
# Serialises the INFO hash (key=value pairs; array values joined with
# commas per the VCF spec), appends any bare FLAG entries, and optionally
# a FORMAT column plus one genotype column per sample.
sub print_vcf_line {
    my $config = shift;
    my $vcf_line = shift;

    # Collect INFO key=value entries and bare FLAG entries into a single
    # list so the INFO column is well-formed even when only flags are
    # present (previously $info stayed undef in that case, producing a
    # warning and a line missing its tab separator).
    my @info_entries;
    if ($vcf_line->{INFO}) {
        for my $key (keys %{$vcf_line->{INFO}}) {
            my $val = $vcf_line->{INFO}->{$key};
            # Multi-valued INFO fields become comma-separated lists.
            push @info_entries, $key . '=' . (ref $val eq 'ARRAY' ? join(',', @$val) : $val);
        }
    }
    push @info_entries, @{$vcf_line->{FLAG}} if $vcf_line->{FLAG};
    my $info = @info_entries ? "\t" . join(';', @info_entries) : '';

    # sample section: FORMAT column then genotypes, in the same sorted
    # sample order used when the header line was printed.
    my $sample = '';
    if ($vcf_line->{sample}) {
        $sample = "\t" . $config->{format} . "\t"
                . join("\t", map { $vcf_line->{sample}->{$_} } sort keys %{$vcf_line->{sample}});
    }

    # QUAL and FILTER are not computed by this dumper.
    $vcf_line->{QUAL} = '.';
    $vcf_line->{FILTER} = '.';

    my $line = join("\t", map { $vcf_line->{$_} } qw/CHROM POS ID REF ALT QUAL FILTER/);
    print $file_handle $line . $info . $sample . "\n";
}
# Print the command-line help text to STDOUT.
# Fixes the "indivivduals" typo in the --population description.
sub usage {
    my $usage =<<END;
Usage:
perl dump_vcf.pl [arguments]
Options
-h | --help Display this message and quit.
-o | --output_file Output file.
--species Species to use.
-r | --registry Registry file used to build database connections.
--host Database host.
--user Database user.
--port Database port.
--db_version Database version.
--population Name of population. Dump genotypes for all individuals in this population.
--individual Comma-separated list of individual names. Dump genotypes for those individuals.
--seq_region Comma-separated list of seq_regions. Dump only for these regions. If not specified all toplevel regions are used.
--allele_freq_in_population Add allele frequencies for this population.
--consequences Add variation consequence data.
--protein_coding_details Add protein function prediction.
--ancestral_allele Add ancestral allele.
--global_maf Add global minor allele frequency data.
--evidence_values Add evidence values supporting a variant as a guide to its potential reliability (available from ensembl 71)
--ref_fasta_file Location of reference sequence backing the data contained in the VCF file.
--data_source Data source for VCF dump. Default is ensembl with schema_version and url to species used for data dump.
END
    print $usage;
}
| willmclaren/ensembl-variation | scripts/export/dump_vcf.pl | Perl | apache-2.0 | 23,102 |
package MongoDB::BSON::Regexp;
# ABSTRACT: Regular expression type
use version;
our $VERSION = 'v0.703.5'; # TRIAL
use Moose;
use namespace::clean -except => 'meta';

# The regular expression body (without delimiters); mandatory.
has pattern => (
    is       => 'ro',
    isa      => 'Str',
    required => 1,
);

# Optional flag string (e.g. 'im'); normalised in BUILD to a sorted,
# de-duplicated form through the private writer.
has flags => (
    is        => 'ro',
    isa       => 'Str',
    required  => 0,
    predicate => 'has_flags',
    writer    => '_set_flags',
);

# Flags the MongoDB server understands; anything else is rejected.
my %ALLOWED_FLAGS = map { $_ => 1 } qw( i m x l s u );

sub BUILD {
    my $self = shift;
    return unless $self->has_flags;

    # Keep only the first occurrence of each flag character.
    my %seen;
    my @flags = grep { !$seen{$_}++ } split //, $self->flags;

    for my $f (@flags) {
        exists $ALLOWED_FLAGS{$f}
            or die "Regexp flag $f is not supported by MongoDB";
    }

    $self->_set_flags( join '', sort @flags );
}

__PACKAGE__->meta->make_immutable;
1;
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::citrix::sdx::snmp::mode::diskusage;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use Digest::MD5 qw(md5_hex);
# Emit the perfdata metric for one disk: 'used' bytes by default, or
# 'free' bytes when --free was given.  Min is 0 and max is the disk's
# total size, so graphing tools can scale correctly.
sub custom_usage_perfdata {
my ($self, %options) = @_;
my $label = 'used';
my $value_perf = $self->{result_values}->{used};
# --free flips the reported metric to remaining space.
if (defined($self->{instance_mode}->{option_results}->{free})) {
$label = 'free';
$value_perf = $self->{result_values}->{free};
}
my %total_options = ();
# With percentage units the warning/critical thresholds are expressed in
# %, so pass the total to rescale them to bytes (cast_int truncates the
# rescaled values to integers).
if ($self->{instance_mode}->{option_results}->{units} eq '%') {
$total_options{total} = $self->{result_values}->{total};
$total_options{cast_int} = 1;
}
$self->{output}->perfdata_add(
label => $label, unit => 'B',
# Only tag the metric with the disk name when multiple instances exist.
instances => $self->use_instances(extra_instance => $options{extra_instance}) ? $self->{result_values}->{display} : undef,
value => $value_perf,
warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . $self->{thlabel}, %total_options),
critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . $self->{thlabel}, %total_options),
min => 0, max => $self->{result_values}->{total}
);
}
# Check the usage thresholds and return the resulting exit status.
# The checked value depends on the mode options: absolute used/free
# bytes, or the matching percentage when --units='%'.
sub custom_usage_threshold {
    my ($self, %options) = @_;

    my $opts    = $self->{instance_mode}->{option_results};
    my $on_free = defined($opts->{free});

    my $checked;
    if ($opts->{units} eq '%') {
        $checked = $on_free ? $self->{result_values}->{prct_free}
                            : $self->{result_values}->{prct_used};
    } else {
        $checked = $on_free ? $self->{result_values}->{free}
                            : $self->{result_values}->{used};
    }

    # Critical is listed first so it takes precedence over warning.
    return $self->{perfdata}->threshold_check(
        value     => $checked,
        threshold => [
            { label => 'critical-' . $self->{thlabel}, exit_litteral => 'critical' },
            { label => 'warning-' . $self->{thlabel}, exit_litteral => 'warning' },
        ]
    );
}
# Build the human-readable status line for one disk, with byte values
# scaled to a convenient unit and used/free percentages.
sub custom_usage_output {
    my ($self, %options) = @_;

    my ($size_val, $size_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{total});
    my ($used_val, $used_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{used});
    my ($free_val, $free_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{free});

    return sprintf(
        "Usage Total: %s Used: %s (%.2f%%) Free: %s (%.2f%%)",
        $size_val . " " . $size_unit,
        $used_val . " " . $used_unit, $self->{result_values}->{prct_used},
        $free_val . " " . $free_unit, $self->{result_values}->{prct_free}
    );
}
# Derive free space and used/free percentages from the raw values read
# by manage_selection.  Returns 0 on success (counter-template contract).
sub custom_usage_calc {
    my ($self, %options) = @_;

    $self->{result_values}->{display} = $options{new_datas}->{$self->{instance} . '_display'};
    $self->{result_values}->{total} = $options{new_datas}->{$self->{instance} . '_total'};
    $self->{result_values}->{used} = $options{new_datas}->{$self->{instance} . '_used'};
    $self->{result_values}->{free} = $self->{result_values}->{total} - $self->{result_values}->{used};

    # Guard against a zero-sized disk reported by the agent, which would
    # otherwise crash the whole check with "Illegal division by zero".
    if ($self->{result_values}->{total} > 0) {
        $self->{result_values}->{prct_used} = $self->{result_values}->{used} * 100 / $self->{result_values}->{total};
    } else {
        $self->{result_values}->{prct_used} = 0;
    }
    $self->{result_values}->{prct_free} = 100 - $self->{result_values}->{prct_used};

    return 0;
}
# Declare the counters handled by this mode: one 'disk' group (type 1 =
# multiple instances) with a usage counter plus read/write IOPs rates.
sub set_counters {
my ($self, %options) = @_;
$self->{maps_counters_type} = [
{ name => 'disk', type => 1, cb_prefix_output => 'prefix_disk_output', message_multiple => 'All disks are ok' }
];
$self->{maps_counters}->{disk} = [
# Usage counter: fully custom calc/output/perfdata/threshold closures
# so --units and --free can change what is displayed and checked.
{ label => 'usage', set => {
key_values => [ { name => 'display' }, { name => 'used' }, { name => 'total' } ],
closure_custom_calc => $self->can('custom_usage_calc'),
closure_custom_output => $self->can('custom_usage_output'),
closure_custom_perfdata => $self->can('custom_usage_perfdata'),
closure_custom_threshold_check => $self->can('custom_usage_threshold'),
}
},
# per_second => 1 turns the raw block counters into rates, which is why
# the constructor enables the statefile (previous values are cached).
{ label => 'read-iops', set => {
key_values => [ { name => 'diskTotalBlocksRead', per_second => 1 }, { name => 'display' } ],
output_template => 'Read IOPs : %.2f', output_error_template => "Read IOPs : %s",
perfdatas => [
{ label => 'read_iops', template => '%.2f',
unit => 'iops', min => 0, label_extra_instance => 1, instance_use => 'display' },
],
}
},
{ label => 'write-iops', set => {
key_values => [ { name => 'diskTotalBlocksWritten', per_second => 1 }, { name => 'display' } ],
output_template => 'Write IOPs : %.2f', output_error_template => "Write IOPs : %s",
perfdatas => [
{ label => 'write_iops', template => '%.2f',
unit => 'iops', min => 0, label_extra_instance => 1, instance_use => 'display' },
],
}
},
];
}
# Constructor: chain to the counter-template base class.  statefile => 1
# enables the cache needed by the per-second IOPs counters.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options, statefile => 1);
bless $self, $class;
# Mode-specific command-line options (see POD below for semantics).
$options{options}->add_options(arguments => {
'filter-name:s' => { name => 'filter_name' },
'units:s' => { name => 'units', default => '%' },
'free' => { name => 'free' },
});
return $self;
}
# Prefix each per-disk status line with the disk's name.
sub prefix_disk_output {
    my ($self, %options) = @_;
    return sprintf("Disk '%s' ", $options{instance_value}->{display});
}
# Per-disk SNMP columns under the Citrix (enterprise 5951) SDX subtree.
# The block counters feed the per-second read/write IOPs counters;
# diskUtilized/diskSize feed the usage counter.
my $mapping = {
diskName => { oid => '.1.3.6.1.4.1.5951.6.2.1000.3.1.1' },
diskTotalBlocksRead => { oid => '.1.3.6.1.4.1.5951.6.2.1000.3.1.7' },
diskTotalBlocksWritten => { oid => '.1.3.6.1.4.1.5951.6.2.1000.3.1.8' },
diskUtilized => { oid => '.1.3.6.1.4.1.5951.6.2.1000.3.1.9' },
diskSize => { oid => '.1.3.6.1.4.1.5951.6.2.1000.3.1.10' },
};
# Root of the disk table entry: walked once, then decoded row by row.
my $oid_diskEntry = '.1.3.6.1.4.1.5951.6.2.1000.3.1';
# Walk the disk table, decode one entry per SNMP instance, apply the
# --filter-name regexp, and populate $self->{disk} for the counter
# framework.  Exits with an error when no disk matches.
sub manage_selection {
my ($self, %options) = @_;
$self->{disk} = {};
# nothing_quit => 1: abort immediately if the walk returns no rows.
my $snmp_result = $options{snmp}->get_table(oid => $oid_diskEntry,
nothing_quit => 1);
foreach my $oid (keys %{$snmp_result}) {
# Iterate rows via the diskName column only; the captured suffix is
# the per-row SNMP instance used to decode the other columns.
next if ($oid !~ /^$mapping->{diskName}->{oid}\.(.*)$/);
my $instance = $1;
my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);
if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
$result->{diskName} !~ /$self->{option_results}->{filter_name}/) {
$self->{output}->output_add(long_msg => "skipping '" . $result->{diskName} . "': no matching filter.", debug => 1);
next;
}
# display/total/used are the keys the usage counter expects; the raw
# columns are kept alongside (%$result) for the IOPs counters.
$self->{disk}->{$instance} = {
display => $result->{diskName},
total => $result->{diskSize},
used => $result->{diskUtilized},
%$result
};
}
if (scalar(keys %{$self->{disk}}) <= 0) {
$self->{output}->add_option_msg(short_msg => "No disk found.");
$self->{output}->option_exit();
}
# Cache key for the statefile backing the per-second counters; it must
# vary with host/port and filters so unrelated runs don't share state.
$self->{cache_name} = "citrix_sdx_" . $self->{mode} . '_' . $options{snmp}->get_hostname() . '_' . $options{snmp}->get_port() . '_' .
(defined($self->{option_results}->{filter_counters}) ? md5_hex($self->{option_results}->{filter_counters}) : md5_hex('all')) . '_' .
(defined($self->{option_results}->{filter_name}) ? md5_hex($self->{option_results}->{filter_name}) : md5_hex('all'));
}
1;
__END__
=head1 MODE
Check disks.
=over 8
=item B<--filter-counters>
Only display some counters (regexp can be used).
Example: --filter-counters='^usage$'
=item B<--filter-name>
Filter disk name (can be a regexp).
=item B<--warning-*>
Threshold warning.
Can be: 'read-iops', 'write-iops', 'usage'.
=item B<--critical-*>
Threshold critical.
Can be: 'read-iops', 'write-iops', 'usage'.
=item B<--units>
Units of thresholds (Default: '%') ('%', 'B').
=item B<--free>
Thresholds are on free space left.
=back
=cut
| centreon/centreon-plugins | network/citrix/sdx/snmp/mode/diskusage.pm | Perl | apache-2.0 | 9,074 |
# NOTE: auto-generated by SOAP::WSDL from the AdWords v201809 WSDL (see
# POD below) -- regenerate rather than editing by hand.
package Google::Ads::AdWords::v201809::StatsEstimate;
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace this complexType belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/o/v201809' };
# This type carries no XML attributes, so the attribute class is undef.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out attribute storage; _factory below wires up accessors,
# element order, types and XML element names.
my %averageCpc_of :ATTR(:get<averageCpc>);
my %averagePosition_of :ATTR(:get<averagePosition>);
my %clickThroughRate_of :ATTR(:get<clickThroughRate>);
my %clicksPerDay_of :ATTR(:get<clicksPerDay>);
my %impressionsPerDay_of :ATTR(:get<impressionsPerDay>);
my %totalCost_of :ATTR(:get<totalCost>);
__PACKAGE__->_factory(
[ qw( averageCpc
averagePosition
clickThroughRate
clicksPerDay
impressionsPerDay
totalCost
) ],
{
'averageCpc' => \%averageCpc_of,
'averagePosition' => \%averagePosition_of,
'clickThroughRate' => \%clickThroughRate_of,
'clicksPerDay' => \%clicksPerDay_of,
'impressionsPerDay' => \%impressionsPerDay_of,
'totalCost' => \%totalCost_of,
},
{
'averageCpc' => 'Google::Ads::AdWords::v201809::Money',
'averagePosition' => 'SOAP::WSDL::XSD::Typelib::Builtin::double',
'clickThroughRate' => 'SOAP::WSDL::XSD::Typelib::Builtin::double',
'clicksPerDay' => 'SOAP::WSDL::XSD::Typelib::Builtin::float',
'impressionsPerDay' => 'SOAP::WSDL::XSD::Typelib::Builtin::float',
'totalCost' => 'Google::Ads::AdWords::v201809::Money',
},
{
'averageCpc' => 'averageCpc',
'averagePosition' => 'averagePosition',
'clickThroughRate' => 'clickThroughRate',
'clicksPerDay' => 'clicksPerDay',
'impressionsPerDay' => 'impressionsPerDay',
'totalCost' => 'totalCost',
}
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::StatsEstimate
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
StatsEstimate from the namespace https://adwords.google.com/api/adwords/o/v201809.
Represents a set of stats for a daily traffic estimate. <p>{@code averageCpc}, {@code averagePosition} and {@code clickThroughRate} will be {@code null} when not defined and {@code clicksPerDay} or {@code impressionsPerDay} is {@code 0}, respectively.</p>
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * averageCpc
=item * averagePosition
=item * clickThroughRate
=item * clicksPerDay
=item * impressionsPerDay
=item * totalCost
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/StatsEstimate.pm | Perl | apache-2.0 | 2,866 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.