code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
package VMOMI::DVPortSetting;
use parent 'VMOMI::DynamicData';
use strict;
use warnings;

# Immediate ancestry used by the VMOMI (de)serialisation framework.
our @class_ancestors = (
    'DynamicData',
);

# Each entry: [ member_name, vmomi_type, is_array, is_optional ].
our @class_members = (
    ['blocked', 'BoolPolicy', 0, 1],
    ['vmDirectPathGen2Allowed', 'BoolPolicy', 0, 1],
    ['inShapingPolicy', 'DVSTrafficShapingPolicy', 0, 1],
    ['outShapingPolicy', 'DVSTrafficShapingPolicy', 0, 1],
    ['vendorSpecificConfig', 'DVSVendorSpecificConfig', 0, 1],
    ['networkResourcePoolKey', 'StringPolicy', 0, 1],
    ['filterPolicy', 'DvsFilterPolicy', 0, 1],
);

# Return the list of ancestor class names for this VMOMI type.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return the inherited member definitions followed by this class's own.
sub get_class_members {
    my $class         = shift;
    my @super_members = $class->SUPER::get_class_members();
    return (@super_members, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/DVPortSetting.pm | Perl | apache-2.0 | 755 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 AUTHORS
Jan-Hinnerk Vogel
=head1 NAME
Bio::EnsEMBL::Analysis::Runnable::TranscriptCoalescer -
=head1 SYNOPSIS
my $runnable = Bio::EnsEMBL::Analysis::Runnable::TranscriptCoalescer->new(
-query => $slice,
-program => 'snap',
);
$runnable->run;
my @predictions = @{$runnable->output};
=head1 DESCRIPTION
TranscriptCoalescer combines gene structures from different evidence sets
into longer predictions and adds translations to these predictions.
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Runnable::TranscriptCoalescer;
use strict;
use warnings;
use Bio::EnsEMBL::Analysis;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
#use Bio::Tools::CodonTable;
use Bio::EnsEMBL::Analysis::Tools::Algorithms::GeneCluster;
use Bio::EnsEMBL::Analysis::Tools::Algorithms::ExonCluster;
use Bio::EnsEMBL::Analysis::Tools::Algorithms::ClusterUtils;
use Bio::EnsEMBL::Analysis::Config::GeneBuild::TranscriptCoalescer;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranslationUtils qw (compute_translation return_translation) ;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptUtils qw (convert_to_genes print_Transcript print_Transcript_and_Exons Transcript_info ) ;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::ExonUtils qw (Exon_info) ;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::ExonExtended;
use Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptExtended;
use Bio::EnsEMBL::Analysis::Runnable;
use Bio::EnsEMBL::Utils::Exception qw(throw warning info);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Analysis::Runnable);
=head2 new

  Function  : creates TranscriptCoalescer-object
  Returnval : returns TranscriptCoalescer-object

=cut

sub new {
  my ($class, @args) = @_;
  my $self = $class->SUPER::new(@args);

  # Result / bookkeeping containers.
  $self->{'_modified_genes'}        = []; # array ref to modified genes to write to new db
  $self->{'_discarded_transcripts'} = []; # array ref to discarded transcripts
  $self->{'_genes'}                 = []; # array of genes to test

  my ($all_genes, $evidence_sets, $dnadb, $utils_verbosity) = rearrange(
    [qw(
        ALL_GENES
        EVIDENCE_SETS
        DNADB
        UTILS_VERBOSITY
    )],
    @args
  );

  $self->{merged_tr}              = [];
  $self->{min_translation_length} = $MIN_TRANSLATION_LENGTH;
  $self->{dnadb}                  = $dnadb;

  # Register one extra evidence set, 'est_merged', so that the freshly
  # constructed genes can be re-clustered together with the input genes
  # of the 'simgw' and 'abinitio' sets.
  # NOTE: this intentionally mutates the caller's $evidence_sets hashref.
  $self->{new_biotype} = $NEW_BIOTYPE;
  $evidence_sets->{'est_merged'} = [ $self->{new_biotype} ];

  # href: $hash{'evidence_set_name'} = \@biotypes
  $self->{evidence_sets}                 = $evidence_sets;
  $self->{write_filtered_transcripts}    = $WRITE_FILTERED_TRANSCRIPTS;
  $self->{write_alternative_transcripts} = $WRITE_ALTERNATIVE_TRANSCRIPTS;
  $self->{adjudicate_simgw_est}          = $ADJUDICATE_SIMGW_EST;
  $self->{all_genes_href}                = $all_genes; # hashref $hash{'biotype'} = \@genes
  $self->{v}                             = $VERBOSE;   # verbose or not

  # An INFO-level utils verbosity also switches on verbose output.
  if (defined $utils_verbosity) {
    $self->{v} = 1 if $utils_verbosity =~ m/INFO/;
  }

  return $self;
}
=head2 run
Arg : none
Function : Runs the TranscriptCoalescer
- clusters genes of EvidenceSet 'est' acc. to their genomic extent
-
Returnval : none
=cut
# run
#
# Top-level driver.  Three stages:
#   1. cluster all 'est' genes and merge them among themselves, using
#      conserved introns as evidence (main_clustering_and_recursion)
#   2. re-cluster the merged transcripts together with 'abinitio' and
#      'simgw' genes and try to bridge/extend them
#      (merge_transcripts_recursion; results accumulate in
#      $self->{merged_tr})
#   3. convert the surviving transcripts back to plain
#      Bio::EnsEMBL::Transcript/Exon objects (avoiding shared exons),
#      add translations, and hand the resulting genes to $self->output()
sub run {
my ($self) = @_ ;
print " all genes fetched\n" if $self->{v};
# get all genes of evidence set 'est' on slice and cluster them
my @allgenes = @{ $self->get_genes_by_evidence_set('est') } ;
my ($clusters, $non_clusters) = cluster_Genes(\@allgenes, $self->get_all_evidence_sets ) ;
# treat unclustered singleton genes like one-gene clusters
push @$clusters, @$non_clusters if (scalar(@$non_clusters) > 0 ) ;
my (@all_merged_est_transcripts, @tr_to_delete ) ;
#
# STAGE_1 :
# begin the process of merging est-gene and est-gene by using the conserved introns as evidence
#
print "non_clusters : " . scalar(@$non_clusters) . "\n" if $self->{v};
GENE_CLUSTER: foreach my $gene_cluster (@$clusters) {
if ($self->{v}) {
print " processing gene_cluster : $gene_cluster\n" ;
print "\n" ; print_cluster_info($self->query,$gene_cluster ) ; print "="x80; print "\n" ;
}
# cluster transcripts and exons and start the recursions on transcripts
# and exon clusters / this is the main routine to extend / merge the est-genes
#
print "Starting main_clustering_and_recursion\n" if $self->{v} ;
my $tr = $self->main_clustering_and_recursion($gene_cluster) ;
# remove redundant transcripts and edit last exons
if ( $tr ) {
if ($self->{v}){
print "Stage 1: Number Genes resulting out of pure est-merging : ".scalar(@$tr)."\n";
print_Transcript_and_Exons($tr) ;
}
# remove red. trans, edit terminal exons and remove redundant trans again
my @non_red_trans =
@{ remove_redundant_transcripts( $self->edit_terminal_exons(remove_redundant_transcripts($tr)))};
print "Stage 2: Number Trans. after term exon edit / removing redundancy : "
. scalar(@non_red_trans)."\n" if $self->{v};
# transcripts are splitted in ones which we're going to cluster again and ones which are
# removed by now (biotype of them has been changed)
my ($non_overlapping_trans , $removed_trans )=remove_overlapping_transcripts(\@non_red_trans) ;
print "Stage 3: Number non-overlapping Transcripts : "
. scalar(@$non_overlapping_trans)
. " (" . scalar(@$removed_trans) . " removed )\n\n" if $self->{v} ;
push @tr_to_delete , @$removed_trans if $self->{write_filtered_transcripts} ;
#
# @$non_overlapping_trans are transcript stuctures which have been produced by est-merging
# these might also be 'pure' ests / if we have a tr-structure later on (by abintio merging)
# which overlap this transcript, this structure will be removed from this array
#
push @all_merged_est_transcripts , @$non_overlapping_trans ;
#print "non-overlapping transcripts which have been made by est-merging :\n" ;
#print_Transcript_and_Exons($non_overlapping_trans) ;
}
} # GENE_CLUSTER
##
##
## RE-clustering of genes with other evidence
## to extend genes by abintio / simgw
##
##
if ($VERBOSE) {
print "after the first round\n" ;
print "="x80 ; print "\n" ;
print_Transcript_and_Exons(\@all_merged_est_transcripts) ;
}
#
# building new gene set which contains genes of set abinitio and simgw and the
# freshly merged genes (biotype of these genes is set in config)
#
my @new_gene_set ;
push @new_gene_set , @{convert_to_genes( \@all_merged_est_transcripts, $self->analysis ) } ;
push @new_gene_set , @{ $self->get_genes_by_evidence_set('abinitio') } ;
push @new_gene_set , @{ $self->get_genes_by_evidence_set('simgw') } ;
#
# re-cluster freshly merged / extended of type 'est_merged' genes of other sources
#
my ($clusters2, $non_clusters2) = cluster_Genes(\@new_gene_set, $self->get_all_evidence_sets ) ;
GENE_CLUSTERS: for my $gene_cluster (@$clusters2) {
#
# check if there are genes of type 'est_merged' in result
#
#my @est_merged_genes = $gene_cluster->get_Genes_by_Set('est_merged') ;
my %sets_in_gene_cluster ;
@sets_in_gene_cluster { @{$gene_cluster->get_sets_included} }= () ;
# If there are est_merged genes in the cluster (est's which have been merged in the first step)
# try to merge them with abintio or simgw
if ( exists $sets_in_gene_cluster{est_merged} ) {
# try to do recursive merge with simgw / abinitio, begin with clustering (Transcripts & Exons)
my @tr_merged =
map {@{$_->get_all_Transcripts}} @{ $gene_cluster->get_Genes_by_Set ( 'est_merged' ) } ;
my $exon_clusters = $gene_cluster->get_exon_clustering_from_gene_cluster();
print "exon_clustering finished\n" if $self->{v};
# recursive approach (merge already merged genes by abinitio or simgw)
# sort transcripts 5'->3' relative to the cluster strand so the
# recursion always extends in transcription direction
if ($gene_cluster->strand == 1 ) {
@tr_merged = sort { $a->seq_region_start <=> $b->seq_region_start } @tr_merged ;
}else {
@tr_merged = sort { $b->seq_region_start <=> $a->seq_region_start } @tr_merged ;
}
my $start_transcript = shift @tr_merged ;
my $t = $self->merge_transcripts_recursion($start_transcript , \@tr_merged, $exon_clusters );
# the recursion accumulates its results in $self->{merged_tr}
if ( @{ $self->{merged_tr} }>0 ){
#print "cluster: have merged_tr genes \n" ;
my @cloned_tr = @{ $self->{merged_tr} } ;
$self->{merged_tr}=[] ; # reset counter
#print "gene_comes_from_merging\n" ;
print_Transcript_and_Exons(\@cloned_tr) ;
# remove redundant genes which result out of abintio/simgw merged transcripts
@cloned_tr = @{ remove_redundant_transcripts( \@cloned_tr ) } ;
# remove overlapping transcripts
my ($non_overlapping_trans, $removed_trans )= remove_overlapping_transcripts( \@cloned_tr ) ;
# if we want to write the filtered / removed /overlapping genes as well to DB
# i.e. for debugging (biotype 'del_') (see config)
push @tr_to_delete, @$removed_trans if ($self->{write_filtered_transcripts});
# prevent to add the start-transcript a second to the set of all_merged_est_transcripts
push @all_merged_est_transcripts , @$non_overlapping_trans ;
@all_merged_est_transcripts =
@{ remove_redundant_transcripts( \@all_merged_est_transcripts ) };
($non_overlapping_trans, $removed_trans )
= remove_overlapping_transcripts( \@all_merged_est_transcripts ) ;
@all_merged_est_transcripts = @$non_overlapping_trans ; # this is ok
push @tr_to_delete , @$removed_trans if $self->{write_filtered_transcripts};
} else {
# recover transcripts cause they haven't been merged
push @all_merged_est_transcripts , @tr_merged ;
}
} else {
#print "have no est_merged genes\n" ;
}
}
#
# use rule-set to decide if we want to take simgw or est gene
#
#######################################################################
# NOTE(review): the whole adjudication stage below is commented out, so
# enabling ADJUDICATE_SIMGW_EST in the config currently has no effect.
if ($self->{adjudicate_simgw_est}) {
# print "deciding weather to use simgw or est-combined gene in final set\n" ;
#
# #
# # 3rd re-clustering of simgw against est genes
# #
# my @est_simgw = @all_merged_est_transcripts ;
#
# push @est_simgw , @{ $self->get_genes_by_evidence_set('est_merged') } ;
# push @est_simgw , @{ $self->get_genes_by_evidence_set('simgw') } ;
#
# print "3rd reclustering to choose if we want to have simgw or est or both\n" ;
#
# ($clusters, $non_clusters) = cluster_Genes(\@est_simgw, $self->get_all_evidence_sets ) ;
# GENE_CLUSTER: foreach my $gene_cluster (@$clusters) {
# # cluster exons and re-set the exon-cluster relation
# my @exon_clusters = @{ $gene_cluster->get_exon_clustering_from_gene_cluster() };
# for my $ec (@exon_clusters) {
# for my $e ( @{$ec->get_all_Exons_in_ExonCluster} ) {
# $e->cluster($ec) ;
# }
# }
#
# # get simgw genes and remove overlapping ones
# my @tr_simgw = map {@{$_->get_all_Transcripts}} $gene_cluster->get_Genes_by_Set('simgw');
# my ($non_ov, $ov) = remove_overlapping_transcripts(\@tr_simgw) ;
# push @tr_to_delete , @$ov if $self->{write_filtered_transcripts};
# @tr_simgw = @$non_ov ;
#
# my @tr_est = $gene_cluster->get_Genes_by_Set('est_merged') ;
# my %sets_in_gene_cluster ;
# @sets_in_gene_cluster{@{ $gene_cluster->get_sets_included }}=() ;
#
# # FALL UNTERSCHEIDUNGEN
#
# if (exists $sets_in_gene_cluster{simgw} && exists $sets_in_gene_cluster{est_merged} ) {
#
# #
# # filter genes and decide if we want simgw , est_merged or whatever / UTR addtion
# #
# print "\n"x4;
# print "We have SIMGW and EST_MERGED genes in cluster :\n" ;
# my ($keep, $removed) = compare_simgw_and_est_merged(\@tr_simgw, \@tr_est,$self->{write_alternative_transcripts}) ;
#
#
#
# }elsif ( exists $sets_in_gene_cluster{est_merged} && !exists $sets_in_gene_cluster{simgw} ) {
#
# print "have no simgw and but est_merged genes \n" ;
#
#
# }elsif ( exists $sets_in_gene_cluster{simgw} && !exists $sets_in_gene_cluster{est_merged} ) {
# print "have simgw but no est_merged\n" ;
#
# # there are only simgw-genes (and some abinitio ) in the GENE_CLUSTER
#
# my @simgw_trans =
# map { @{ $_->get_all_Transcripts } } $gene_cluster->get_Genes_by_Set('simgw') ;
# #my $exon_clusters = $gene_cluster->get_exon_clustering_from_gene_cluster() ;
#
# for ( @tr_simgw ) {
# $_->biotype($self->{new_biotype} . "_" . $_->biotype) ;
# }
# push @all_merged_est_transcripts, @tr_simgw ;
# }
# }
}
if ($WRITE_FILTERED_TRANSCRIPTS) {
push @all_merged_est_transcripts, @tr_to_delete ;
}
print "Having " . scalar( @all_merged_est_transcripts ) . " genes so far for this slice \n\n" ;
# convert Bio::EnsEMBL::TranscriptExtended objects back
# and Bio::EnsEMBL::Exon object to avoid any shared exons
#
my @converted_transcripts ;
OLD_TR: for my $old_tr (@all_merged_est_transcripts) {
print "transcript \n" ;
my @new_exons ;
my @tsf ;
my @esf ;
OLD_EX: for my $oe ( @{$old_tr->get_all_Exons} ) {
# collect transcript supporting features from the exon's source transcript
push @tsf, @{$oe->transcript->get_all_supporting_features} ;
print "have " . scalar ( @tsf ) . " tsf for exon ".$oe->dbID." \n" ;
# clone the exon as a plain Bio::EnsEMBL::Exon so no exon object is shared
my $ne = new Bio::EnsEMBL::Exon(
-START =>$oe->start,
-END =>$oe->end,
-STRAND =>$oe->strand,
-SLICE =>$oe->slice,
-ANALYSIS =>$oe->analysis,
);
$ne->add_supporting_features(@{$oe->get_all_supporting_features} ) ;
push @esf, @{$oe->get_all_supporting_features} ;
push @new_exons, $ne ;
}
my $ntr = new Bio::EnsEMBL::Transcript (-EXONS => \@new_exons) ;
$ntr->biotype($old_tr->biotype) ;
$ntr->add_supporting_features(@tsf) ;
push @converted_transcripts, $ntr;
}
@all_merged_est_transcripts = @converted_transcripts ;
@all_merged_est_transcripts =
sort {$a->seq_region_start <=> $b->seq_region_start} @all_merged_est_transcripts ;
print_Transcript_and_Exons(\@all_merged_est_transcripts) if $self->{v};
# add translations; transcripts with too-short translations are filtered
my @trans_with_tl = @{$self->add_translation_and_trans_supp_features_to_transcripts(
\@all_merged_est_transcripts,$self->{min_translation_length}) } ;
$self->output ( convert_to_genes ( \@trans_with_tl, $self->analysis) ) ;
return ;
}
# add_translation_and_trans_supp_features_to_transcripts
#
# Arg [1]   : arrayref of transcripts
# Arg [2]   : minimum translation length as a percentage of transcript
#             length (optional; defaults to $MIN_TRANSLATION_LENGTH)
# Function  : computes a translation for each transcript; transcripts whose
#             translation covers less than the threshold are dropped, or
#             kept with a diagnostic biotype when WRITE_FILTERED_TRANSCRIPTS
#             is set
# Returnval : arrayref of transcripts that carry a translation
sub add_translation_and_trans_supp_features_to_transcripts {
my ($self, $trans,$min_translation_length) = @_ ;
# BUGFIX: the supplied threshold used to be ignored in favour of the
# config global; honour the parameter, falling back to the global.
$min_translation_length = $MIN_TRANSLATION_LENGTH
unless defined $min_translation_length ;
my @trans_with_tl ;
TRANSCRIPT :for my $tr (@$trans) {
my $new_tr = compute_translation($tr) ;
unless ( $new_tr->translation ) {
print "skipping transcript - no translation !! \n" ;
next TRANSCRIPT ;
}
my $tr_length = $new_tr->length ;
my $tl_length = $new_tr->translate->length ;
# translation coverage as % of transcript length (3 nt per residue)
my $ratio = ( (3*$tl_length) / $tr_length)*100 ;
if ( $ratio > $min_translation_length ){
push @trans_with_tl , $tr ;
}else {
print "Translation is shorter than $min_translation_length %".
" of transcript length ($ratio) - Transcript will not be used\n"
if $self->{v} ;
if ($WRITE_FILTERED_TRANSCRIPTS) {
# keep the filtered transcript but tag it with a diagnostic biotype
$ratio = int($ratio) ;
my $tl_length_biotype;
if ($ratio < 40 ) {
$tl_length_biotype = "translation_len_smaller_40_perc" ;
}else{
$tl_length_biotype = "translation_len_$ratio"."_perc" ;
}
$tr->biotype($tl_length_biotype) ;
push @trans_with_tl, $tr ;
}
}
}
return \@trans_with_tl ;
}
# remove_overlapping_transcripts
#
# Arg [1]   : arrayref of transcripts
# Function  : removes every transcript that is fully spanned by, and has
#             all of its exons overlapped by, a transcript with more exons.
#             Transcripts already flagged with a 'del_' biotype are never
#             candidates for removal.  Removed transcripts get their
#             biotype prefixed with 'del_' so they are recognisable
#             downstream.
# Returnval : (arrayref of kept transcripts, arrayref of removed transcripts)
sub remove_overlapping_transcripts {
my ($tref ) = @_ ;
# only consider transcripts which are not already flagged as deleted
my @transcripts = grep { $_->biotype !~ m/^del_/ } @$tref ;
# ROBUSTNESS FIX: with no live transcripts the sort/index logic below
# would dereference an undef element - return everything unchanged.
return ( [ @$tref ], [] ) unless @transcripts ;
# sort by number of exons (ascending)
@transcripts = sort { scalar(@{$a->get_all_Exons}) <=> scalar(@{$b->get_all_Exons}) } @transcripts;
# if even the largest transcript is single-exon, sort by length instead
if ( @{$transcripts[-1]->get_all_Exons} == 1){
@transcripts = sort { $a->length <=> $b->length } @transcripts;
}
# Walk from the transcript with the most exons downwards; any shorter
# transcript whose genomic extent is spanned and whose exons are all
# overlapped by a longer transcript is marked for removal.
my @ltr = reverse @transcripts ; # transcript with most exons first
my @remove_tr ;
for (my $i=0 ; $i<@ltr; $i++) {
my $lg = $ltr[$i] ;
for (my $j=$i+1 ; $j < @ltr; $j++ ) {
my $st = $ltr[$j] ;
next if $lg eq $st ;
if ( _check_if_lg_spans_st ($lg,$st) ) {
if ( check_if_all_exons_are_overlapped ($lg,$st) ) {
# $st is fully incorporated in $lg
push @remove_tr, $st ;
}
}
}
}
# set difference: everything passed in, minus the removed transcripts
# (hash keys are stringified refs; values keep the objects)
my %seen ;
@seen{ @$tref } = @$tref ;
delete @seen{ @remove_tr } ;
my @diff = values %seen ;
# uniquify @remove_tr before re-labelling
my %tmp ;
@tmp{@remove_tr} = @remove_tr ;
@remove_tr = values %tmp ;
# tag removed transcripts so downstream code (and the DB, if filtered
# transcripts are written) can identify them
for my $t ( @remove_tr ) {
$t->biotype( "del_" . $t->biotype ) ;
}
return (\@diff, \@remove_tr) ;
}
# merge_transcripts_recursion
#
# Arg [1]   : start transcript (5'-most, est_merged)
# Arg [2]   : arrayref of remaining est_merged transcripts, sorted 5'->3'
# Arg [3]   : arrayref of exon clusters of the gene cluster
# Function  : tries to bridge the 3' terminal exon of the start transcript
#             to the 5' terminal exon of the next transcript via an
#             abinitio/simgw exon that sits in the same exon cluster and
#             whose flanking introns are conserved.  Successful merges are
#             pushed onto $self->{merged_tr}; the recursion then continues
#             with the merged (or next) transcript until Arg[2] is empty.
# Returnval : none meaningful; results accumulate in $self->{merged_tr}
sub merge_transcripts_recursion {
my ( $self, $start_transcript , $transcripts_ref , $exon_clusters ) = @_ ;
#print "recusrion start\n" ;
my $tr1 = $start_transcript ;
my $tr2 ;
if (@$transcripts_ref > 0 ) {
$tr2 = shift @$transcripts_ref ;
my @new_transcripts ;
my $ex_3prim = $tr1->end_Exon();
my $merged_transcript ;
# only try to merge if tr1's 3' exon is truly terminal
unless ($ex_3prim->next_exon ) {
if ( $tr2->start_Exon->overlaps($ex_3prim) && !$tr2->start_Exon->prev_exon ) {
#
# exon is in same cluster and has terminal 5prim exon of est
#
# xxxxx------------xxxxxxxxxxx tr1 (no next_exon)
# yyyyyyyyyyy----------yyyyyyyyyyyyyy tr2 (no prev_exon)
if ($self->{v}){
print "no next exon - look for overlapping 5prm \n" ;
print "\nRECURSION : These transcripts could be merged:\n" ; print "="x80;
print "\ntr1 : " . $tr1->seq_region_start . " -- " .$tr1->seq_region_end .
" " . $tr1->biotype . "\n" ; ;
print "tr2 : " . $tr2->seq_region_start . " -- " .$tr2->seq_region_end .
" " . $tr2->biotype . "\n" ; ;
}
my $this_ec = $tr1->end_Exon->cluster ;
#
# get evidence to merge $tr1->end_Exon AND $tr2->start_Exon
#
my @candidate_exons = @{ $this_ec->get_all_Exons_of_EvidenceSet('simgw')} ;
push @candidate_exons, @{ $this_ec->get_all_Exons_of_EvidenceSet('abinitio')} ;
#
# get ab-initios or sim-gws in this cluster which match the boundaries
#
# aaaaaaaaaaaa|--------------|aaaaaaaaaaaaaaaa
# |$tr1->end_Exon
#
# bbbbbbbbbbbbbbbb|-------|bbbbbbbbbbbbbb--
# $tr2->start_Exon|
#
# simsimsimsim|--------------|simsimsimsimsimsimsimsim|-------|simsimsismisms-- CONSERVED, OK
# abinitioabin|--------------|itioabinitioabinitioabin|-------|initioabinitio-- CONSERVED, OK
#
#
for my $ce (@candidate_exons) {
if ($self->{v}){
print "RECURSION checking candidate exon which spans region :\n" ;
print "start : " . $ce->seq_region_start . "\t".
$ce->seq_region_end . " (" . $ce->analysis->logic_name . ")\n\n" ;
}
# the bridge exon must be internal in its own prediction
if ( $ce->prev_exon && $ce->next_exon ) {
# check the boundarie of previous exon of 3prim term exon of EST-1-gene :
my ( $tr1_boundary , $tr2_boundary ) ;
# NOTE(review): $ce_start is set from seq_region_END and $ce_end from
# seq_region_START - the names look swapped but the comparisons below
# are written against exactly these values; confirm before changing.
my $ce_start = $ce->seq_region_end ;
my $ce_end = $ce->seq_region_start ;
if ( $tr1->seq_region_strand eq '1' ){
$tr1_boundary = $tr2->start_Exon->seq_region_end ;
$tr2_boundary = $tr1->end_Exon->seq_region_start ;
} else {
$tr1_boundary = $tr1->end_Exon->seq_region_end ;
$tr2_boundary = $tr2->start_Exon->seq_region_start ;
}
if ($self->{v}){
print "check_exon has previous and next exon\n" ;
print "\n" ; print "tr1_boundary : " . $tr1_boundary . "\n" ;
print "ce_start : " . $ce_start . "\n" ;
print "\n" ; print "tr2_boundary : " . $tr2_boundary . "\n" ;
print "ce_end : " . $ce_end . "\n" ; print "\n\n" ; print "\n\n" ;
}
if ($ce_start == $tr1_boundary && $ce_end == $tr2_boundary ) {
print "Boundaries match - merging genes \n" if $self->{v};
# less strict way in finding a compatible exon :
# only the intron has to be conserved between the flanking ab-initios and the est's
# (ONLYE THE EXON BOUNDARIES WHICH HAVE DOUBLE '||' ARE CHECKED !!!) :
#
# TR_1 TR_2
# --------estestest||-------------||estestestest
# estestestest||---------------||estestestest|----------
# --|abinitioabiont||-------------||abintioabintioabinitio||---------------||abinitioabinitioabinitio|----
# ^^ CONS.INTR. ^^ ^^ CONS. INTRON ^^
# || || || ||
# NOMATCH match match match match NOMATCH
#
#
# It is only checked if there are two conserved Introns at 3prim and 5rim end
# of 'broken' exon which we want to bridge. The more 'strict' approach is handeld in the
# second case below (look if the last exon of est-gene nr1 and next exon of est-gene nr2
# have excatly the same coordinates as the simgw / abinitio exon we use to merge
#
#
#
# merge the two transcripts by using the coordinates of simgw/abinitio bridge exon
#
# bondaries of abinotio/simgw exon match the start/ends of the exon to bridge
my $tr1_last_intron = Bio::EnsEMBL::Intron->new($tr1->end_Exon->prev_exon,$tr1->end_Exon) ;
my $ce_last_intron = Bio::EnsEMBL::Intron->new( $ce->prev_exon,$ce ) ;
my $tr2_next_intron = Bio::EnsEMBL::Intron->new($tr2->start_Exon,$tr2->start_Exon->next_exon) ;
my $ce_next_intron = Bio::EnsEMBL::Intron->new($ce, $ce->next_exon ) ;
if ( compare ($tr1_last_intron, $ce_last_intron ) && compare ($tr2_next_intron, $ce_next_intron)) {
# NOTE(review): merge_transcripts is called twice with identical
# arguments here, building the merged transcript twice
push @new_transcripts , $self->merge_transcripts ( $tr1, $tr2, $ce, 'no_strict') ;
$merged_transcript = $self->merge_transcripts ( $tr1, $tr2, $ce, 'no_strict') ;
$self->merge_transcripts_recursion ( new_tr($merged_transcript) , $transcripts_ref , $exon_clusters );
# $self->merge_transcripts_recursion ( $merged_transcript , $transcripts_ref , $exon_clusters );
}
if ( $tr1->end_Exon->prev_exon->seq_region_end == $ce->prev_exon->seq_region_end
&& $tr1->end_Exon->prev_exon->seq_region_start == $ce->prev_exon->seq_region_start
&& $tr2->start_Exon->next_exon->seq_region_start == $ce->next_exon->seq_region_start
&& $tr2->start_Exon->next_exon->seq_region_end == $ce->next_exon->seq_region_end ) {
# STRICT WAY in finding a compatible exon :
# we need two flanking consreved Introns and two exons which match exactly the boundaries !!
# (all double || marked starts/ends are checked
#
# ---||esteststesteste||-------------||estestestest
# estestestest||---------------||estestestest||----------
# ---||abinitioabiniot||-------------||abintioabintioabinitio||---------------||abinitioabin||----------
# ^^ ^^ CONS.INTR. ^^ ^^ CONS. INTRON ^^ ^^
# || || || || || ||
# match match match match match match
#
#
# It's checked if there are two conserved Introns at 3prim and 5rim end around the
# 'broken' exon which we want to bridge and if the start/ends of the flanking exdons match as well
#
#
# merge the two transcripts by using the coordinates of simgw/abinitio bridge exon
#
push @new_transcripts , $self->merge_transcripts ( $tr1, $tr2, $ce, 'strict' ) ;
$merged_transcript = $self->merge_transcripts ( $tr1, $tr2, $ce, 'strict') ;
$self->merge_transcripts_recursion ( new_tr($merged_transcript) , $transcripts_ref , $exon_clusters );
}
} else {
print "Boundaries does not match\n" if $self->{v};
}
}
} # for candidate exons
}
}
####
} else { # there is no more transcriptin $transcripts_ref
# base case: no transcripts left - record the assembled transcript
push @{$self->{merged_tr} } , $start_transcript ;
return ;
}
# continue the recursion from the next transcript
my $tr_new = new_tr($tr2) ;
$self->merge_transcripts_recursion ( $tr_new , $transcripts_ref , $exon_clusters );
}
# merge_transcripts
#
# Arg [1]   : base transcript (tr1, contributes all exons except its 3' one)
# Arg [2]   : transcript to append (tr2, contributes all exons except its 5' one)
# Arg [3]   : the abinitio/simgw exon used as the bridge between them
# Arg [4]   : string tag ('strict' or 'no_strict'), recorded in the biotype
# Function  : builds a new TranscriptExtended from tr1's exons, a clone of
#             the bridge exon, and tr2's exons; the new biotype is also
#             registered in the 'est_merged' evidence set so the merged
#             transcript survives later re-clustering
# Returnval : the merged TranscriptExtended
sub merge_transcripts {
my ($self, $base_trans, $trans_to_add , $merge_exon , $string) = @_ ;
my $new_biotype = $self->{new_biotype} . "_" . $merge_exon->biotype . "_merged_$string" ;
# my $new_biotype = $self->{new_biotype} . "_abinitio_merged" ;
#
# we need to clone exon otherwise we end up with multipe transcripts which share the same Exon object
#
my $cloned_ex = new Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::ExonExtended (
-start => $merge_exon->start ,
-end => $merge_exon->end ,
-phase => $merge_exon->phase ,
-end_phase => $merge_exon->end_phase,
-strand => $merge_exon->seq_region_strand ,
-slice => $merge_exon->slice ,
-analysis => $base_trans->analysis ) ;
$cloned_ex->biotype( $new_biotype ) ;
$cloned_ex->transcript($merge_exon->transcript) ;
$cloned_ex->add_supporting_features( @{$merge_exon->get_all_supporting_features}) ;
#
# register new biotype as a member of evidence_set 'est_merged' and make sure that the new
# biotype is only stored once in the array of biotypes , otherwise gene clustering won't work
#
my %tmp = %{ $self->get_all_evidence_sets };
my %tmp2 ;
@tmp2{ @{ $tmp{est_merged} } } =();
unless (exists $tmp2{ $new_biotype } ) {
push @{ $tmp{'est_merged'}}, $new_biotype ;
$self->get_all_evidence_sets( \%tmp ) ;
}
# NOTE(review): $old_merge_exon_biotype is saved but never restored
# (the restoring line further down is commented out)
my $old_merge_exon_biotype = $merge_exon->biotype ;
if ( length($new_biotype) > 40) {
warning("The stringlength of the biotype is too loong....mysql will shorten it\n") ;
}
my @base_exons = @{$base_trans->get_all_Exons()} ;
#
# chop of 3prim exon cause here we use the abintio/ simgw exon for merging
#
pop @base_exons ;
my @exons_to_add = @{ $trans_to_add->get_all_Exons} ;
#
# chop of 5prim exon cause here we use the abintio/ simgw exon for merging
#
shift @exons_to_add ;
my $tr_merged = new Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptExtended(
-BIOTYPE => $new_biotype ,
-ANALYSIS =>$base_trans->analysis ,
);
#
# creating transcript supporting feature
#
# pick the most informative identifier available for the bridge exon
my $hseq_name ;
if ($merge_exon->stable_id) {
$hseq_name = $merge_exon->stable_id ;
}elsif ($merge_exon->dbID) {
$hseq_name = $merge_exon->dbID ;
}else {
$hseq_name = $merge_exon->biotype ;
}
my $feat_pair = Bio::EnsEMBL::FeaturePair->new (
-start => $merge_exon->seq_region_start ,
-end => $merge_exon->seq_region_end ,
-strand => $merge_exon->seq_region_strand ,
-slice => $merge_exon->slice ,
-analysis =>$merge_exon->analysis ,
-hseqname =>$hseq_name ,
-hstart => 1 ,
-hend => ( $merge_exon->seq_region_end - $merge_exon->seq_region_start +1 ) ,
-hstrand => $merge_exon->seq_region_strand ,
-score => 0 ,
-percent_id => 0 ,
) ;
# jhv
#
# # we should decide here if we want to create dnadna or dnapep align feature ...
# if ( ref($merge_exon) =~m/Bio::EnsEMBL::DnaPepAlignFeature/ ) {
# }elsif ( ref($merge_exon) =~m/Bio::EnsEMBL::DnaDnaAlignFeature/ ) {
# }
#
#
my $dna_align_feat = Bio::EnsEMBL::DnaDnaAlignFeature->new (-features =>[$feat_pair] ,
-align_type => 'ensembl',
-analysis => $merge_exon->analysis ) ;
$cloned_ex->add_supporting_features ( $dna_align_feat) ;
#
# Doing first part of merge
#
for my $be (@base_exons ) {
$be->biotype( $new_biotype ) ;
$tr_merged->add_Exon( $be ) ;
# print "add base_exons to tr: " ; print_object($be) ;
}
#
# add exon of abinitio / simgw
#
#$merge_exon->biotype( $old_merge_exon_biotype ) ;
$tr_merged->add_Exon ( $cloned_ex ) ;
# $tr_merged->add_Exon ( $merge_exon ) ;
# print "add merge_exon to tr: " ; print_object($merge_exon) ;
#
# adding rest of following est
#
for my $e (@exons_to_add) {
$e->biotype($new_biotype) ;
#print "adding rest of second est: " ; print_object($e) ;
$tr_merged->add_Exon ($e) ;
}
#print "\n\nThis_is_merged_trans :\n" ;
#print_Transcript_and_Exons([$tr_merged],"tr_merged") ;
return $tr_merged ;
}
# edit_terminal_exons
#
# Arg [1]   : arrayref of transcripts
# Function  : replaces each transcript's 5' and 3' terminal exon with the
#             longest terminal exon found in the same exon cluster.
#             Note: assigning to the loop alias updates the array element,
#             so @$aref holds the exchanged transcripts afterwards.
# Returnval : the (modified) input arrayref
sub edit_terminal_exons {
  my ($self, $aref) = @_;
  for my $transcript (@$aref) {
    my @exons = @{ $transcript->get_all_Exons };

    # swap the 5' terminal exon for the longest one in its cluster
    my $first_exon = $exons[0];
    my $longest5   = get_longest_5prim_term_exon_in_exon_cluster_of_this_exon($first_exon);
    $transcript    = $transcript->exchange_exon($first_exon, $longest5);

    # swap the 3' terminal exon for the longest one in its cluster
    my $last_exon = $exons[-1];
    my $longest3  = get_longest_3prim_term_exon_in_exon_cluster_of_this_exon($last_exon);
    $transcript   = $transcript->exchange_exon($last_exon, $longest3);
  }
  print_Transcript_and_Exons($aref) if $self->{v};
  return $aref;
}
# main_clustering_and_recursion
#
# Arg [1]   : a GeneCluster
# Function  : clusters the exons of the gene cluster, picks EST-supported
#             start exons from the first exon cluster and assembles
#             transcripts from them via transcript_recursion; each result
#             is re-packaged as a TranscriptExtended carrying the
#             configured new biotype
# Returnval : arrayref of assembled TranscriptExtended objects
sub main_clustering_and_recursion {
  my ($self, $gene_cluster) = @_;

  my @exon_clusters = @{ $gene_cluster->get_exon_clustering_from_gene_cluster() };

  # remember the terminal exon clusters of this gene cluster
  $self->{start_terminal_ec} = $exon_clusters[0];
  $self->{end_terminal_ec}   = $exon_clusters[-1];

  # Candidate start exons: EST-supported exons of the first cluster that
  # have a following exon (i.e. are not 3' terminal exons).
  my @est_start_exons =
    grep { $_->next_exon }
    @{ $exon_clusters[0]->get_all_Exons_of_EvidenceSet('est') };

  # Assemble transcripts starting from the candidate exons.
  my $assembled = $self->transcript_recursion(\@exon_clusters, \@est_start_exons, []);

  # Re-package each assembled transcript with the configured biotype.
  my @pruned_transcripts;
  for my $tr (@$assembled) {
    my $clone = new Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptExtended(
      -BIOTYPE => $self->{new_biotype});
    for my $exon (@{ $tr->get_all_Exons }) {
      $exon->biotype($self->{new_biotype});
      $clone->add_Exon($exon);
    }
    push @pruned_transcripts, $clone;
  }
  return \@pruned_transcripts;
}
# Performs the recursion over a region of clustered transcripts, selecting
# every unvisited exon as a possible start exon for a restart of the recursion:
#
# - start from a sequence range containing all transcripts and exon clusters
# - select the start exon clusters where possible
# - process the first start exon clusters
# - once finished, check whether all exons have been visited
# - if not, sort the exons, take the one at the 5' end and call
#   exon_recursion on it
# - check again whether all exons have been visited; if not, choose one that
#   hasn't been visited yet and call exon_recursion on it, and so on
#
# start_est_linking
#
# Arg [1]   : arrayref of start exons
# Arg [2]   : arrayref of exon clusters with EST evidence
# Function  : runs exon_recursion from each start exon and collects every
#             transcript the recursion returns
# Returnval : arrayref of assembled transcripts
sub start_est_linking {
  my ($self, $start_exons, $ex_cluster_real_ev) = @_;
  my @assembled;

  if ($self->{v}) {
    print "starting est-linking with set of start-exons\n";
    for my $se (@$start_exons) {
      print "START_exon: ";
      print_object($se);
    }
  }

  for my $start_exon (@$start_exons) {
    if ($self->{v}) {
      print "\n\nSTART: Starting with exon :\n";
      $self->print_exon($start_exon);
      print "starting the exon_recursion\n";
    }
    # walk the exon graph beginning at this exon
    my $tr = $self->exon_recursion(
      $ex_cluster_real_ev, # all ec with exon ranked > 0
      $start_exon,         # the exon where the story begins
      undef,               # last exon (none yet)
      [],                  # transcript under construction
      0,                   # recursion counter
      {}                   # scratch href
    );
    push @assembled, $tr if $tr;
    print "exon_recursion finished" if $self->{v};
  }

  if ($self->{v}) {
    print "\nAfter exon_recursion:\n";
    print_Transcript_and_Exons(\@assembled);
  }
  return \@assembled;
}
# transcript_recursion
#
# Arg [1]   : arrayref of exon clusters with EST evidence
# Arg [2]   : arrayref of start exons (used on the first call only)
# Arg [3]   : arrayref accumulating all assembled transcripts
# Function  : repeatedly runs start_est_linking from the current set of
#             start exons, then restarts itself from the next exon cluster
#             that still contains unvisited exons.  Terminates when every
#             exon in every cluster has been visited.
# Returnval : arrayref of all assembled transcripts (same ref as Arg [3])
sub transcript_recursion {
my ($self , $ex_clusters_real_ev, $exons ,$all_tr ) = @_ ;
print "\nTranscriptRecursion:\n=============================================\n" if $self->{v};
my @ex_clusters_real_evidence = @$ex_clusters_real_ev ;
my @start_exons ;
if ( scalar(@$all_tr)==0) {
# first call: use the start exons handed over by the caller
print "initialisation - using start exons which have been handend over _tmp_\n" if $self->{v} ;
@start_exons = @$exons ;
} else {
# We have already transcripts and we have to check out for next
# cluster with unprocessed / unvistited exons and call the recursion again on this cluster
# how to find next startpoint ?
@start_exons = @{ get_unvisited_exons_in_next_ec( $ex_clusters_real_ev ) } ;
#
# sort start-exons such that we don't end up using terminal 3-prim exons
#
my @tmp ;
if ( ( @start_exons) > 0 ) {
print "start_exons :" . scalar(@start_exons) . "\n\n" if $self->{v} ;
for my $se (@start_exons) {
# keep only exons that have a following exon (not 3' terminal)
if ($se->next_exon) {
if ($self->{v}) {
print "this exon_has_next_exon : " ;
print_object($se) ;
print "this is next exon : " ;
print_object($se->next_exon) ;
}
push @tmp, $se if $se->next_exon() ;
}
}
}
@start_exons = @tmp ;
}
# base case: every exon in every cluster has been visited
if ( _all_exons_visited ( $ex_clusters_real_ev) ) {
print "\nall_exons_visited_returning_from_transcript_recursion\n" if $self->{v};
return ;
}
if (@start_exons > 0 ) {
print "starting est_linking\n" if $self->{v};
my $trans_aref = $self->start_est_linking(\@start_exons , $ex_clusters_real_ev ) ;
print_Transcript_and_Exons($trans_aref , "trans_aref_smart" ) if $self->{v};
push @$all_tr, @$trans_aref ;
print "finished est_linking\n" if $self->{v};
# sanity check: $all_tr must hold transcripts, never nested arrayrefs
for my $ref (@$all_tr) {
if ( ref($ref)=~m/ARRAY/){
throw("having_array_that is not good");
}
}
}
# only start transcript_recursion if there are start_exons
# do we need this ???
$self->transcript_recursion ($ex_clusters_real_ev, \@start_exons , $all_tr ) ;
if ($self->{v}){
print "END_OF_TRANSCRIPT_RECURSION\ntrying print_Transcript\n" ;
print_Transcript_and_Exons($all_tr,"debug_trans_rec") ;
}
return $all_tr ;
}
# exon_recursion
#
# Arg[0]     : Arrayref of exon-clusters with real evidence (ranked > 0)
# Arg[1]     : Exon to process in this recursion step
# Arg[2]     : Exon processed in the previous step (undef on the first call)
# Arg[3]     : Transcript assembled so far (created here when $cnt == 0)
# Arg[4]     : Recursion depth counter; 0 triggers initialisation
# Arg[5]     : Hashref of exons already visited by this recursion
# Function   : Follows next_exon links 5' to 3', adding exons to a growing
#              TranscriptExtended. When an exon has no next_exon, the other
#              exons of its cluster are scanned for one whose last intron is
#              conserved (identical coordinates); if found, the recursion
#              continues from that exon on a cloned transcript.
# Returnval  : TranscriptExtended, or undef when this branch was
#              abandoned / continued elsewhere
#
sub exon_recursion {
    my ($self, $all_ec, $exon, $last_exon, $transcript , $cnt, $href ) = @_ ;
    #my ($self, $all_ec, $exon, $last_exon, $transcript , $exon_aref, $trans_href,$cnt, $href ) = @_ ;
    if ($self->{v}) {
        print "--> Starting exon recursion with this exon : \n\n" ;
        print_object($exon) ;
        print "\n\n" ;
    }
    $exon->visited("1") ;
    my $tmp_biotype = $exon->biotype ."_temp" ;
    my $init = 0 ;
    if ( $cnt == 0 ) {
        # first call: create the transcript this recursion will fill
        print "Initialisation\n" if $self->{v} ;
        $init = 1 ;
        $transcript = new Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptExtended(
            -BIOTYPE => $tmp_biotype ,
        );
        #$$trans_href{$exon} = $transcript ;
        $$href{$exon}=$transcript ;
    }
    $cnt++ ;
    # STOP if exon is in end terminal cluster and has NO next exon
    # (an exon may sit in the end-terminal cluster but still have a
    # next exon because of overlapping structures)
    # xxxxxxxxxxxxxx-----------------------------xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxx-------------------------------xxxxxxxxxxxx--------------xxxxxxxxxxxxxx
    if ( $exon->cluster eq $self->{end_terminal_ec} && !$exon->next_exon) {
        # $transcript->add_Exon($exon) ;
        print "End terminal cluster reached\n" if $self->{v} ;
        return $transcript ;
    }
    if ( $exon->next_exon) {
        $$href{$exon}=() ; # mark exon as visited
        print "Exon has next exon\n" if $self->{v} ;
        # skip if this exon has the same coordinates as the previous one
        # unless ( exon_coordinates_match ($exon, $last_exon) ) {
        unless ( compare ($exon, $last_exon) ) {
            #$$trans_href{$exon} = $transcript ;
            my $tr_tmp ;
            if ($init) {
                # Initialisation : don't clone transcript, add exon
                $transcript->add_Exon($exon) ;
                $tr_tmp = $transcript ;
            }else{
                # Initialisation was done, clone transcript and add exon.
                # NOTE(review): $exon is added to $transcript AFTER the clone
                # is taken, so the clone passed down ($tr_tmp) does NOT
                # contain $exon - confirm this asymmetry is intended.
                $tr_tmp = new_tr($transcript) ;
                $transcript->add_Exon($exon) ;
                #$$trans_href{$exon} = $transcript ;
            }
            $self->exon_recursion( $all_ec, $exon->next_exon , $exon, $tr_tmp , $cnt, $href);
            #$self->exon_recursion( $all_ec, $exon->next_exon , $exon, $tr_tmp , $exon_aref, $trans_href, $cnt, $href);
            # Recursion is finished now - we have to check if we have to add an end-exon
            my @t_exons = @{ $transcript->get_all_Exons } ;
            # checking if last Exon is really at the very end of transcript
            # or if we can add one more exon ($exon->next_Exon)
            my $exon_to_add ;
            if ($exon->next_exon) {
                if ($transcript->end_Exon ne $exon->next_exon) {
                    # check if exon we want to add is more 3prim than trans->end_Exon
                    if ($transcript->seq_region_strand eq 1 ) {
                        if ($transcript->end_Exon->seq_region_end < $exon->next_exon->seq_region_start ) {
                            # xxxxxxxxxxxxxxxxx-------xxxxxx-----------xxxxxxxxxxxxxxxxxxx
                            # $t->seq_reg_start $t->seq_reg_end
                            # YYYYYYYYYYYYYYYYYY
                            # $e->seq_reg_start
                            $exon_to_add = $exon->next_exon ;
                        }
                    } else {
                        # reverse strand: "more 3-prime" means smaller coordinate
                        if ($transcript->end_Exon->seq_region_start > $exon->next_exon->seq_region_end ) {
                            $exon_to_add = $exon->next_exon ;
                        }
                    }
                }
            }
            if ($exon_to_add ) {
                $transcript->add_Exon($exon_to_add) ;
            }
            return $transcript ;
        } # exon_coords_match
    } else { # exon->next_exon()
        print "Exon has no next exon - trying to find conserved intron to extend Transcript\n\n"
            if $self->{v} ;
    }
    #
    # exon has no next exon / getting all TEST-exons in exon_cluster
    #
    ####################################################################
    my @all_ex_in_ec = @{ $exon->cluster->get_all_Exons_in_ExonCluster() } ;
    $$href{$exon}=() ;
    print "Now walking through all exons in Cluster.... ( try to find last matching Intron)\n\n"
        if $self->{v};
    ALL_EXONS_IN_EC: for my $te ( @all_ex_in_ec ) {
        my $nr_all_exons_in_ec = scalar(@all_ex_in_ec);
        my $nr_exons_visited = keys %{$href} ;
        if ($te eq $exon) {
            print "exon is the same ---> skipping / next\n" if $self->{v}; # exon is the same
            next ;
        }
        if (exists $$href{$te} ) {
            # exon has already been processed
            print "exon has been visited ---> skipping / next\n" if $self->{v}; # exon is the same
            next ;
        }
        if ($self->{v}){
            print "Now testing this exon for conseverd intron:\n" ;
            $self->print_exon($te," (this exon will be tested for cons. intron) ") ;
        }
        # build the last intron of each exon (only possible when both
        # exons have a previous exon)
        my ( $te_intron, $ex_intron ) ;
        if ($te->prev_exon && $exon->prev_exon) {
            print "\nExon and test-exon have prev. exon - building last intron\n" if $self->{v};
            $te_intron = Bio::EnsEMBL::Intron->new($te->prev_exon,$te) ;
            $ex_intron = Bio::EnsEMBL::Intron->new($exon->prev_exon,$exon) ;
        } else {
            #print "Can't build intron cause test-exon OR exon have no prev exon\n" ;
        }
        print "Comparing introns if there are introns...\n" if $self->{v};
        if ( compare($te_intron, $ex_intron ) ) {
            # last introns have identical coordinates: the test exon is an
            # alternative version of $exon we can continue the walk from
            print "Introns match !!\n" if $self->{v};
            if (!exists($$href{$te})) {
                print "Exon hasn't been visited\n" if $self->{v};
                my $new_tr_2 ;
                if ($last_exon) {
                    if ($self->{v}) {
                        print "Last_exon : $last_exon \n" ;
                        $self->print_exon($last_exon) ;
                        $self->print_exon($te->prev_exon) ;
                        print "switching transcript bcs conserved intron to this exon:\n";
                        # FIXME(review): print_exon is an object method that
                        # expects $self as its first argument; this
                        # unqualified call passes the exon as $self -
                        # probably meant $self->print_exon($te->prev_exon)
                        print_exon($te->prev_exon) ;
                        print "\nCloning transcript -has the foolowing exons now :\n" ;
                        print "-------------------------------------------------\n" ;
                    }
                    $new_tr_2 = new_tr($transcript) ;
                    # print_exon() checks $self->{v} internally, so this
                    # dump is effectively silent unless verbose is set
                    my @all_exons_new_tr = @{$new_tr_2->get_all_Exons} ;
                    for my $e (@all_exons_new_tr) {
                        $self->print_exon($e) ;
                    }
                    print "\n\n" if $self->{v};
                } else {
                    $new_tr_2 = new_tr($transcript) ;
                    $last_exon = $exon ;
                }
                # NOTE(review): this re-clone makes the clones taken in both
                # branches above redundant - confirm before simplifying
                $new_tr_2 = new_tr($transcript) ;
                $$href{$te}=1 ; # mark exon as visited
                # start new recursion
                if ($self->{v}) {
                    print "new_starting_point:" ; $self->print_exon($te) ;
                }
                if ($te->next_exon) {
                    $self->exon_recursion ($all_ec,$te,$last_exon,$new_tr_2,$cnt,$href) ;
                }else {
                    print "test exon has no next exon, skipping to start new recursiox\n" if $self->{v};
                }
                #$self->exon_recursion ($all_ec,$te,$last_exon,$new_tr_2,$exon_aref,$trans_href,$cnt,$href) ;
                return ;
            }else {
                if ($self->{v}) {
                    print "exon has already been visited\n\n" ;
                    $self->print_exon($te," VISITED " ) ;
                }
            }
        }elsif ( ($te->seq_region_end eq $exon->seq_region_end) && $te->next_exon) { # end match
            #print "Introns does not_match_1\n" ;
            #$self->print_exon($te,"six",2);
        }elsif ( ($te->seq_region_start eq $exon->seq_region_start) && $te->next_exon){ # start match
            #print "Introns does not_match_2\n" ;
            #$self->print_exon($te,"seven",2);
        }else {
            #print "Introns does not_match_3.... processing next exon in exon_cluster\n\n" ;
        }
        $$href{$te}=1 ;
    } #: ALL_EXONS_IN_EC
    return ;
}
# new_tr
#
# Clones a transcript: builds a fresh TranscriptExtended carrying the same
# exons and biotype, and copies the evidence-set tag when the template
# itself is a TranscriptExtended.
sub new_tr {
    my ($template) = @_ ;
    my $clone = Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptExtended->new(
        -EXONS   => $template->get_all_Exons(),
        -BIOTYPE => $template->biotype(),
    ) ;
    if ( $template->isa("Bio::EnsEMBL::Analysis::Tools::GeneBuildUtils::TranscriptExtended") ) {
        $clone->ev_set( $template->ev_set ) ;
    }
    return $clone ;
}
=head2 get_all_evidence_sets

Name : get_all_evidence_sets
Arg[1] : none
Function : Returns a hash of evidence-set names (keys) and their biotypes
Returnval : Hashref $hash{evidence_set_name} = \@biotypes

=cut
# Accessor: returns the hashref mapping evidence_set_name => \@biotypes.
sub get_all_evidence_sets {
    my $self = shift ;
    return $self->{evidence_sets} ;
}
=head2 get_genes_by_evidence_set

Name : get_genes_by_evidence_set($evidence_set)
Arg[1] : String
Function : Returns all genes of an evidence set
(an evidence set contains genes of different biotypes).
If any PredictionTranscripts were specified as an evidence set,
they are converted to genes and returned as well.
Returnval : Arrayref of Bio::EnsEMBL::Gene objects

=cut
# Collects the genes of every biotype belonging to the named evidence set.
# Biotypes with no genes registered are silently skipped.
sub get_genes_by_evidence_set {
    my ( $self, $ev_set ) = @_ ;
    my @genes ;
    foreach my $biotype ( @{ $self->get_biotypes_of_evidence_set($ev_set) } ) {
        my $biotype_genes = $self->get_genes_by_biotype($biotype) ;
        push @genes, @{$biotype_genes} if $biotype_genes ;
    }
    return \@genes ;
}
=head2 get_genes_by_biotype
Name : get_genes_by_biotype($arg)
Arg[1] : String
Function : Returns all genes of a specific biotype (like all simgw_100 or genscan genes)
Returnval : Arrayref of Bio::EnsEMBL::Gene objects
=cut
# Accessor: returns the arrayref of genes registered under $biotype,
# or undef when that biotype is unknown.
sub get_genes_by_biotype {
    my ( $self, $biotype ) = @_ ;
    return $self->{all_genes_href}->{$biotype} ;
}
=head2 get_biotypes_of_evidence_set

Name : get_biotypes_of_evidence_set($arg)
Arg[1] : String
Function : Returns all biotypes of a specific evidence set (like simgw_100, genscan)
Returnval : Arrayref of Strings

=cut
# Accessor: returns the arrayref of biotypes configured for the named
# evidence set, or undef when the set is unknown.
sub get_biotypes_of_evidence_set {
    my ( $self, $ev_set ) = @_ ;
    return $self->{evidence_sets}->{$ev_set} ;
}
#### non object-methods below ###
# _check_if_lg_spans_st
#
# Returns 1 when either transcript genomically spans the other (same
# strand, containing start/end range) while also having at least as many
# exons as the contained one; 0 otherwise. Identical objects never span
# each other.
sub _check_if_lg_spans_st {
    my ( $lg, $st ) = @_ ;
    return 0 if $lg eq $st ;
    # test both orderings: (outer spans inner)
    for my $pair ( [ $lg, $st ], [ $st, $lg ] ) {
        my ( $outer, $inner ) = @$pair ;
        if (   $outer->seq_region_start <= $inner->seq_region_start
            && $outer->seq_region_end >= $inner->seq_region_end
            && $outer->seq_region_strand == $inner->seq_region_strand
            && scalar @{ $outer->get_all_Exons } >= scalar @{ $inner->get_all_Exons } )
        {
            return 1 ;
        }
    }
    return 0 ;
}
# work
sub check_if_all_exons_are_overlapped {
my ($lg,$st) = @_ ;
# check if each exon of $lg has a counterpart in $st
my $test_ex_ok = 0 ;
my %mark_exon ;
my %overlapped_test_exons ;
my $nr_same_exons_conserved = 0 ;
my $exon_3prim_mismatch = 0 ;
my $exon_5prim_mismatch = 0 ;
for my $lge (@{$lg->get_all_Exons }) {
for my $ste (@{$st->get_all_Exons }) {
#print "testing (1)" ; print_object($lge) ;
#print " vs. (2)" ; print_object($ste) ;
if ( $lge->seq_region_start <= $ste->seq_region_start
&& $lge->seq_region_end >= $ste->seq_region_end
&& $lge->seq_region_strand eq $ste->seq_region_strand )
{
#
# THIS EXON $ste is completely overlapped
# |xxxxx| OR |xxxxxxxx| OR |xxxxxxxxx| OR |xxxxxx|
# |xxxxx| |xxxxx| |xxxx| |xxxx|
#
#print "long_exon (1)--> overlaps test_exon(2)\n" ;
#print "(1)" ; print_object($lge) ;
#print "(2)" ; print_object($ste) ;
$nr_same_exons_conserved++ ;
$test_ex_ok++ ;
$mark_exon{$lge->hashkey} = $lge ;
$overlapped_test_exons{$ste->hashkey} = $ste ;
} else {
# no exact match
##print "no exact match between exons\n" ;
# exon is a termial exon of ST
# watch out - here we compare each time the terimal exon of ste against all exons of $lge
# so we have to make sure that we have somehow an overlap or a distance between the
# exons compared !!!!!!!
#
# perhaps we should just check if ste and lge overlap in their genomic extend than
# using an offset of 50 bp for this !
#
#
my $start_diff = abs( $ste->seq_region_start - $lge->seq_region_start ) ;
my $end_diff = abs( $ste->seq_region_end - $lge->seq_region_end ) ;
# this is an extra rule for terminal exons
if ($ste->is_terminal_exon) {
#print "exon is terminal : " ;
if ($start_diff < 25 && $end_diff < 25 ) {
# print "start_end_diff smaller 50\n" ;
#
# exon is 'in range 'of 50 bp to lge
# check how conserved the exon is
#
my $exon_conservation = $ste->get_percentage_exon_conversation_in_exon_cluster() ;
# print "checking percentage conservation exon_conservation: $exon_conservation \n\n" ;
# if ( $exon_conservation < 0.1 && $exon_conservation != 0 )
if ( $exon_conservation < 0.1 ) {
$mark_exon{$lge->hashkey} = $lge ;
$overlapped_test_exons{$ste}=$ste ;
$test_ex_ok++ ;
# print "test_ex_ok $test_ex_ok\n" ;
}else{
# print " percentage consersation too high or zero$exon_conservation\n" ;
}
}
} else {
#
# Exon is not terminal / if there is a boundary mismatch in
# an internal exon
#
my $conservation = $ste->get_percentage_exon_conversation_in_exon_cluster() ;
if ($conservation < 0.1) {
# print "not really conserved boundary\n" ;
}
}
# new rule to remove transcripts which match all exons except the terminal one's
$start_diff = abs( $ste->seq_region_start - $lge->seq_region_start ) ;
$end_diff = abs( $ste->seq_region_end - $lge->seq_region_end ) ;
if ($ste->is_5prim_exon ) {
# print "exon_is_5prim_exon\n" ;
# print_object($ste) ;
$exon_5prim_mismatch = 1 ; #if ( $start_diff < 50 || $end_diff < 50 ) ;
}
if ($ste->is_3prim_exon) {
$exon_3prim_mismatch = 1 ; #if ( $start_diff < 50 || $end_diff < 50 ) ;
}
}
}
}
# print "\n\n" ;
# print "all_exons_tested : test_ex_ok $test_ex_ok \n" ;
# print "all_exons_tested : exon_3prim_mismatch $exon_3prim_mismatch \n" ;
# print "all_exons_tested : exon_5prim_mismatch $exon_5prim_mismatch \n" ;
# print "all_exons_tested : nr_same_exons_conserved $nr_same_exons_conserved \n" ;
#
if ($test_ex_ok == scalar(@{ $st->get_all_Exons } ) ) {
# print "all exons of test-gene are overlapped by anotherone \n" ;
#
# all exons of $st are overlapped by $lg
# now check for exon-skipping
# xxxxxxxxxxxxx-------------xxxxxxxxxxxxxxxx------------xxxxxxxxxxxxxxx
# xxxxxxxxxxxxx-----------------------------------------xxxxxxxxxxxxxxx
#
for my $lger (@{$lg->get_all_Exons }) {
unless (exists $mark_exon{$lger->hashkey}) {
# if there are exons which are on the long one but not in the short one
# now check if mark-exon is outside of boundaries of st-transcript
if ($st->seq_region_start <= $lger->seq_region_end
&& $st->seq_region_end >=$lger->seq_region_end ) {
# overlap of skipped exon and transcript
# print "checkxxx not removed " ; print_object($st) ;
return 0 ;
}else {
# the skipped exon is outside of the short transcript
}
}
}
return 1 ; # remove tr
} elsif ( (scalar(@{ $st->get_all_Exons } ) - $nr_same_exons_conserved) < 2 ) {
if ($exon_3prim_mismatch || $exon_5prim_mismatch) {
#print "removing_tr\n" ;
#print_Transcript($st) ;
return 1 ; # remove tr
}
}
# print "not all exons are ok\n" ;
return 0 ; # dont remove transcript / not all exons are overlapped
}
# _all_exons_visited
#
# Returns 1 when every exon of every cluster in the given arrayref has its
# visited flag set; 0 as soon as one unvisited exon is found.
sub _all_exons_visited {
    my ($exon_cluster_aref) = @_ ;
    foreach my $cluster (@$exon_cluster_aref) {
        foreach my $cluster_exon ( @{ $cluster->get_all_Exons_in_ExonCluster } ) {
            unless ( $cluster_exon->visited ) {
                return 0 ;
            }
        }
    }
    return 1 ;
}
# get_unvisited_exons_in_next_ec
#
# Walks the clusters in order and returns an arrayref of the unvisited
# exons of the FIRST cluster that still has any; returns [] when every
# exon of every cluster has been visited.
sub get_unvisited_exons_in_next_ec {
    my ($exon_cluster_aref) = @_ ;
    foreach my $cluster (@$exon_cluster_aref) {
        my @unvisited = grep { !$_->visited } @{ $cluster->get_all_Exons_in_ExonCluster } ;
        return \@unvisited if @unvisited ;
    }
    return [] ;
}
# compare
#
# Returns 1 when both features are defined and share identical
# seq_region start, end and strand; 0 otherwise (including when either
# argument is missing).
sub compare {
    my ( $feat_a, $feat_b ) = @_ ;
    return 0 unless ( $feat_a && $feat_b ) ;
    my $same = $feat_a->seq_region_start == $feat_b->seq_region_start
            && $feat_a->seq_region_end == $feat_b->seq_region_end
            && $feat_a->seq_region_strand == $feat_b->seq_region_strand ;
    return $same ? 1 : 0 ;
}
=head2 remove_redundant_transcripts

Arg : Arrayref of transcript objects
Function : Removes redundant transcripts by comparing their exon hashkeys
Returns : Arrayref of unique Transcripts

=cut
# remove_redundant_transcripts
#
# Groups transcripts by the concatenation of their exons' hashkeys
# (start/end/strand fingerprints) and keeps one representative per group.
sub remove_redundant_transcripts {
    my ($tr_ref) = @_ ;
    my %transcripts_by_structure ;
    # build a structure fingerprint per transcript and bucket duplicates
    foreach my $transcript (@$tr_ref) {
        my $structure_key = join '', map { $_->hashkey } @{ $transcript->get_all_Exons } ;
        push @{ $transcripts_by_structure{$structure_key} }, $transcript ;
    }
    # one transcript per unique exon structure
    my @non_redundant = map { $transcripts_by_structure{$_}->[0] } keys %transcripts_by_structure ;
    return \@non_redundant ;
}
# print_object
#
# Debug helper: prints an optional caption followed by the feature's
# seq_region start/end/strand and biotype on one line.
sub print_object {
    my ( $feature, $caption ) = @_ ;
    print "$caption :\n" if $caption ;
    printf "srs -- sre:\t%s---%s\t%s\t%s\n",
        $feature->seq_region_start,
        $feature->seq_region_end,
        $feature->seq_region_strand,
        $feature->biotype ;
}
# print_cluster_info
#
# Debug helper: prints a cluster's start/end (shifted into absolute
# coordinates via the slice offset, a correction for apollo) and strand,
# labelled as an exon- or gene-cluster depending on the object's class.
sub print_cluster_info {
    my ( $slice, $cluster ) = @_ ;
    my $label  = ref($cluster) =~ m/Exon/ ? "EXON-CLUSTER" : "GENE-CLUSTER : " ;
    my $offset = $slice->start - 1 ;    # correction for apollo
    printf "%s start: %s\tend: %s\t%s \n",
        $label, $offset + $cluster->start, $offset + $cluster->end, $cluster->strand ;
    return ;
}
# print_exon
#
# Arg[0]     : $self (needs the verbosity flag $self->{v})
# Arg[1]     : exon to print
# Arg[2]     : optional string appended to the output line
# Arg[3]     : optional tab count (currently unused, kept for callers)
# Function   : Verbose-mode debug dump of one exon: its biotype, the
#              hseqname of the transcript's first supporting feature, the
#              exon coordinates printed forwards AND reversed (the reversed
#              pair mirrors the original output - presumably a visual aid
#              for reverse-strand debugging; confirm before changing),
#              strand and biotype again.
# Returnval  : none; prints nothing unless $self->{v} is set
#
sub print_exon {
    my ( $self, $ex, $string, $nr_tab ) = @_ ;
    # silent unless verbose mode is on
    return unless $self->{v} ;
    $string = "" unless $string ;
    $nr_tab = 0  unless $nr_tab ;
    # fetch the transcript once and reuse it (the original re-called the
    # accessor; the previously computed $tr_dbID was never printed, so the
    # dead local has been dropped)
    my $tr = $ex->transcript() ;
    print $ex->biotype if $ex->biotype ;
    my @sup_feat = @{ $tr->get_all_supporting_features } ;
    my $hseq_name = $sup_feat[0] ? $sup_feat[0]->hseqname : "" ;
    print " " . $hseq_name .
        "\t" . $ex->seq_region_start . "---" . $ex->seq_region_end .
        "\t" . $ex->seq_region_end . "---" . $ex->seq_region_start .
        "\t" . $ex->seq_region_strand .
        "\t" . $ex->biotype . "$string" .
        "\n" ;
}
# get_longest_5prim_term_exon_in_exon_cluster_of_this_exon
#
# Scans the exon-cluster of $small_exon for the longest 5-prime terminal
# exon (no prev_exon) that shares $small_exon's internal boundary (the end
# on the forward strand, the start on the reverse strand), so that only a
# straight extension - never an overlapping alternative - is returned.
# Single exons (neither prev nor next) are skipped with a warning.
# Falls back to $small_exon itself when nothing longer qualifies:
#   aaaaaaaaa---aaa-------xxxxxxxx
#        bbbb---bbb-------xxxxxxxx
#   cccccccccccccccccccc--------xxxxxxx   (b may extend to a, but not to c)
sub get_longest_5prim_term_exon_in_exon_cluster_of_this_exon {
    my ($small_exon) = @_ ;
    my $cluster = $small_exon->cluster ;
    my $longest = $small_exon ;
    EXON: foreach my $candidate ( @{ $cluster->get_all_Exons_in_ExonCluster } ) {
        if ( !$candidate->prev_exon && !$candidate->next_exon ) {
            warning("Skipping this exon because this is a single exon\n") ;
            next EXON ;
        }
        # only 5-prime terminal exons longer than the current best qualify
        next EXON if $candidate->prev_exon ;
        next EXON unless $candidate->length > $longest->length ;
        my $strand = $candidate->seq_region_strand ;
        if ( $strand eq "1" ) {
            $longest = $candidate
                if $candidate->seq_region_end == $small_exon->seq_region_end ;
        } elsif ( $strand eq "-1" ) {
            $longest = $candidate
                if $candidate->seq_region_start == $small_exon->seq_region_start ;
        } else {
            throw("exon has no seq_region_strand assigned. exiting\n" ) ;
        }
    }
    return $longest ;
}
# get_longest_3prim_term_exon_in_exon_cluster_of_this_exon
#
# Mirror image of the 5-prime version: scans the exon-cluster of
# $small_exon for the longest 3-prime terminal exon (no next_exon) that
# shares $small_exon's internal boundary (the start on the forward strand,
# the end on the reverse strand). Single exons are skipped with a warning;
# falls back to $small_exon itself when nothing longer qualifies.
sub get_longest_3prim_term_exon_in_exon_cluster_of_this_exon {
    my ($small_exon) = @_ ;
    my $cluster = $small_exon->cluster ;
    my $longest = $small_exon ;
    EXON: foreach my $candidate ( @{ $cluster->get_all_Exons_in_ExonCluster } ) {
        if ( !$candidate->prev_exon && !$candidate->next_exon ) {
            warning("Skipping this exon because this is a single exon\n") ;
            next EXON ;
        }
        # only 3-prime terminal exons longer than the current best qualify
        next EXON if $candidate->next_exon ;
        next EXON unless $candidate->length > $longest->length ;
        my $strand = $candidate->seq_region_strand ;
        if ( $strand eq "1" ) {
            $longest = $candidate
                if $candidate->seq_region_start == $small_exon->seq_region_start ;
        } elsif ( $strand eq "-1" ) {
            $longest = $candidate
                if $candidate->seq_region_end == $small_exon->seq_region_end ;
        } else {
            throw("exon has no seq_region_strand assigned. exiting\n" ) ;
        }
    }
    return $longest ;
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Runnable/TranscriptCoalescer.pm | Perl | apache-2.0 | 63,268 |
#!/usr/bin/env perl
# Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Example script: fetch and print the reciprocal-hit peptide align
# features (PAFs) for the canonical peptide of one Ensembl gene.

use strict;
use warnings;

use Bio::EnsEMBL::Registry;
use Getopt::Long;

my $desc = "
USAGE: getPeptideAlignFeature [options]
WHERE options are:
--url ensembl_db_url
mysql://anonymous\@ensembldb.ensembl.org, for example
--conf|--registry registry_file
ensembl registry configuration file
--compara_url compara_db_url
mysql://anonymous\@ensembldb.ensembl.org/ensembl_compara_57, for example
--gene_stable_id|--stable_id ensembl_gene_stable_id
ENSG00000060069, for example
Only one of url, conf or compara_url are required. If none is provided, the
script will look for the registry configuration file in the standard place.
";

my $reg = "Bio::EnsEMBL::Registry";

# command-line options (see $desc above for meanings)
my $help;
my $registry_file;
my $url;
my $compara_url;
my $gene_stable_id = "ENSG00000060069";    # default example gene

GetOptions(
    "help" => \$help,
    "url=s" => \$url,
    "compara_url=s" => \$compara_url,
    "conf|registry=s" => \$registry_file,
    "gene_stable_id|stable_id=s" => \$gene_stable_id,
);

if ($help) {
    print $desc;
    exit(0);
}

# configure the registry: explicit file, URL, or the default location
if ($registry_file) {
    die if (!-e $registry_file);
    $reg->load_all($registry_file);
} elsif ($url) {
    $reg->load_registry_from_url($url);
} else {
    $reg->load_all();
}

# connect to the compara database, either directly by URL or via the registry
my $compara_dba;
if ($compara_url) {
    use Bio::EnsEMBL::Compara::DBSQL::DBAdaptor;
    $compara_dba = Bio::EnsEMBL::Compara::DBSQL::DBAdaptor->new(-url=>$compara_url);
} else {
    $compara_dba = $reg->get_DBAdaptor("Multi", "compara");
}

###########################
#
# advanced example which uses a recursive approach
# to build single linkage clusters within a species set
#
###########################

# resolve gene -> canonical peptide member
my $gene_member = $compara_dba->get_GeneMemberAdaptor->fetch_by_source_stable_id("ENSEMBLGENE", $gene_stable_id);
my $peptide_member = $gene_member->get_canonical_SeqMember;
print "QUERY PEP: ";
$peptide_member->print_member();

# fetch all reciprocal-hit PAFs for that peptide
my $peptide_align_feature_adaptor = $compara_dba->get_PeptideAlignFeatureAdaptor;
my $peptide_align_features = $peptide_align_feature_adaptor->fetch_all_RH_by_member($peptide_member->dbID);

# loop through and print
foreach my $this_peptide_align_feature (@{$peptide_align_features}) {
    $this_peptide_align_feature->display_short;
}

exit(0);
| dbolser-ebi/ensembl-compara | scripts/examples/homology_getPeptideAlignFeature.pl | Perl | apache-2.0 | 2,837 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package XrefParser::AnoXcelParser;

use strict;
use Carp;
use POSIX qw(strftime);
use File::Basename;

use base qw( XrefParser::BaseParser );

# Parses the AnoXcel external description file. Tab-separated columns:
#
#   Protein        mRNA           Location              Gene stable_id
#   AGAP004677-PB  AGAP004677-RB  2L:157496-159356:-1   AGAP004677
#   AGAP004677-PA  AGAP004677-RA  2L:157496-181213:-1   AGAP004677
#   AGAP004678-PA  AGAP004678-RA  2L:203866-204956:1    AGAP004678
#   ...
#
# Only the gene stable id (column 4) is used: one xref per distinct gene id
# plus one direct xref linking it to the Ensembl gene.
#
# Returns 0 on success, 1 when the input file cannot be opened.
sub run {
    my ( $self, $ref_arg ) = @_;

    my $source_id    = $ref_arg->{source_id};
    my $species_id   = $ref_arg->{species_id};
    my $files        = $ref_arg->{files};
    my $release_file = $ref_arg->{rel_file};
    my $verbose      = $ref_arg->{verbose};

    unless ( defined $source_id and defined $species_id and defined $files ) {
        croak "Need to pass source_id, species_id, files and rel_file as pairs";
    }
    $verbose |= 0;

    my $file = $files->[0];
    print "source_id = $source_id, species= $species_id, file = $file\n" if($verbose);

    my $direct_count = 0;
    my $xref_count   = 0;

    my $fh = $self->get_filehandle($file);
    unless ( defined $fh ) {
        print STDERR "ERROR: Could not open file $file\n";
        return 1;
    }

    while ( my $line = $fh->getline() ) {
        chomp $line;
        # protein/mRNA/location columns are ignored; the gene stable id
        # becomes the xref accession
        my ( $protein_id, $mRNA_id, $loc, $gene_id ) = split( "\t", $line );

        my $xref_id = $self->get_xref( $gene_id, $source_id, $species_id );
        unless ( defined $xref_id ) {
            $xref_id = $self->add_xref( { acc        => $gene_id,
                                          label      => $gene_id,
                                          desc       => $gene_id,
                                          source_id  => $source_id,
                                          species_id => $species_id,
                                          info_type  => "DIRECT" } );
            $xref_count++;
        }
        if ( defined $gene_id and $gene_id ne "-" ) {
            $self->add_direct_xref( $xref_id, $gene_id, "Gene", "" );
            $direct_count++;
        }
    }
    $fh->close();

    print "Added $xref_count xrefs and $direct_count Direct xrefs to genes for AnoXcel\n" if($verbose);
    return 0;
}

1;
| danstaines/ensembl | misc-scripts/xref_mapping/XrefParser/AnoXcelParser.pm | Perl | apache-2.0 | 2,657 |
=head1 LICENSE
Copyright [2009-2014] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 MODIFICATIONS
Copyright [2014-2016] University of Edinburgh
All modifications licensed under the Apache License, Version 2.0, as above.
=cut
package EnsEMBL::Web::Component::Shared;
use strict;
##################################
### BEGIN LEPBASE MODIFICATIONS...
##################################
# sequenceserver_button
# Builds an HTML <form> that POSTs the given sequence (as a FASTA record
# headed by $title) to the configured SequenceServer BLAST_URL, with the
# anchor from sequenceserver_link() as the visible submit trigger.
# $label is used both as the form-id suffix and as the link text.
# (The multi-line quoted strings below intentionally keep their exact
# layout: the embedded newlines are part of the generated markup.)
sub sequenceserver_button {
my ($self,$title,$sequence,$label) = @_;
my $blast_url = $self->hub->species_defs->BLAST_URL;
my $button = '
<form id="nt_blast_form_'.$label.'" target="_blank" action="'.$blast_url.'" method="POST">
<input type="hidden" name="input_sequence" value=">'.$title."\n".$sequence.'">
'.sequenceserver_link($title,$sequence,$label).'
</form>';
return $button;
}
# sequenceserver_link
# Returns the anchor that submits the form created by sequenceserver_button().
# NOTE(review): called as a plain sub (no $self), and the $title and
# $sequence arguments are currently unused here - confirm before pruning.
sub sequenceserver_link {
my ($title,$sequence,$label) = @_;
my $link = '<a href="#" onclick="document.getElementById(\'nt_blast_form_'.$label.'\').submit();" class="button toggle no_img" style="float:left" title="Click to BLAST against Lepidoptera genes and genomes (opens a new window)">'.$label.'</a>';
return $link;
}
# gene_tree_button
# Returns a simple styled link ("button") pointing at $url with text $label.
sub gene_tree_button {
my ($self,$url,$label) = @_;
my $button = '<a class="button" href="'.$url.'">'.$label.'</a>';
return $button;
}
##################################
### ...END LEPBASE MODIFICATIONS
##################################
1;
| lepbase/lepbase-ensembl | modules/EnsEMBL/Web/Component/Shared.pm | Perl | apache-2.0 | 1,953 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Centreon plugin mode: checks memory usage on a Bluecoat appliance via
# SNMP. The exact output and perfdata strings below form the monitoring
# contract, so the code is left untouched and only documented.
package network::bluecoat::snmp::mode::memory;

use base qw(centreon::plugins::mode);

use strict;
use warnings;

# Constructor: registers the mode's command-line options with the
# framework (thresholds in percent, plus --nocache).
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
        {
            "warning:s"  => { name => 'warning' },
            "critical:s" => { name => 'critical' },
            "nocache"    => { name => 'nocache' },
        });
    return $self;
}

# Validates the warning/critical thresholds; exits with a usage error
# when either cannot be parsed by the framework.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{option_results}->{warning})) == 0) {
        $self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{option_results}->{warning} . "'.");
        $self->{output}->option_exit();
    }
    if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{option_results}->{critical})) == 0) {
        $self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{option_results}->{critical} . "'.");
        $self->{output}->option_exit();
    }
}

# Fetches the Bluecoat memory table (.1.3.6.1.4.1.3417.2.11.2.3):
#   .1 = total memory, .2 = cache memory, .3 = system memory.
# "Used" is system memory, plus cache unless --nocache was given; the
# percentage is checked against the thresholds and emitted as perfdata.
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    # the table walk below requires GETBULK, which SNMPv1 lacks
    if ($self->{snmp}->is_snmpv1()) {
        $self->{output}->add_option_msg(short_msg => "Need to use SNMP v2c or v3.");
        $self->{output}->option_exit();
    }

    my $result = $self->{snmp}->get_table(oid => '.1.3.6.1.4.1.3417.2.11.2.3', nothing_quit => 1);
    my $mem_total = $result->{'.1.3.6.1.4.1.3417.2.11.2.3.1.0'};
    my $mem_cache = $result->{'.1.3.6.1.4.1.3417.2.11.2.3.2.0'};
    my $mem_sys = $result->{'.1.3.6.1.4.1.3417.2.11.2.3.3.0'};

    my $mem_used;
    if (defined($self->{option_results}->{nocache})) {
        $mem_used = $mem_sys;
    } else {
        $mem_used = $mem_sys + $mem_cache;
    }

    # NOTE(review): a zero/undefined total from the agent would make this
    # division die uncleanly - confirm whether the framework guards it
    my $prct_used = sprintf("%.2f", $mem_used * 100 / $mem_total);
    my $exit = $self->{perfdata}->threshold_check(value => $prct_used, threshold => [ { label => 'critical', exit_litteral => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
    my ($used_value, $used_unit) = $self->{perfdata}->change_bytes(value => $mem_used);
    my ($total_value, $total_unit) = $self->{perfdata}->change_bytes(value => $mem_total);

    $self->{output}->output_add(severity => $exit,
                                short_msg => sprintf("Memory used : %s - size : %s - percent : %.2f %%",
                                    $used_value . ' ' . $used_unit, $total_value . ' ' . $total_unit,
                                    $prct_used));
    $self->{output}->perfdata_add(label => 'used', unit => 'B',
                                  value => $mem_used,
                                  warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning', total => $mem_total, cast_int => 1),
                                  critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical', total => $mem_total, cast_int => 1),
                                  min => 0, max => $mem_total);

    $self->{output}->display();
    $self->{output}->exit();
}

1;

__END__

=head1 MODE

Check bluecoat memory.

=over 8

=item B<--warning>

Threshold warning in percent.

=item B<--critical>

Threshold critical in percent.

=item B<--nocache>

Skip cache value.

=back

=cut
| wilfriedcomte/centreon-plugins | network/bluecoat/snmp/mode/memory.pm | Perl | apache-2.0 | 4,406 |
package VMOMI::LocalTSMEnabledEvent;
use parent 'VMOMI::HostEvent';

use strict;
use warnings;

# Ancestor chain used for (de)serialization, nearest parent first.
our @class_ancestors = qw(
    HostEvent
    Event
    DynamicData
);

# This event type declares no members of its own.
our @class_members = ( );

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    # inherited members first, then (none) of our own
    return ( $class->SUPER::get_class_members(), @class_members );
}

1;
| stumpr/p5-vmomi | lib/VMOMI/LocalTSMEnabledEvent.pm | Perl | apache-2.0 | 418 |
package VMOMI::ArrayOfVirtualMachineBootOptionsBootableDevice;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# No ancestors beyond the ComplexType base.
our @class_ancestors = ( );

# Single repeated member: [name, type, is_array, is_optional].
our @class_members = (
    ['VirtualMachineBootOptionsBootableDevice', 'VirtualMachineBootOptionsBootableDevice', 1, 1],
);

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    # inherited members first, then our own
    return ( $class->SUPER::get_class_members(), @class_members );
}

1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfVirtualMachineBootOptionsBootableDevice.pm | Perl | apache-2.0 | 495 |
#!/usr/bin/perl
# Builds and serializes the "mem" (memory) service definition for Linux:
# four metrics (memory %, swap %, page-in KB/s, page-out KB/s) plus one
# paging-activity graph, then dumps the structure for inspection and stores
# it under etc/configs.
#
# Fixes over the previous revision:
#   * stray double comma after optionsArray removed
#   * "Ref:" debug print used ref() inside double quotes, which does not
#     interpolate; now prints the actual class name
#   * strict/warnings enabled; all variables lexically declared
#   * barewords GAUGE and U quoted (under no-strict they already stringified
#     to 'GAUGE'/'U' -- NOTE(review): confirm Metric does not export GAUGE/U
#     constants with different values)
use strict;
use warnings;

use lib "/perfstat/dev/1.40/lib";
use Service;
use Metric;
use Graph;

my $perfhome = "/perfstat/dev/1.40";

#create new service
my $service = Service->new(
    RRA             => "RRA:AVERAGE:0.5:1:288 RRA:AVERAGE:0.5:7:288 RRA:AVERAGE:0.5:30:288 RRA:AVERAGE:0.5:365:288",
    operatingSystem => "Linux",
    serviceName     => "mem",
);

my $obj;

#add metric 0: overall memory utilization (percent)
$obj = Metric->new(
    rrdIndex      => 0,
    metricName    => "memUsedPct",
    friendlyName  => "Memory Utilization",
    rrdDST        => 'GAUGE',    # RRD data source type (was bareword GAUGE)
    rrdHeartbeat  => 600,
    rrdMin        => 0,
    rrdMax        => 'U',        # RRD "unknown" maximum (was bareword U)
    hasEvents     => 1,
    warnThreshold => 90,
    critThreshold => 95,
    thresholdUnit => "Percent",
);
$service->addMetric($obj);

#add metric 1: swap utilization (percent)
$obj = Metric->new(
    rrdIndex      => 1,
    metricName    => "swapUsedPct",
    friendlyName  => "Swap Utilization",
    rrdDST        => 'GAUGE',
    rrdHeartbeat  => 600,
    rrdMin        => 0,
    rrdMax        => 'U',
    hasEvents     => 1,
    warnThreshold => 80,
    critThreshold => 90,
    thresholdUnit => "Percent",
);
$service->addMetric($obj);

#add metric 2: pages read in (KB/sec)
$obj = Metric->new(
    rrdIndex      => 2,
    metricName    => "pageInKB",
    friendlyName  => "Pages In",
    rrdDST        => 'GAUGE',
    rrdHeartbeat  => 600,
    rrdMin        => 0,
    rrdMax        => 'U',
    hasEvents     => 0,
    warnThreshold => 1000,
    critThreshold => 5000,
    thresholdUnit => "KB/Sec",
);
$service->addMetric($obj);

#add metric 3: pages written out (KB/sec)
$obj = Metric->new(
    rrdIndex      => 3,
    metricName    => "pageOutKB",
    friendlyName  => "Pages Out",
    rrdDST        => 'GAUGE',
    rrdHeartbeat  => 600,
    rrdMin        => 0,
    rrdMax        => 'U',
    hasEvents     => 0,
    warnThreshold => 500,
    critThreshold => 1000,
    thresholdUnit => "KB/Sec",
);
$service->addMetric($obj);

#add graph 0: paging activity (in vs. out)
$obj = Graph->new(
    name         => "paging",
    title        => "Paging Activity on",
    y_axis       => "KB",
    legend       => "",
    optionsArray => [qq{-u 100}, qq{-r}],
    defArray     => [q{DEF:pageInKB=$RRD:pageInKB:AVERAGE}, q{DEF:pageOutKB=$RRD:pageOutKB:AVERAGE}],
    cdefArray    => [],
    lineArray    => [qq{LINE2:pageInKB#FF0000:Pages In}, qq{LINE2:pageOutKB#00CC00:Pages Out}],
    text         => "",
);
$service->addGraph($obj);

#print out this service
print 'Ref: ' . ref($service) . "\n";    # ref() cannot interpolate in ""
my $os          = $service->getOS();
my $serviceName = $service->getServiceName();
my $RRA         = $service->getRRA();
print "OS: $os\n";
print "serviceName: $serviceName\n";
print "RRA: $RRA\n";

#print out this services metrics
my $arrayLength = $service->getMetricArrayLength();
print "metric Array Length = $arrayLength\n\n";
for my $counter (0 .. $arrayLength - 1) {
    my $metricObject  = $service->{metricArray}->[$counter];
    my $rrdIndex      = $metricObject->getRRDIndex();
    my $rrdDST        = $metricObject->getRRDDST();
    my $rrdHeartbeat  = $metricObject->getRRDHeartbeat();
    my $rrdMin        = $metricObject->getRRDMin();
    my $rrdMax        = $metricObject->getRRDMax();
    my $metricName    = $metricObject->getMetricName();
    my $friendlyName  = $metricObject->getFriendlyName();
    my $hasEvents     = $metricObject->getHasEvents();
    my $warnThreshold = $metricObject->getWarnThreshold();
    my $critThreshold = $metricObject->getCritThreshold();
    my $thresholdUnit = $metricObject->getThresholdUnit();
    print "rrdIndex: $rrdIndex\n";
    print "rrdDST: $rrdDST\n";
    print "rrdHeartbeat: $rrdHeartbeat\n";
    print "rrdMin: $rrdMin\n";
    print "rrdMax: $rrdMax\n";
    print "metricName: $metricName\n";
    print "friendlyName: $friendlyName\n";
    print "hasEvents: $hasEvents\n";
    print "warnThreshold: $warnThreshold\n";
    print "critThreshold: $critThreshold\n";
    print "threshUnit: $thresholdUnit\n\n";
}

#print out this services graphs
$arrayLength = $service->getGraphArrayLength();
print "graph Array Length = $arrayLength\n\n";
for my $counter (0 .. $arrayLength - 1) {
    my $graphObject = $service->{graphArray}->[$counter];
    my $name   = $graphObject->getName();
    my $title  = $graphObject->getTitle();
    my $y_axis = $graphObject->getYaxis();
    my $legend = $graphObject->getLegend();
    my $text   = $graphObject->getText();
    print "name: $name\n";
    print "title: $title\n";
    print "y_axis: $y_axis\n";
    print "legend: $legend\n";
    print "text: $text\n";
    for my $counter2 (0 .. $graphObject->getOptionsArrayLength() - 1) {
        print "option: $graphObject->{optionsArray}->[$counter2]\n";
    }
    for my $counter2 (0 .. $graphObject->getDefArrayLength() - 1) {
        print "def: $graphObject->{defArray}->[$counter2]\n";
    }
    for my $counter2 (0 .. $graphObject->getCdefArrayLength() - 1) {
        print "cdef: $graphObject->{cdefArray}->[$counter2]\n";
    }
    for my $counter2 (0 .. $graphObject->getLineArrayLength() - 1) {
        print "line: $graphObject->{lineArray}->[$counter2]\n";
    }
    print "\n";
}

#Store the service
$service->store("$perfhome/etc/configs/$service->{operatingSystem}/$service->{serviceName}.ser")
    or die("can't store $service->{serviceName}.ser?\n");
| ktenzer/perfstat | misc/serialize/create/Linux/62603/mem.pl | Perl | apache-2.0 | 5,103 |
package VMOMI::ArrayOfClusterAction;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# Array wrapper types have no VMOMI ancestors of their own.
our @class_ancestors = ( );

# Single repeated member holding the wrapped element type.
our @class_members = (
    ['ClusterAction', 'ClusterAction', 1, 1],
);

# Return the (empty) ordered list of ancestor class names.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return inherited member descriptors followed by this class's own.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfClusterAction.pm | Perl | apache-2.0 | 417 |
#
#===============================================================================
#
#         FILE: Module.pm
#
# DESCRIPTION:
#
# FILES: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: YOUR NAME (),
# ORGANIZATION:
# VERSION: 1.0
# CREATED: 01/21/2014 09:57:37 PM
# REVISION: ---
#===============================================================================
package Module;

use Modern::Perl 2013;
use Moose;

# Identity and ownership of this module instance.
has 'Manager' => ( is => 'ro', isa => 'Manager', required => 1 );
has 'Name'    => ( is => 'ro', isa => 'Str',     required => 1 );
has 'GUID'    => ( is => 'ro', isa => 'Str',     required => 1 );

# Message direction and payload type this module handles; presumably set by
# subclasses or at construction -- TODO confirm against callers.
has '__direction' => ( is => 'ro', isa => 'Str' );
has '__type'      => ( is => 'ro', isa => 'Str' );

# True iff $message travels in this module's direction AND its Content hash
# carries this module's payload type as a key.  Uses a direct exists() lookup
# instead of the original linear grep over keys (which also re-called the
# __type accessor once per key).
sub accepts {
    my ( $self, $message ) = @_;

    return 0 unless $message->Direction eq $self->__direction;
    return 0 unless exists $message->Content->{ $self->__type };
    return 1;
}

# Deliver $output; placeholder implementation that only logs.
sub send {
    my ( $self, $output ) = @_;
    say "Module write";
}

no Moose;
__PACKAGE__->meta->make_immutable;
1;
| nornagest/rpi_experiments | RPiManager/Module.pm | Perl | bsd-3-clause | 1,071 |
package App::Netdisco::Web::Plugin::Search::VLAN;

# Netdisco web plugin providing the "VLAN" search tab: lists devices
# carrying a given VLAN, looked up by number or by name.

use Dancer ':syntax';
use Dancer::Plugin::DBIC;
use Dancer::Plugin::Auth::Extensible;

use App::Netdisco::Web::Plugin;

# Register the tab with the search UI; also exposed as a CSV download and a
# JSON API endpoint taking a single required parameter 'q'.
register_search_tab({
    tag => 'vlan',
    label => 'VLAN',
    provides_csv => 1,
    api_endpoint => 1,
    api_parameters => [
      q => {
        description => 'VLAN name or number',
        required => 1,
      },
    ],
});

# devices carrying vlan xxx
get '/ajax/content/search/vlan' => require_login sub {
    my $q = param('q');
    send_error( 'Missing query', 400 ) unless $q;
    return unless ($q =~ m/\w/); # need some alphanum at least

    my $rs;

    # All-digits queries are treated as a VLAN number, anything else as a
    # VLAN name (resultset helpers defined on the Device result class).
    if ( $q =~ m/^\d+$/ ) {
        $rs = schema('netdisco')->resultset('Device')
          ->carrying_vlan( { vlan => $q } );
    }
    else {
        $rs = schema('netdisco')->resultset('Device')
          ->carrying_vlan_name( { name => $q } );
    }

    # hri = plain hashrefs rather than row objects, for JSON/CSV output.
    my @results = $rs->hri->all;
    return unless scalar @results;

    # AJAX requests get the HTML fragment (results embedded as JSON);
    # otherwise serve the same data as CSV.
    if (request->is_ajax) {
        my $json = to_json( \@results );
        template 'ajax/search/vlan.tt', { results => $json }, { layout => 'noop' };
    }
    else {
        header( 'Content-Type' => 'text/comma-separated-values' );
        template 'ajax/search/vlan_csv.tt', { results => \@results }, { layout => 'noop' };
    }
};

1;
| netdisco/netdisco | lib/App/Netdisco/Web/Plugin/Search/VLAN.pm | Perl | bsd-3-clause | 1,309 |
########################################################################
# Bio::KBase::ObjectAPI::KBaseFBA::DB::FBAComparisonFBA - This is the moose object corresponding to the KBaseFBA.FBAComparisonFBA object
# Authors: Christopher Henry, Scott Devoid, Paul Frybarger
# Contact email: chenry@mcs.anl.gov
# Development location: Mathematics and Computer Science Division, Argonne National Lab
########################################################################
package Bio::KBase::ObjectAPI::KBaseFBA::DB::FBAComparisonFBA;
# Appears machine-generated from the KBaseFBA.FBAComparisonFBA type spec;
# prefer regenerating over hand-editing, except for the attribute-map fix
# noted below.
use Bio::KBase::ObjectAPI::BaseObject;
use Moose;
use namespace::autoclean;
extends 'Bio::KBase::ObjectAPI::BaseObject';


# PARENT:
has parent => (is => 'rw', isa => 'Ref', weak_ref => 1, type => 'parent', metaclass => 'Typed');
# ATTRIBUTES:
has uuid => (is => 'rw', lazy => 1, isa => 'Str', type => 'msdata', metaclass => 'Typed',builder => '_build_uuid');
has _reference => (is => 'rw', lazy => 1, isa => 'Str', type => 'msdata', metaclass => 'Typed',builder => '_build_reference');
has uptake_compounds => (is => 'rw', isa => 'Int', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has media_ref => (is => 'rw', isa => 'Str', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has excretion_compounds => (is => 'rw', isa => 'Int', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has forward_reactions => (is => 'rw', isa => 'Int', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has reverse_reactions => (is => 'rw', isa => 'Int', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has compounds => (is => 'rw', isa => 'Int', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has objective => (is => 'rw', isa => 'Num', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has reactions => (is => 'rw', isa => 'Int', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has fbamodel_ref => (is => 'rw', isa => 'Str', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has id => (is => 'rw', isa => 'Str', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has fba_ref => (is => 'rw', isa => 'Str', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has fba_similarity => (is => 'rw', isa => 'HashRef', printOrder => '-1', default => sub {return {};}, type => 'attribute', metaclass => 'Typed');


# LINKS:
has media => (is => 'rw', type => 'link(Bio::KBase::ObjectAPI::KBaseStore,Media,media_ref)', metaclass => 'Typed', lazy => 1, builder => '_build_media', clearer => 'clear_media', isa => 'Bio::KBase::ObjectAPI::KBaseBiochem::Media', weak_ref => 1);
has fbamodel => (is => 'rw', type => 'link(Bio::KBase::ObjectAPI::KBaseStore,FBAModel,fbamodel_ref)', metaclass => 'Typed', lazy => 1, builder => '_build_model', clearer => 'clear_model', isa => 'Bio::KBase::ObjectAPI::KBaseFBA::FBAModel', weak_ref => 1);
has fba => (is => 'rw', type => 'link(Bio::KBase::ObjectAPI::KBaseStore,FBA,fba_ref)', metaclass => 'Typed', lazy => 1, builder => '_build_fba', clearer => 'clear_fba', isa => 'Bio::KBase::ObjectAPI::KBaseFBA::FBA', weak_ref => 1);


# BUILDERS:
sub _build_reference { my ($self) = @_;return $self->parent()->_reference().'/fbas/id/'.$self->id(); }
sub _build_uuid { my ($self) = @_;return $self->_reference(); }
sub _build_media {
	 my ($self) = @_;
	 return $self->getLinkedObject($self->media_ref());
}
sub _build_model {
	 my ($self) = @_;
	 return $self->getLinkedObject($self->fbamodel_ref());
}
sub _build_fba {
	 my ($self) = @_;
	 return $self->getLinkedObject($self->fba_ref());
}


# CONSTANTS:
sub _type { return 'KBaseFBA.FBAComparisonFBA'; }
sub _module { return 'KBaseFBA'; }
sub _class { return 'FBAComparisonFBA'; }
sub _top { return 0; }

# Ordered attribute descriptors; _attributes() below indexes into this list.
my $attributes = [
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'uptake_compounds',
            'type' => 'Int',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'media_ref',
            'type' => 'Str',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'excretion_compounds',
            'type' => 'Int',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'forward_reactions',
            'type' => 'Int',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'compounds',
            'type' => 'Int',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'objective',
            'type' => 'Num',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'reactions',
            'type' => 'Int',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'fbamodel_ref',
            'type' => 'Str',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'id',
            'type' => 'Str',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'fba_ref',
            'type' => 'Str',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'fba_similarity',
            'default' => 'sub {return {};}',
            'type' => 'HashRef',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'reverse_reactions',
            'type' => 'Int',
            'perm' => 'rw'
          }
        ];

# Maps attribute name -> index in $attributes.  FIXED: 'fba_similarity' was
# missing and 'reverse_reactions' pointed at index 10 (the fba_similarity
# descriptor); every name now maps to its true position in the list above.
my $attribute_map = {uptake_compounds => 0, media_ref => 1, excretion_compounds => 2, forward_reactions => 3, compounds => 4, objective => 5, reactions => 6, fbamodel_ref => 7, id => 8, fba_ref => 9, fba_similarity => 10, reverse_reactions => 11};
# Return one attribute descriptor by name (undef-like empty return when the
# name is unknown), or the full descriptor list when called without a key.
sub _attributes {
	 my ($self, $key) = @_;
	 if (defined($key)) {
	 	 my $ind = $attribute_map->{$key};
	 	 if (defined($ind)) {
	 	 	 return $attributes->[$ind];
	 	 } else {
	 	 	 return;
	 	 }
	 } else {
	 	 return $attributes;
	 }
}

# Ordered link descriptors (lazy-built cross-references to other objects).
my $links = [
          {
            'attribute' => 'media_ref',
            'parent' => 'Bio::KBase::ObjectAPI::KBaseStore',
            'clearer' => 'clear_media',
            'name' => 'media',
            'method' => 'Media',
            'class' => 'Bio::KBase::ObjectAPI::KBaseBiochem::Media',
            'module' => 'KBaseBiochem'
          },
          {
            'attribute' => 'fbamodel_ref',
            'parent' => 'Bio::KBase::ObjectAPI::KBaseStore',
            'clearer' => 'clear_model',
            'name' => 'model',
            'method' => 'FBAModel',
            'class' => 'Bio::KBase::ObjectAPI::KBaseFBA::FBAModel',
            'module' => 'KBaseFBA'
          },
          {
            'attribute' => 'fba_ref',
            'parent' => 'Bio::KBase::ObjectAPI::KBaseStore',
            'clearer' => 'clear_fba',
            'name' => 'fba',
            'method' => 'FBA',
            'class' => 'Bio::KBase::ObjectAPI::KBaseFBA::FBA',
            'module' => 'KBaseFBA'
          }
        ];

my $link_map = {media => 0, model => 1, fba => 2};
# Return one link descriptor by name, or the full list when no key is given.
sub _links {
	 my ($self, $key) = @_;
	 if (defined($key)) {
	 	 my $ind = $link_map->{$key};
	 	 if (defined($ind)) {
	 	 	 return $links->[$ind];
	 	 } else {
	 	 	 return;
	 	 }
	 } else {
	 	 return $links;
	 }
}

# This type has no subobjects; lookups mirror _attributes/_links.
my $subobjects = [];

my $subobject_map = {};
sub _subobjects {
	 my ($self, $key) = @_;
	 if (defined($key)) {
	 	 my $ind = $subobject_map->{$key};
	 	 if (defined($ind)) {
	 	 	 return $subobjects->[$ind];
	 	 } else {
	 	 	 return;
	 	 }
	 } else {
	 	 return $subobjects;
	 }
}
__PACKAGE__->meta->make_immutable;
1;
| kbase/KBaseFBAModeling | lib/Bio/KBase/ObjectAPI/KBaseFBA/DB/FBAComparisonFBA.pm | Perl | mit | 8,000 |
#!/usr/bin/perl -w
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This script takes ct-verif output, and counts the number of successful
# and failed tests. If these match expected, it returns 0. Else it dies
# with a non-zero exit code.
use strict;
use warnings;

# Strip leading and trailing whitespace from a string.
sub trim { my $s = shift; $s =~ s/^\s+|\s+$//g; return $s }

die "usage: count_success.pl expected_success expected_failures"
    unless @ARGV == 2;

my $expected_success = shift;
my $expected_failure = shift;

# Functions the verified module is allowed to leave undefined.
my %allowed_undefined = (
    "__CONTRACT_invariant" => 1,
    "malloc"               => 1,
    "nondet"               => 1,
);
my @undefined_functions;

my $verified = 0;
my $errors   = 0;

# Echo ct-verif output while tallying results from the Boogie summary line
# and collecting any unexpected undefined functions.
while (defined(my $line = <STDIN>)) {
    print $line;

    #Check if the code under test used unexpected functions
    if ($line =~ /warning: module contains undefined functions:([a-zA-Z0-9_, ]+)/) {
        print "found undefined\n\n";
        for my $fn (split(",", $1)) {
            my $name = trim($fn);
            push @undefined_functions, $name
                unless $allowed_undefined{$name};
        }
    }

    #Count the number of errors / successes
    if ($line =~ /Boogie program verifier finished with (\d+) verified, (\d+) error/) {
        $verified += $1;
        $errors   += $2;
    }
}

if ($verified == $expected_success and $errors == $expected_failure) {
    print "verified: $verified errors: $errors as expected\n";
}
else {
    die "ERROR:\tExpected \tverified: $expected_success\terrors: $expected_failure.\n\tGot\t\tverified: $verified\terrors: $errors.\n";
}

die "Unable to prove that code was constant time due to the presence of external functions: @undefined_functions\n"
    if @undefined_functions;
| gibson-compsci/s2n | tests/sidetrail/count_success.pl | Perl | apache-2.0 | 2,170 |
# !!!!!!!   DO NOT EDIT THIS FILE   !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.1.0.  Any changes made here will be lost!

# !!!!!!!   INTERNAL PERL USE ONLY   !!!!!!!
# This file is for internal use by core Perl only.  The format and even the
# name or existence of this file are subject to change without notice.  Don't
# use it directly.

# Returns (as a heredoc string) tab-separated inclusive hex code-point
# ranges for this Unicode property -- per the file path, presumably
# Script_Extensions=Avestan (Avst); regenerated data, treat as read-only.
return <<'END';
10B00	10B35
10B39	10B3F
END
| efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/perl5/5.16.2/unicore/lib/Scx/Avst.pl | Perl | apache-2.0 | 447 |
package DDG::Goodie::Regexp;
# ABSTRACT: Parse a regexp and list the matches

use strict;
use warnings;
use DDG::Goodie;
# Safe compartment is used to compile user-supplied patterns (see
# get_match_record below).
use Safe;

# DDG::Goodie DSL: this instant answer is of type "regexp" and its result
# may be cached.
zci answer_type => "regexp";
zci is_cached => 1;

# Activate on queries starting with these words, or containing '=~' anywhere.
triggers start => 'regex', 'match', 'regexp';
triggers any => '=~';
# Hand the raw pattern and modifier string to the (sandboxed) compiler
# coderef and return the resulting compiled regex.
sub compile_re {
    my ($pattern, $mods, $compiler) = @_;
    return $compiler->($pattern, $mods);
}
# Avoid $& (it imposes a program-wide performance penalty); instead rebuild
# the full text of the most recent successful match from the @-/@+ offset
# arrays, slicing it out of the original subject string.
sub get_full_match {
    my $subject = shift;
    return substr($subject, $-[0], $+[0] - $-[0]);
}
# Perl only sets $1..$n on a successful capturing match; when the first
# capture is undef the whole match produced no numbered groups, so report
# an empty list rather than the bogus values passed in.
sub real_number_matches {
    my $first = shift;
    return () unless defined $first;
    return @_;
}
# Run $regexp (with $modifiers) against $str and return a hashref describing
# the match: 'Full Match', each named capture as 'Named Capture <name>', and
# each numbered group as 'Subpattern Match N'.  Returns nothing when the
# compiler cannot be built or the pattern does not match.
sub get_match_record {
    my ($regexp, $str, $modifiers) = @_;

    # Compile the user-supplied pattern inside a Safe compartment so hostile
    # query input cannot execute arbitrary code.
    my $compiler = Safe->new->reval(q { sub { qr/(?$_[1])$_[0]/ } }) or return;

    # Suppress only the "uninitialized value in regexp compilation" warning
    # an empty modifier string can trigger.  Scoped with local() for the
    # duration of this call -- the previous BEGIN block ran once at compile
    # time and permanently clobbered the process-wide __WARN__ handler.
    local $SIG{'__WARN__'} = sub {
        warn $_[0] if $_[0] !~ /Use of uninitialized value in regexp compilation/i;
    };

    # List-context match; 'or return' fires when the match yields no elements.
    my @numbered = $str =~ compile_re($regexp, $modifiers, $compiler) or return;
    @numbered = real_number_matches($1, @numbered);

    my $matches = {};
    $matches->{'Full Match'} = get_full_match($str);

    # %+ holds named captures from the match above.
    foreach my $match (keys %+) {
        $matches->{"Named Capture <$match>"} = $+{$match};
    }

    my $i = 1;
    foreach my $match (@numbered) {
        $matches->{"Subpattern Match $i"} = $match;
        $i++;
    }

    return $matches;
}
# Matches a /pattern/ literal with an optional trailing 'i' modifier;
# populates named captures 'regex' and 'modifiers'.
my $regex_re = qr/\/(?<regex>.+)\/(?<modifiers>i)?/;

# Pull (pattern, subject text, modifiers) out of the raw query string.
# Handles both "TEXT =~ /RE/" and "match|regex|regexp /RE/ TEXT" forms.
# NOTE: this deliberately relies on %+ persisting across statements -- a
# failed match leaves %+ untouched, so the checks below see the captures of
# whichever of the two match attempts succeeded.
sub extract_regex_text {
    my $query = shift;
    $query =~ /^(?<text>.+) =~ $regex_re$/;
    # If the first form didn't populate both captures, try the prefix form
    # (this expression is evaluated purely for its capture side effects).
    ($+{regex} && $+{text}) || ($query =~ /^(?:match\s*regexp?|regexp?)\s*$regex_re\s+(?<text>.+)$/);
    return unless defined $+{regex} && defined $+{text};
    my $modifiers = $+{modifiers} // '';
    return ($+{regex}, $+{text}, $modifiers);
}
# Sorted key list of a match-record hashref, giving a stable display order.
sub get_match_keys {
    my ($record) = @_;
    return sort keys %{$record};
}
# Main query handler: parse the query into (pattern, subject, modifiers),
# run the match, and build the structured instant answer for display.
handle query => sub {
    my $query = $_;

    # Bail out early unless the query parses and the pattern actually matches.
    my ($regexp, $str, $modifiers) = extract_regex_text($query) or return;
    my $matches = get_match_record($regexp, $str, $modifiers) or return;
    my @key_order = get_match_keys($matches);
    # An empty full match is not worth showing.
    return unless $matches->{'Full Match'} ne '';

    # First return value is the plain-text answer; structured_answer drives
    # the list/record template on the results page.
    return $matches,
        structured_answer => {
            data => {
                title => "Regular Expression Match",
                subtitle => "Match regular expression /$regexp/$modifiers on $str",
                record_data => $matches,
                record_keys => \@key_order,
            },
            meta => {
                signal => 'high',
            },
            templates => {
                group => 'list',
                options => {
                    content => 'record',
                },
                moreAt => 0,
            },
        };
};

1;
| lights0123/zeroclickinfo-goodies | lib/DDG/Goodie/Regexp.pm | Perl | apache-2.0 | 2,801 |
=head1 NAME
perldebtut - Perl debugging tutorial
=head1 DESCRIPTION
A (very) lightweight introduction in the use of the perl debugger, and a
pointer to existing, deeper sources of information on the subject of debugging
perl programs.
There's an extraordinary number of people out there who don't appear to know
anything about using the perl debugger, though they use the language every
day.
This is for them.
=head1 use strict
First of all, there's a few things you can do to make your life a lot more
straightforward when it comes to debugging perl programs, without using the
debugger at all. To demonstrate, here's a simple script, named "hello", with
a problem:
#!/usr/bin/perl
$var1 = 'Hello World'; # always wanted to do that :-)
$var2 = "$varl\n";
print $var2;
exit;
While this compiles and runs happily, it probably won't do what's expected,
namely it doesn't print "Hello World\n" at all; It will on the other hand do
exactly what it was told to do, computers being a bit that way inclined. That
is, it will print out a newline character, and you'll get what looks like a
blank line. It looks like there's 2 variables when (because of the typo)
there's really 3:
$var1 = 'Hello World';
$varl = undef;
$var2 = "\n";
To catch this kind of problem, we can force each variable to be declared
before use by pulling in the strict module, by putting 'use strict;' after the
first line of the script.
Now when you run it, perl complains about the 3 undeclared variables and we
get four error messages because one variable is referenced twice:
Global symbol "$var1" requires explicit package name at ./t1 line 4.
Global symbol "$var2" requires explicit package name at ./t1 line 5.
Global symbol "$varl" requires explicit package name at ./t1 line 5.
Global symbol "$var2" requires explicit package name at ./t1 line 7.
Execution of ./hello aborted due to compilation errors.
Luvverly! and to fix this we declare all variables explicitly and now our
script looks like this:
#!/usr/bin/perl
use strict;
my $var1 = 'Hello World';
my $varl = undef;
my $var2 = "$varl\n";
print $var2;
exit;
We then do (always a good idea) a syntax check before we try to run it again:
> perl -c hello
hello syntax OK
And now when we run it, we get "\n" still, but at least we know why. Just
getting this script to compile has exposed the '$varl' (with the letter 'l')
variable, and simply changing $varl to $var1 solves the problem.
=head1 Looking at data and -w and v
Ok, but how about when you want to really see your data, what's in that
dynamic variable, just before using it?
#!/usr/bin/perl
use strict;
my $key = 'welcome';
my %data = (
'this' => qw(that),
'tom' => qw(and jerry),
'welcome' => q(Hello World),
'zip' => q(welcome),
);
my @data = keys %data;
print "$data{$key}\n";
exit;
Looks OK, after it's been through the syntax check (perl -c scriptname), we
run it and all we get is a blank line again! Hmmmm.
One common debugging approach here, would be to liberally sprinkle a few print
statements, to add a check just before we print out our data, and another just
after:
print "All OK\n" if grep($key, keys %data);
print "$data{$key}\n";
print "done: '$data{$key}'\n";
And try again:
> perl data
All OK
done: ''
After much staring at the same piece of code and not seeing the wood for the
trees for some time, we get a cup of coffee and try another approach. That
is, we bring in the cavalry by giving perl the 'B<-d>' switch on the command
line:
> perl -d data
Default die handler restored.
Loading DB routines from perl5db.pl version 1.07
Editor support available.
Enter h or `h h' for help, or `man perldebug' for more help.
main::(./data:4): my $key = 'welcome';
Now, what we've done here is to launch the built-in perl debugger on our
script. It's stopped at the first line of executable code and is waiting for
input.
Before we go any further, you'll want to know how to quit the debugger: use
just the letter 'B<q>', not the words 'quit' or 'exit':
DB<1> q
>
That's it, you're back on home turf again.
=head1 help
Fire the debugger up again on your script and we'll look at the help menu.
There's a couple of ways of calling help: a simple 'B<h>' will get the summary
help list, 'B<|h>' (pipe-h) will pipe the help through your pager (which is
(probably 'more' or 'less'), and finally, 'B<h h>' (h-space-h) will give you
the entire help screen. Here is the summary page:
DB<1>h
List/search source lines: Control script execution:
l [ln|sub] List source code T Stack trace
- or . List previous/current line s [expr] Single step [in expr]
v [line] View around line n [expr] Next, steps over subs
f filename View source in file <CR/Enter> Repeat last n or s
/pattern/ ?patt? Search forw/backw r Return from subroutine
M Show module versions c [ln|sub] Continue until position
Debugger controls: L List break/watch/actions
o [...] Set debugger options t [expr] Toggle trace [trace expr]
<[<]|{[{]|>[>] [cmd] Do pre/post-prompt b [ln|event|sub] [cnd] Set breakpoint
! [N|pat] Redo a previous command B ln|* Delete a/all breakpoints
H [-num] Display last num commands a [ln] cmd Do cmd before line
= [a val] Define/list an alias A ln|* Delete a/all actions
h [db_cmd] Get help on command w expr Add a watch expression
h h Complete help page W expr|* Delete a/all watch exprs
|[|]db_cmd Send output to pager ![!] syscmd Run cmd in a subprocess
q or ^D Quit R Attempt a restart
Data Examination: expr Execute perl code, also see: s,n,t expr
x|m expr Evals expr in list context, dumps the result or lists methods.
p expr Print expression (uses script's current package).
S [[!]pat] List subroutine names [not] matching pattern
V [Pk [Vars]] List Variables in Package. Vars can be ~pattern or !pattern.
X [Vars] Same as "V current_package [Vars]".
y [n [Vars]] List lexicals in higher scope <n>. Vars same as V.
For more help, type h cmd_letter, or run man perldebug for all docs.
More confusing options than you can shake a big stick at! It's not as bad as
it looks and it's very useful to know more about all of it, and fun too!
There's a couple of useful ones to know about straight away. You wouldn't
think we're using any libraries at all at the moment, but 'B<M>' will show
which modules are currently loaded, and their version number, while 'B<m>'
will show the methods, and 'B<S>' shows all subroutines (by pattern) as
shown below. 'B<V>' and 'B<X>' show variables in the program by package
scope and can be constrained by pattern.
DB<2>S str
dumpvar::stringify
strict::bits
strict::import
strict::unimport
Using 'X' and cousins requires you not to use the type identifiers ($@%), just
the 'name':
DM<3>X ~err
FileHandle(stderr) => fileno(2)
Remember we're in our tiny program with a problem, we should have a look at
where we are, and what our data looks like. First of all let's view some code
at our present position (the first line of code in this case), via 'B<v>':
DB<4> v
1 #!/usr/bin/perl
2: use strict;
3
4==> my $key = 'welcome';
5: my %data = (
6 'this' => qw(that),
7 'tom' => qw(and jerry),
8 'welcome' => q(Hello World),
9 'zip' => q(welcome),
10 );
At line number 4 is a helpful pointer, that tells you where you are now. To
see more code, type 'v' again:
DB<4> v
8 'welcome' => q(Hello World),
9 'zip' => q(welcome),
10 );
11: my @data = keys %data;
12: print "All OK\n" if grep($key, keys %data);
13: print "$data{$key}\n";
14: print "done: '$data{$key}'\n";
15: exit;
And if you wanted to list line 5 again, type 'l 5', (note the space):
DB<4> l 5
5: my %data = (
In this case, there's not much to see, but of course normally there's pages of
stuff to wade through, and 'l' can be very useful. To reset your view to the
line we're about to execute, type a lone period '.':
DB<5> .
main::(./data_a:4): my $key = 'welcome';
The line shown is the one that is about to be executed B<next>, it hasn't
happened yet. So while we can print a variable with the letter 'B<p>', at
this point all we'd get is an empty (undefined) value back. What we need to
do is to step through the next executable statement with an 'B<s>':
DB<6> s
main::(./data_a:5): my %data = (
main::(./data_a:6): 'this' => qw(that),
main::(./data_a:7): 'tom' => qw(and jerry),
main::(./data_a:8): 'welcome' => q(Hello World),
main::(./data_a:9): 'zip' => q(welcome),
main::(./data_a:10): );
Now we can have a look at that first ($key) variable:
DB<7> p $key
welcome
line 13 is where the action is, so let's continue down to there via the letter
'B<c>', which by the way, inserts a 'one-time-only' breakpoint at the given
line or sub routine:
DB<8> c 13
All OK
main::(./data_a:13): print "$data{$key}\n";
We've gone past our check (where 'All OK' was printed) and have stopped just
before the meat of our task. We could try to print out a couple of variables
to see what is happening:
DB<9> p $data{$key}
Not much in there, lets have a look at our hash:
DB<10> p %data
Hello Worldziptomandwelcomejerrywelcomethisthat
DB<11> p keys %data
Hello Worldtomwelcomejerrythis
Well, this isn't very easy to read, and using the helpful manual (B<h h>), the
'B<x>' command looks promising:
DB<12> x %data
0 'Hello World'
1 'zip'
2 'tom'
3 'and'
4 'welcome'
5 undef
6 'jerry'
7 'welcome'
8 'this'
9 'that'
That's not much help, a couple of welcomes in there, but no indication of
which are keys, and which are values, it's just a listed array dump and, in
this case, not particularly helpful. The trick here, is to use a B<reference>
to the data structure:
DB<13> x \%data
0 HASH(0x8194bc4)
'Hello World' => 'zip'
'jerry' => 'welcome'
'this' => 'that'
'tom' => 'and'
'welcome' => undef
The reference is truly dumped and we can finally see what we're dealing with.
Our quoting was perfectly valid but wrong for our purposes, with 'and jerry'
being treated as 2 separate words rather than a phrase, thus throwing the
evenly paired hash structure out of alignment.
The 'B<-w>' switch would have told us about this, had we used it at the start,
and saved us a lot of trouble:
> perl -w data
Odd number of elements in hash assignment at ./data line 5.
We fix our quoting: 'tom' => q(and jerry), and run it again, this time we get
our expected output:
> perl -w data
Hello World
While we're here, take a closer look at the 'B<x>' command, it's really useful
and will merrily dump out nested references, complete objects, partial objects
- just about whatever you throw at it:
Let's make a quick object and x-plode it, first we'll start the debugger:
it wants some form of input from STDIN, so we give it something non-committal,
a zero:
> perl -de 0
Default die handler restored.
Loading DB routines from perl5db.pl version 1.07
Editor support available.
Enter h or `h h' for help, or `man perldebug' for more help.
main::(-e:1): 0
Now build an on-the-fly object over a couple of lines (note the backslash):
DB<1> $obj = bless({'unique_id'=>'123', 'attr'=> \
cont: {'col' => 'black', 'things' => [qw(this that etc)]}}, 'MY_class')
And let's have a look at it:
DB<2> x $obj
0 MY_class=HASH(0x828ad98)
'attr' => HASH(0x828ad68)
'col' => 'black'
'things' => ARRAY(0x828abb8)
0 'this'
1 'that'
2 'etc'
'unique_id' => 123
DB<3>
Useful, huh? You can eval nearly anything in there, and experiment with bits
of code or regexes until the cows come home:
DB<3> @data = qw(this that the other atheism leather theory scythe)
DB<4> p 'saw -> '.($cnt += map { print "\t:\t$_\n" } grep(/the/, sort @data))
atheism
leather
other
scythe
the
theory
saw -> 6
If you want to see the command History, type an 'B<H>':
DB<5> H
4: p 'saw -> '.($cnt += map { print "\t:\t$_\n" } grep(/the/, sort @data))
3: @data = qw(this that the other atheism leather theory scythe)
2: x $obj
1: $obj = bless({'unique_id'=>'123', 'attr'=>
{'col' => 'black', 'things' => [qw(this that etc)]}}, 'MY_class')
DB<5>
And if you want to repeat any previous command, use the exclamation: 'B<!>':
DB<5> !4
p 'saw -> '.($cnt += map { print "$_\n" } grep(/the/, sort @data))
atheism
leather
other
scythe
the
theory
saw -> 12
For more on references see L<perlref> and L<perlreftut>
=head1 Stepping through code
Here's a simple program which converts between Celsius and Fahrenheit, it too
has a problem:
#!/usr/bin/perl -w
use strict;
my $arg = $ARGV[0] || '-c20';
if ($arg =~ /^\-(c|f)((\-|\+)*\d+(\.\d+)*)$/) {
my ($deg, $num) = ($1, $2);
my ($in, $out) = ($num, $num);
if ($deg eq 'c') {
$deg = 'f';
$out = &c2f($num);
} else {
$deg = 'c';
$out = &f2c($num);
}
$out = sprintf('%0.2f', $out);
$out =~ s/^((\-|\+)*\d+)\.0+$/$1/;
print "$out $deg\n";
} else {
print "Usage: $0 -[c|f] num\n";
}
exit;
sub f2c {
my $f = shift;
my $c = 5 * $f - 32 / 9;
return $c;
}
sub c2f {
my $c = shift;
my $f = 9 * $c / 5 + 32;
return $f;
}
For some reason, the Fahrenheit to Celsius conversion fails to return the
expected output. This is what it does:
> temp -c0.72
33.30 f
> temp -f33.3
162.94 c
Not very consistent! We'll set a breakpoint in the code manually and run it
under the debugger to see what's going on. A breakpoint is a flag, to which
the debugger will run without interruption, when it reaches the breakpoint, it
will stop execution and offer a prompt for further interaction. In normal
use, these debugger commands are completely ignored, and they are safe - if a
little messy, to leave in production code.
my ($in, $out) = ($num, $num);
$DB::single=2; # insert at line 9!
if ($deg eq 'c')
...
> perl -d temp -f33.3
Default die handler restored.
Loading DB routines from perl5db.pl version 1.07
Editor support available.
Enter h or `h h' for help, or `man perldebug' for more help.
main::(temp:4): my $arg = $ARGV[0] || '-c100';
We'll simply continue down to our pre-set breakpoint with a 'B<c>':
DB<1> c
main::(temp:10): if ($deg eq 'c') {
Followed by a view command to see where we are:
DB<1> v
7: my ($deg, $num) = ($1, $2);
8: my ($in, $out) = ($num, $num);
9: $DB::single=2;
10==> if ($deg eq 'c') {
11: $deg = 'f';
12: $out = &c2f($num);
13 } else {
14: $deg = 'c';
15: $out = &f2c($num);
16 }
And a print to show what values we're currently using:
DB<1> p $deg, $num
f33.3
We can put another break point on any line beginning with a colon, we'll use
line 17 as that's just as we come out of the subroutine, and we'd like to
pause there later on:
DB<2> b 17
There's no feedback from this, but you can see what breakpoints are set by
using the list 'L' command:
DB<3> L
temp:
17: print "$out $deg\n";
break if (1)
Note that to delete a breakpoint you use 'B'.
Now we'll continue down into our subroutine, this time rather than by line
number, we'll use the subroutine name, followed by the now familiar 'v':
DB<3> c f2c
main::f2c(temp:30): my $f = shift;
DB<4> v
24: exit;
25
26 sub f2c {
27==> my $f = shift;
28: my $c = 5 * $f - 32 / 9;
29: return $c;
30 }
31
32 sub c2f {
33: my $c = shift;
Note that if there was a subroutine call between us and line 29, and we wanted
to B<single-step> through it, we could use the 'B<s>' command, and to step
over it we would use 'B<n>' which would execute the sub, but not descend into
it for inspection. In this case though, we simply continue down to line 29:
DB<4> c 29
main::f2c(temp:29): return $c;
And have a look at the return value:
DB<5> p $c
162.944444444444
This is not the right answer at all, but the sum looks correct. I wonder if
it's anything to do with operator precedence? We'll try a couple of other
possibilities with our sum:
DB<6> p (5 * $f - 32 / 9)
162.944444444444
DB<7> p 5 * $f - (32 / 9)
162.944444444444
DB<8> p (5 * $f) - 32 / 9
162.944444444444
DB<9> p 5 * ($f - 32) / 9
0.722222222222221
:-) that's more like it! Ok, now we can set our return variable and we'll
return out of the sub with an 'r':
DB<10> $c = 5 * ($f - 32) / 9
DB<11> r
scalar context return from main::f2c: 0.722222222222221
Looks good, let's just continue off the end of the script:
DB<12> c
0.72 c
Debugged program terminated. Use q to quit or R to restart,
use O inhibit_exit to avoid stopping after program termination,
h q, h R or h O to get additional info.
A quick fix to the offending line (insert the missing parentheses) in the
actual program and we're finished.
=head1 Placeholder for a, w, t, T
Actions, watch variables, stack traces etc.: on the TODO list.
a
w
t
T
=head1 REGULAR EXPRESSIONS
Ever wanted to know what a regex looked like? You'll need perl compiled with
the DEBUGGING flag for this one:
> perl -Dr -e '/^pe(a)*rl$/i'
Compiling REx `^pe(a)*rl$'
size 17 first at 2
rarest char
at 0
1: BOL(2)
2: EXACTF <pe>(4)
4: CURLYN[1] {0,32767}(14)
6: NOTHING(8)
8: EXACTF <a>(0)
12: WHILEM(0)
13: NOTHING(14)
14: EXACTF <rl>(16)
16: EOL(17)
17: END(0)
floating `'$ at 4..2147483647 (checking floating) stclass `EXACTF <pe>'
anchored(BOL) minlen 4
Omitting $` $& $' support.
EXECUTING...
Freeing REx: `^pe(a)*rl$'
Did you really want to know? :-)
For more gory details on getting regular expressions to work, have a look at
L<perlre>, L<perlretut>, and to decode the mysterious labels (BOL and CURLYN,
etc. above), see L<perldebguts>.
=head1 OUTPUT TIPS
To get all the output from your error log, and not miss any messages via
helpful operating system buffering, insert a line like this, at the start of
your script:
$|=1;
To watch the tail of a dynamically growing logfile, (from the command line):
tail -f $error_log
Wrapping all die calls in a handler routine can be useful to see how, and from
where, they're being called, L<perlvar> has more information:
BEGIN { $SIG{__DIE__} = sub { require Carp; Carp::confess(@_) } }
Various useful techniques for the redirection of STDOUT and STDERR filehandles
are explained in L<perlopentut> and L<perlfaq8>.
=head1 CGI
Just a quick hint here for all those CGI programmers who can't figure out how
on earth to get past that 'waiting for input' prompt, when running their CGI
script from the command-line, try something like this:
> perl -d my_cgi.pl -nodebug
Of course L<CGI> and L<perlfaq9> will tell you more.
=head1 GUIs
The command line interface is tightly integrated with an B<emacs> extension
and there's a B<vi> interface too.
You don't have to do this all on the command line, though, there are a few GUI
options out there. The nice thing about these is you can wave a mouse over a
variable and a dump of its data will appear in an appropriate window, or in a
popup balloon, no more tiresome typing of 'x $varname' :-)
In particular have a hunt around for the following:
B<ptkdb> perlTK based wrapper for the built-in debugger
B<ddd> data display debugger
B<PerlDevKit> and B<PerlBuilder> are NT specific
NB. (more info on these and others would be appreciated).
=head1 SUMMARY
We've seen how to encourage good coding practices with B<use strict> and
B<-w>. We can run the perl debugger B<perl -d scriptname> to inspect your
data from within the perl debugger with the B<p> and B<x> commands. You can
walk through your code, set breakpoints with B<b> and step through that code
with B<s> or B<n>, continue with B<c> and return from a sub with B<r>. Fairly
intuitive stuff when you get down to it.
There is of course lots more to find out about, this has just scratched the
surface. The best way to learn more is to use perldoc to find out more about
the language, to read the on-line help (L<perldebug> is probably the next
place to go), and of course, experiment.
=head1 SEE ALSO
L<perldebug>,
L<perldebguts>,
L<perldiag>,
L<perlrun>
=head1 AUTHOR
Richard Foley <richard.foley@rfi.net> Copyright (c) 2000
=head1 CONTRIBUTORS
Various people have made helpful suggestions and contributions, in particular:
Ronald J Kimball <rjk@linguist.dartmouth.edu>
Hugo van der Sanden <hv@crypt0.demon.co.uk>
Peter Scott <Peter@PSDT.com>
| liuyangning/WX_web | xampp/perl/lib/pods/perldebtut.pod | Perl | mit | 21,285 |
#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# sha256/512_block procedure for x86_64.
#
# 40% improvement over compiler-generated code on Opteron. On EM64T
# sha256 was observed to run >80% faster and sha512 - >40%. No magical
# tricks, just straight implementation... I really wonder why gcc
# [being armed with inline assembler] fails to generate as fast code.
# The only thing which is cool about this module is that it's very
# same instruction sequence used for both SHA-256 and SHA-512. In
# former case the instructions operate on 32-bit operands, while in
# latter - on 64-bit ones. All I had to do is to get one flavor right,
# the other one passed the test right away:-)
#
# sha256_block runs in ~1005 cycles on Opteron, which gives you
# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
# frequency in GHz. sha512_block runs in ~1275 cycles, which results
# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
# Well, if you compare it to IA-64 implementation, which maintains
# X[16] in register bank[!], tends to 4 instructions per CPU clock
# cycle and runs in 1003 cycles, 1275 is very good result for 3-way
# issue Opteron pipeline and X[16] maintained in memory. So that *if*
# there is a way to improve it, *then* the only way would be to try to
# offload X[16] updates to SSE unit, but that would require "deeper"
# loop unroll, which in turn would naturally cause size blow-up, not
# to mention increased complexity! And once again, only *if* it's
# actually possible to noticeably improve overall ILP, instruction
# level parallelism, on a given CPU implementation in this case.
#
# Special note on Intel EM64T. While Opteron CPU exhibits perfect
# performance ratio of 1.5 between 64- and 32-bit flavors [see above],
# [currently available] EM64T CPUs apparently are far from it. On the
# contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
# sha256_block:-( This is presumably because 64-bit shifts/rotates
# apparently are not atomic instructions, but implemented in microcode.
#
# May 2012.
#
# Optimization including one of Pavel Semjanov's ideas, alternative
# Maj, resulted in >=5% improvement on most CPUs, +20% SHA256 and
# unfortunately -2% SHA512 on P4 [which nobody should care about
# that much].
#
# June 2012.
#
# Add SIMD code paths, see below for improvement coefficients. SSSE3
# code path was not attempted for SHA512, because improvement is not
# estimated to be high enough, noticeably less than 9%, to justify
# the effort, not on pre-AVX processors. [Obviously with exclusion
# for VIA Nano, but it has SHA512 instruction that is faster and
# should be used instead.] For reference, corresponding estimated
# upper limit for improvement for SSSE3 SHA256 is 28%. The fact that
# higher coefficients are observed on VIA Nano and Bulldozer has more
# to do with specifics of their architecture [which is topic for
# separate discussion].
#
# November 2012.
#
# Add AVX2 code path. Two consecutive input blocks are loaded to
# 256-bit %ymm registers, with data from first block to least
# significant 128-bit halves and data from second to most significant.
# The data is then processed with same SIMD instruction sequence as
# for AVX, but with %ymm as operands. Side effect is increased stack
# frame, 448 additional bytes in SHA256 and 1152 in SHA512, and 1.2KB
# code size increase.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
######################################################################
# Current performance in cycles per processed byte (less is better):
#
# SHA256 SSSE3 AVX/XOP(*) SHA512 AVX/XOP(*)
#
# AMD K8 14.9 - - 9.57 -
# P4 17.3 - - 30.8 -
# Core 2 15.6 13.8(+13%) - 9.97 -
# Westmere 14.8 12.3(+19%) - 9.58 -
# Sandy Bridge 17.4 14.2(+23%) 11.6(+50%(**)) 11.2 8.10(+38%(**))
# Ivy Bridge 12.6 10.5(+20%) 10.3(+22%) 8.17 7.22(+13%)
# Haswell 12.2 9.28(+31%) 7.80(+56%) 7.66 5.40(+42%)
# Bulldozer 21.1 13.6(+54%) 13.6(+54%(***)) 13.5 8.58(+57%)
# VIA Nano 23.0 16.5(+39%) - 14.7 -
# Atom 23.0 18.9(+22%) - 14.7 -
# Silvermont 27.4 20.6(+33%) - 17.5 -
#
# (*) whichever best applicable;
# (**) switch from ror to shrd stands for fair share of improvement;
# (***) execution time is fully determined by remaining integer-only
# part, body_00_15; reducing the amount of SIMD instructions
# below certain limit makes no difference/sense; to conserve
# space SHA256 XOP code path is therefore omitted;
# Command-line handling shared by all perlasm scripts: first argument is
# the output "flavour" (elf, macosx, mingw64, nasm, masm, ...), second is
# the output file; a single dotted argument is taken as the output path
# with no explicit flavour.
$flavour = shift;
$output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

# Win64 targets need different calling convention / unwind handling.
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the x86_64-xlate.pl translator relative to this script's path.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Probe the assembler to decide how much SIMD code it can digest:
# $avx = 0 (no AVX), 1 (AVX), 2 (AVX2).  Tried in order: GNU as,
# nasm, MS ml64, then clang's integrated assembler.
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}

# SHA extension code path toggle; with it disabled, cap $avx at 1
# (the AVX2 path is only emitted together with the shaext shortcut).
$shaext=0;	### set to zero if compiling for 1.0.1
$avx=1 if (!$shaext && $avx);

# Pipe all generated code through the translator; it converts the
# "perlasm" dialect into the requested flavour's assembler syntax.
open OUT,"| \"$^X\" $xlate $flavour";
*STDOUT=*OUT;
# Select SHA-512 vs SHA-256 parameters from the requested output name.
# The same round code serves both: $SZ is the word size in bytes,
# @Sigma0/@Sigma1 are the big-sigma rotate amounts and @sigma0/@sigma1
# the message-schedule rotate/rotate/shift amounts (per the SHA spec),
# and @ROT maps the eight working variables a..h onto registers.
if ($output =~ /512/) {
	$func="sha512_block_data_order";
	$TABLE="K512";
	$SZ=8;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
					"%r8", "%r9", "%r10","%r11");
	($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=(1, 8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
} else {
	$func="sha256_block_data_order";
	$TABLE="K256";
	$SZ=4;
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
					"%r8d","%r9d","%r10d","%r11d");
	($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
}

# Argument registers (System V AMD64 ABI) and round-constant pointer.
$ctx="%rdi";	# 1st arg, zapped by $a3
$inp="%rsi";	# 2nd arg
$Tbl="%rbp";

# Named stack slots above the 16-word message schedule area; $framesz
# is the total frame: 16 schedule words plus these four saved 8-byte
# values (ctx, inp, end pointer, caller's %rsp).
$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";
# Emit one SHA round (rounds 0..15 use the message word already loaded
# in $T1).  Instruction order is hand-scheduled for 3-way issue; the
# Sigma rotations are built incrementally from partial ror amounts.
# The Maj() computation is shared across rounds via $a2/$a3: this round
# computes a^b into $a2, which the NEXT round consumes as b^c — hence
# the ($a2,$a3) swap at the bottom.  $Tbl advances by $STRIDE, which is
# $SZ plus 16 extra every 16 bytes because each K constant is stored
# twice in the table (duplicated for the SIMD code paths).
sub ROUND_00_15()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
  my $STRIDE=$SZ;
     $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));

$code.=<<___;
	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
	mov	$f,$a2

	xor	$e,$a0
	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
	xor	$g,$a2			# f^g

	mov	$T1,`$SZ*($i&0xf)`(%rsp)
	xor	$a,$a1
	and	$e,$a2			# (f^g)&e

	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
	add	$h,$T1			# T1+=h
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g

	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
	xor	$e,$a0
	add	$a2,$T1			# T1+=Ch(e,f,g)

	mov	$a,$a2
	add	($Tbl),$T1		# T1+=K[round]
	xor	$a,$a1

	xor	$b,$a2			# a^b, b^c in next round
	ror	\$$Sigma1[0],$a0	# Sigma1(e)
	mov	$b,$h

	and	$a2,$a3
	ror	\$$Sigma0[0],$a1	# Sigma0(a)
	add	$a0,$T1			# T1+=Sigma1(e)

	xor	$a3,$h			# h=Maj(a,b,c)=Ch(a^b,c,b)
	add	$T1,$d			# d+=T1
	add	$T1,$h			# h+=T1

	lea	$STRIDE($Tbl),$Tbl	# round++
___
# The h+=Sigma0(a) of the last round of a 16-round batch is deferred
# ("modulo-scheduled") into the caller's next batch, so skip it here.
$code.=<<___ if ($i<15);
	add	$a1,$h			# h+=Sigma0(a)
___
	($a2,$a3) = ($a3,$a2);
}
# Emit one SHA round for rounds 16+: first expand the message schedule
# in place on the stack — X[i] += sigma0(X[i+1]) + sigma1(X[i+14]) +
# X[i+9], all indices mod 16 — leaving the new word in $T1, then fall
# through to the common round body in ROUND_00_15.
sub ROUND_16_XX()
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

$code.=<<___;
	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a2

	mov	$a0,$T1
	ror	\$`$sigma0[1]-$sigma0[0]`,$a0
	add	$a1,$a			# modulo-scheduled h+=Sigma0(a)
	mov	$a2,$a1
	ror	\$`$sigma1[1]-$sigma1[0]`,$a2

	xor	$T1,$a0
	shr	\$$sigma0[2],$T1
	ror	\$$sigma0[0],$a0
	xor	$a1,$a2
	shr	\$$sigma1[2],$a1

	ror	\$$sigma1[0],$a2
	xor	$a0,$T1			# sigma0(X[(i+1)&0xf])
	xor	$a1,$a2			# sigma1(X[(i+14)&0xf])
	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1

	add	`$SZ*($i&0xf)`(%rsp),$T1
	mov	$e,$a0
	add	$a2,$T1
	mov	$a,$a1
___
	&ROUND_00_15(@_);
}
# ----------------------------------------------------------------------
# Generate the integer-only (non-SIMD) $func.  The entry sequence reads
# OPENSSL_ia32cap_P and dispatches to the SHA-ext / XOP / AVX2 / AVX /
# SSSE3 shortcuts when the corresponding code paths were emitted.
# ----------------------------------------------------------------------
$code=<<___;
.text

.extern	OPENSSL_ia32cap_P
.globl	$func
.type	$func,\@function,3
.align	16
$func:
___
$code.=<<___ if ($SZ==4 || $avx);
	lea	OPENSSL_ia32cap_P(%rip),%r11
	mov	0(%r11),%r9d
	mov	4(%r11),%r10d
	mov	8(%r11),%r11d
___
$code.=<<___ if ($SZ==4 && $shaext);
	test	\$`1<<29`,%r11d		# check for SHA
	jnz	_shaext_shortcut
___
$code.=<<___ if ($avx && $SZ==8);
	test	\$`1<<11`,%r10d		# check for XOP
	jnz	.Lxop_shortcut
___
$code.=<<___ if ($avx>1);
	and	\$`1<<8|1<<5|1<<3`,%r11d	# check for BMI2+AVX2+BMI1
	cmp	\$`1<<8|1<<5|1<<3`,%r11d
	je	.Lavx2_shortcut
___
$code.=<<___ if ($avx);
	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
	and	\$`1<<28|1<<9`,%r10d	# mask AVX and SSSE3 bits
	or	%r9d,%r10d
	cmp	\$`1<<28|1<<9|1<<30`,%r10d
	je	.Lavx_shortcut
___
$code.=<<___ if ($SZ==4);
	test	\$`1<<9`,%r10d
	jnz	.Lssse3_shortcut
___
# Prologue: save callee-saved registers, carve out an aligned frame
# holding the 16-word schedule plus saved ctx/inp/end/%rsp slots.
$code.=<<___;
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$$framesz,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arh
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
.Lprologue:

	mov	$SZ*0($ctx),$A
	mov	$SZ*1($ctx),$B
	mov	$SZ*2($ctx),$C
	mov	$SZ*3($ctx),$D
	mov	$SZ*4($ctx),$E
	mov	$SZ*5($ctx),$F
	mov	$SZ*6($ctx),$G
	mov	$SZ*7($ctx),$H
	jmp	.Lloop

.align	16
.Lloop:
	mov	$B,$a3
	lea	$TABLE(%rip),$Tbl
	xor	$C,$a3			# magic
___
	# Rounds 0..15: load+byte-swap each input word, then the common
	# round body; @ROT is rotated so a..h shift by one each round.
	for($i=0;$i<16;$i++) {
		$code.="	mov	$SZ*$i($inp),$T1\n";
		$code.="	mov	@ROT[4],$a0\n";
		$code.="	mov	@ROT[0],$a1\n";
		$code.="	bswap	$T1\n";
		&ROUND_00_15($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}
$code.=<<___;
	jmp	.Lrounds_16_xx
.align	16
.Lrounds_16_xx:
___
	# Rounds 16+: emit one 16-round batch with schedule expansion;
	# the batch loops until the end-of-table sentinel is reached.
	for(;$i<32;$i++) {
		&ROUND_16_XX($i,@ROT);
		unshift(@ROT,pop(@ROT));
	}

$code.=<<___;
	cmpb	\$0,`$SZ-1`($Tbl)
	jnz	.Lrounds_16_xx

	mov	$_ctx,$ctx
	add	$a1,$A			# modulo-scheduled h+=Sigma0(a)
	lea	16*$SZ($inp),$inp

	add	$SZ*0($ctx),$A
	add	$SZ*1($ctx),$B
	add	$SZ*2($ctx),$C
	add	$SZ*3($ctx),$D
	add	$SZ*4($ctx),$E
	add	$SZ*5($ctx),$F
	add	$SZ*6($ctx),$G
	add	$SZ*7($ctx),$H

	cmp	$_end,$inp

	mov	$A,$SZ*0($ctx)
	mov	$B,$SZ*1($ctx)
	mov	$C,$SZ*2($ctx)
	mov	$D,$SZ*3($ctx)
	mov	$E,$SZ*4($ctx)
	mov	$F,$SZ*5($ctx)
	mov	$G,$SZ*6($ctx)
	mov	$H,$SZ*7($ctx)
	jb	.Lloop

	mov	$_rsp,%rsi
	mov	(%rsi),%r15
	mov	8(%rsi),%r14
	mov	16(%rsi),%r13
	mov	24(%rsi),%r12
	mov	32(%rsi),%rbp
	mov	40(%rsi),%rbx
	lea	48(%rsi),%rsp
.Lepilogue:
	ret
.size	$func,.-$func
___
# ----------------------------------------------------------------------
# Round-constant tables.  Every constant appears TWICE in a row so the
# SIMD paths can load a full vector of identical K values; the scalar
# code steps over the duplicate via $STRIDE in ROUND_00_15.  Appended
# after the K constants are the pshufb byte-swap/shuffle masks used by
# the SIMD code (addressed as $TABLE+`$SZ*2*$rounds`+...).
# ----------------------------------------------------------------------
if ($SZ==4) {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2

	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
	.asciz	"SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
} else {
$code.=<<___;
.align	64
.type	$TABLE,\@object
$TABLE:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
	.asciz	"SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
}
######################################################################
# SIMD code paths
#
if ($SZ==4 && $shaext) {{{
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
# State is kept in the packed ABEF/CDGH layout required by
# sha256rnds2; message words are expanded with sha256msg1/sha256msg2
# plus one manual palignr/paddd step per quartet.  The loop body
# interleaves rounds 0..63 four at a time, decrementing $num (block
# count) once per iteration.
my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");

my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
my @MSG=map("%xmm$_",(3..6));

$code.=<<___;
.type	sha256_block_data_order_shaext,\@function,3
.align	64
sha256_block_data_order_shaext:
_shaext_shortcut:
___
# Win64: xmm6-xmm10 are callee-saved and spilled around the body.
# NOTE(review): the movaps spills below are %rax-relative, but no
# visible instruction in this path initializes %rax first — confirm
# against upstream before relying on the win64 variant.
$code.=<<___ if ($win64);
	lea	`-8-5*16`(%rsp),%rsp
	movaps	%xmm6,-8-5*16(%rax)
	movaps	%xmm7,-8-4*16(%rax)
	movaps	%xmm8,-8-3*16(%rax)
	movaps	%xmm9,-8-2*16(%rax)
	movaps	%xmm10,-8-1*16(%rax)
.Lprologue_shaext:
___
# Load the hash state and permute DCBA/HGFE into the ABEF/CDGH pairs
# the sha256rnds2 instruction expects.
$code.=<<___;
	lea	K256+0x80(%rip),$Tbl
	movdqu	($ctx),$ABEF		# DCBA
	movdqu	16($ctx),$CDGH		# HGFE
	movdqa	0x200-0x80($Tbl),$TMP	# byte swap mask

	pshufd	\$0x1b,$ABEF,$Wi	# ABCD
	pshufd	\$0xb1,$ABEF,$ABEF	# CDAB
	pshufd	\$0x1b,$CDGH,$CDGH	# EFGH
	movdqa	$TMP,$BSWAP		# offload
	palignr	\$8,$CDGH,$ABEF		# ABEF
	punpcklqdq	$Wi,$CDGH	# CDGH
	jmp	.Loop_shaext

.align	16
.Loop_shaext:
	movdqu	($inp),@MSG[0]
	movdqu	0x10($inp),@MSG[1]
	movdqu	0x20($inp),@MSG[2]
	pshufb	$TMP,@MSG[0]
	movdqu	0x30($inp),@MSG[3]

	movdqa	0*32-0x80($Tbl),$Wi
	paddd	@MSG[0],$Wi
	pshufb	$TMP,@MSG[1]
	movdqa	$CDGH,$CDGH_SAVE	# offload
	sha256rnds2	$ABEF,$CDGH	# 0-3
	pshufd	\$0x0e,$Wi,$Wi
	nop
	movdqa	$ABEF,$ABEF_SAVE	# offload
	sha256rnds2	$CDGH,$ABEF

	movdqa	1*32-0x80($Tbl),$Wi
	paddd	@MSG[1],$Wi
	pshufb	$TMP,@MSG[2]
	sha256rnds2	$ABEF,$CDGH	# 4-7
	pshufd	\$0x0e,$Wi,$Wi
	lea	0x40($inp),$inp
	sha256msg1	@MSG[1],@MSG[0]
	sha256rnds2	$CDGH,$ABEF

	movdqa	2*32-0x80($Tbl),$Wi
	paddd	@MSG[2],$Wi
	pshufb	$TMP,@MSG[3]
	sha256rnds2	$ABEF,$CDGH	# 8-11
	pshufd	\$0x0e,$Wi,$Wi
	movdqa	@MSG[3],$TMP
	palignr	\$4,@MSG[2],$TMP
	nop
	paddd	$TMP,@MSG[0]
	sha256msg1	@MSG[2],@MSG[1]
	sha256rnds2	$CDGH,$ABEF

	movdqa	3*32-0x80($Tbl),$Wi
	paddd	@MSG[3],$Wi
	sha256msg2	@MSG[3],@MSG[0]
	sha256rnds2	$ABEF,$CDGH	# 12-15
	pshufd	\$0x0e,$Wi,$Wi
	movdqa	@MSG[0],$TMP
	palignr	\$4,@MSG[3],$TMP
	nop
	paddd	$TMP,@MSG[1]
	sha256msg1	@MSG[3],@MSG[2]
	sha256rnds2	$CDGH,$ABEF
___
# Rounds 16..51: identical 4-round groups; @MSG is rotated so the
# oldest vector is always @MSG[0].
for($i=4;$i<16-3;$i++) {
$code.=<<___;
	movdqa	$i*32-0x80($Tbl),$Wi
	paddd	@MSG[0],$Wi
	sha256msg2	@MSG[0],@MSG[1]
	sha256rnds2	$ABEF,$CDGH	# 16-19...
	pshufd	\$0x0e,$Wi,$Wi
	movdqa	@MSG[1],$TMP
	palignr	\$4,@MSG[0],$TMP
	nop
	paddd	$TMP,@MSG[2]
	sha256msg1	@MSG[0],@MSG[3]
	sha256rnds2	$CDGH,$ABEF
___
	push(@MSG,shift(@MSG));
}
# Final rounds 52..63, loop control, and un-permuting the state back
# to the in-memory DCBA/HGFE order.
$code.=<<___;
	movdqa	13*32-0x80($Tbl),$Wi
	paddd	@MSG[0],$Wi
	sha256msg2	@MSG[0],@MSG[1]
	sha256rnds2	$ABEF,$CDGH	# 52-55
	pshufd	\$0x0e,$Wi,$Wi
	movdqa	@MSG[1],$TMP
	palignr	\$4,@MSG[0],$TMP
	sha256rnds2	$CDGH,$ABEF
	paddd	$TMP,@MSG[2]

	movdqa	14*32-0x80($Tbl),$Wi
	paddd	@MSG[1],$Wi
	sha256rnds2	$ABEF,$CDGH	# 56-59
	pshufd	\$0x0e,$Wi,$Wi
	sha256msg2	@MSG[1],@MSG[2]
	movdqa	$BSWAP,$TMP
	sha256rnds2	$CDGH,$ABEF

	movdqa	15*32-0x80($Tbl),$Wi
	paddd	@MSG[2],$Wi
	nop
	sha256rnds2	$ABEF,$CDGH	# 60-63
	pshufd	\$0x0e,$Wi,$Wi
	dec	$num
	nop
	sha256rnds2	$CDGH,$ABEF

	paddd	$CDGH_SAVE,$CDGH
	paddd	$ABEF_SAVE,$ABEF
	jnz	.Loop_shaext

	pshufd	\$0xb1,$CDGH,$CDGH	# DCHG
	pshufd	\$0x1b,$ABEF,$TMP	# FEBA
	pshufd	\$0xb1,$ABEF,$ABEF	# BAFE
	punpckhqdq	$CDGH,$ABEF	# DCBA
	palignr	\$8,$TMP,$CDGH		# HGFE

	movdqu	$ABEF,($ctx)
	movdqu	$CDGH,16($ctx)
___
$code.=<<___ if ($win64);
	movaps	-8-5*16(%rax),%xmm6
	movaps	-8-4*16(%rax),%xmm7
	movaps	-8-3*16(%rax),%xmm8
	movaps	-8-2*16(%rax),%xmm9
	movaps	-8-1*16(%rax),%xmm10
	mov	%rax,%rsp
.Lepilogue_shaext:
___
$code.=<<___;
	ret
.size	sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
___
}}}
{{{
# SIMD paths reuse the scalar $T1 register under the name $a4, and
# re-declare a..h as lexicals rebound per-round by body_00_15.
my $a4=$T1;
my ($a,$b,$c,$d,$e,$f,$g,$h);
# Catch-all for otherwise-undefined sub calls: treats the called name
# as an instruction mnemonic and appends one line of assembly to the
# global $code.  The last argument becomes the first operand — with a
# '$' prefix when it is numeric, i.e. an immediate — and the remaining
# arguments follow in reverse (AT&T) order.
sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{
    my $mnemonic = $AUTOLOAD;
    $mnemonic =~ s/^.*:://;			# strip package qualifier
    my $last = pop;
    if ($last*1 eq $last) {			# numeric => immediate
	$last = "\$$last";
    }
    my $operands = join(',', $last, reverse @_);
    $code .= "\t$mnemonic\t$operands\n";
}
# Return one SHA round as a list of instruction-emitting code strings
# (each later eval'ed, driving AUTOLOAD above).  The SIMD paths
# interleave these scalar-round snippets with vector instructions.
# Same dataflow as ROUND_00_15, with X[i]+K[i] pre-summed on the
# stack; the trailing string rotates @ROT and advances $i.
sub body_00_15 () {
	(
	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.

	'&ror	($a0,$Sigma1[2]-$Sigma1[1])',
	'&mov	($a,$a1)',
	'&mov	($a4,$f)',

	'&ror	($a1,$Sigma0[2]-$Sigma0[1])',
	'&xor	($a0,$e)',
	'&xor	($a4,$g)',			# f^g

	'&ror	($a0,$Sigma1[1]-$Sigma1[0])',
	'&xor	($a1,$a)',
	'&and	($a4,$e)',			# (f^g)&e

	'&xor	($a0,$e)',
	'&add	($h,$SZ*($i&15)."(%rsp)")',	# h+=X[i]+K[i]
	'&mov	($a2,$a)',

	'&xor	($a4,$g)',			# Ch(e,f,g)=((f^g)&e)^g
	'&ror	($a1,$Sigma0[1]-$Sigma0[0])',
	'&xor	($a2,$b)',			# a^b, b^c in next round

	'&add	($h,$a4)',			# h+=Ch(e,f,g)
	'&ror	($a0,$Sigma1[0])',		# Sigma1(e)
	'&and	($a3,$a2)',			# (b^c)&(a^b)

	'&xor	($a1,$a)',
	'&add	($h,$a0)',			# h+=Sigma1(e)
	'&xor	($a3,$b)',			# Maj(a,b,c)=Ch(a^b,c,b)

	'&ror	($a1,$Sigma0[0])',		# Sigma0(a)
	'&add	($d,$h)',			# d+=h
	'&add	($h,$a3)',			# h+=Maj(a,b,c)

	'&mov	($a0,$d)',
	'&add	($a1,$h);'.			# h+=Sigma0(a)

	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
	);
}
######################################################################
# SSSE3 code path
#
if ($SZ==4) { # SHA256 only
# SSSE3 SHA-256 path: four xmm registers hold the 16-word message
# schedule (@X), $t0..$t3 are scratch, $t4/$t5 the sigma1 shuffle
# masks.  Frame layout matches the scalar version plus xmm spill
# space on win64.
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));

$code.=<<___;
.type	${func}_ssse3,\@function,3
.align	64
${func}_ssse3:
.Lssse3_shortcut:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*4`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arh
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
___
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
___
$code.=<<___;
.Lprologue_ssse3:

	mov	$SZ*0($ctx),$A
	mov	$SZ*1($ctx),$B
	mov	$SZ*2($ctx),$C
	mov	$SZ*3($ctx),$D
	mov	$SZ*4($ctx),$E
	mov	$SZ*5($ctx),$F
	mov	$SZ*6($ctx),$G
	mov	$SZ*7($ctx),$H
___
# Per block: byte-swap the 64 input bytes into @X, pre-add the first
# 16 K constants, and park the four sums on the stack for the scalar
# round snippets to consume.
$code.=<<___;
	#movdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
	#movdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
	jmp	.Lloop_ssse3
.align	16
.Lloop_ssse3:
	movdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	movdqu	0x00($inp),@X[0]
	movdqu	0x10($inp),@X[1]
	movdqu	0x20($inp),@X[2]
	pshufb	$t3,@X[0]
	movdqu	0x30($inp),@X[3]
	lea	$TABLE(%rip),$Tbl
	pshufb	$t3,@X[1]
	movdqa	0x00($Tbl),$t0
	movdqa	0x20($Tbl),$t1
	pshufb	$t3,@X[2]
	paddd	@X[0],$t0
	movdqa	0x40($Tbl),$t2
	pshufb	$t3,@X[3]
	movdqa	0x60($Tbl),$t3
	paddd	@X[1],$t1
	paddd	@X[2],$t2
	paddd	@X[3],$t3
	movdqa	$t0,0x00(%rsp)
	mov	$A,$a1
	movdqa	$t1,0x10(%rsp)
	mov	$B,$a3
	movdqa	$t2,0x20(%rsp)
	xor	$C,$a3			# magic
	movdqa	$t3,0x30(%rsp)
	mov	$E,$a0
	jmp	.Lssse3_00_47

.align	16
.Lssse3_00_47:
	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
___
# Xupdate_256_SSSE3() — SSSE3 message-schedule step for SHA-256.
#
# Returns the list of instruction snippets that advance the message
# schedule by four 32-bit words: @X[0] = sigma1(X[14..17]) + X[9..12] +
# sigma0(X[1..4]) + X[0..3], updated in place.  Snippets ending in ';'
# are string-concatenated with the next one (emitted back-to-back);
# the rest are interleaved with round-body instructions by the caller.
# Uses xmm temporaries $t0..$t5 and the file-scope @sigma0/@sigma1 shift
# constants; $j and $Tbl in the final load are resolved at eval time.
sub Xupdate_256_SSSE3 () {
	(
	'&movdqa	($t0,@X[1]);',
	'&movdqa	($t3,@X[3])',
	'&palignr	($t0,@X[0],$SZ)',	# X[1..4]
	'&palignr	($t3,@X[2],$SZ);',	# X[9..12]
	'&movdqa	($t1,$t0)',
	'&movdqa	($t2,$t0);',
	'&psrld	($t0,$sigma0[2])',
	'&paddd	(@X[0],$t3);',		# X[0..3] += X[9..12]
	'&psrld	($t2,$sigma0[0])',
	'&pshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&pslld	($t1,8*$SZ-$sigma0[1]);'.
	'&pxor	($t0,$t2)',
	'&psrld	($t2,$sigma0[1]-$sigma0[0]);'.
	'&pxor	($t0,$t1)',
	'&pslld	($t1,$sigma0[1]-$sigma0[0]);'.
	'&pxor	($t0,$t2);',
	'&movdqa	($t2,$t3)',
	'&pxor	($t0,$t1);',		# sigma0(X[1..4])
	'&psrld	($t3,$sigma1[2])',
	'&paddd	(@X[0],$t0);',		# X[0..3] += sigma0(X[1..4])
	'&psrlq	($t2,$sigma1[0])',
	'&pxor	($t3,$t2);',
	'&psrlq	($t2,$sigma1[1]-$sigma1[0])',
	'&pxor	($t3,$t2)',
	'&pshufb	($t3,$t4)',		# sigma1(X[14..15])
	'&paddd	(@X[0],$t3)',		# X[0..1] += sigma1(X[14..15])
	'&pshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&movdqa	($t2,$t3);',
	'&psrld	($t3,$sigma1[2])',
	'&psrlq	($t2,$sigma1[0])',
	'&pxor	($t3,$t2);',
	'&psrlq	($t2,$sigma1[1]-$sigma1[0])',
	'&pxor	($t3,$t2);',
	'&movdqa	($t2,16*2*$j."($Tbl)")',
	'&pshufb	($t3,$t5)',
	'&paddd	(@X[0],$t3)'		# X[2..3] += sigma1(X[16..17])
	);
}
# SSSE3_256_00_47($j, $body, @X) — emit four SHA-256 rounds plus one
# 4-word message-schedule update for the SSSE3 path.
#
#   $j    - schedule-update index (0..3), selects the K-table offset and
#           the stack slot written at the end
#   $body - round-body generator (a ref to body_00_15); called four times
#           up front so @insns holds all scalar-round snippets
#   @X    - the four xmm registers holding the current message block
#
# The dead 'if (0)' branch keeps the generic interleaving (round-body
# snippets woven mechanically around Xupdate_256_SSSE3) for reference;
# the live 'else' branch is the same instruction stream hand-scheduled
# (per the original comment: ~4% faster on Westmere, ~19% on Atom).
# The '#@' markers appear to tag specific interleave slots from that
# hand-tuning — exact intent not documented here (TODO confirm upstream).
# Note the pshufb-by-$t4/$t5 steps of the generic version are replaced
# by equivalent pshufd/psrldq/pslldq sequences in the scheduled version.
# Finally K[j]+X is computed into $t2 and stored to the stack slot that
# body_00_15 reads ($SZ*($i&15)."(%rsp)").
sub SSSE3_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions

    if (0) {
	foreach (Xupdate_256_SSSE3()) {		# 36 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval(shift(@insns));
	}
    } else {			# squeeze extra 4% on Westmere and 19% on Atom
	eval(shift(@insns));	#@
	&movdqa		($t0,@X[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa		($t3,@X[3]);
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&palignr	($t0,@X[0],$SZ);	# X[1..4]
	eval(shift(@insns));
	eval(shift(@insns));
	&palignr	($t3,@X[2],$SZ);	# X[9..12]
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&movdqa		($t1,$t0);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa		($t2,$t0);
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&psrld		($t0,$sigma0[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd		(@X[0],$t3);	# X[0..3] += X[9..12]
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&psrld		($t2,$sigma0[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&pshufd		($t3,@X[3],0b11111010);	# X[14..15]
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&pslld		($t1,8*$SZ-$sigma0[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t0,$t2);
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&psrld		($t2,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));
	&pxor		($t0,$t1);
	eval(shift(@insns));
	eval(shift(@insns));
	&pslld		($t1,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t0,$t2);
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&movdqa		($t2,$t3);
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t0,$t1);	# sigma0(X[1..4])
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld		($t3,$sigma1[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd		(@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&psrlq		($t2,$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t3,$t2);
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&psrlq		($t2,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t3,$t2);
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	#&pshufb	($t3,$t4);	# sigma1(X[14..15])
	&pshufd		($t3,$t3,0b10000000);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&psrldq		($t3,8);
	eval(shift(@insns));
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&paddd		(@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&pshufd		($t3,@X[0],0b01010000);	# X[16..17]
	eval(shift(@insns));
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&movdqa		($t2,$t3);
	eval(shift(@insns));
	eval(shift(@insns));
	&psrld		($t3,$sigma1[2]);
	eval(shift(@insns));
	eval(shift(@insns));	#@
	&psrlq		($t2,$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t3,$t2);
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&psrlq		($t2,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&pxor		($t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));	#@
	#&pshufb	($t3,$t5);
	&pshufd		($t3,$t3,0b00001000);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa		($t2,16*2*$j."($Tbl)");
	eval(shift(@insns));	#@
	eval(shift(@insns));
	&pslldq		($t3,8);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd		(@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
	eval(shift(@insns));	#@
	eval(shift(@insns));
	eval(shift(@insns));
    }
	# K[j] + freshly updated X, parked where body_00_15 reads it
	&paddd		($t2,@X[0]);
	foreach (@insns) { eval; }		# remaining instructions
	&movdqa		(16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&SSSE3_256_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
&jne (".Lssse3_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
$code.=<<___;
mov $_ctx,$ctx
mov $a1,$A
add $SZ*0($ctx),$A
lea 16*$SZ($inp),$inp
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop_ssse3
mov $_rsp,%rsi
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_ssse3:
ret
.size ${func}_ssse3,.-${func}_ssse3
___
}
if ($avx) {{
######################################################################
# XOP code path
#
if ($SZ==8) { # SHA512 only
$code.=<<___;
.type ${func}_xop,\@function,3
.align 64
${func}_xop:
.Lxop_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
shl \$4,%rdx # num*16
sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
and \$-64,%rsp # align stack frame
mov $ctx,$_ctx # save ctx, 1st arg
mov $inp,$_inp # save inp, 2nd arh
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___ if ($win64 && $SZ>4);
movaps %xmm10,16*$SZ+96(%rsp)
movaps %xmm11,16*$SZ+112(%rsp)
___
$code.=<<___;
.Lprologue_xop:
vzeroupper
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
jmp .Lloop_xop
___
if ($SZ==4) { # SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
$code.=<<___;
.align 16
.Lloop_xop:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[0],@X[0]
lea $TABLE(%rip),$Tbl
vpshufb $t3,@X[1],@X[1]
vpshufb $t3,@X[2],@X[2]
vpaddd 0x00($Tbl),@X[0],$t0
vpshufb $t3,@X[3],@X[3]
vpaddd 0x20($Tbl),@X[1],$t1
vpaddd 0x40($Tbl),@X[2],$t2
vpaddd 0x60($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
mov $A,$a1
vmovdqa $t1,0x10(%rsp)
mov $B,$a3
vmovdqa $t2,0x20(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x30(%rsp)
mov $E,$a0
jmp .Lxop_00_47
.align 16
.Lxop_00_47:
sub \$`-16*2*$SZ`,$Tbl # size optimization
___
# XOP_256_00_47($j, $body, @X) — emit four SHA-256 rounds plus one
# 4-word schedule update for the AMD XOP path.  Same structure as
# SSSE3_256_00_47, but the shift/xor cascades of sigma0/sigma1 are
# collapsed with vprotd (XOP rotate), so fewer vector instructions are
# needed.  Round-body snippets from $body (body_00_15) are interleaved
# between vector instructions; K[j]+X is stored to the stack at the end.
sub XOP_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions

	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
	eval(shift(@insns));
	eval(shift(@insns));
	&vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotd		($t1,$t0,8*$SZ-$sigma0[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrld		($t0,$t0,$sigma0[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t3);	# X[0..3] += X[9..12]
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotd		($t2,$t1,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t0,$t0,$t1);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotd		($t3,@X[3],8*$SZ-$sigma1[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..4])
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrld		($t2,@X[3],$sigma1[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotd		($t1,$t3,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t3,$t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrldq	($t3,$t3,8);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotd		($t3,@X[0],8*$SZ-$sigma1[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrld		($t2,@X[0],$sigma1[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotd		($t1,$t3,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t3,$t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t3,$t3,$t1);		# sigma1(X[16..17])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpslldq	($t3,$t3,8);		# 22 instructions
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&XOP_256_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
&jne (".Lxop_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
} else { # SHA512
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
$code.=<<___;
.align 16
.Lloop_xop:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
lea $TABLE+0x80(%rip),$Tbl # size optimization
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vpshufb $t3,@X[0],@X[0]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[1],@X[1]
vmovdqu 0x40($inp),@X[4]
vpshufb $t3,@X[2],@X[2]
vmovdqu 0x50($inp),@X[5]
vpshufb $t3,@X[3],@X[3]
vmovdqu 0x60($inp),@X[6]
vpshufb $t3,@X[4],@X[4]
vmovdqu 0x70($inp),@X[7]
vpshufb $t3,@X[5],@X[5]
vpaddq -0x80($Tbl),@X[0],$t0
vpshufb $t3,@X[6],@X[6]
vpaddq -0x60($Tbl),@X[1],$t1
vpshufb $t3,@X[7],@X[7]
vpaddq -0x40($Tbl),@X[2],$t2
vpaddq -0x20($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
vpaddq 0x00($Tbl),@X[4],$t0
vmovdqa $t1,0x10(%rsp)
vpaddq 0x20($Tbl),@X[5],$t1
vmovdqa $t2,0x20(%rsp)
vpaddq 0x40($Tbl),@X[6],$t2
vmovdqa $t3,0x30(%rsp)
vpaddq 0x60($Tbl),@X[7],$t3
vmovdqa $t0,0x40(%rsp)
mov $A,$a1
vmovdqa $t1,0x50(%rsp)
mov $B,$a3
vmovdqa $t2,0x60(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x70(%rsp)
mov $E,$a0
jmp .Lxop_00_47
.align 16
.Lxop_00_47:
add \$`16*2*$SZ`,$Tbl
___
# XOP_512_00_47($j, $body, @X) — SHA-512 flavour of the XOP generator:
# two rounds (one 2-word, 64-bit schedule update) per call, using vprotq
# for the 64-bit rotates.  $j is 0..7 here and the K-table pointer $Tbl
# is pre-advanced by 0x80 (hence the -0x80 in the vpaddq offset).
# Round bodies are interleaved between vector instructions; K[j]+X is
# stored to the stack slot body_00_15 reads.
sub XOP_512_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body);			# 52 instructions

	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..2]
	eval(shift(@insns));
	eval(shift(@insns));
	&vpalignr	($t3,@X[5],@X[4],$SZ);	# X[9..10]
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq		($t1,$t0,8*$SZ-$sigma0[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrlq		($t0,$t0,$sigma0[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += X[9..10]
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq		($t2,$t1,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t0,$t0,$t1);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq		($t3,@X[7],8*$SZ-$sigma1[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..2])
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrlq		($t2,@X[7],$sigma1[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq		(@X[0],@X[0],$t0);	# X[0..1] += sigma0(X[1..2])
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq		($t1,$t3,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t3,$t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<8; $j++) {
&XOP_512_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
&jne (".Lxop_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
}
$code.=<<___;
mov $_ctx,$ctx
mov $a1,$A
add $SZ*0($ctx),$A
lea 16*$SZ($inp),$inp
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop_xop
mov $_rsp,%rsi
vzeroupper
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___ if ($win64 && $SZ>4);
movaps 16*$SZ+96(%rsp),%xmm10
movaps 16*$SZ+112(%rsp),%xmm11
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_xop:
ret
.size ${func}_xop,.-${func}_xop
___
}
######################################################################
# AVX+shrd code path
#
local *ror = sub { &shrd(@_[0],@_) };
$code.=<<___;
.type ${func}_avx,\@function,3
.align 64
${func}_avx:
.Lavx_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
shl \$4,%rdx # num*16
sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
and \$-64,%rsp # align stack frame
mov $ctx,$_ctx # save ctx, 1st arg
mov $inp,$_inp # save inp, 2nd arh
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___ if ($win64 && $SZ>4);
movaps %xmm10,16*$SZ+96(%rsp)
movaps %xmm11,16*$SZ+112(%rsp)
___
$code.=<<___;
.Lprologue_avx:
vzeroupper
mov $SZ*0($ctx),$A
mov $SZ*1($ctx),$B
mov $SZ*2($ctx),$C
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
___
if ($SZ==4) { # SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
$code.=<<___;
vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[0],@X[0]
lea $TABLE(%rip),$Tbl
vpshufb $t3,@X[1],@X[1]
vpshufb $t3,@X[2],@X[2]
vpaddd 0x00($Tbl),@X[0],$t0
vpshufb $t3,@X[3],@X[3]
vpaddd 0x20($Tbl),@X[1],$t1
vpaddd 0x40($Tbl),@X[2],$t2
vpaddd 0x60($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
mov $A,$a1
vmovdqa $t1,0x10(%rsp)
mov $B,$a3
vmovdqa $t2,0x20(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x30(%rsp)
mov $E,$a0
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
sub \$`-16*2*$SZ`,$Tbl # size optimization
___
# Xupdate_256_AVX() — AVX (three-operand, non-destructive) variant of the
# SHA-256 message-schedule step.  Returns the instruction-snippet list
# advancing @X[0] by four words; snippets ending in ';' are chained with
# the following snippet.  sigma1 of the upper words is done via the $t4
# and $t5 pshufb masks (loaded once by the caller).  Also reused by the
# AVX2 path (with ymm registers bound to $t0..$t5/@X at eval time).
sub Xupdate_256_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
	'&vpsrld	($t2,$t0,$sigma0[0]);',
	'&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,$sigma0[2])',
	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
	'&vpxor	($t0,$t3,$t2)',
	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t1)',
	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t2)',
	'&vpsrld	($t2,$t3,$sigma1[2]);',
	'&vpxor	($t0,$t0,$t1)',		# sigma0(X[1..4])
	'&vpsrlq	($t3,$t3,$sigma1[0]);',
	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
	'&vpxor	($t2,$t2,$t3);',
	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
	'&vpxor	($t2,$t2,$t3)',
	'&vpshufb	($t2,$t2,$t4)',		# sigma1(X[14..15])
	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&vpsrld	($t2,$t3,$sigma1[2])',
	'&vpsrlq	($t3,$t3,$sigma1[0])',
	'&vpxor	($t2,$t2,$t3);',
	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
	'&vpxor	($t2,$t2,$t3)',
	'&vpshufb	($t2,$t2,$t5)',
	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
	);
}
# AVX_256_00_47($j, $body, @X) — four SHA-256 rounds plus one schedule
# update for the AVX path.  Interleaving is mechanical here: each vector
# snippet from Xupdate_256_AVX is followed by three round-body snippets.
# K[j]+X is then stored to the stack slot body_00_15 reads.
sub AVX_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions

	foreach (Xupdate_256_AVX()) {		# 29 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval(shift(@insns));
	}
	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&AVX_256_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
&jne (".Lavx_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
} else { # SHA512
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
$code.=<<___;
jmp .Lloop_avx
.align 16
.Lloop_avx:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu 0x00($inp),@X[0]
lea $TABLE+0x80(%rip),$Tbl # size optimization
vmovdqu 0x10($inp),@X[1]
vmovdqu 0x20($inp),@X[2]
vpshufb $t3,@X[0],@X[0]
vmovdqu 0x30($inp),@X[3]
vpshufb $t3,@X[1],@X[1]
vmovdqu 0x40($inp),@X[4]
vpshufb $t3,@X[2],@X[2]
vmovdqu 0x50($inp),@X[5]
vpshufb $t3,@X[3],@X[3]
vmovdqu 0x60($inp),@X[6]
vpshufb $t3,@X[4],@X[4]
vmovdqu 0x70($inp),@X[7]
vpshufb $t3,@X[5],@X[5]
vpaddq -0x80($Tbl),@X[0],$t0
vpshufb $t3,@X[6],@X[6]
vpaddq -0x60($Tbl),@X[1],$t1
vpshufb $t3,@X[7],@X[7]
vpaddq -0x40($Tbl),@X[2],$t2
vpaddq -0x20($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
vpaddq 0x00($Tbl),@X[4],$t0
vmovdqa $t1,0x10(%rsp)
vpaddq 0x20($Tbl),@X[5],$t1
vmovdqa $t2,0x20(%rsp)
vpaddq 0x40($Tbl),@X[6],$t2
vmovdqa $t3,0x30(%rsp)
vpaddq 0x60($Tbl),@X[7],$t3
vmovdqa $t0,0x40(%rsp)
mov $A,$a1
vmovdqa $t1,0x50(%rsp)
mov $B,$a3
vmovdqa $t2,0x60(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x70(%rsp)
mov $E,$a0
jmp .Lavx_00_47
.align 16
.Lavx_00_47:
add \$`16*2*$SZ`,$Tbl
___
# Xupdate_512_AVX() — AVX variant of the SHA-512 message-schedule step.
# Returns the instruction-snippet list advancing @X[0] by two 64-bit
# words; 64-bit rotates are synthesized from vpsrlq/vpsllq/vpxor pairs.
# Snippets ending in ';' are chained with the next one.  Also reused by
# the AVX2 path, where the '; ' suffix additionally controls interleave
# density (see AVX2_512_00_47).
sub Xupdate_512_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..2]
	'&vpalignr	($t3,@X[5],@X[4],$SZ)',	# X[9..10]
	'&vpsrlq	($t2,$t0,$sigma0[0])',
	'&vpaddq	(@X[0],@X[0],$t3);',	# X[0..1] += X[9..10]
	'&vpsrlq	($t3,$t0,$sigma0[2])',
	'&vpsllq	($t1,$t0,8*$SZ-$sigma0[1]);',
	'&vpxor	($t0,$t3,$t2)',
	'&vpsrlq	($t2,$t2,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t1)',
	'&vpsllq	($t1,$t1,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t2)',
	'&vpsrlq	($t3,@X[7],$sigma1[2]);',
	'&vpxor	($t0,$t0,$t1)',		# sigma0(X[1..2])
	'&vpsllq	($t2,@X[7],8*$SZ-$sigma1[1]);',
	'&vpaddq	(@X[0],@X[0],$t0)',	# X[0..1] += sigma0(X[1..2])
	'&vpsrlq	($t1,@X[7],$sigma1[0]);',
	'&vpxor	($t3,$t3,$t2)',
	'&vpsllq	($t2,$t2,$sigma1[1]-$sigma1[0]);',
	'&vpxor	($t3,$t3,$t1)',
	'&vpsrlq	($t1,$t1,$sigma1[1]-$sigma1[0]);',
	'&vpxor	($t3,$t3,$t2)',
	'&vpxor	($t3,$t3,$t1)',		# sigma1(X[14..15])
	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += sigma1(X[14..15])
	);
}
# AVX_512_00_47($j, $body, @X) — two SHA-512 rounds plus one 2-word
# schedule update for the AVX path; each vector snippet is followed by
# two round-body snippets.  $Tbl is pre-advanced by 0x80, hence the
# -0x80 correction in the K-table offset.
sub AVX_512_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body);			# 52 instructions

	foreach (Xupdate_512_AVX()) {		# 23 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	}
	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<8; $j++) {
&AVX_512_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
&jne (".Lavx_00_47");
for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
}
$code.=<<___;
mov $_ctx,$ctx
mov $a1,$A
add $SZ*0($ctx),$A
lea 16*$SZ($inp),$inp
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jb .Lloop_avx
mov $_rsp,%rsi
vzeroupper
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___ if ($win64 && $SZ>4);
movaps 16*$SZ+96(%rsp),%xmm10
movaps 16*$SZ+112(%rsp),%xmm11
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_avx:
ret
.size ${func}_avx,.-${func}_avx
___
if ($avx>1) {{
######################################################################
# AVX2+BMI code path
#
my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
my $PUSH8=8*2*$SZ;
use integer;
# bodyx_00_15() — BMI2/AVX2 round body: same contract as body_00_15 but
# built on rorx (non-flag-clobbering rotate), andn and lea, and reading
# X[i]+K[i] from the rsp-relative window addressed through $base (a
# lexical the caller has in scope when these snippets are eval'ed).
# Sigma0(a) is accumulated "late": this round's Sigma0 lands in $a1 and
# is folded into the NEXT round's a via '&lea ($a,"($a,$a1)")' — so the
# caller must do a final $a += $a1 after the last round (see trailing
# comment).  Entry invariants per the original note: $a1 == 0,
# $a3 == $b^$c, $a4 == copy of $f.
sub bodyx_00_15 () {
	# at start $a1 should be zero, $a3 - $b^$c and $a4 copy of $f
	(
	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
	'&add	($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)',    # h+=X[i]+K[i]
	'&and	($a4,$e)',		# f&e
	'&rorx	($a0,$e,$Sigma1[2])',
	'&rorx	($a2,$e,$Sigma1[1])',
	'&lea	($a,"($a,$a1)")',	# h+=Sigma0(a) from the past
	'&lea	($h,"($h,$a4)")',
	'&andn	($a4,$e,$g)',		# ~e&g
	'&xor	($a0,$a2)',
	'&rorx	($a1,$e,$Sigma1[0])',
	'&lea	($h,"($h,$a4)")',	# h+=Ch(e,f,g)=(e&f)+(~e&g)
	'&xor	($a0,$a1)',		# Sigma1(e)
	'&mov	($a2,$a)',
	'&rorx	($a4,$a,$Sigma0[2])',
	'&lea	($h,"($h,$a0)")',	# h+=Sigma1(e)
	'&xor	($a2,$b)',		# a^b, b^c in next round
	'&rorx	($a1,$a,$Sigma0[1])',
	'&rorx	($a0,$a,$Sigma0[0])',
	'&lea	($d,"($d,$h)")',	# d+=h
	'&and	($a3,$a2)',		# (b^c)&(a^b)
	'&xor	($a1,$a4)',
	'&xor	($a3,$b)',		# Maj(a,b,c)=Ch(a^b,c,b)
	'&xor	($a1,$a0)',		# Sigma0(a)
	'&lea	($h,"($h,$a3)");'.	# h+=Maj(a,b,c)
	'&mov	($a4,$e)',		# copy of f in future
	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
	);
	# and at the finish one has to $a+=$a1
}
$code.=<<___;
.type ${func}_avx2,\@function,3
.align 64
${func}_avx2:
.Lavx2_shortcut:
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
mov %rsp,%r11 # copy %rsp
sub \$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
shl \$4,%rdx # num*16
and \$-256*$SZ,%rsp # align stack frame
lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
add \$`2*$SZ*($rounds-8)`,%rsp
mov $ctx,$_ctx # save ctx, 1st arg
mov $inp,$_inp # save inp, 2nd arh
mov %rdx,$_end # save end pointer, "3rd" arg
mov %r11,$_rsp # save copy of %rsp
___
$code.=<<___ if ($win64);
movaps %xmm6,16*$SZ+32(%rsp)
movaps %xmm7,16*$SZ+48(%rsp)
movaps %xmm8,16*$SZ+64(%rsp)
movaps %xmm9,16*$SZ+80(%rsp)
___
$code.=<<___ if ($win64 && $SZ>4);
movaps %xmm10,16*$SZ+96(%rsp)
movaps %xmm11,16*$SZ+112(%rsp)
___
$code.=<<___;
.Lprologue_avx2:
vzeroupper
sub \$-16*$SZ,$inp # inp++, size optimization
mov $SZ*0($ctx),$A
mov $inp,%r12 # borrow $T1
mov $SZ*1($ctx),$B
cmp %rdx,$inp # $_end
mov $SZ*2($ctx),$C
cmove %rsp,%r12 # next block or random data
mov $SZ*3($ctx),$D
mov $SZ*4($ctx),$E
mov $SZ*5($ctx),$F
mov $SZ*6($ctx),$G
mov $SZ*7($ctx),$H
___
if ($SZ==4) { # SHA256
my @X = map("%ymm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
$code.=<<___;
vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
jmp .Loop_avx2
.align 16
.Loop_avx2:
vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
vmovdqu -16*$SZ+0($inp),%xmm0
vmovdqu -16*$SZ+16($inp),%xmm1
vmovdqu -16*$SZ+32($inp),%xmm2
vmovdqu -16*$SZ+48($inp),%xmm3
#mov $inp,$_inp # offload $inp
vinserti128 \$1,(%r12),@X[0],@X[0]
vinserti128 \$1,16(%r12),@X[1],@X[1]
vpshufb $t3,@X[0],@X[0]
vinserti128 \$1,32(%r12),@X[2],@X[2]
vpshufb $t3,@X[1],@X[1]
vinserti128 \$1,48(%r12),@X[3],@X[3]
lea $TABLE(%rip),$Tbl
vpshufb $t3,@X[2],@X[2]
vpaddd 0x00($Tbl),@X[0],$t0
vpshufb $t3,@X[3],@X[3]
vpaddd 0x20($Tbl),@X[1],$t1
vpaddd 0x40($Tbl),@X[2],$t2
vpaddd 0x60($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
xor $a1,$a1
vmovdqa $t1,0x20(%rsp)
lea -$PUSH8(%rsp),%rsp
mov $B,$a3
vmovdqa $t2,0x00(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x20(%rsp)
mov $F,$a4
sub \$-16*2*$SZ,$Tbl # size optimization
jmp .Lavx2_00_47
.align 16
.Lavx2_00_47:
___
# AVX2_256_00_47($j, $body, @X) — AVX2 (ymm, two blocks at once) version
# of the SHA-256 round/schedule generator.  Reuses Xupdate_256_AVX with
# ymm registers; round bodies come from bodyx_00_15, which reads X+K via
# $base ("+2*$PUSH8(%rsp)" — two frames up).  Every second call the
# stack pointer itself is slid down by $PUSH8 (lea %rsp,-$PUSH8(%rsp)),
# so stores cycle through a moving window; hence the (32*$j)%$PUSH8
# store offset.
sub AVX2_256_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 96 instructions
my $base = "+2*$PUSH8(%rsp)";

	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%2)==0);
	foreach (Xupdate_256_AVX()) {		# 29 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval(shift(@insns));
	}
	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<4; $j++) {
&AVX2_256_00_47($j,\&bodyx_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&lea ($Tbl,16*2*$SZ."($Tbl)");
&cmpb (($SZ-1)."($Tbl)",0);
&jne (".Lavx2_00_47");
for ($i=0; $i<16; ) {
my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
foreach(bodyx_00_15()) { eval; }
}
} else { # SHA512
my @X = map("%ymm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
$code.=<<___;
jmp .Loop_avx2
.align 16
.Loop_avx2:
vmovdqu -16*$SZ($inp),%xmm0
vmovdqu -16*$SZ+16($inp),%xmm1
vmovdqu -16*$SZ+32($inp),%xmm2
lea $TABLE+0x80(%rip),$Tbl # size optimization
vmovdqu -16*$SZ+48($inp),%xmm3
vmovdqu -16*$SZ+64($inp),%xmm4
vmovdqu -16*$SZ+80($inp),%xmm5
vmovdqu -16*$SZ+96($inp),%xmm6
vmovdqu -16*$SZ+112($inp),%xmm7
#mov $inp,$_inp # offload $inp
vmovdqa `$SZ*2*$rounds-0x80`($Tbl),$t2
vinserti128 \$1,(%r12),@X[0],@X[0]
vinserti128 \$1,16(%r12),@X[1],@X[1]
vpshufb $t2,@X[0],@X[0]
vinserti128 \$1,32(%r12),@X[2],@X[2]
vpshufb $t2,@X[1],@X[1]
vinserti128 \$1,48(%r12),@X[3],@X[3]
vpshufb $t2,@X[2],@X[2]
vinserti128 \$1,64(%r12),@X[4],@X[4]
vpshufb $t2,@X[3],@X[3]
vinserti128 \$1,80(%r12),@X[5],@X[5]
vpshufb $t2,@X[4],@X[4]
vinserti128 \$1,96(%r12),@X[6],@X[6]
vpshufb $t2,@X[5],@X[5]
vinserti128 \$1,112(%r12),@X[7],@X[7]
vpaddq -0x80($Tbl),@X[0],$t0
vpshufb $t2,@X[6],@X[6]
vpaddq -0x60($Tbl),@X[1],$t1
vpshufb $t2,@X[7],@X[7]
vpaddq -0x40($Tbl),@X[2],$t2
vpaddq -0x20($Tbl),@X[3],$t3
vmovdqa $t0,0x00(%rsp)
vpaddq 0x00($Tbl),@X[4],$t0
vmovdqa $t1,0x20(%rsp)
vpaddq 0x20($Tbl),@X[5],$t1
vmovdqa $t2,0x40(%rsp)
vpaddq 0x40($Tbl),@X[6],$t2
vmovdqa $t3,0x60(%rsp)
lea -$PUSH8(%rsp),%rsp
vpaddq 0x60($Tbl),@X[7],$t3
vmovdqa $t0,0x00(%rsp)
xor $a1,$a1
vmovdqa $t1,0x20(%rsp)
mov $B,$a3
vmovdqa $t2,0x40(%rsp)
xor $C,$a3 # magic
vmovdqa $t3,0x60(%rsp)
mov $F,$a4
add \$16*2*$SZ,$Tbl
jmp .Lavx2_00_47
.align 16
.Lavx2_00_47:
___
# AVX2_512_00_47($j, $body, @X) — AVX2 version of the SHA-512
# round/schedule generator (two blocks at once).  Like AVX2_256_00_47
# but with bodyx_00_15 round pairs, an %rsp slide every FOURTH call, and
# the $Tbl-0x80 offset correction.  Vector snippets whose string ends in
# ';' are emitted back-to-back (no round-body interleave after them) —
# that is what the $_ !~ /\;$/ test implements.
sub AVX2_512_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body);			# 48 instructions
my $base = "+2*$PUSH8(%rsp)";

	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%4)==0);
	foreach (Xupdate_512_AVX()) {		# 23 instructions
	    eval;
	    if ($_ !~ /\;$/) {
		eval(shift(@insns));
		eval(shift(@insns));
		eval(shift(@insns));
	    }
	}
	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
}
for ($i=0,$j=0; $j<8; $j++) {
&AVX2_512_00_47($j,\&bodyx_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&lea ($Tbl,16*2*$SZ."($Tbl)");
&cmpb (($SZ-1-0x80)."($Tbl)",0);
&jne (".Lavx2_00_47");
for ($i=0; $i<16; ) {
my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
foreach(bodyx_00_15()) { eval; }
}
}
$code.=<<___;
mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
add $a1,$A
#mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
lea `2*$SZ*($rounds-8)`(%rsp),$Tbl
add $SZ*0($ctx),$A
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
add $SZ*6($ctx),$G
add $SZ*7($ctx),$H
mov $A,$SZ*0($ctx)
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
cmp `$PUSH8+2*8`($Tbl),$inp # $_end
je .Ldone_avx2
xor $a1,$a1
mov $B,$a3
xor $C,$a3 # magic
mov $F,$a4
jmp .Lower_avx2
.align 16
.Lower_avx2:
___
for ($i=0; $i<8; ) {
my $base="+16($Tbl)";
foreach(bodyx_00_15()) { eval; }
}
$code.=<<___;
lea -$PUSH8($Tbl),$Tbl
cmp %rsp,$Tbl
jae .Lower_avx2
mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
add $a1,$A
#mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
lea `2*$SZ*($rounds-8)`(%rsp),%rsp
add $SZ*0($ctx),$A
add $SZ*1($ctx),$B
add $SZ*2($ctx),$C
add $SZ*3($ctx),$D
add $SZ*4($ctx),$E
add $SZ*5($ctx),$F
lea `2*16*$SZ`($inp),$inp # inp+=2
add $SZ*6($ctx),$G
mov $inp,%r12
add $SZ*7($ctx),$H
cmp $_end,$inp
mov $A,$SZ*0($ctx)
cmove %rsp,%r12 # next block or stale data
mov $B,$SZ*1($ctx)
mov $C,$SZ*2($ctx)
mov $D,$SZ*3($ctx)
mov $E,$SZ*4($ctx)
mov $F,$SZ*5($ctx)
mov $G,$SZ*6($ctx)
mov $H,$SZ*7($ctx)
jbe .Loop_avx2
lea (%rsp),$Tbl
.Ldone_avx2:
lea ($Tbl),%rsp
mov $_rsp,%rsi
vzeroupper
___
$code.=<<___ if ($win64);
movaps 16*$SZ+32(%rsp),%xmm6
movaps 16*$SZ+48(%rsp),%xmm7
movaps 16*$SZ+64(%rsp),%xmm8
movaps 16*$SZ+80(%rsp),%xmm9
___
$code.=<<___ if ($win64 && $SZ>4);
movaps 16*$SZ+96(%rsp),%xmm10
movaps 16*$SZ+112(%rsp),%xmm11
___
$code.=<<___;
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
mov 24(%rsi),%r12
mov 32(%rsi),%rbp
mov 40(%rsi),%rbx
lea 48(%rsi),%rsp
.Lepilogue_avx2:
ret
.size ${func}_avx2,.-${func}_avx2
___
}}
}}}}}
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
# CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";
$code.=<<___;
.extern __imp_RtlVirtualUnwind
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
mov 8($disp),%rsi # disp->ImageBase
mov 56($disp),%r11 # disp->HanderlData
mov 0(%r11),%r10d # HandlerData[0]
lea (%rsi,%r10),%r10 # prologue label
cmp %r10,%rbx # context->Rip<prologue label
jb .Lin_prologue
mov 152($context),%rax # pull context->Rsp
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # epilogue label
cmp %r10,%rbx # context->Rip>=epilogue label
jae .Lin_prologue
___
$code.=<<___ if ($avx>1);
lea .Lavx2_shortcut(%rip),%r10
cmp %r10,%rbx # context->Rip<avx2_shortcut
jb .Lnot_in_avx2
and \$-256*$SZ,%rax
add \$`2*$SZ*($rounds-8)`,%rax
.Lnot_in_avx2:
___
$code.=<<___;
mov %rax,%rsi # put aside Rsp
mov 16*$SZ+3*8(%rax),%rax # pull $_rsp
lea 48(%rax),%rax
mov -8(%rax),%rbx
mov -16(%rax),%rbp
mov -24(%rax),%r12
mov -32(%rax),%r13
mov -40(%rax),%r14
mov -48(%rax),%r15
mov %rbx,144($context) # restore context->Rbx
mov %rbp,160($context) # restore context->Rbp
mov %r12,216($context) # restore context->R12
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
lea .Lepilogue(%rip),%r10
cmp %r10,%rbx
jb .Lin_prologue # non-AVX code
lea 16*$SZ+4*8(%rsi),%rsi # Xmm6- save area
lea 512($context),%rdi # &context.Xmm6
mov \$`$SZ==4?8:12`,%ecx
.long 0xa548f3fc # cld; rep movsq
.Lin_prologue:
mov 8(%rax),%rdi
mov 16(%rax),%rsi
mov %rax,152($context) # restore context->Rsp
mov %rsi,168($context) # restore context->Rsi
mov %rdi,176($context) # restore context->Rdi
mov 40($disp),%rdi # disp->ContextRecord
mov $context,%rsi # context
mov \$154,%ecx # sizeof(CONTEXT)
.long 0xa548f3fc # cld; rep movsq
mov $disp,%rsi
xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
mov 8(%rsi),%rdx # arg2, disp->ImageBase
mov 0(%rsi),%r8 # arg3, disp->ControlPc
mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
mov 40(%rsi),%r10 # disp->ContextRecord
lea 56(%rsi),%r11 # &disp->HandlerData
lea 24(%rsi),%r12 # &disp->EstablisherFrame
mov %r10,32(%rsp) # arg5
mov %r11,40(%rsp) # arg6
mov %r12,48(%rsp) # arg7
mov %rcx,56(%rsp) # arg8, (NULL)
call *__imp_RtlVirtualUnwind(%rip)
mov \$1,%eax # ExceptionContinueSearch
add \$64,%rsp
popfq
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbp
pop %rbx
pop %rdi
pop %rsi
ret
.size se_handler,.-se_handler
___
$code.=<<___ if ($SZ == 4 && $shaext);
.type shaext_handler,\@abi-omnipotent
.align 16
shaext_handler:
push %rsi
push %rdi
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
pushfq
sub \$64,%rsp
mov 120($context),%rax # pull context->Rax
mov 248($context),%rbx # pull context->Rip
lea .Lprologue_shaext(%rip),%r10
cmp %r10,%rbx # context->Rip<.Lprologue
jb .Lin_prologue
lea .Lepilogue_shaext(%rip),%r10
cmp %r10,%rbx # context->Rip>=.Lepilogue
jae .Lin_prologue
lea -8-5*16(%rax),%rsi
lea 512($context),%rdi # &context.Xmm6
mov \$10,%ecx
.long 0xa548f3fc # cld; rep movsq
jmp .Lin_prologue
.size shaext_handler,.-shaext_handler
___
$code.=<<___;
.section .pdata
.align 4
.rva .LSEH_begin_$func
.rva .LSEH_end_$func
.rva .LSEH_info_$func
___
# Register the SEH .pdata entries for the SHAEXT code path.  The guard
# used to test $shext - an undeclared variable (typo for $shaext) - so
# these .rva entries were never emitted and the shaext function had no
# unwind info on Win64.  The matching .LSEH_info_${func}_shaext block
# below correctly tests ($SZ==4 && $shaext).
$code.=<<___ if ($SZ==4 && $shaext);
	.rva	.LSEH_begin_${func}_shaext
	.rva	.LSEH_end_${func}_shaext
	.rva	.LSEH_info_${func}_shaext
___
$code.=<<___ if ($SZ==4);
.rva .LSEH_begin_${func}_ssse3
.rva .LSEH_end_${func}_ssse3
.rva .LSEH_info_${func}_ssse3
___
$code.=<<___ if ($avx && $SZ==8);
.rva .LSEH_begin_${func}_xop
.rva .LSEH_end_${func}_xop
.rva .LSEH_info_${func}_xop
___
$code.=<<___ if ($avx);
.rva .LSEH_begin_${func}_avx
.rva .LSEH_end_${func}_avx
.rva .LSEH_info_${func}_avx
___
$code.=<<___ if ($avx>1);
.rva .LSEH_begin_${func}_avx2
.rva .LSEH_end_${func}_avx2
.rva .LSEH_info_${func}_avx2
___
$code.=<<___;
.section .xdata
.align 8
.LSEH_info_$func:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue,.Lepilogue # HandlerData[]
___
$code.=<<___ if ($SZ==4 && $shaext);
.LSEH_info_${func}_shaext:
.byte 9,0,0,0
.rva shaext_handler
___
$code.=<<___ if ($SZ==4);
.LSEH_info_${func}_ssse3:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
___
$code.=<<___ if ($avx && $SZ==8);
.LSEH_info_${func}_xop:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_xop,.Lepilogue_xop # HandlerData[]
___
$code.=<<___ if ($avx);
.LSEH_info_${func}_avx:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
___
$code.=<<___ if ($avx>1);
.LSEH_info_${func}_avx2:
.byte 9,0,0,0
.rva se_handler
.rva .Lprologue_avx2,.Lepilogue_avx2 # HandlerData[]
___
}
# Hand-assemble SHA-NI instructions (sha256rnds2/sha256msg1/sha256msg2)
# for assemblers that predate the SHA extensions.  A recognized mnemonic
# with a register-register xmm operand pair is emitted as a raw ".byte"
# sequence (0F 38 <op> <ModR/M>); anything else is passed through
# unchanged for the assembler to handle.
#
# Args:    $instr - mnemonic;  $_[0] - operand string ("%xmmN,%xmmM")
# Returns: the rewritten assembly line.
sub sha256op38 {
    my $instr = shift;
    my %opcodelet = (
        "sha256rnds2" => 0xcb,
        "sha256msg1"  => 0xcc,
        "sha256msg2"  => 0xcd );

    # use the scalar element $_[0], not the one-element slice @_[0]
    if (defined($opcodelet{$instr}) && $_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
        my @opcode = (0x0f,0x38);                    # 0F 38 escape
        push @opcode, $opcodelet{$instr};            # instruction opcode
        push @opcode, 0xc0|($1&7)|(($2&7)<<3);       # ModR/M, reg-reg form
        return ".byte\t".join(',',@opcode);
    } else {
        return $instr."\t".$_[0];                    # pass through untouched
    }
}
# Post-process the generated assembly line by line:
#  - expand `...` spans by eval'ing them (compile-time arithmetic on
#    $SZ/$rounds offsets),
#  - rewrite sha256* mnemonics into .byte sequences via sha256op38() so
#    the output assembles on toolchains without SHA-NI support,
# then print the finished source on STDOUT.
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/geo;
s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;
print $_,"\n";
}
# explicitly close STDOUT so buffered output is flushed
close STDOUT;
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/boringssl/src/crypto/sha/asm/sha512-x86_64.pl | Perl | mit | 60,962 |
#!/sw/bin/perl -w
# add coordinates in a pts file
#
# Usage: ptsadd in_file add_file out_file [-Dname[=value] ...]
#
# Reads (x,y,z) offsets from the LAST data line of add_file and adds them
# to every point line of in_file, writing the result to out_file.
# -Dname=value switches set a like-named global variable via a symbolic
# reference (this relies on the script NOT using 'use strict');
# e.g. -Ddebug turns on the diagnostic dump below.
$pgnam="ptsadd";
undef $debug;
@j=();
@ARGVI=@ARGV;
# collect -D switches; @j records their indices (newest first) so they
# can be spliced out of @ARGV afterwards
for($i=0;$i<=$#ARGV;$i++){
if($ARGV[$i] =~ /-D(\w+)=(.*)/){
# symbolic reference: sets the global named after the -D switch
$$1=$2;
@j=($i,@j);
@DEFS = (@DEFS,$1);
%DHSH = (%DHSH, $1, $2,);
next;
}
if($ARGV[$i] =~ /-D(\w+)/){
# bare -Dname acts as a boolean flag
$$1=1;
@j=($i,@j);
@DEFS = (@DEFS,$1);
%DHSH = (%DHSH, $1, 1,);
next;
}
}
# @j is in descending index order, so each splice leaves the indices of
# the remaining entries valid
foreach $i (@j){
splice(@ARGV,$i,1);
}
if(defined $debug){
print "ARGVI: @ARGVI\n";
print "ARGV: @ARGV\n";
print "DEFS:";
for($i=0;$i<=$#DEFS;$i++){
print " $DEFS[$i]=${$DEFS[$i]}";
}
print "\n";
print "DHSH:";
while (($key,$value) = each %DHSH){
print " DHSH\{$key\}=$value";
}
print "\n";
}
if($#ARGV < 2){
die "usage: $pgnam in_file add_file out_file\n";
}
# scan add_file: ($xm,$ym,$zm) is overwritten each line, so the offsets
# come from the file's last non-skipped line
open(IFILE, "$ARGV[1]") || die "$pgnam: $ARGV[1] $!.\n";
while (<IFILE>){
s/^\s+//;
s/\s+$//;
# skip records beginning with '$' - presumably metadata/comment lines
# of the pts format; verify against the file spec
next if /^\$/;
# chop;
@f=split;
($xm,$ym,$zm)=@f;
}
close IFILE;
# translate every point in in_file by the offsets and write out_file
open(IFILE, "$ARGV[0]") || die "$pgnam: $ARGV[0] $!.\n";
open(OFILE, ">$ARGV[2]") || die "$pgnam: $ARGV[2] $!.\n";
while (<IFILE>){
s/^\s+//;
s/\s+$//;
next if /^\$/;
# chop;
@f=split;
$f[0]+=$xm;
$f[1]+=$ym;
$f[2]+=$zm;
print OFILE "@f\n";
}
close IFILE;
close OFILE;
| andybond13/ETHZ_to_FT | ell_approx/tomography_to_simplified_mesh/ptsadd.pl | Perl | mit | 1,287 |
#!/usr/bin/env perl
### modules
use strict;
use warnings;
use Pod::Usage;
use Data::Dumper;
use Getopt::Long;
use File::Spec;
use Bio::TreeIO;
use List::Util qw/sum/;
### args/flags
pod2usage("$0: No files given.") if ((@ARGV == 0) && (-t STDIN));
my ($verbose, $tree_in, $tformat, $count_in, $count_header, $mothur, $regex);
my $abund_cut = 5;
GetOptions(
"tree=s" => \$tree_in, # tree file
"format=s" => \$tformat, # tree format
"count=s" => \$count_in, # count table file
"xheader" => \$count_header, # header in count table? [T]
"mothur" => \$mothur, # mothur formatted count file? [F]
"abundance=i" => \$abund_cut, # abundance cutoff
"regex=s" => \$regex, # regex for exclusion on pruning
"verbose" => \$verbose,
"help|?" => \&pod2usage # Help
);
### I/O error & defaults
# NOTE(review): only the tree file is validated here; a missing -c makes
# load_count() fail later with a bare $! - confirm intended behaviour
die " Provide a tree file (newick or nexus).\n" if ! $tree_in;
$tformat = check_tree_format($tformat);
# the exclusion regex is compiled case-insensitively
$regex = qr/$regex/i if $regex;
### MAIN
# load tree & count files #
my $treeo = tree_io($tree_in, $tformat);
my ($count_r, $header) = load_count($count_in, $count_header, $mothur);
# sum abundances #
my $abund_r = sum_count($count_r, $abund_cut, $mothur, $regex);
# checking taxon existence in tree #
# (also deletes count-file taxa that are absent from the tree)
check_names($treeo, $abund_r, $count_r);
# writing out prune list for R script & calling script #
my $prune_file = write_prune_list($count_in, $abund_r);
call_prune_tree($tree_in, $prune_file); # calling R script
# writting out pruned count file #
write_pruned_count_file($count_r, $count_in, $header);
#$treeo = prune_by_abundance($treeo, $count_r);
#tree_write($treeo, $tree_in);
### Subroutines
# Write the (possibly reduced) count table to '<count_in>_prn.txt'.
# Args: $count_r - taxon => arrayref of counts (entries were deleted in
#       check_names for taxa missing from the tree); $count_in - original
#       count-file path, used to derive the output name; $header -
#       original header line, reprinted when defined.
# Side effects: creates the output file and prints a summary to STDERR.
sub write_pruned_count_file{
my ($count_r, $count_in, $header) = @_;
# replace the extension (or append when there is none) with _prn.txt
(my $outfile = $count_in) =~ s/\.[^.]+$|$/_prn.txt/;
open OUT, ">$outfile" or die $!;
print OUT $header, "\n" if $header;
foreach my $row (keys %$count_r){
print OUT join("\t", $row, @{$$count_r{$row}}), "\n";
}
close OUT;
print STDERR " Pruned count file written: '$outfile'\n";
print STDERR " Number of taxa in pruned count file: ", scalar keys %$count_r, "\n";
}
# Run the companion R script (tree_PruneByAbundance.r, expected on PATH)
# on the tree and the prune list written by write_prune_list().
# NOTE(review): the command is built by string interpolation and run via
# the shell - file names containing spaces or shell metacharacters will
# break (or be interpreted by the shell); the exit status of system() is
# also not checked.
sub call_prune_tree{
# calling prune_tree.r #
my ($tree_in, $prune_file) = @_;
# my $cmd = "Rscript ../../bin/tree_PruneByAbundance.r -t $tree_in -n $prune_file";
my $cmd = "tree_PruneByAbundance.r -t $tree_in -n $prune_file";
print STDERR "\n$cmd\n" if $verbose;
system($cmd);
}
# Write the keep/"delete" decisions to '<count_in>_prn-list.txt', one
# "taxon<TAB>status" line per taxon, and return that file's name.
# NOTE(review): the second parameter is named $count_r but the caller
# passes the abundance-decision hash ($abund_r) from sum_count(); values
# are either the taxon name (keep) or the string "delete".
sub write_prune_list{
# writing out prune list #
my ($count_in, $count_r) = @_;
(my $outfile = $count_in) =~ s/\.[^.]+$|$/_prn-list.txt/;
open OUT, ">$outfile" or die $!;
foreach my $taxon (keys %$count_r){
print OUT join("\t", $taxon, $$count_r{$taxon}), "\n";
}
close OUT;
return $outfile;
}
# Write $treeo as newick to '<tree_in>_prn.nwk' (despite the old comment
# saying "nexus").  Currently unused: the call in MAIN is commented out.
# NOTE(review): 'new Bio::TreeIO(...)' is indirect-object syntax; prefer
# Bio::TreeIO->new(...) if this sub is revived.
sub tree_write{
### writting out a nexus tree file ###
my ($treeo, $tree_in) = @_;
(my $outfile = $tree_in) =~ s/\.[^\.]+$|$/_prn.nwk/;
my $out = new Bio::TreeIO(-file => ">$outfile", -format => "newick");
$out->write_tree($treeo);
print STDERR " Newick tree file written: '$outfile'\n";
}
# In-Perl pruning of the phylogeny (superseded by the external R script;
# the call in MAIN is commented out).
# NOTE(review): as written this removes every leaf whose id IS a key of
# %$count_r and keeps the ones that are not - which looks inverted
# relative to the sub's stated purpose.  Verify before re-enabling.
sub prune_by_abundance{
# pruning the phylogeny by abundance #
my ($treeo, $count_r) = @_;
for my $node ($treeo->get_leaf_nodes){
next if ! exists $$count_r{$node->id};
$treeo->remove_Node($node);
}
return $treeo;
}
# Cross-check taxa between the count table and the tree leaves, warn on
# mismatches, and report summary statistics on STDERR.
# Args: $treeo - Bio::Tree object; $abund_r - taxon => keep/"delete" map
#       from sum_count(); $count_r - the raw count table.
# Side effects: taxa present in the count file but absent from the tree
# are DELETED from %$count_r in place.
# NOTE(review): a 4th $regex parameter is declared but the caller passes
# only three arguments, and $regex is never used in this sub.
sub check_names{
# checking names #
my ($treeo, $abund_r, $count_r, $regex) = @_;
my $count_rows = scalar keys %$count_r;
my %nodes = map{$_->id, 1} $treeo->get_leaf_nodes;
my $prune_cnt = 0;
foreach my $taxon (keys %$abund_r){
if (! exists $nodes{$taxon}){
print STDERR " WARNING! in count file; not in tree file: '$taxon'\n";
#delete $$abund_r{$taxon}; # removing from count file because taxon not in tree
delete $$count_r{$taxon}; # removing from count file because taxon not in tree
}
else{
$prune_cnt++ if $$abund_r{$taxon} eq "delete";
}
}
foreach my $node (keys %nodes){
print STDERR " WARNING! in tree file; not in count file: '$node'\n"
if ! exists $$abund_r{$node};
}
print STDERR "\n Number of taxa in count file: $count_rows\n";
print STDERR " Number of leaves: ", scalar keys %nodes, "\n";
print STDERR " Number of taxa to be pruned: $prune_cnt\n";
}
# Sum per-taxon abundances and decide which taxa to prune.
#
# Args:
#   $count_r   - hashref: taxon => arrayref of counts.  Mothur tables
#                carry the taxon total/name in the first column, which is
#                skipped when $mothur is true.
#   $abund_cut - minimum total count a taxon needs to be kept
#   $mothur    - true for mothur-formatted tables
#   $regex     - optional compiled regex; matching taxa are always kept
#                regardless of abundance.  (The caller has always passed
#                this as the 4th argument, but the sub previously ignored
#                it and read the file-level $regex global - it is now
#                taken from @_ so the sub is self-contained.)
#
# Returns a hashref: taxon => taxon (keep) or => "delete" (prune).
sub sum_count{
# summing abundances in count file #
my ($count_r, $abund_cut, $mothur, $regex) = @_;
my %abund;
foreach my $row (keys %$count_r){
# '// 0' guards rows with no count columns, where sum() returns undef
my $rowsum;
if($mothur){ $rowsum = sum(@{$$count_r{$row}}[1..$#{$$count_r{$row}}]) // 0; }
else{ $rowsum = sum( @{$$count_r{$row}} ) // 0; }
if($regex && $row =~ $regex){ # skipping if regex hits taxon
$abund{$row} = $row;
}
elsif($rowsum >= $abund_cut){
$abund{$row} = $row;
}
else{
$abund{$row} = "delete";
}
}
return \%abund;
}
# Load a tab-delimited count table.
#
# Args:
#   $count_in     - path to the count file (first column = taxon names)
#   $count_header - '-x' flag; when true the first line is NOT stripped
#                   as a header.  NOTE(review): the POD wording for -x
#                   suggests the opposite meaning - verify intent.
#   $mothur       - true for mothur-formatted tables (also no header)
#
# Returns (\%count, $header): taxon => arrayref of the remaining columns,
# plus the header line (undef when none was read).
#
# Fix: uses a lexical filehandle and the three-argument form of open; the
# old two-argument 'open IN, $count_in' would let a filename such as
# ">foo" or "cmd|" change the open mode.
sub load_count{
# loading count file #
my ($count_in, $count_header, $mothur) = @_;
open my $in, '<', $count_in or die $!;
my %count;
my $header;
while(<$in>){
chomp;
# first line is taken as the header unless suppressed
if ($.==1 && ! $count_header && ! $mothur){
$header = $_;
next;
}
my @line = split /\t/;
die " ERROR: the count file must be at least 2 columns (rownames, count)\n"
if scalar @line < 2;
$count{$line[0]} = [@line[1..$#line]];
}
close $in;
return \%count, $header; # returning taxa for pruning
}
# Load the first tree from $tree_in via Bio::TreeIO and return the tree
# object.  $format is the already-normalized format string ("newick" or
# "nexus") from check_tree_format().
sub tree_io{
# loading tree object #
my ($tree_in, $format) = @_;
my $input = Bio::TreeIO -> new(-file => $tree_in,
-format => $format);
my $treeio = $input->next_tree;
#for my $node ($treeio->get_nodes){ print "nodeID: ", $node->id, "\n"; }
#exit;
return $treeio;
}
# Validate/normalize the tree-format name given on the command line.
# Accepts the shorthands "new" and "nex" (any case), defaults to
# "newick" when no format was supplied, and dies on anything that does
# not contain "newick" or "nexus".
sub check_tree_format{
    my ($fmt) = @_;
    $fmt ||= "newick";                     # default when -f was omitted
    $fmt = "newick" if $fmt =~ /^new$/i;   # expand shorthand forms
    $fmt = "nexus"  if $fmt =~ /^nex$/i;
    unless ($fmt =~ /newick|nexus/) {
        die " Designated tree format ($fmt) not recognized.\n";
    }
    return $fmt;
}
__END__
=pod
=head1 NAME
tree_PruneByAbundance.pl -- Prune tree by taxon abundances
=head1 SYNOPSIS
tree_PruneByAbundance.pl -t -c [-f] [-x] [-m] [-a] [-r]
=head2 options
=over
=item -t
Tree file (newick or nexus).
=item -f
Tree file format (newick or nexus). [newick]
=item -c
Count file (tab-delimited, 1st row = rownames)
=item -x
Header in 1st line of count file? [FALSE]
=item -m
Mothur-formatted count file? [FALSE]
=item -a
Abundance cutoff for pruning (>=). [5]
=item -r
Regular expression for excluding certain taxa. (example: -r "^methan").
Capitalization invariant.
=item -v
Verbose output
=item -h
This help message
=back
=head2 Requirements:
tree_PruneByAbundance.r
=head2 For more information:
perldoc tree_PruneByAbundance.pl
=head1 DESCRIPTION
Prune taxa from a tree based on the abundances of the taxa found.
=head1 EXAMPLES
=head2 Basic usage
tree_PruneByAbundance.pl -t test.nwk -c count.txt
=head1 AUTHOR
Nick Youngblut <nyoungb2@illinois.edu>
=head1 AVAILABILITY
sharchaea.life.uiuc.edu:/home/git/tree_edit/
=head1 COPYRIGHT
Copyright 2010, 2011
This software is licensed under the terms of the GPLv3
=cut
| nyoungb2/tree_edit | bin/PruneByAbundance.pl | Perl | mit | 6,964 |
#!/usr/bin/perl -w
#Author: Pengcheng Yang
#Email: yangpc@mail.biols.ac.cn
#
# Driver that writes and runs WGCNA R scripts in two steps:
#   step 1 - filter/normalize the expression table and plot soft-threshold
#            (power) selection diagnostics so the user can pick --beta;
#   step 2 - build the co-expression network and module dendrogram using
#            the chosen --beta.
use strict;
use Getopt::Long;
use File::Basename qw(basename dirname);
my $usage= "\n$0
--expr \t<str>\texpression table file with the format, the first line is the header
\t\twithout the first column. every line is one gene
\t\tthis data will be log2 transformed and normalized to variance 1 and mean 0
--exprChek\t<str>\tchecking the sample quality
--expCut \t<int>\tthe maximum value that the genes expression value smaller than this
\t\tvalue will be filtered (default is 5)
--tp \t<str>\tthe max power used for test the power threshold (default 20, minimal 12)
--beta \t<str>\tthe soft thresholding power set according to the Analysis of network
\t\ttopology for various soft-thresholding powers.
--TOMType \t<str>\ta character string specifying TOM type to be calculated. One of 'unsigned',
\t\t\t'signed' (default unsigned)
--merge \t<str>\twhether or not (Y or N) to merge the modules (default Y)
--outDir \t<str>\toutput directory
--step \t<str>\tsteps:
\t\t1. power selection: preprocess expression data and plot the power law graph. then according to the
\t\t\tplot get the power parameter beta and set to the parameter --beta
\t\t2. network construction: using the --beta parameter run network construction and plot clustering dendrogram
\n";
my($expr,$outDir,$step,$beta,$tp,$TOMType,$merge,$expCut,
$exprChek);
GetOptions(
"expr:s"=>\$expr,
"outDir:s"=>\$outDir,
"step:s"=>\$step,
"beta:f"=>\$beta,
"tp:i"=>\$tp,
"TOMType:s"=>\$TOMType,
"merge:s"=>\$merge,
"expCut:i"=>\$expCut,
"exprChek"=>\$exprChek
);
# defaults
$step ||= "1";
$tp ||= 20;
$TOMType ||= "unsigned";
die $usage if !defined $expr;
# NOTE(review): this early check only fires when --step is exactly "2";
# a combined run like --step 12 bypasses it, but the step-2 branch below
# re-checks beta before use.
die "not defined beta !!!\n $usage\n" if ($step eq "2" && ! defined $beta);
my $expr_base=basename($expr);
my $rscpt;
# create the output directory via the shell (no error check on mkdir)
`mkdir -p $outDir` if ! -e $outDir;
# Step 1 (power selection): generate an R script that loads the table,
# drops genes never exceeding --expCut, log2-transforms and row/column
# standardizes the matrix, optionally runs quality checks (--exprChek),
# then runs pickSoftThreshold() and plots scale-free fit vs. power so
# the user can choose --beta.  The script is written to disk, wrapped in
# a shell one-liner, and executed immediately with R CMD BATCH.
if($step =~ /1/){
if(!defined $expCut){
$expCut ||= 5;
}
# translate the boolean flag into the "y"/"n" string tested inside R
if(defined $exprChek){
$exprChek="y";
}else{
$exprChek="n";
}
$rscpt=qq#
\#reading data
library(WGCNA)
options(stringsAsFactors = FALSE)
expr <- read.table("$expr",check.names=FALSE)
expr <- expr[apply(expr,1,function(x) any(x>$expCut)),]
\#output the filtered expression value
write.table(expr,file="$outDir/$expr_base.after_filt",sep="\\t",quote=FALSE)
datExpr0 <- as.data.frame(t(expr))
if(identical("y","$exprChek")){
gsg <- goodSamplesGenes(expr,verbos=3)
if (!gsg\$allOK){
if (sum(!gsg\$goodGenes)>0)
printFlush(paste("Removing genes:", paste(names(datExpr0)[!gsg\$goodGenes], collapse = ", ")));
if (sum(!gsg\$goodSamples)>0)
printFlush(paste("Removing samples:", paste(rownames(datExpr0)[!gsg\$goodSamples], collapse = ", ")));
datExpr0 = datExpr0[gsg\$goodSamples, gsg\$goodGenes]
}
}
datExpr0.dimn <- dimnames(datExpr0)
datExpr0 <- log2(datExpr0+1)
datExpr0 <- t(scale(t(datExpr0),center=TRUE,scale=TRUE))
datExpr0 <- scale(datExpr0,center=TRUE,scale=TRUE)
dimnames(datExpr0) <- datExpr0.dimn
\#checking data quality
if(identical("y","$exprChek")){
sampleTree = flashClust(dist(datExpr0), method = "average");
pdf("$outDir/$expr_base.Sample_clustering_to_detect_outliers.pdf",height=9,width=12)
par(cex = 0.6);
par(mar = c(0,4,2,0))
plot(sampleTree, main = "Sample clustering to detect outliers", sub="", xlab="", cex.lab = 1.5,
cex.axis = 1.5, cex.main = 2)
dev.off()
}
datExpr <- datExpr0
\#construct coexpression network
enableWGCNAThreads()
powers = c(c(1:10), seq(from = 12, to=$tp, by=2))
sft = pickSoftThreshold(datExpr, powerVector = powers, verbose = 5)
\#select beta
pdf("$outDir/$expr_base.Power_selection.pdf",height=5,width=9)
par(mfrow = c(1,2));
cex1 = 0.9;
\# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft\$fitIndices[,1], -sign(sft\$fitIndices[,3])*sft\$fitIndices[,2],
xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",type="n",
main = paste("Scale independence"));
text(sft\$fitIndices[,1], -sign(sft\$fitIndices[,3])*sft\$fitIndices[,2],
labels=powers,cex=cex1,col="red");
\# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
\# Mean connectivity as a function of the soft-thresholding power
plot(sft\$fitIndices[,1], sft\$fitIndices[,5],
xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
main = paste("Mean connectivity"))
text(sft\$fitIndices[,1], sft\$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
save(datExpr,expr,sft,file="$outDir/$expr_base.expr.RData")
q('no')
#;
# write the R script plus a reproducibility shell wrapper, then run it
open O,">","$outDir/$expr_base.expr.R";
print O $rscpt;
close O;
open O,">","$outDir/step1_expr.sh";
print O "R CMD BATCH $outDir/$expr_base.expr.R $outDir/$expr_base.expr.Rout\n";
close O;
`R CMD BATCH $outDir/$expr_base.expr.R $outDir/$expr_base.expr.Rout`;
}
#step2:
#1. construct network based on selected beta
# Step 2 (network construction): generate an R script that reloads the
# step-1 .RData, builds the adjacency/TOM matrices with the chosen
# --beta, cuts the gene dendrogram into modules, optionally merges close
# modules (--merge Y), saves the results, and runs it via R CMD BATCH.
if($step =~ /2/){
die "undefined beta\n\n" if ! defined $beta;
# NOTE(review): the usage text says --merge defaults to Y, but the code
# defaults to "N" here - confirm which is intended.
$merge ||= "N";
$rscpt=qq#
library(WGCNA)
load("$outDir/$expr_base.expr.RData")
\#construct network step-by-step
enableWGCNAThreads()
\#Co-expression similarity and adjacency
adjacency <- adjacency(datExpr, power = $beta,type="$TOMType");
\# Turn adjacency into topological overlap
TOM = TOMsimilarity(adjacency);
dimnames(TOM) <- dimnames(adjacency)
dissTOM = 1-TOM
save(adjacency,dissTOM,file="$outDir/$expr_base.networkConstruction.TOM.RData")
geneTree = flashClust(as.dist(dissTOM), method = "average")
dynamicMods = cutreeDynamic(dendro = geneTree, distM = dissTOM,
deepSplit = 2, pamRespectsDendro = FALSE,
minClusterSize = 20);
table(dynamicMods)
dynamicColors = labels2colors(dynamicMods)
table(dynamicColors)
\# Plot the dendrogram and the module colors underneath
pdf("$outDir/$expr_base.DendroAndColors.pdf")
plotDendroAndColors(geneTree, dynamicColors, "Dynamic Tree Cut",
dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05,
main = "Gene dendrogram and module colors")
dev.off()
if(identical("$merge","Y")){
merged <- mergeCloseModules(datExpr, dynamicColors, cutHeight = 0.2,verbos=3)
pdf("$outDir/$expr_base.DendroAndColors.after_merge.pdf")
plotDendroAndColors(geneTree, cbind(dynamicColors,merged\$colors),
c("Dynamic Tree Cut","Merged Dynamic"),dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05,main = "Gene dendrogram and module colors")
dev.off()
dynamicColors <- merged\$colors
}
save(datExpr,dynamicColors,adjacency,TOM,geneTree,file="$outDir/$expr_base.for_net_trait.RData")
save(datExpr,expr,dynamicMods,dynamicColors,geneTree,
file = "$outDir/$expr_base.networkConstruction.RData")
q('no')
#;
# write the R script plus a reproducibility shell wrapper, then run it
open O,">","$outDir/$expr_base.networkConstruction.R";
print O $rscpt;
close O;
open O,">","$outDir/step2_net.sh";
print O "R CMD BATCH $outDir/$expr_base.networkConstruction.R $outDir/$expr_base.networkConstruction.Rout\n";
close O;
`R CMD BATCH $outDir/$expr_base.networkConstruction.R $outDir/$expr_base.networkConstruction.Rout`;
}
| pengchy/EACO | bin/run_WGCNA.pl | Perl | mit | 6,920 |
#BEGIN_HEADER
#
# Copyright (C) 2020 Mahdi Safsafi.
#
# https://github.com/MahdiSafsafi/opcodesDB
#
# See licence file 'LICENCE' for use and distribution rights.
#
#END_HEADER
use strict;
use warnings;
# BNDCL-Check Lower Bound.
ENCODING BNDCL_ro => {
diagram => 'MAP=0f MOD=MEM MPX=1 MR=1 OP=0x1a P66=0 PF2=0 PF3=1',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDCL',
};
ENCODING BNDCL_romx => {
diagram => 'MAP=0f MOD=REG MPX=1 MR=1 OP=0x1a P66=0 PF2=0 PF3=1',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDCL',
};
# BNDCU/BNDCN-Check Upper Bound.
ENCODING BNDCN_ro => {
diagram => 'MAP=0f MOD=MEM MPX=1 MR=1 OP=0x1b P66=0 PF2=1 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDCx',
};
ENCODING BNDCN_romx => {
diagram => 'MAP=0f MOD=REG MPX=1 MR=1 OP=0x1b P66=0 PF2=1 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDCx',
};
ENCODING BNDCU_ro => {
diagram => 'MAP=0f MOD=MEM MPX=1 MR=1 OP=0x1a P66=0 PF2=1 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDCx',
};
ENCODING BNDCU_romx => {
diagram => 'MAP=0f MOD=REG MPX=1 MR=1 OP=0x1a P66=0 PF2=1 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDCx',
};
# BNDLDX-Load Extended Bounds Using Address Translation.
ENCODING BNDLDX => {
diagram => 'MAP=0f MOD=MEM MPX=1 MR=1 OP=0x1a P66=0 PF2=0 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM',
metadata => 'isa=MPX',
tags => 'page=BNDLDX',
};
# BNDMK-Make Bounds.
ENCODING BNDMK => {
diagram => 'MAP=0f MOD=MEM MPX=1 MR=1 OP=0x1b P66=0 PF2=0 PF3=1',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDMK',
};
# BNDMOV-Move Bounds.
ENCODING BNDMOV_mxro => {
diagram => 'MAP=0f MPX=1 MR=1 OP=0x1b P66=1 PF2=0 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM',
metadata => 'isa=MPX',
tags => 'page=BNDMOV',
};
ENCODING BNDMOV_romx => {
diagram => 'MAP=0f MPX=1 MR=1 OP=0x1a P66=1 PF2=0 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM',
metadata => 'isa=MPX',
tags => 'page=BNDMOV',
};
# BNDSTX-Store Extended Bounds Using Address Translation.
ENCODING BNDSTX => {
diagram => 'MAP=0f MOD=MEM MPX=1 MR=1 OP=0x1b P66=0 PF2=0 PF3=0',
extensions => 'MPX',
categories => 'SYSTEM|MISC',
metadata => 'isa=MPX',
tags => 'page=BNDSTX',
};
# CLAC-Clear AC Flag in EFLAGS Register.
ENCODING CLAC => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=1 RM=2',
iflags => 'ac=Z',
extensions => 'SMAP',
categories => 'SYSTEM',
metadata => 'isa=SMAP cpl=RING0',
tags => 'page=CLAC',
};
# CLDEMOTE-Cache Line Demote.
ENCODING CLDEMOTE => {
diagram => 'CLDEMOTE=1 MAP=0f MOD=MEM MR=1 OP=0x1c P66=0 PF2=0 PF3=0 REG=0',
extensions => 'CLDEMOTE',
categories => 'SYSTEM|MISC',
metadata => 'isa=CLDEMOTE',
tags => 'page=CLDEMOTE',
};
# CLFLUSH-Flush Cache Line.
ENCODING CLFLUSH => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=0 PF2=0 PF3=0 REG=7',
extensions => 'CLFSH',
categories => 'SYSTEM',
metadata => 'isa=CLFSH',
tags => 'page=CLFLUSH',
};
# CLFLUSHOPT-Flush Cache Line Optimized.
ENCODING CLFLUSHOPT => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=1 PF2=0 PF3=0 REG=7',
extensions => 'CLFLUSHOPT',
categories => 'SYSTEM',
metadata => 'isa=CLFLUSHOPT',
tags => 'page=CLFLUSHOPT',
};
# CLRSSBSY-Clear Busy Flag in a Supervisor Shadow Stack Token.
ENCODING CLRSSBSY => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=6',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=CLRSSBSY',
};
# CLWB-Cache Line Write Back.
ENCODING CLWB => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=1 PF2=0 PF3=0 REG=6',
extensions => 'CLWB',
categories => 'SYSTEM',
metadata => 'isa=CLWB',
tags => 'page=CLWB',
};
# ENCLS.
ENCODING ENCLS => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=1 RM=7',
extensions => 'SGX',
categories => 'SYSTEM|MISC',
metadata => 'isa=SGX cpl=RING0',
tags => 'page=ENCLS',
};
# ENCLU.
ENCODING ENCLU => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=2 RM=7',
extensions => 'SGX',
categories => 'SYSTEM|MISC',
metadata => 'isa=SGX',
tags => 'page=ENCLU',
};
# ENCLV.
ENCODING ENCLV => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=0 RM=0',
extensions => 'SGX_ENCLV',
categories => 'SYSTEM|MISC',
metadata => 'isa=SGX_ENCLV',
tags => 'page=ENCLV',
};
# ENDBR32-Terminate an Indirect Branch in 32-bit and Compatibility Mode.
ENCODING ENDBR32 => {
diagram => 'CET=1 MAP=0f MOD=REG MR=1 OP=0x1e PF2=0 PF3=1 REG=7 RM=3',
extensions => 'CET',
categories => 'SYSTEM',
metadata => 'isa=CET',
tags => 'page=ENDBR32',
};
# ENDBR64-Terminate an Indirect Branch in 64-bit Mode.
ENCODING ENDBR64 => {
diagram => 'CET=1 MAP=0f MOD=REG MR=1 OP=0x1e PF2=0 PF3=1 REG=7 RM=2',
extensions => 'CET',
categories => 'SYSTEM',
metadata => 'isa=CET',
tags => 'page=ENDBR64',
};
# ENQCMD.
ENCODING ENQCMD => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0xf8 P66=0 PF2=1 PF3=0',
iflags => 'af=Z cf=Z of=Z pf=Z sf=Z zf=W',
extensions => 'ENQCMD',
categories => 'SYSTEM',
metadata => 'isa=ENQCMD',
tags => 'page=ENQCMD',
};
# ENQCMDS.
ENCODING ENQCMDS => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0xf8 P66=0 PF2=0 PF3=1',
iflags => 'af=Z cf=Z of=Z pf=Z sf=Z zf=W',
extensions => 'ENQCMD',
categories => 'SYSTEM',
metadata => 'isa=ENQCMD',
tags => 'page=ENQCMDS',
};
# GETSEC.
ENCODING GETSEC => {
diagram => 'MAP=0f OP=0x37 P66=0 PF2=0 PF3=0',
extensions => 'SMX',
categories => 'SYSTEM',
metadata => 'isa=SMX',
docvars => 'PROTECTED_MODE=1',
tags => 'page=GETSEC',
};
# INCSSPD/INCSSPQ-Increment Shadow Stack Pointer.
ENCODING INCSSPx => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=5',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=INCSSPx',
};
# INVEPT-Invalidate Translations Derived from EPT.
ENCODING INVEPT => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0x80 P66=1 PF2=0 PF3=0',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=INVEPT',
};
# INVPCID-Invalidate Process-Context Identifier.
ENCODING INVPCID => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0x82 P66=1 PF2=0 PF3=0',
extensions => 'INVPCID',
categories => 'SYSTEM|OS',
metadata => 'isa=INVPCID cpl=RING0',
tags => 'page=INVPCID',
};
# INVVPID-Invalidate Translations Based on VPID.
ENCODING INVVPID => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0x81 P66=1 PF2=0 PF3=0',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=INVVPID',
};
# MONITOR-Set Up Monitor Address.
ENCODING MONITOR => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=1 RM=0',
extensions => 'MONITOR',
categories => 'SYSTEM',
metadata => 'isa=MONITOR cpl=RING0',
tags => 'page=MONITOR',
};
# MWAIT-Monitor Wait.
ENCODING MWAIT => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=1 RM=1',
extensions => 'MONITOR',
categories => 'SYSTEM',
metadata => 'isa=MONITOR cpl=RING0',
tags => 'page=MWAIT',
};
# PTWRITE-Write Data to a Processor Trace Packet.
ENCODING PTWRITE => {
diagram => 'MAP=0f MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=4',
extensions => 'PT',
categories => 'SYSTEM|MISC',
metadata => 'isa=PT',
tags => 'page=PTWRITE',
};
# RDPID-Read Processor ID.
ENCODING RDPID => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xc7 P66=0 PF2=0 PF3=1 REG=7',
extensions => 'RDPID',
categories => 'SYSTEM',
metadata => 'isa=RDPID',
tags => 'page=RDPID',
};
# RDPKRU-Read Protection Key Rights for User Pages.
ENCODING RDPKRU => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=5 RM=6',
extensions => 'PKU',
categories => 'SYSTEM',
metadata => 'isa=PKU',
tags => 'page=RDPKRU',
};
# RDRAND-Read Random Number.
ENCODING RDRAND => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xc7 REG=6',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=Z',
extensions => 'RDRAND',
categories => 'SYSTEM|RANDOM',
metadata => 'isa=RDRAND',
tags => 'page=RDRAND',
};
# RDSEED-Read Random SEED.
ENCODING RDSEED => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xc7 PF2=0 PF3=0 REG=7',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=Z',
extensions => 'RDSEED',
categories => 'SYSTEM|RANDOM',
metadata => 'isa=RDSEED',
tags => 'page=RDSEED',
};
# RDSSPD/RDSSPQ-Read Shadow Stack Pointer.
ENCODING RDSSPx => {
diagram => 'CET=1 MAP=0f MOD=REG MR=1 OP=0x1e PF2=0 PF3=1 REG=1',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=RDSSPx',
};
# RDTSCP-Read Time-Stamp Counter and Processor ID.
ENCODING RDTSCP => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 REG=7 RM=1',
extensions => 'RDTSCP',
categories => 'SYSTEM',
metadata => 'isa=RDTSCP',
tags => 'page=RDTSCP',
};
# RDFSBASE/RDGSBASE-Read FS/GS Segment Base.
ENCODING RDFSBASE => {
diagram => 'MAP=0f MOD=REG MODE=64 MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=0',
extensions => 'RDWRFSGS',
categories => 'SYSTEM',
metadata => 'isa=RDWRFSGS',
tags => 'page=RDxSBASE',
};
ENCODING RDGSBASE => {
diagram => 'MAP=0f MOD=REG MODE=64 MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=1',
extensions => 'RDWRFSGS',
categories => 'SYSTEM',
metadata => 'isa=RDWRFSGS',
tags => 'page=RDxSBASE',
};
# RSTORSSP-Restore Saved Shadow Stack Pointer.
ENCODING RSTORSSP => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0x01 P66=0 PF2=0 PF3=1 REG=5',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=RSTORSSP',
};
# SAVEPREVSSP-Save Previous Shadow Stack Pointer.
ENCODING SAVEPREVSSP => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=1 REG=5 RM=2',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=SAVEPREVSSP',
};
# SETSSBSY-Mark Shadow Stack Busy.
ENCODING SETSSBSY => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=1 REG=5 RM=0',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=SETSSBSY',
};
# STAC-Set AC Flag in EFLAGS Register.
ENCODING STAC => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=1 RM=3',
iflags => 'ac=S',
extensions => 'SMAP',
categories => 'SYSTEM',
metadata => 'isa=SMAP cpl=RING0',
tags => 'page=STAC',
};
# TPAUSE-Timed PAUSE.
ENCODING TPAUSE => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xae P66=1 PF2=0 PF3=0 REG=6',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=Z',
extensions => 'WAITPKG',
categories => 'SYSTEM|MISC',
metadata => 'isa=WAITPKG',
tags => 'page=TPAUSE',
};
# UMONITOR-User Level Set Up Monitor Address.
ENCODING UMONITOR => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=6',
extensions => 'WAITPKG',
categories => 'SYSTEM|MISC',
metadata => 'isa=WAITPKG',
tags => 'page=UMONITOR',
};
# UMWAIT-User Level Monitor Wait.
ENCODING UMWAIT => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0xae P66=0 PF2=1 PF3=0 REG=6',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=Z',
extensions => 'WAITPKG',
categories => 'SYSTEM|MISC',
metadata => 'isa=WAITPKG',
tags => 'page=UMWAIT',
};
# VMCALL-Call to VM Monitor.
ENCODING VMCALL => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=0 RM=1',
iflags => 'ac=CW af=CW cf=CW df=CW id=CW _if=CW iopl=RCW nt=CW of=CW pf=CW rf=CW sf=CW tf=CW vif=CW vip=CW vm=R zf=CW',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX',
tags => 'page=VMCALL',
};
# VMCLEAR-Clear Virtual-Machine Control Structure.
ENCODING VMCLEAR => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=1 PF2=0 PF3=0 REG=6',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMCLEAR',
};
# VMFUNC-Invoke VM function.
ENCODING VMFUNC => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=2 RM=4',
extensions => 'VMFUNC',
categories => 'SYSTEM',
metadata => 'isa=VMFUNC',
tags => 'page=VMFUNC',
};
# VMLAUNCH/VMRESUME-Launch/Resume Virtual Machine.
ENCODING VMLAUNCH => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=0 RM=2',
iflags => 'ac=CW af=CW cf=CW df=CW id=CW _if=CW iopl=RCW nt=CW of=CW pf=CW rf=CW sf=CW tf=CW vif=CW vip=CW vm=R zf=CW',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMLAUNCH_RESUME',
};
ENCODING VMRESUME => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=0 RM=3',
iflags => 'ac=CW af=CW cf=CW df=CW id=CW _if=CW iopl=RCW nt=CW of=CW pf=CW rf=CW sf=CW tf=CW vif=CW vip=CW vm=R zf=CW',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMLAUNCH_RESUME',
};
# VMPTRLD-Load Pointer to Virtual-Machine Control Structure.
ENCODING VMPTRLD => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=0 REG=6',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMPTRLD',
};
# VMPTRST-Store Pointer to Virtual-Machine Control Structure.
ENCODING VMPTRST => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=0 REG=7',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMPTRST',
};
# VMREAD-Read Field from Virtual-Machine Control Structure.
ENCODING VMREAD => {
diagram => 'MAP=0f MR=1 OP=0x78 P66=0 PF2=0 PF3=0',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMREAD',
};
# VMWRITE-Write Field to Virtual-Machine Control Structure.
ENCODING VMWRITE => {
diagram => 'MAP=0f MR=1 OP=0x79 P66=0 PF2=0 PF3=0',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMWRITE',
};
# VMXOFF-Leave VMX Operation.
ENCODING VMXOFF => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=0 RM=4',
iflags => 'af=W cf=W of=W pf=W sf=W zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
tags => 'page=VMXOFF',
};
# VMXON-Enter VMX Operation.
ENCODING VMXON => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=1 REG=6',
iflags => 'af=Z cf=W of=Z pf=Z sf=Z zf=W',
extensions => 'VTX',
categories => 'SYSTEM',
metadata => 'isa=VTX cpl=RING0',
docvars => 'PROTECTED_MODE=1',
tags => 'page=VMXON',
};
# WRPKRU-Write Data to User Page Key Register.
ENCODING WRPKRU => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=5 RM=7',
extensions => 'PKU',
categories => 'SYSTEM',
metadata => 'isa=PKU',
tags => 'page=WRPKRU',
};
# WRSSD/WRSSQ-Write to Shadow Stack.
ENCODING WRSSx => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0xf6 P66=0 PF2=0 PF3=0',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=WRSSx',
};
# WRUSSD/WRUSSQ-Write to User Shadow Stack.
ENCODING WRUSSx => {
diagram => 'MAP=0f38 MOD=MEM MR=1 OP=0xf5 P66=1 PF2=0 PF3=0',
extensions => 'CET',
categories => 'SYSTEM|MISC',
metadata => 'isa=CET',
tags => 'page=WRUSSx',
};
# WRFSBASE/WRGSBASE-Write FS/GS Segment Base.
ENCODING WRFSBASE => {
diagram => 'MAP=0f MOD=REG MODE=64 MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=2',
extensions => 'RDWRFSGS',
categories => 'SYSTEM',
metadata => 'isa=RDWRFSGS',
tags => 'page=WRxSBASE',
};
ENCODING WRGSBASE => {
diagram => 'MAP=0f MOD=REG MODE=64 MR=1 OP=0xae P66=0 PF2=0 PF3=1 REG=3',
extensions => 'RDWRFSGS',
categories => 'SYSTEM',
metadata => 'isa=RDWRFSGS',
tags => 'page=WRxSBASE',
};
# XABORT-Transactional Abort.
ENCODING XABORT => {
diagram => 'MOD=REG MR=1 OP=0xc6 REG=7 RM=0',
extensions => 'RTM',
categories => 'SYSTEM|BRANCH|UNCONDITIONALLY',
metadata => 'isa=RTM',
tags => 'page=XABORT',
};
# XBEGIN-Transactional Begin.
ENCODING XBEGIN => {
diagram => 'MOD=REG MR=1 OP=0xc7 REG=7 RM=0',
extensions => 'RTM',
categories => 'SYSTEM|BRANCH|CONDITIONALLY',
metadata => 'isa=RTM',
tags => 'page=XBEGIN',
};
# XEND-Transactional End.
ENCODING XEND => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=2 RM=5',
extensions => 'RTM',
categories => 'SYSTEM|BRANCH|CONDITIONALLY',
metadata => 'isa=RTM',
tags => 'page=XEND',
};
# XGETBV-Get Value of Extended Control Register.
ENCODING XGETBV => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=2 RM=0',
extensions => 'XSAVE',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVE',
tags => 'page=XGETBV',
};
# XRSTOR-Restore Processor Extended States.
ENCODING XRSTOR => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=0 PF2=0 PF3=0 REG=5',
extensions => 'XSAVE',
exceptions => 'ALIGNMENT',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVE requires_alignment=1',
docvars => 'XMM_STATE_CW=1',
tags => 'page=XRSTOR',
};
# XRSTORS-Restore Processor Extended States Supervisor.
ENCODING XRSTORS => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=0 REG=3',
extensions => 'XSAVES',
exceptions => 'ALIGNMENT',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVES cpl=RING0 requires_alignment=1',
docvars => 'XMM_STATE_W=1',
tags => 'page=XRSTORS',
};
# XSAVE-Save Processor Extended States.
ENCODING XSAVE => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=0 PF2=0 PF3=0 REG=4',
extensions => 'XSAVE',
exceptions => 'ALIGNMENT',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVE requires_alignment=1',
docvars => 'XMM_STATE_R=1',
tags => 'page=XSAVE',
};
# XSAVEC-Save Processor Extended States with Compaction.
ENCODING XSAVEC => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=0 REG=4',
extensions => 'XSAVEC',
exceptions => 'ALIGNMENT',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVEC requires_alignment=1',
docvars => 'XMM_STATE_R=1',
tags => 'page=XSAVEC',
};
# XSAVEOPT-Save Processor Extended States Optimized.
ENCODING XSAVEOPT => {
diagram => 'MAP=0f MOD=MEM MR=1 OP=0xae P66=0 PF2=0 PF3=0 REG=6',
extensions => 'XSAVEOPT',
exceptions => 'ALIGNMENT',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVEOPT requires_alignment=1',
docvars => 'XMM_STATE_R=1',
tags => 'page=XSAVEOPT',
};
# XSAVES-Save Processor Extended States Supervisor.
# NP 0F C7 /5 (mem); CPL-0 only; destination must be 64-byte aligned.
ENCODING XSAVES => {
  diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=0 REG=5 W=0',
  extensions => 'XSAVES',
  exceptions => 'ALIGNMENT',
  categories => 'SYSTEM|OS',
  metadata => 'isa=XSAVES cpl=RING0 requires_alignment=1',
  docvars => 'XMM_STATE_R=1',
  tags => 'page=XSAVES',
};
# XSAVES64: the REX.W=1 form of XSAVES (64-bit XSAVE header layout).
ENCODING XSAVES64 => {
  diagram => 'MAP=0f MOD=MEM MR=1 OP=0xc7 P66=0 PF2=0 PF3=0 REG=5 W=1',
  extensions => 'XSAVES',
  exceptions => 'ALIGNMENT',
  # FIX: was 'SYSTEM'; aligned with the W=0 form above ('SYSTEM|OS') —
  # same instruction, only the operand size differs.
  categories => 'SYSTEM|OS',
  metadata => 'isa=XSAVES cpl=RING0 requires_alignment=1',
  docvars => 'XMM_STATE_R=1',
  tags => 'page=XSAVES',
};
# XSETBV-Set Extended Control Register.
ENCODING XSETBV => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=2 RM=1',
extensions => 'XSAVE',
categories => 'SYSTEM|OS',
metadata => 'isa=XSAVE cpl=RING0',
tags => 'page=XSETBV',
};
# XTEST-Test If In Transactional Execution.
ENCODING XTEST => {
diagram => 'MAP=0f MOD=REG MR=1 OP=0x01 P66=0 PF2=0 PF3=0 REG=2 RM=6',
iflags => 'af=Z cf=Z of=Z pf=Z sf=Z zf=W',
extensions => 'RTM',
categories => 'SYSTEM|BITWISE|LOGICAL',
metadata => 'isa=RTM',
tags => 'page=XTEST',
};
| MahdiSafsafi/opcodesDB | db/x86/system/encodings.pl | Perl | mit | 20,964 |
# NOTE(review): this line originally began with a stray C-style watermark
# "/*Owner & Copyrights: Vance King Saxbe. A.*/", which is not valid Perl
# and made the file fail to compile; it is preserved here as a comment.
# Owner & Copyrights: Vance King Saxbe. A.
# This file was created by configpm when Perl was built. Any changes
# made to this file will be lost the next time perl is built.
# for a description of the variables, please have a look at the
# Glossary file, as written in the Porting folder, or use the url:
# http://perl5.git.perl.org/perl.git/blob/HEAD:/Porting/Glossary
package Config;
use strict;
use warnings;
use vars '%Config';
# Skip @Config::EXPORT because it only contains %Config, which we special
# case below as it's not a function. @Config::EXPORT won't change in the
# lifetime of Perl 5.
# %Export_Cache doubles as the validation table used by import() below.
my %Export_Cache = (myconfig => 1, config_sh => 1, config_vars => 1,
                    config_re => 1, compile_date => 1, local_patches => 1,
                    bincompat_options => 1, non_bincompat_options => 1,
                    header_files => 1);
@Config::EXPORT = qw(%Config);
@Config::EXPORT_OK = keys %Export_Cache;
# Need to stub all the functions to make code such as print Config::config_sh
# keep working
# These are forward declarations only: the real bodies live in
# Config_heavy.pl and are loaded on demand via AUTOLOAD below.
sub bincompat_options;
sub compile_date;
sub config_re;
sub config_sh;
sub config_vars;
sub header_files;
sub local_patches;
sub myconfig;
sub non_bincompat_options;
# Hand-rolled import() so we do not have to load the full Exporter module.
# Exports %Config by default; any requested function names are validated
# against %Export_Cache and aliased into the caller's symbol table.
sub import {
    shift;                              # discard our own package name
    @_ = @Config::EXPORT unless @_;     # no args: fall back to the default list
    my @requested = grep { $_ ne '%Config' } @_;
    # %Config was asked for iff something got filtered out above.
    my $want_config_hash = (@requested == @_) ? 0 : 1;
    my $caller_pkg = caller(0);
    no strict 'refs';                   # needed for the typeglob aliasing below
    for my $name (@requested) {
        $Export_Cache{$name}
            or die qq{"$name" is not exported by the Config module\n};
        *{ $caller_pkg . '::' . $name } = \&{$name};
    }
    *{ $caller_pkg . '::Config' } = \%Config if $want_config_hash;
    return;
}
# Sanity guard: refuse to run against an interpreter whose version does
# not match the 5.14.2 library this file was generated for.  $^V is the
# interpreter version as a version object (false/absent on ancient perls).
die "Perl lib version (5.14.2) doesn't match executable '$0' version ($])"
unless $^V;
$^V eq 5.14.2
or die "Perl lib version (5.14.2) doesn't match executable '$0' version (" .
sprintf("v%vd",$^V) . ")";
# FETCH($self, $key) — tied-hash read accessor for %Config.
# A value already present in the underlying hash is returned as-is; a
# cached value may legitimately be undef, hence the exists() probe rather
# than defined().  Anything not yet cached is computed by fetch_string()
# (loaded on demand from Config_heavy.pl via AUTOLOAD).
sub FETCH {
    my ($self, $key) = @_;
    return $self->{$key} if exists $self->{$key};
    return $self->fetch_string($key);
}
# TIEHASH($class, $seed) — constructor invoked by `tie %Config, 'Config', {...}'.
# Blesses the supplied hash of pre-seeded values into $class and returns it
# (a true value, which is what require ultimately sees).
sub TIEHASH {
    my ($class, $seed) = @_;
    return bless $seed, $class;
}
# Tied-hash destructor: nothing to clean up, but defining it explicitly
# stops DESTROY from being routed through AUTOLOAD below.
sub DESTROY { }
# First call to any stubbed function lands here.  Config_heavy.pl defines
# &launcher, which dispatches based on $Config::AUTOLOAD; the guard avoids
# an infinite goto loop if launcher itself is what triggered us.
sub AUTOLOAD {
    require 'Config_heavy.pl';
    goto \&launcher unless $Config::AUTOLOAD =~ /launcher$/;
    die "&Config::AUTOLOAD failed on $Config::AUTOLOAD";
}
# tie returns the object, so the value returned to require will be true.
# Seed %Config with the build-time values below; any key not present in
# this hash is resolved lazily through FETCH -> fetch_string (defined in
# Config_heavy.pl).  The values describe a 32-bit, ithreads-enabled
# MSWin32 miniperl whose library tree lives under \.sys\miniperl.
tie %Config, 'Config', {
    archlibexp => '\\.sys\\miniperl\\lib',
    archname => 'MSWin32-x86-multi-thread',
    cc => 'gcc',
    d_readlink => undef,
    d_symlink => undef,
    dlext => 'dll',
    dlsrc => 'dl_win32.xs',
    dont_use_nlink => undef,
    exe_ext => '.exe',
    inc_version_list => '',
    intsize => '4',
    ldlibpthname => '',
    libpth => '',
    osname => 'MSWin32',
    osvers => '4.0',
    path_sep => ';',
    privlibexp => '\\.sys\\miniperl\\lib',
    scriptdir => '\\.sys\\miniperl\\bin',
    sitearchexp => '\\.sys\\miniperl\\site\\lib',
    sitelibexp => '\\.sys\\miniperl\\site\\lib',
    so => 'dll',
    useithreads => 'define',
    usevendorprefix => 'define',
    version => '5.14.2',
};
/*email to provide support at vancekingsaxbe@powerdominionenterprise.com, businessaffairs@powerdominionenterprise.com, For donations please write to fundraising@powerdominionenterprise.com*/////// | VanceKingSaxbeA/ASX-Engine | App/vdrive/.sys/miniperl/lib/Config.pm | Perl | mit | 3,297 |
:- encoding(utf8).
:- module(
media_type,
[
media_type//1, % ?MediaType
media_type/1, % ?MediaType
media_type_encoding/2, % ?MediaType, ?Encoding
media_type_extension/2, % ?MediaType, ?Extension
media_type_family/2, % ?MediaType, ?Family
media_type_label/2, % ?MediaType, ?Label
media_type_parameter/2, % +MediaType, ?Parameter
media_type_program/3, % ?MediaType, -Program, -Args
must_be_media_type/2 % +MediaTypes, +MediaType
]
).
/** <module> Media Types library
Library of Media Types, based on the official Media Types as
maintained by IANA, and de facto Media Types as they are used in the
wild.
Media Types are a common way of denoting binary and textual content.
As such, this library is useful to base sensible defaults for
processing input files on, and can be used to simplify HTTP requests
and aspects of serving content over HTTP.
Also tries to maintain a one-to-one mapping between Media Types and
the most commonly used file name extension for files containing
content in that Media Type.
*/
:- use_module(library(apply)).
:- use_module(library(error)).
:- use_module(library(lists)).
:- use_module(library(pair_ext), []).
:- use_module(library(settings)).
:- use_module(library(call_ext)).
:- use_module(library(dcg)).
:- use_module(library(stream_ext)).
:- discontiguous
encoding_/2,
extension_/2,
family_/2,
label_/2,
media_type_/2,
program_/2.
:- multifile
error:has_type/2.
% Register the `media_type' type with library(error): a media type is a
% compound media(Super/Sub,Params) whose Super and Sub are atoms and
% whose Params is a (possibly empty) list of Key-Value atom pairs.
error:has_type(media_type, MediaType) :-
  MediaType = media(Super/Sub,Params),
  error:has_type(atom, Super),
  error:has_type(atom, Sub),
  error:has_type(list(pair(atom)), Params).
:- setting(default_text_editor, atom, emacs,
"The default program that is used for opening text files.").
%! media_type(+MediaType:media_type)// is det.
%! media_type(-MediaType:media_type)// is det.
%
% Grammar for Media Types of the shape `super/sub;key1=val1;key2=val2'.
% A ground argument selects the deterministic generator branch (the cut
% commits to it); an unbound argument selects the parser.
media_type(MediaType) -->
  {ground(MediaType)}, !,
  media_type_generate(MediaType).
media_type(MediaType) -->
  media_type_parse(MediaType).
% Generator direction: emit `Super/Sub' followed by one `;Key=Value'
% per parameter pair, in list order, with no whitespace.
media_type_generate(media(Super/Sub,Params)) -->
  atom(Super),
  "/",
  atom(Sub),
  params_generate(Params).
params_generate([]) --> !, "".
params_generate([H|T]) -->
  ";",
  param_generate(H),
  params_generate(T).
param_generate(Key-Value) -->
  atom(Key),
  "=",
  atom(Value).
% Parser direction: split on the first `/'; an optional `;' introduces
% the parameter list (leading whitespace after each `;' is skipped via
% whites//0).  The codes are only converted to atoms once the whole
% input has been consumed.
media_type_parse(media(Super/Sub,Params)) -->
  ...(SuperCodes),
  "/",
  ...(SubCodes),
  (";" -> whites, params_parse(Params) ; eos, {Params = []}), !,
  {maplist(atom_codes, [Super,Sub], [SuperCodes,SubCodes])}.
params_parse([H|T]) -->
  param_parse(H), !,
  params_parse(T).
params_parse([]) --> "".
param_parse(Key-Value) -->
  ...(KeyCodes),
  "=",
  ...(ValueCodes),
  (";" -> whites ; eos),
  {maplist(atom_codes, [Key,Value], [KeyCodes,ValueCodes])}.
%! media_type(+MediaType:media_type) is semidet.
%! media_type(-MediaType:media_type) is multi.
%
% Succeeds iff MediaType is one of the Media Types registered in the
% media_type_/2 fact table below; enumerates all of them on backtracking.
media_type(MediaType) :-
  media_type_(_, MediaType).
%! media_type_encoding(+MediaType:media_type, +Encoding:atom) is semidet.
%! media_type_encoding(+MediaType:media_type, -Encoding:atom) is semidet.
% A parameter `charset'.
% An explicit `charset' parameter on the Media Type wins; otherwise fall
% back to the per-type default table media_type_encoding_/2.  Both
% clauses cut, so at most one encoding is ever reported.
media_type_encoding(MediaType, Encoding) :-
  media_type_parameter(MediaType, charset-Encoding0), !,
  % NOTE(review): stream_ext:clean_encoding_/2 is a project-internal
  % helper; presumably it normalises encoding names (case, aliases) —
  % confirm against library(stream_ext).
  stream_ext:clean_encoding_(Encoding0, Encoding).
media_type_encoding(MediaType, Encoding) :-
  media_type_encoding_(MediaType, Encoding), !.
% Default encodings for Media Types whose standard mandates one.
media_type_encoding_(media(text/turtle,_), utf8).
%! media_type_extension(+MediaType:media_type, +Extension:atom) is semidet.
%! media_type_extension(+MediaType:media_type, -Extension:atom) is det.
%! media_type_extension(-MediaType:media_type, +Extension:atom) is det.
%! media_type_extension(-MediaType:media_type, -Extension:atom) is multi.
%
% Maps Media Types to their most common file name extension via the
% shared registration Key.  call_det_when/2 enforces determinism as
% soon as either argument is ground (per the modes above).
media_type_extension(MediaType, Ext) :-
  call_det_when(
    (ground(MediaType) ; ground(Ext)),
    (extension_(Key, Ext),
     media_type_(Key, MediaType))).
%! media_type_family(+MediaType:media_type, +Family:term) is semidet.
%! media_type_family(+MediaType:media_type, -Family:term) is semidet.
%! media_type_family(-MediaType:media_type, +Family:term) is nondet.
%! media_type_family(-MediaType:media_type, -Family:term) is multi.
%
% Relates Media Types to declared families (e.g. rdf, sparql(select)).
% Ground Family: enumerate the Media Types in that family (cut commits
% to this clause).  Otherwise: enumerate families per Media Type.
media_type_family(MediaType, Family) :-
  ground(Family), !,
  family_(Key, Family),
  media_type_(Key, MediaType).
media_type_family(MediaType, Family) :-
  media_type_(Key, MediaType),
  family_(Key, Family).
%! media_type_label(+MediaType:media_type, -Label:string) is det.
%! media_type_label(-MediaType:media_type, -Label:string) is multi.
%
% Human-readable label for a Media Type, joined through the shared
% registration Key; deterministic when MediaType is ground.
media_type_label(MediaType, Label) :-
  call_det_when_ground(
    MediaType,
    (media_type_(Key, MediaType),
     label_(Key, Label))).
%! media_type_parameter(+MediaType:media_type, +Parameter:pair(atom)) is semidet.
%! media_type_parameter(+MediaType:media_type, -Parameter:pair(atom)) is nondet.
%
% Enumerates (or checks) the Key-Value parameter pairs carried by a
% Media Type compound.
media_type_parameter(media(_/_,Params), Param) :-
  member(Param, Params).
%! media_type_program(+MediaType:media_type, -Program:atom, -Args:list) is nondet.
%! media_type_program(-MediaType:media_type, -Program:atom, -Args:list) is multi.
%
% Yields an external program (plus command-line arguments) that can open
% content of the given Media Type.  program_/2 facts come in three
% shapes, normalised by media_type_program_/3 below.
media_type_program(MediaType, Program, Args) :-
  media_type_(Key, MediaType),
  program_(Key, Program0),
  media_type_program_(Program0, Program, Args).
% program(Name,Args): explicit program with arguments.
media_type_program_(program(Program,Args), Program, Args) :- !.
% text_editor: resolved through the default_text_editor setting.
media_type_program_(text_editor, Program, []) :- !,
  setting(default_text_editor, Program).
% Bare atom: program name with no arguments.
media_type_program_(Program, Program, []).
%! must_be_media_type(+MediaTypes:list(media_type), +MediaType:media_type) is det.
%
% Succeeds silently when MediaType's Super/Sub appears in MediaTypes
% (parameters are ignored for the membership test); otherwise throws the
% standard must_be/2 type error naming the allowed alternatives.
must_be_media_type(MediaTypes, media(Super/Sub,_)) :-
  memberchk(media(Super/Sub,_), MediaTypes), !.
must_be_media_type(MediaTypes, MediaType) :-
  must_be(oneof(MediaTypes), MediaType).
% MEDIA TYPE REGISTRATIONS %
%! media_type_(?Extension:atom, ?MediaType:media_type, ?Programs:list(or([atom,compound])), ?Label:string) is nondet.
extension_('7z_1', '7z').
label_('7z_1', "7z").
media_type_('7z_1', media(application/'x-7z-compressed',[])).
extension_(atom_1, atom).
label_(atom_1, "Atom XML").
% FIX: the registered IANA Media Type for Atom feeds is
% application/atom+xml (RFC 4287); the previous subtype `atom+x' was a typo.
media_type_(atom_1, media(application/'atom+xml',[])).
program_(atom_1, text_editor).
extension_(bmp_1, bmp).
label_(bmp_1, "Windows Bitmap (BMP)").
media_type_(bmp_1, media(image/bmp,[])).
program_(bmp_1, eog).
extension_(bz2_1, bz2).
label_(bz2_1, "bzip2").
media_type_(bz2_1, media(application/'x-bzip2',[])).
program_(bz2_1, program(bzip2,['-d'])).
extension_(cab_1, cab).
label_(cab_1, "Microsoft Cabinet").
media_type_(cab_1, media(application/'vnd.ms-cab-compressed',[])).
extension_(cdx_1, cdx).
label_(cdx_1, "CambridgeSoft ChemDraw").
media_type_(cdx_1, media(chemical/'x-cdx',[])).
extension_(cpio_1, cpio).
label_(cpio_1, "cpio").
media_type_(cpio_1, media(application/'x-cpio',[])).
extension_(csv_1, csv).
family_(csv_1, sparql(ask)).
family_(csv_1, sparql(select)).
label_(csv_1, "Comma-separated values (CSV)").
media_type_(csv_1, media(text/csv,[])).
program_(csv_1, text_editor).
extension_(doc_1, doc).
label_(doc_1, "Microsoft Word Document").
media_type_(doc_1, media(application/msword,[])).
program_(doc_1, [program(libreoffice,'--writer')]).
extension_(docm_1, docm).
label_(docm_1, "Microsoft Word Document").
media_type_(docm_1, media(application/'vnd.ms-word.document.macroenabled.12',[])).
program_(docm_1, program(libreoffice,'--writer')).
extension_(docx_1, docx).
label_(docx_1, "OpenOffice Wordprocessing Document").
media_type_(docx_1, media(application/'vnd.openxmlformats-officedocument.wordprocessingml.document',[])).
program_(docx_1, program(libreoffice,'--writer')).
extension_(dot_1, dot).
label_(dot_1, "GraphViz DOT").
media_type_(dot_1, media(text/'vnd.graphviz',[])).
program_(dot_1, text_editor).
extension_(dotm_1, dotm).
label_(dotm_1, "Microsoft Word Template").
media_type_(dotm_1, media(application/'vnd.ms-word.template.macroenabled.12',[])).
program_(dotm_1, program(libreoffice,'--writer')).
extension_(dotx_1, dotx).
label_(dotx_1, "OpenOffice Wordprocessing Template").
media_type_(dotx_1, media(application/'vnd.openxmlformats-officedocument.wordprocessingml.template',[])).
program_(dotx_1, program(libreoffice,'--writer')).
extension_(dwg_1, dwg).
label_(dwg_1, "Drawing (DWG) proprietary format used by CAD software").
media_type_(dwg_1, media(application/dwg,[])).
extension_(dxf_1, dxf).
label_(dxf_1, "AutoCAD Drawing Exchange Format (DXF)").
media_type_(dxf_1, media(image/'vnd.dxf',[])).
extension_(eps_1, eps).
label_(eps_1, "Encapsulated PostScript (EPS)").
media_type_(eps_1, media(image/eps,[])).
program_(eps_1, xfig).
extension_(epub_1, epub).
label_(epub_1, "ePub").
media_type_(epub_1, media(application/'epub+zip',[])).
extension_(exr_1, exr).
label_(exr_1, "OpenEXR is a high dynamic-range (HDR) image file format developed by Industrial Light & Magic for use in computer imaging applications").
extension_(fig_1, fig).
label_(fig_1, "FIG graphics language (Xfig)").
program_(fig_1, xfig).
extension_(fits_1, fits).
label_(fits_1, "Flexible Image Transport System (FITS)").
media_type_(fits_1, media(application/fits,[])).
extension_(flv_1, flv).
label_(flv_1, "Flash video (FLV)").
media_type_(flv_1, media(video/'x-flv',[])).
extension_(geojson_1, geojson).
label_(geojson_1, "GeoJSON").
media_type_(geojson_1, media(application/'vnd.geo+json',[])).
program_(geojson_1, text_editor).
extension_(gif_1, gif).
label_(gif_1, "Graphics Interchange Format (GIF)").
media_type_(gif_1, media(image/gif,[])).
program_(gif_1, eog).
program_(gif_1, xfig).
extension_(gml_1, gml).
label_(gml_1, "Geography Markup Language (GML)").
media_type_(gml_1, media(application/'gml+xml',[])).
program_(gml_1, text_editor).
extension_(gml_2, gml).
label_(gml_2, "Graph Markup Language (GML)").
media_type_(gml_2, media(text/'x-gml',[])).
program_(gml_2, gephi).
extension_(gpx_1, gpx).
label_(gpx_1, "GPS Exchange Format (GPX)").
media_type_(gpx_1, media(application/'gpx+xml',[])).
extension_(gz_1, gz).
label_(gz_1, "GNU Zip").
media_type_(gz_1, media(application/gzip,[])).
extension_(hdt_1, hdt).
label_(hdt_1, "Header Dictionary Triples (HDT)").
media_type_(hdt_1, media(application/'vnd.hdt',[])).
extension_(html_1, html).
family_(html_1, rdf).
family_(html_1, rdfa).
label_(html_1, "Hyper Text Markup Language (HTML)").
media_type_(html_1, media(text/html,[])).
program_(html_1, firefox).
extension_(ico_1, ico).
label_(ico_1, "Windows Icon (Microsoft uses Media Type `image/x-icon')").
media_type_(ico_1, media(image/'vnd.microsoft.icon',[])).
program_(ico_1, eog).
extension_(jgf_1, jgf).
label_(jgf_1, "JSON Graph Format (JGF)").
media_type_(jgf_1, media(application/'vnd.jgf+json',[])).
program_(jgf_1, text_editor).
extension_(jp2_1, jp2).
label_(jp2_1, "JPEG 2000").
media_type_(jp2_1, media(image/jp2,[])).
encoding_(jpg_1, octet).
extension_(jpg_1, jpg).
label_(jpg_1, "Joint Photographic Experts Group (JPEG)").
media_type_(jpg_1, media(image/jpeg,[])).
program_(jpg_1, eog).
program_(jpg_1, xfig).
extension_(js_1, js).
label_(js_1, "JavaScript (JS)").
media_type_(js_1, media(application/javascript,[])).
program_(js_1, text_editor).
encoding_(json_1, utf8).
extension_(json_1, json).
label_(json_1, "JavaScript Object Notation (JSON)").
media_type_(json_1, media(application/json,[])).
program_(json_1, text_editor).
extension_(jsonld_1, jsonld).
family_(jsonld_1, rdf).
label_(jsonld_1, "JSON-LD 1.0").
media_type_(jsonld_1, media(application/'ld+json',[])).
program_(jsonld_1, text_editor).
extension_(jsp_1, jsp).
label_(jsp_1, "Java Server Pages (JSP)").
media_type_(jsp_1, media(application/jsp,[])).
extension_(kml_1, kml).
label_(kml_1, "KML").
media_type_(kml_1, media(application/'vnd.google-earth.kml+xml',[])).
extension_(kmz_1, kmz).
label_(kmz_1, "KMZ").
media_type_(kmz_1, media(application/'vnd.google-earth.kmz',[])).
extension_(lha_1, lha).
label_(lha_1, "LHA").
media_type_(lha_1, media(application/'x-lzh-compressed',[])).
extension_(mdb_1, mdb).
label_(mdb_1, "Microsoft Access Database").
media_type_(mdb_1, media(application/'vnd.ms-access',[])).
program_(mdb_1, program(libreoffice,['--base'])).
extension_(mobi_1, mobi).
label_(mobi_1, "Mobi").
media_type_(mobi_1, media(application/'vnd.amazon.mobi8-ebook',[])).
extension_(mol_1, mol).
label_(mol_1, "MDL Information Systems (MDL) Molfile").
media_type_(mol_1, media(chemical/'x-mdl-molfile',[])).
extension_(mp4_1, mp4).
label_(mp4_1, "MPEG-4 Part 14").
media_type_(mp4_1, media(video/mp4,[])).
program_(mp4_1, vlc).
extension_(n3_1, n3).
label_(n3_1, "Notation 3 (N3)").
media_type_(n3_1, media(text/n3,[])).
program_(n3_1, text_editor).
extension_(nc_1, nc).
label_(nc_1, "Network Common Data Form (NetCDF)").
media_type_(nc_1, media(application/netcdf,[])).
encoding_(nq_1, utf8).
extension_(nq_1, nq).
family_(nq_1, rdf).
label_(nq_1, "N-Quads 1.1").
media_type_(nq_1, media(application/'n-quads',[])).
program_(nq_1, text_editor).
encoding_(nt_1, utf8).
extension_(nt_1, nt).
family_(nt_1, rdf).
label_(nt_1, "N-Triples 1.1").
media_type_(nt_1, media(application/'n-triples',[])).
program_(nt_1, text_editor).
extension_(odp_1, odp).
label_(odp_1, "OpenDocument presenatation").
media_type_(odp_1, media(application/'vnd.oasis.opendocument.presentation',[])).
program_(odp_1, program(libreoffice)).
extension_(ods_1, ods).
label_(ods_1, "OpenDocument Spreadsheet").
media_type_(ods_1, media(application/'vnd.oasis.opendocument.spreadsheet',[])).
program_(ods_1, program(libreoffice,['--calc'])).
extension_(odt_1, odt).
label_(odt_1, "OpenDocument Text").
media_type_(odt_1, media(application/'vnd.oasis.opendocument.text',[])).
program_(odt_1, program(libreoffice,['--writer'])).
extension_(pbm_1, pbm).
label_(pbm_1, "Portable Bitmap Format (PBM)").
media_type_(pbm_1, media(image/'x-portable-bitmap',[])).
extension_(pct_1, pct).
label_(pct_1, "PICT is a graphics file format introduced on the original Apple Macintosh computer as its standard metafile format.").
media_type_(pct_1, media(image/'x-pict',[])).
extension_(pcx_1, pcx).
label_(pcx_1, "PiCture EXchange (PC Paintbrush)").
media_type_(pcx_1, media(image/'vnd.zbrush.pcx',[])).
program_(pcx_1, eog).
program_(pcx_1, xfig).
extension_(pdf_1, pdf).
label_(pdf_1, "Portable Document Format (PDF)").
media_type_(pdf_1, media(application/pdf,[])).
program_(pdf_1, evince).
program_(pdf_1, xpdf).
extension_(pgm_1, pgm).
label_(pgm_1, "Portable Graymap Format (PGM)").
media_type_(pgm_1, media(image/'x-portable-graymap',[])).
extension_(pic_1, pic).
label_(pic_1, "PIC language").
encoding_(pl_1, utf8).
extension_(pl_1, pl).
label_(pl_1, "Prolog").
media_type_(pl_1, media(application/'x-prolog',[])).
program_(pl_1, text_editor).
encoding_(png_1, octet).
extension_(png_1, png).
label_(png_1, "Portable Network Graphics (PNG)").
media_type_(png_1, media(image/png,[])).
program_(png_1, eog).
program_(png_1, xfig).
extension_(pnm_1, pnm).
label_(pnm_1, "Portable Anymap Format (PNM)").
media_type_(pnm_1, media(image/'x-portable-anymap',[])).
program_(pnm_1, eog).
extension_(pot_1, pot).
label_(pot_1, "Microsoft PowerPoint").
media_type_(pot_1, media(application/'vnd.ms-powerpoint',[])).
program_(pot_1, program(libreoffice,['--impress'])).
extension_(potm_1, potm).
label_(potm_1, "Microsoft PowerPoint Template").
media_type_(potm_1, media(application/'vnd.ms-powerpoint.template.macroenabled.12',[])).
program_(potm_1, program(libreoffice,['--impress'])).
extension_(potx_1, potx).
label_(potx_1, "OpenOffice Presentation Template").
media_type_(potx_1, media(application/'vnd.openxmlformats-officedocument.presentationml.template',[])).
program_(potx_1, program(libreoffice,['--impress'])).
extension_(pov_1, pov).
label_(pov_1, "Scene-description language for 3D modelling for the Persistence of Vision Raytracer.").
extension_(ppa_1, ppa).
label_(ppa_1, "Microsoft PowerPoint").
media_type_(ppa_1, media(application/'vnd.ms-powerpoint',[])).
program_(ppa_1, program(libreoffice,['--impress'])).
extension_(ppam_1, ppam).
label_(ppam_1, "Microsoft PowerPoint Add-in").
media_type_(ppam_1, media(application/'vnd.ms-powerpoint.addin.macroenabled.12',[])).
program_(ppam_1, program(libreoffice,['--impress'])).
extension_(ppm_1, ppm).
label_(ppm_1, "Portable Pixmap Format (PPM)").
media_type_(ppm_1, media(image/'x-portable-pixmap',[])).
program_(ppm_1, xfig).
extension_(pps_1, pps).
label_(pps_1, "Microsoft PowerPoint").
media_type_(pps_1, media(application/'vnd.ms-powerpoint',[])).
program_(pps_1, program(libreoffice,['--impress'])).
extension_(ppsm_1, ppsm).
label_(ppsm_1, "Microsoft PowerPoint Slideshow").
media_type_(ppsm_1, media(application/'vnd.ms-powerpoint.slideshow.macroenabled.12',[])).
program_(ppsm_1, program(libreoffice,['--impress'])).
extension_(ppsx_1, ppsx).
label_(ppsx_1, "OpenOffice Presentation Slideshow").
media_type_(ppsx_1, media(application/'vnd.openxmlformats-officedocument.presentationml.slideshow',[])).
program_(ppsx_1, program(libreoffice,['--impress'])).
extension_(ppt_1, ppt).
label_(ppt_1, "Microsoft PowerPoint").
media_type_(ppt_1, media(application/'vnd.ms-powerpoint',[])).
program_(ppt_1, program(libreoffice,['--impress'])).
extension_(pptm_1, pptm).
label_(pptm_1, "Microsoft PowerPoint Presentation").
media_type_(pptm_1, media(application/'vnd.ms-powerpoint.presentation.macroenabled.12',[])).
program_(pptm_1, program(libreoffice,['--impress'])).
extension_(pptx_1, pptx).
label_(pptx_1, "OpenOffice Presentation").
media_type_(pptx_1, media(application/'vnd.openxmlformats-officedocument.presentationml.presentation',[])).
program_(pptx_1, program(libreoffice,['--impress'])).
extension_(ps_1, ps).
label_(ps_1, "PostScript (PS)").
media_type_(ps_1, media(application/postscript,[])).
program_(ps_1, evince).
program_(ps_1, xfig).
program_(ps_1, xpdf).
extension_(psd_1, psd).
label_(psd_1, "Adobe Photoshop Document (PSD)").
% FIX: the subtype erroneously repeated the `image/' type prefix
% (media(image/'image/vnd.adobe.photoshop',[])); the IANA registration
% is image/vnd.adobe.photoshop.
media_type_(psd_1, media(image/'vnd.adobe.photoshop',[])).
extension_(rar_1, rar).
label_(rar_1, "Roshal Archive (RAR)").
media_type_(rar_1, media(application/'vnd.rar',[])).
extension_(ras_1, ras).
label_(ras_1, "Sun Raster").
program_(ras_1, eog).
extension_(rdf_1, rdf).
family_(rdf_1, rdf).
label_(rdf_1, "RDF/XML 1.1").
media_type_(rdf_1, media(application/'rdf+xml',[])).
program_(rdf_1, text_editor).
encoding_(rq_1, utf8).
extension_(rq_1, rq).
label_(rq_1, "SPARQL 1.1 Query").
media_type_(rq_1, media(application/'sparql-query',[])).
program_(rq_1, text_editor).
extension_(rss_1, rss).
label_(rss_1, "Rich Site Summary (RSS)").
media_type_(rss_1, media(application/'rss+xml',[])).
program_(rss_1, text_editor).
extension_(rtf_1, rtf).
label_(rtf_1, "Rich Text Format (RTF)").
media_type_(rtf_1, media(application/rtf,[])).
extension_(ru_1, ru).
label_(ru_1, "SPARQL 1.1 Update").
media_type_(ru_1, media(application/'sparql-update',[])).
program_(ru_1, text_editor).
extension_(sgi_1, sgi).
label_(sgi_1, "Silicon Graphics Image (SGI)").
media_type_(sgi_1, media(image/sgi,[])).
encoding_(srj_1, utf8).
extension_(srj_1, srj).
family_(srj_1, sparql(ask)).
family_(srj_1, sparql(select)).
label_(srj_1, "SPARQL 1.1 Query Results JSON Format").
media_type_(srj_1, media(application/'sparql-results+json',[])).
program_(srj_1, text_editor).
extension_(srx_1, srx).
family_(srx_1, sparql(ask)).
family_(srx_1, sparql(select)).
label_(srx_1, "SPARQL Query Results XML Format").
media_type_(srx_1, media(application/'sparql-results+xml',[])).
program_(srx_1, text_editor).
extension_(svg_1, svg).
label_(svg_1, "Scalable Vector Graphics (SVG)").
media_type_(svg_1, media(image/'svg+xml',[])).
program_(svg_1, eog).
program_(svg_1, firefox).
extension_(tar_1, tar).
media_type_(tar_1, media(application/'x-tar',[])).
label_(tar_1, "TAR").
extension_(tga_1, tga).
label_(tga_1, "Truevision Advanced Raster Graphics Adapter (TARGA)").
media_type_(tga_1, media(image/'x-targa',[])).
program_(tga_1, eog).
extension_(tiff_1, tiff).
label_(tiff_1, "Tagged Image File Format (TIFF)").
media_type_(tiff_1, media(image/tiff,[])).
program_(tiff_1, eog).
program_(tiff_1, xfig).
extension_(torrent_1, torrent).
label_(torrent_1, "BitTorrent").
media_type_(torrent_1, media(application/'x-bittorrent',[])).
program_(torrent_1, 'transmission-gtk').
encoding_(trig_1, utf8).
extension_(trig_1, trig).
family_(trig_1, rdf).
label_(trig_1, "TriG 1.1").
media_type_(trig_1, media(application/trig,[])).
program_(trig_1, text_editor).
extension_(trix_1, trix).
label_(trix_1, "Triples in XML (TriX)").
program_(trix_1, text_editor).
extension_(tsv_1, tsv).
family_(tsv_1, sparql(ask)).
family_(tsv_1, sparql(select)).
label_(tsv_1, "Tag-separated values (TSV)").
media_type_(tsv_1, media(text/'tab-separated-values',[])).
program_(tsv_1, text_editor).
encoding_(ttl_1, utf8).
extension_(ttl_1, ttl).
label_(ttl_1, "Turtle 1.1").
media_type_(ttl_1, media(text/turtle,[])).
program_(ttl_1, text_editor).
% Wireless Bitmap (WBMP).
% BUG FIX: the media type previously read image/'vnd.wap.bmp'; the IANA
% registration for WBMP is image/vnd.wap.wbmp, which also matches the
% later wbmp_1 entry in this table.
extension_(wbmp_1, wbmp).
label_(wbmp_1, "Wireless Application Protocol Bitmap Format (Wireless Bitmap)").
media_type_(wbmp_1, media(image/'vnd.wap.wbmp',[])).
program_(wbmp_1, eog).
extension_(xbm_1, xbm).
label_(xbm_1, "X BitMap (XBM)").
media_type_(xbm_1, media(image/'x-bitmap',[])).
program_(xbm_1, eog).
program_(xbm_1, xfig).
extension_(xhtml_1, xhtml).
family_(xhtml_1, rdf).
family_(xhtml_1, rdfa).
label_(xhtml_1, "XHTML").
media_type_(xhtml_1, media(application/'xhtml+xml',[])).
program_(xhtml_1, text_editor).
extension_(xla_1, xla).
label_(xla_1, "Microsoft Excel").
media_type_(xla_1, media(application/'vnd.ms-excel',[])).
program_(xla_1, program(libreoffice,['--calc'])).
extension_(xlam_1, xlam).
label_(xlam_1, "Microsoft Excel Add-in").
media_type_(xlam_1, media(application/'vnd.ms-excel.addin.macroenabled.12',[])).
program_(xlam_1, program(libreoffice,['--calc'])).
extension_(xls_1, xls).
label_(xls_1, "Microsoft Excel").
media_type_(xls_1, media(application/'vnd.ms-excel',[])).
program_(xls_1, program(libreoffice,['--calc'])).
extension_(xlsb_1, xlsb).
label_(xlsb_1, "Microsoft Excel Spreadsheet").
media_type_(xlsb_1, media(application/'vnd.ms-excel.sheet.binary.macroenabled.12',[])).
program_(xlsb_1, program(libreoffice,['--calc'])).
extension_(xlsm_1, xlsm).
label_(xlsm_1, "Microsoft Excel Spreadsheet").
media_type_(xlsm_1, media(application/'vnd.ms-excel.sheet.macroenabled.12',[])).
program_(xlsm_1, program(libreoffice,['--calc'])).
extension_(xlsx_1, xlsx).
label_(xlsx_1, "OpenOffice Spreadsheet").
media_type_(xlsx_1, media(application/'vnd.openxmlformats-officedocument.spreadsheetml.sheet',[])).
program_(xlsx_1, program(libreoffice,['--calc'])).
extension_(xlt_1, xlt).
label_(xlt_1, "Microsoft Excel").
media_type_(xlt_1, media(application/'vnd.ms-excel',[])).
program_(xlt_1, program(libreoffice,['--calc'])).
extension_(xltm_1, xltm).
label_(xltm_1, "Microsoft Excel Template").
media_type_(xltm_1, media(application/'vnd.ms-excel.template.macroenabled.12',[])).
program_(xltm_1, program(libreoffice,['--calc'])).
extension_(xltx_1, xltx).
label_(xltx_1, "OpenOffice Spreadsheet Template").
media_type_(xltx_1, media(application/'vnd.openxmlformats-officedocument.spreadsheetml.template',[])).
program_(xltx_1, program(libreoffice,['--calc'])).
extension_(xml_1, xml).
label_(xml_1, "Extended Markup Language (XML)").
media_type_(xml_1, media(text/xml,[])).
program_(xml_1, text_editor).
extension_(xpm_1, xpm).
label_(xpm_1, "X PixMap (XPM)").
media_type_(xpm_1, media(image/'x-xpixmap',[])).
program_(xpm_1, eog).
program_(xpm_1, xfig).
extension_(xz_1, xz).
label_(xz_1, "xz").
media_type_(xz_1, media(application/'x-xz',[])).
extension_(yml_1, yml).
label_(yml_1, "YAML Ain't Markup Language (YAML)").
media_type_(yml_1, media(application/'x-yaml',[])).
program_(yml_1, text_editor).
extension_(vdx_1, vdx).
label_(vdx_1, "Microsoft Visio XML drawing").
extension_(vml_1, vml).
label_(vml_1, "Vector Markup Language (VML), part of Microsoft Open Office XML").
media_type_(vml_1, media(application/'vnd.openxmlformats-officedocument.vmlDrawing',[])).
extension_(vmlz_1, vmlz).
label_(vmlz_1, "GNU zipped VML").
media_type_(vmlz_1, media(application/'vnd.openxmlformats-officedocument.vmlDrawing',[])).
extension_(warc_1, warc).
label_(warc_1, "Web ARChive (WARC) archive format").
media_type_(warc_1, media(application/warc,[])).
extension_(wbmp_1, wbmp).
label_(wbmp_1, "Wireless Application Protocol Bitmap Format (WBMP)").
media_type_(wbmp_1, media(image/'vnd.wap.wbmp',[])).
extension_(webp_1, webp).
media_type_(webp_1, media(image/webp,[])).
label_(webp_1, "Google image format for the web (WebP)").
extension_(wmv_1, wmv).
label_(wmv_1, "Windows Media Video (WMV)").
media_type_(wmv_1, media(video/'x-ms-wmv',[])).
program_(wmv_1, vlc).
extension_(wrl_1, wrl).
media_type_(wrl_1, media(model/vrml,[])).
label_(wrl_1, "Virtual Reality Modeling Language (VRML)").
extension_(wrz_1, wrz).
label_(wrz_1, "GNU zipped VRML").
media_type_(wrz_1, media(model/vrml,[])).
extension_(zip_1, zip).
label_(zip_1, "ZIP").
media_type_(zip_1, media(application/zip,[])).
| wouterbeek/Prolog_Library_Collection | prolog/media_type.pl | Perl | mit | 24,938 |
#!/usr/bin/perl -w
######################################################################
#
# process LNB data files, u01, u03, mpr and write data
# out as csv files.
#
# NOTES:
#
# z_cass or feeder table no = FADD/10000
#
######################################################################
#
use strict;
#
use Getopt::Std;
use File::Find;
use File::Path qw(mkpath);
#
######################################################################
#
# logical constants
#
use constant TRUE => 1;
use constant FALSE => 0;
#
# output types
#
use constant PROD_COMPLETE => 3;
use constant PROD_COMPLETE_LATER => 4;
use constant DETECT_CHANGE => 5;
use constant MANUAL_CLEAR => 11;
use constant TIMER_NOT_RUNNING => 12;
use constant AUTO_CLEAR => 13;
#
# processing states
#
use constant RESET => 'reset';
use constant BASELINE => 'baseline';
use constant DELTA => 'delta';
#
# common sections for all files types: u01, u03, mpr
#
use constant INDEX => '[Index]';
use constant INFORMATION => '[Information]';
#
# sections specific to u01
#
use constant TIME => '[Time]';
use constant CYCLETIME => '[CycleTime]';
use constant COUNT => '[Count]';
use constant DISPENSER => '[Dispenser]';
use constant MOUNTPICKUPFEEDER => '[MountPickupFeeder]';
use constant MOUNTPICKUPNOZZLE => '[MountPickupNozzle]';
use constant INSPECTIONDATA => '[InspectionData]';
#
# sections specific to u03
#
use constant BRECG => '[BRecg]';
use constant BRECGCALC => '[BRecgCalc]';
use constant ELAPSETIMERECOG => '[ElapseTimeRecog]';
use constant SBOARD => '[SBoard]';
use constant HEIGHTCORRECT => '[HeightCorrect]';
use constant MOUNTQUALITYTRACE => '[MountQualityTrace]';
use constant MOUNTLATESTREEL => '[MountLatestReel]';
use constant MOUNTEXCHANGEREEL => '[MountExchangeReel]';
#
# sections specfic to mpr
#
use constant TIMEDATASP => '[TimeDataSP]';
use constant COUNTDATASP => '[CountDataSP]';
use constant COUNTDATASP2 => '[CountDataSP2]';
use constant TRACEDATASP => '[TraceDataSP]';
use constant TRACEDATASP_2 => '[TraceDataSP_2]';
use constant ISPINFODATA => '[ISPInfoData]';
use constant MASKISPINFODATA => '[MaskISPInfoData]';
#
# files types
#
use constant LNB_U01_FILE_TYPE => 'u01';
use constant LNB_U03_FILE_TYPE => 'u03';
use constant LNB_MPR_FILE_TYPE => 'mpr';
#
# verbose levels
#
use constant NOVERBOSE => 0;
use constant MINVERBOSE => 1;
use constant MIDVERBOSE => 2;
use constant MAXVERBOSE => 3;
#
# processing options
#
use constant PROC_OPT_NONE => 0;
use constant PROC_OPT_IGNRESET12 => 1;
use constant PROC_OPT_IGNALL12 => 2;
use constant PROC_OPT_USENEGDELTS => 4;
use constant PROC_OPT_USEOLDNZ => 8;
#
# nozzle key names
#
use constant NZ_KEY_HEAD => 'Head';
use constant NZ_KEY_NHADD => 'NHAdd';
use constant NZ_KEY_NCADD => 'NCAdd';
#
use constant NZ_LABEL_NHADD_NCADD => 'nhadd_ncadd';
use constant NZ_LABEL_HEAD_NHADD => 'head_nhadd';
use constant NZ_LABEL_HEAD_NCADD => 'head_ncadd';
#
######################################################################
#
# globals
#
my $cmd = $0;
my $log_fh = *STDOUT;
#
# cmd line options
#
my $logfile = '';
my $verbose = NOVERBOSE;
my $file_type = "all";
my $export_dir = '/tmp/';
my $proc_options = PROC_OPT_NONE;
#
my %verbose_levels =
(
off => NOVERBOSE(),
min => MINVERBOSE(),
mid => MIDVERBOSE(),
max => MAXVERBOSE()
);
#
my %allowed_proc_options =
(
NONE => PROC_OPT_NONE(),
IGNRESET12 => PROC_OPT_IGNRESET12(),
IGNALL12 => PROC_OPT_IGNALL12(),
USENEGDELTS => PROC_OPT_USENEGDELTS(),
USEOLDNZ => PROC_OPT_USEOLDNZ()
);
#
# fields to ignore for output=12 files if enabled.
#
my %ignored_output12_fields =
(
'TPICKUP' => 1,
'TPMISS' => 1,
'TRMISS' => 1,
'TDMISS' => 1,
'TMMISS' => 1,
'THMISS' => 1,
'CPERR' => 1,
'CRERR' => 1,
'CDERR' => 1,
'CMERR' => 1,
'CTERR' => 1
);
#
# summary tables.
#
my %totals = ();
#
# list of colums to export
#
my @mount_quality_trace_export_cols =
(
{ name => 'B', format => '%s' },
{ name => 'IDNUM', format => '%s' },
{ name => 'TURN', format => '%s' },
{ name => 'MS', format => '%s' },
{ name => 'TS', format => '%s' },
{ name => 'FAdd', format => '%s' },
{ name => 'FSAdd', format => '%s' },
{ name => 'FBLKCode', format => '%s' },
{ name => 'FBLKSerial', format => '%s' },
{ name => 'NHAdd', format => '%s' },
{ name => 'NCAdd', format => '%s' },
{ name => 'NBLKCode', format => '%s' },
{ name => 'NBLKSerial', format => '%s' },
{ name => 'ReelID', format => '%s' },
{ name => 'F', format => '%s' },
{ name => 'RCGX', format => '%s' },
{ name => 'RCGY', format => '%s' },
{ name => 'RCGA', format => '%s' },
{ name => 'TCX', format => '%s' },
{ name => 'TCY', format => '%s' },
{ name => 'MPosiRecX', format => '%s' },
{ name => 'MPosiRecY', format => '%s' },
{ name => 'MPosiRecA', format => '%s' },
{ name => 'MPosiRecZ', format => '%s' },
{ name => 'THMAX', format => '%s' },
{ name => 'THAVE', format => '%s' },
{ name => 'MNTCX', format => '%s' },
{ name => 'MNTCY', format => '%s' },
{ name => 'MNTCA', format => '%s' },
{ name => 'TLX', format => '%s' },
{ name => 'TLY', format => '%s' },
{ name => 'InspectArea', format => '%s' },
{ name => 'DIDNUM', format => '%s' },
{ name => 'DS', format => '%s' },
{ name => 'DispenseID', format => '%s' },
{ name => 'PARTS', format => '%s' },
{ name => 'WarpZ', format => '%s' }
);
my @feeder_export_cols =
(
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => 'FAdd', format => ',%s' },
{ name => 'FSAdd', format => ',%s' },
{ name => 'ReelID', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
);
#
my @feeder_export_cols2 =
(
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => 'FAdd', format => ',%s' },
{ name => 'FSAdd', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
);
#
my @feeder_export_cols3 =
(
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => 'TableNo', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
);
#
my @feeder_count_cols =
(
'Pickup',
'PMiss',
'RMiss',
'DMiss',
'MMiss',
'HMiss',
'TRSMiss',
'Mount'
);
#
my @nozzle_export_cols =
(
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => 'NHAdd', format => ',%s' },
{ name => 'NCAdd', format => ',%s' },
{ name => 'Blkserial', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
);
#
my @nozzle_export_cols2 =
(
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => 'NHAdd', format => ',%s' },
{ name => 'NCAdd', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
);
#
my %nozzle_export_cols_new =
(
NZ_LABEL_NHADD_NCADD() => [
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => NZ_KEY_NHADD(), format => ',%s' },
{ name => NZ_KEY_NCADD(), format => ',%s' },
{ name => 'Blkserial', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
],
NZ_LABEL_HEAD_NHADD() => [
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => NZ_KEY_HEAD(), format => ',%s' },
{ name => NZ_KEY_NHADD(), format => ',%s' },
{ name => 'Blkserial', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
],
NZ_LABEL_HEAD_NCADD() => [
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => NZ_KEY_HEAD(), format => ',%s' },
{ name => NZ_KEY_NCADD(), format => ',%s' },
{ name => 'Blkserial', format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
]
);
#
my %nozzle_export_cols2_new =
(
NZ_LABEL_NHADD_NCADD() => [
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => NZ_KEY_NHADD(), format => ',%s' },
{ name => NZ_KEY_NCADD(), format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
],
NZ_LABEL_HEAD_NHADD() => [
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => NZ_KEY_HEAD(), format => ',%s' },
{ name => NZ_KEY_NHADD(), format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
],
NZ_LABEL_HEAD_NCADD() => [
{ name => 'Machine', format => '%s' },
{ name => 'Lane', format => ',%s' },
{ name => 'Stage', format => ',%s' },
{ name => NZ_KEY_HEAD(), format => ',%s' },
{ name => NZ_KEY_NCADD(), format => ',%s' },
{ name => 'Pickup', format => ',%s' },
{ name => 'PMiss', format => ',%s' },
{ name => 'RMiss', format => ',%s' },
{ name => 'DMiss', format => ',%s' },
{ name => 'MMiss', format => ',%s' },
{ name => 'HMiss', format => ',%s' },
{ name => 'TRSMiss', format => ',%s' },
{ name => 'Mount', format => ',%s' }
]
);
#
my @nozzle_count_cols =
(
'Pickup',
'PMiss',
'RMiss',
'DMiss',
'MMiss',
'HMiss',
'TRSMiss',
'Mount'
);
#
########################################################################
########################################################################
#
# miscellaneous functions
#
sub short_usage
{
    #
    # Print the brief usage message to the log handle.
    # BUG FIX: the -t option listed "u10" instead of "u01" (the file
    # type constants are u01/u03/mpr); also fixed "procesing" typo.
    #
    my ($arg0) = @_;
    print $log_fh <<EOF;
usage: $arg0 [-?] [-h] [-H] \\
[-w | -W |-v level] \\
[-t u01|u03|mpr] \\
[-l logfile] \\
[-o option] \\
[-d path] \\
directory ...
where:
-? or -h - print this usage.
-H - print long usage and description.
-w - enable warning (level=min=1)
-W - enable warning and trace (level=mid=2)
-v - verbose level: 0=off,1=min,2=mid,3=max
-t file-type = type of file to process: u01, u03, mpr.
default is all files.
-l logfile - log file path
-o option - enable a processing option:
ignreset12 - ignore resetable output=12 fields.
ignall12 - ignore all output=12 files.
usenegdelts - use negative deltas in calculations.
useoldnz - use old nozzle processing.
-d path - export directory, defaults to '/tmp'.
EOF
}
sub long_usage
{
    #
    # Print the detailed usage and description to the log handle.
    # BUG FIX: the -t option listed "u10" instead of "u01"; also fixed
    # the typos "procesing", "eith with", "data include with" and
    # "Ths option cause" in the descriptive text.
    #
    my ($arg0) = @_;
    print $log_fh <<EOF;
usage: $arg0 [-?] [-h] [-H] \\
[-w | -W |-v level] \\
[-t u01|u03|mpr] \\
[-l logfile] \\
[-o option] \\
[-d path] \\
directory ...
where:
-? or -h - print this usage.
-H - print long usage and description.
-w - enable warning (level=min=1)
-W - enable warning and trace (level=mid=2)
-v - verbose level: 0=off,1=min,2=mid,3=max
-t file-type = type of file to process: u01, u03, mpr.
default is all files.
-l logfile - log file path
-o option - enable a processing option:
ignreset12 - ignore resetable output=12 fields.
ignall12 - ignore all output=12 files.
usenegdelts - use negative deltas in calculations.
useoldnz - use old nozzle processing.
-d path - export directory, defaults to '/tmp'.
Description:
The script scans the list of given directories for U01, U03 and
MPR files, then it processes the files.
For U01 files, the data in the following sections are tabulated
and reported in CSV files:
[Time]
[Count]
[MountPickupFeeder]
[MountPickupNozzle]
The CSV files are list below. The names indicate how the data
were grouped, that is, what keys were used:
TIME_BY_MACHINE.csv
TIME_BY_MACHINE_LANE.csv
TIME_BY_PRODUCT_MACHINE.csv
TIME_BY_PRODUCT_MACHINE_LANE.csv
TIME_TOTALS_BY_PRODUCT.csv
TIME_TOTALS.csv
COUNT_BY_MACHINE.csv
COUNT_BY_MACHINE_LANE.csv
COUNT_BY_PRODUCT_MACHINE.csv
COUNT_BY_PRODUCT_MACHINE_LANE.csv
COUNT_TOTALS_BY_PRODUCT.csv
COUNT_TOTALS.csv
FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD.csv
FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv
FEEDER_BY_MACHINE_LANE_STAGE_TABLE_NO.csv
FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD.csv
FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv
FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_TABLE_NO.csv
NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NCADD_BLKSERIAL.csv
NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NCADD.csv
NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NHADD_BLKSERIAL.csv
NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NHADD.csv
NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv
NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD.csv
NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NCADD_BLKSERIAL.csv
NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NCADD.csv
NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NHADD_BLKSERIAL.csv
NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NHADD.csv
NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv
NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD.csv
The U01 file raw data are written to separate files by section. The
following list of files is generated:
TIME_BY_MACHINE_LANE_STAGE_FILENAME.csv
TIME_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
CYCLE_TIME_BY_MACHINE_LANE_STAGE_FILENAME.csv
CYCLE_TIME_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
COUNT_BY_MACHINE_LANE_STAGE_FILENAME.csv
COUNT_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
DISPENSER_BY_MACHINE_LANE_STAGE_FILENAME.csv
DISPENSER_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_PICKUP_FEEDER_BY_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_PICKUP_FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_PICKUP_NOZZLE_BY_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_PICKUP_NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
INSPECTION_DATA_BY_MACHINE_LANE_STAGE_FILENAME.csv
INSPECTION_DATA_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
The U03 file raw data are written to separate files by section. The
following list of files is generated:
MOUNT_QUALITY_TRACE_BY_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_QUALITY_TRACE_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_LATEST_REEL_BY_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_LATEST_REEL_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_EXCHANGE_REEL_BY_MACHINE_LANE_STAGE_FILENAME.csv
MOUNT_EXCHANGE_REEL_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
The MPR file raw data are written to separate files by section. The
following list of files is generated:
TIME_DATA_SP_BY_MACHINE_LANE_STAGE_FILENAME.csv
TIME_DATA_SP_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
COUNT_DATA_SP_BY_MACHINE_LANE_STAGE_FILENAME.csv
COUNT_DATA_SP_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
COUNT_DATA_SP2_BY_MACHINE_LANE_STAGE_FILENAME.csv
COUNT_DATA_SP2_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
TRACE_DATA_SP_BY_MACHINE_LANE_STAGE_FILENAME.csv
TRACE_DATA_SP_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
TRACE_DATA_SP_2_BY_MACHINE_LANE_STAGE_FILENAME.csv
TRACE_DATA_SP_2_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
ISP_INFO_DATA_BY_MACHINE_LANE_STAGE_FILENAME.csv
ISP_INFO_DATA_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
MASK_ISP_INFO_DATA_BY_MACHINE_LANE_STAGE_FILENAME.csv
MASK_ISP_INFO_DATA_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
The command line options '-?' and '-h' list a short version of the
usage message. This is the default usage message. Option '-H'
prints out a more detailed version of the usage. This one.
There are four verbose levels:
1) The default value is 0 which prints out no warnings. Only error
messages are printed when the script exits because of a fatal error.
2) Level 1 which is set either with '-w' or '-v 1' prints out non-fatal
warnings. This includes warning for negative deltas, changes in
blkserial or reel id, change overs, etc. The warnings highlight
events which may be of interest. I usually run with this warning
level when debugging.
3) Level 2 which is set with '-W' or '-v 2' includes all the data
included with level 1 and 0, and additional messages for tracing. It
can generate a lot of messages.
4) Level 3 which is set with '-v 3' generates the most messages. It
will list the data which are read in, etc. It is *very* verbose.
If you wish to limit the file processing to only one type of
file, then use the the '-t' option and choose the type: u01, u03,
or mpr. The default is all types of files if the file type is found.
You can set the output file name using the '-l' option. You give
it the name of the file. By default all output goes to STDOUT.
The CSV files are written by default in /tmp. If you wish to
use a different directory, then use the '-d' option and give
the path as the option argument.
The '-o' option allows you to change how the U01 tabulation is
performed. The following options are available:
ignreset12 - ignore resetable output=12 fields. This options causes
the data in the [Count] section of a U01, output=12 file to be
completely ignored.
ignall12 - ignore all output=12 files. This option causes all
U01, output=12 files to be ignored in all tabulations.
usenegdelts - use negative deltas in calculations. This option
causes all negative deltas to be used in tabulations. The default
is to set any negative delta to zero.
useoldnz - use old nozzle processing. This is strictly for testing.
Do not use.
EOF
}
#
sub set_name_value_section_column_names
{
    #
    # Capture the sorted key names of a name/value section the first
    # time a file of this type is seen; later files reuse that order.
    #
    my ($ftype, $pfile, $sect) = @_;
    #
    unless (exists($pfile->{$sect}))
    {
        printf $log_fh "%d: No column data for %s %s.\n", __LINE__, $ftype, $sect if ($verbose >= MAXVERBOSE);
        return;
    }
    #
    # Already recorded for this (file type, section) pair -- nothing to do.
    return if (exists($totals{column_names}{$ftype}{$sect}));
    #
    @{$totals{column_names}{$ftype}{$sect}} = sort keys %{$pfile->{$sect}{data}};
    #
    printf $log_fh "\n%d: Setting column names %s %s: %s\n", __LINE__, $ftype, $sect, join(' ', @{$totals{column_names}{$ftype}{$sect}});
}
#
sub set_list_section_column_names
{
    #
    # Record the column order for a list-style section the first time a
    # file of this type is seen; the section itself already carries its
    # column order, so the array reference is reused as-is.
    #
    my ($ftype, $pfile, $sect) = @_;
    #
    unless (exists($pfile->{$sect}))
    {
        printf $log_fh "%d: No column data for %s %s.\n", __LINE__, $ftype, $sect if ($verbose >= MAXVERBOSE);
        return;
    }
    #
    # Already recorded for this (file type, section) pair -- nothing to do.
    return if (exists($totals{column_names}{$ftype}{$sect}));
    #
    $totals{column_names}{$ftype}{$sect} = $pfile->{$sect}{column_names};
    #
    printf $log_fh "\n%d: Setting column names %s %s: %s\n", __LINE__, $ftype, $sect, join(' ', @{$totals{column_names}{$ftype}{$sect}});
}
#
sub export_list_section_as_csv
{
    #
    # Export one list-style section as CSV: first keyed by
    # machine/lane/stage/filename, then (when $do_product is TRUE)
    # keyed by product as well.  Output goes under $export_dir.
    #
    # Arguments:
    #   $section       - %totals section key, e.g. '[MountQualityTrace]'
    #   $file_type     - source file type ('u01', 'u03' or 'mpr')
    #   $file_name     - prefix used to build the CSV file names
    #   $machine_label - header label for the machine column
    #   $do_product    - TRUE to also write the per-product CSV
    #
    my ($section, $file_type, $file_name, $machine_label, $do_product) = @_;
    #
    if ( ! exists($totals{$section}))
    {
        printf $log_fh "\n%d: Section %s does NOT exist\n", __LINE__, $section;
        return;
    }
    #
    ###############################################################
    #
    printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, Filename:\n", __LINE__, $section;
    #
    my $outfnm = "${export_dir}/${file_name}_BY_MACHINE_LANE_STAGE_FILENAME.csv";
    # an existing file is silently overwritten; only a note is logged
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open(my $outfh, ">" , $outfnm) || die $!;
    #
    # column order captured earlier by set_list_section_column_names()
    my $pcols = $totals{column_names}{$file_type}{$section};
    #
    # header row
    printf $outfh "${machine_label},lane,stage,filename";
    foreach my $col (@{$pcols})
    {
        printf $outfh ",%s", $col;
    }
    printf $outfh "\n";
    #
    # one CSV row per stored data row, in machine/lane/stage/file order
    foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}})
    {
        foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}})
        {
            foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}})
            {
                foreach my $filename (sort keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
                {
                    foreach my $prow (@{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}})
                    {
                        printf $outfh "%s,%s,%s,%s", $machine, $lane, $stage, $filename;
                        foreach my $col (@{$pcols})
                        {
                            printf $outfh ",%s", $prow->{$col};
                        }
                        printf $outfh "\n";
                    }
                }
            }
        }
    }
    close($outfh);
    #
    return unless ($do_product == TRUE);
    #
    ###############################################################
    #
    # same export again, with a leading product column, read from the
    # per-product subtree of %totals
    #
    printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, Filename:\n", __LINE__, $section;
    #
    $outfnm = "${export_dir}/${file_name}_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    #
    printf $outfh "product,${machine_label},lane,stage,filename";
    foreach my $col (@{$pcols})
    {
        printf $outfh ",%s", $col;
    }
    printf $outfh "\n";
    #
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}})
        {
            foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}})
            {
                foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}})
                {
                    foreach my $filename (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
                    {
                        foreach my $prow (@{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}})
                        {
                            printf $outfh "%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $filename;
                            foreach my $col (@{$pcols})
                            {
                                printf $outfh ",%s", $prow->{$col};
                            }
                            printf $outfh "\n";
                        }
                    }
                }
            }
        }
    }
    close($outfh);
}
#
sub export_name_value_section_as_csv
{
    #
    # Export one name/value-style section (one hash of column => value
    # per file) as CSV: first keyed by machine/lane/stage/filename,
    # then (when $do_product is TRUE) keyed by product as well.
    #
    # Arguments:
    #   $section       - %totals section key, e.g. '[Time]' or '[Count]'
    #   $file_type     - source file type ('u01', 'u03' or 'mpr')
    #   $file_name     - prefix used to build the CSV file names
    #   $machine_label - header label for the machine column
    #   $do_product    - TRUE to also write the per-product CSV
    #
    my ($section, $file_type, $file_name, $machine_label, $do_product) = @_;
    #
    if ( ! exists($totals{$section}))
    {
        printf $log_fh "\n%d: Section %s does NOT exist\n", __LINE__, $section;
        return;
    }
    #
    ###############################################################
    #
    printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, Filename:\n", __LINE__, $section;
    #
    my $outfnm = "${export_dir}/${file_name}_BY_MACHINE_LANE_STAGE_FILENAME.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open(my $outfh, ">" , $outfnm) || die $!;
    #
    # column order captured earlier by set_name_value_section_column_names()
    my $pcols = $totals{column_names}{$file_type}{$section};
    #
    printf $outfh "${machine_label},lane,stage,filename";
    foreach my $col (@{$pcols})
    {
        printf $outfh ",%s", $col;
    }
    printf $outfh "\n";
    #
    foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}})
    {
        foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}})
        {
            foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}})
            {
                foreach my $filename (sort keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
                {
                    printf $outfh "%s,%s,%s,%s", $machine, $lane, $stage, $filename;
                    foreach my $col (@{$pcols})
                    {
                        printf $outfh ",%s", $totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}{$col};
                    }
                    printf $outfh "\n";
                }
            }
        }
    }
    close($outfh);
    #
    return unless ($do_product == TRUE);
    #
    ###############################################################
    #
    printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, Filename:\n", __LINE__, $section;
    #
    $outfnm = "${export_dir}/${file_name}_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    #
    printf $outfh "product,${machine_label},lane,stage,filename";
    foreach my $col (@{$pcols})
    {
        printf $outfh ",%s", $col;
    }
    printf $outfh "\n";
    #
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}})
        {
            foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}})
            {
                foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}})
                {
                    foreach my $filename (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
                    {
                        printf $outfh "%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $filename;
                        foreach my $col (@{$pcols})
                        {
                            # BUG FIX: this previously read from the top-level
                            # by_machine_lane_stage_filename subtree while the
                            # loops iterate the per-product subtree -- unlike
                            # export_list_section_as_csv.  Read the value from
                            # the per-product subtree being iterated.
                            printf $outfh ",%s", $totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}{$col};
                        }
                        printf $outfh "\n";
                    }
                }
            }
        }
    }
    close($outfh);
}
#
sub tabulate_list_section
{
    #
    # Copy a list-style section's rows into %totals, keyed by
    # machine/lane/stage/filename and, when $do_product is TRUE, also
    # under the current product for that machine/lane/stage.
    #
    my ($pdb, $pfile, $ftype, $sect, $do_product) = @_;
    #
    my $fname = $pfile->{file_name};
    my $mach  = $pfile->{mach_no};
    my $lane  = $pfile->{lane};
    my $stage = $pfile->{stage};
    my $outno = $pfile->{output_no};
    #
    unless (exists($pfile->{$sect}))
    {
        printf $log_fh "%d: WARNING: Section %s does NOT exist in file %s\n", __LINE__, $sect, $fname if ($verbose >= MINVERBOSE);
        return;
    }
    #
    # The rows used to be unshifted one at a time, which leaves them in
    # reverse file order; a single reverse() assignment does the same.
    @{$totals{$sect}{by_machine_lane_stage_filename}{$mach}{$lane}{$stage}{$fname}{data}} =
        reverse @{$pfile->{$sect}{data}};
    #
    return unless ($do_product == TRUE);
    #
    my $product = $pdb->{product}{$ftype}{$mach}{$lane}{$stage};
    #
    @{$totals{$sect}{by_product}{$product}{by_machine_lane_stage_filename}{$mach}{$lane}{$stage}{$fname}{data}} =
        reverse @{$pfile->{$sect}{data}};
}
#
sub prepare_list_section
{
    # Validate, log and tabulate one list-type section of a parsed file.
    # Skips silently (with a warning at MIDVERBOSE) when load_list() found
    # no payload for the section.
    my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
    #
    if ($pfile->{found_data}->{$section} == FALSE)
    {
        printf $log_fh "%d: WARNING: No data for %s.\n", __LINE__, $section if ($verbose >= MIDVERBOSE);
        return;
    }
    #
    my $machine = $pfile->{mach_no};
    my $lane = $pfile->{lane};
    my $stage = $pfile->{stage};
    my $output_no = $pfile->{output_no};
    my $filename = $pfile->{file_name};
    #
    set_list_section_column_names($file_type, $pfile, $section);
    #
    printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
        if ($verbose >= MAXVERBOSE);
    #
    if ($verbose >= MAXVERBOSE)
    {
        printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
        printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
        printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
        printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
        printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pfile->{data}});
        # BUG FIX: defined(@array) has been a fatal error since Perl 5.22;
        # guard on the section slot existing and holding an array ref instead.
        printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pfile->{$section}->{data}}) if (exists($pfile->{$section}) && ref($pfile->{$section}->{data}) eq 'ARRAY');
    }
    #
    tabulate_list_section($pdb, $pfile, $file_type, $section, $do_product);
    #
    return;
}
#
sub tabulate_name_value_section
{
    # Merge one name=value section's key/value pairs from a parsed file
    # record into the global %totals accumulator, keyed by
    # machine/lane/stage/filename and, when $do_product is TRUE, also by
    # the current product.
    my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
    #
    my $filename = $pfile->{file_name};
    my $machine  = $pfile->{mach_no};
    my $lane     = $pfile->{lane};
    my $stage    = $pfile->{stage};
    #
    unless (exists($pfile->{$section}))
    {
        printf $log_fh "%d: WARNING: Section %s does NOT exist in file %s\n", __LINE__, $section, $filename if ($verbose >= MINVERBOSE);
        return;
    }
    #
    my $src = $pfile->{$section}->{data};
    #
    # hash-slice assignment merges all pairs in one statement
    @{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}}{keys %{$src}} = values %{$src};
    #
    return unless ($do_product == TRUE);
    #
    my $product = $pdb->{product}{$file_type}{$machine}{$lane}{$stage};
    #
    @{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}}{keys %{$src}} = values %{$src};
}
#
sub prepare_name_value_section
{
    # Validate, log and tabulate one name=value section of a parsed file.
    # Skips silently (with a warning at MIDVERBOSE) when load_name_value()
    # found no payload for the section.
    my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
    #
    if ($pfile->{found_data}->{$section} == FALSE)
    {
        printf $log_fh "%d: WARNING: No data for %s.\n", __LINE__, $section if ($verbose >= MIDVERBOSE);
        return;
    }
    #
    my $machine = $pfile->{mach_no};
    my $lane = $pfile->{lane};
    my $stage = $pfile->{stage};
    my $output_no = $pfile->{output_no};
    my $filename = $pfile->{file_name};
    #
    set_name_value_section_column_names($file_type, $pfile, $section);
    #
    printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
        if ($verbose >= MAXVERBOSE);
    #
    if ($verbose >= MAXVERBOSE)
    {
        printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
        printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
        printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
        printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
        printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pfile->{data}});
        # BUG FIX: the original used defined(@{...}) (fatal since Perl 5.22)
        # and dereferenced the section's {data} slot as an array, but
        # load_name_value() stores a hash there; count the keys instead.
        printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(keys %{$pfile->{$section}->{data}}) if (exists($pfile->{$section}) && ref($pfile->{$section}->{data}) eq 'HASH');
    }
    #
    tabulate_name_value_section($pdb, $pfile, $file_type, $section, $do_product);
    #
    return;
}
#
########################################################################
########################################################################
#
# current product functions
#
sub get_product_info
{
    # Pull the product identity out of a parsed file record:
    # MJSID from the INDEX section, LotName/LotNumber from INFORMATION.
    # Results are written through the supplied scalar refs; surrounding
    # double quotes are stripped from MJSID and LotName when present.
    my ($pdata, $pmjsid, $plotname, $plotnumber) = @_;
    #
    ${$pmjsid} = $pdata->{INDEX()}->{data}->{MJSID};
    ${$pmjsid} = $1 if (${$pmjsid} =~ m/"([^"]*)"/);
    #
    ${$plotname} = $pdata->{INFORMATION()}->{data}->{LotName};
    ${$plotname} = $1 if (${$plotname} =~ m/"([^"]*)"/);
    ${$plotnumber} = $pdata->{INFORMATION()}->{data}->{LotNumber};
}
#
sub set_product_info
{
    # Maintain the product currently running on a given
    # file-type/machine/lane/stage, and raise the change_over flag when a
    # completed board reveals a different product than the one recorded.
    my ($pdb, $pfile, $ftype) = @_;
    #
    my $machine = $pfile->{mach_no};
    my $lane    = $pfile->{lane};
    my $stage   = $pfile->{stage};
    my $outno   = $pfile->{output_no};
    #
    my ($mjsid, $lotname, $lotnumber) = ('UNKNOWN', 'UNKNOWN', 0);
    my $tag = "${mjsid}_${lotname}_${lotnumber}";
    #
    if ( ! exists($pdb->{product}{$ftype}{$machine}{$lane}{$stage}))
    {
        # first sighting: seed with the UNKNOWN placeholder tag
        $pdb->{product}{$ftype}{$machine}{$lane}{$stage}     = $tag;
        $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} = FALSE;
    }
    elsif (($outno == PROD_COMPLETE) || ($outno == PROD_COMPLETE_LATER))
    {
        # a completed board carries real product identity
        get_product_info($pfile, \$mjsid, \$lotname, \$lotnumber);
        $tag = "${mjsid}_${lotname}_${lotnumber}";
        #
        # change-over only when the recorded product is real and differs
        my $prev = $pdb->{product}{$ftype}{$machine}{$lane}{$stage};
        $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} =
            (($prev ne $tag) && ($prev ne "UNKNOWN_UNKNOWN_0")) ? TRUE : FALSE;
        #
        $pdb->{product}{$ftype}{$machine}{$lane}{$stage} = $tag;
    }
    else
    {
        # clear this flag.
        $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} = FALSE;
    }
    #
    printf $log_fh "%d: Product %s: %s, Change Over: %d\n", __LINE__, $ftype, $pdb->{product}{$ftype}{$machine}{$lane}{$stage}, $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} if ($verbose >= MIDVERBOSE);
}
#
########################################################################
########################################################################
#
# scan directories for U01, U03 and MPR files.
#
my %all_list = ();   # maps file extension ('u01'/'u03'/'mpr') to the caller-supplied list ref, filled by the want_* callbacks
my $one_type = '';   # single-extension filter used by want_one_type(); set in get_all_files()
#
sub want_one_type
{
    # File::Find wanted() callback: collect files whose extension matches
    # $one_type, parse the metadata fields encoded in the file name, and
    # prepend a record onto @{$all_list{$one_type}}.
    return unless ($_ =~ m/^.*\.${one_type}$/);
    #
    printf $log_fh "%d: FOUND %s FILE: %s\n", __LINE__, $one_type, $File::Find::name
        if ($verbose >= MAXVERBOSE);
    #
    my $file_name = $_;
    #
    my %rec = (
        'file_name'     => $file_name,
        'full_path'     => $File::Find::name,
        'directory'     => $File::Find::dir,
        'date'          => '',
        'mach_no'       => '',
        'stage'         => '',
        'lane'          => '',
        'pcb_serial'    => '',
        'pcb_id'        => '',
        'output_no'     => '',
        'pcb_id_lot_no' => '',
    );
    #
    # the name is '+-+'-separated in the newer layout, '-'-separated in the
    # older one; fields stay empty when neither yields enough parts
    my @parts = split('\+-\+', $file_name);
    @parts = split('-', $file_name) if (scalar(@parts) < 9);
    if (scalar(@parts) >= 9)
    {
        @rec{qw(date mach_no stage lane pcb_serial pcb_id output_no pcb_id_lot_no)} = @parts[0 .. 7];
    }
    #
    unshift @{$all_list{$one_type}}, { %rec };
}
#
sub want_all_types
{
    # File::Find wanted() callback: collect u01, u03 and mpr files in one
    # pass, parse the metadata fields encoded in the file name, and prepend
    # a record onto the matching @{$all_list{<ext>}} list.
    my $dt = '';
    #
    foreach my $ext ('u01', 'u03', 'mpr')
    {
        next unless ($_ =~ m/^.*\.${ext}$/);
        printf $log_fh "%d: FOUND %s FILE: %s\n", __LINE__, $ext, $File::Find::name
            if ($verbose >= MAXVERBOSE);
        $dt = $ext;
        last;
    }
    #
    return if ($dt eq '');
    #
    my $file_name = $_;
    #
    my %rec = (
        'file_name'     => $file_name,
        'full_path'     => $File::Find::name,
        'directory'     => $File::Find::dir,
        'date'          => '',
        'mach_no'       => '',
        'stage'         => '',
        'lane'          => '',
        'pcb_serial'    => '',
        'pcb_id'        => '',
        'output_no'     => '',
        'pcb_id_lot_no' => '',
    );
    #
    # the name is '+-+'-separated in the newer layout, '-'-separated in the
    # older one; fields stay empty when neither yields enough parts
    my @parts = split('\+-\+', $file_name);
    @parts = split('-', $file_name) if (scalar(@parts) < 9);
    if (scalar(@parts) >= 9)
    {
        @rec{qw(date mach_no stage lane pcb_serial pcb_id output_no pcb_id_lot_no)} = @parts[0 .. 7];
    }
    #
    unshift @{$all_list{$dt}}, { %rec };
}
#
sub get_all_files
{
    # Walk the directories in @{$pargv} collecting u01/u03/mpr file records
    # into the caller-supplied lists, then sort each list by file name.
    # When $ftype names a single extension, only that type is scanned.
    my ($ftype, $pargv, $pu01, $pu03, $pmpr) = @_;
    #
    my %list_for = ('u01' => $pu01, 'u03' => $pu03, 'mpr' => $pmpr);
    #
    if (exists($list_for{$ftype}))
    {
        # optimized single-extension scan
        $one_type = $ftype;
        $all_list{$one_type} = $list_for{$ftype};
        #
        find(\&want_one_type, @{$pargv});
        #
        @{$list_for{$ftype}} = sort { $a->{file_name} cmp $b->{file_name} } @{$list_for{$ftype}};
    }
    else
    {
        # scan all three extensions in one pass
        @all_list{qw(u01 u03 mpr)} = ($pu01, $pu03, $pmpr);
        #
        find(\&want_all_types, @{$pargv});
        #
        foreach my $plist ($pu01, $pu03, $pmpr)
        {
            @{$plist} = sort { $a->{file_name} cmp $b->{file_name} } @{$plist};
        }
    }
}
#
######################################################################
######################################################################
#
# read in data file and load all sections
#
sub load
{
    # Read the file named by $pdata->{full_path} into @{$pdata->{data}},
    # one chomped line per element. Returns 1 on success, 0 on failure
    # (unreadable or unopenable file; a message is logged either way).
    my ($pdata) = @_;
    #
    my $path = $pdata->{full_path};
    #
    if ( ! -r $path )
    {
        printf $log_fh "\n%d: ERROR: file $path is NOT readable\n\n", __LINE__;
        return 0;
    }
    #
    # FIX: three-arg open with a lexical filehandle replaces the original
    # two-arg bareword open (avoids mode-injection via the file name and a
    # shared package-global handle).
    my $infd;
    unless (open($infd, '<', $path))
    {
        printf $log_fh "\n%d: ERROR: unable to open $path.\n\n", __LINE__;
        return 0;
    }
    @{$pdata->{data}} = <$infd>;
    close($infd);
    #
    # remove newlines
    #
    chomp(@{$pdata->{data}});
    printf $log_fh "%d: Lines read: %d\n", __LINE__, scalar(@{$pdata->{data}})
        if ($verbose >= MAXVERBOSE);
    #
    return 1;
}
#
sub load_name_value
{
    # Extract a "name = value" section from the already-loaded file lines
    # and store it as %{$pdata->{$section}->{data}}.
    # Returns 1 when the section held data, 0 otherwise; sets
    # $pdata->{found_data}->{$section} accordingly.
    my ($pdata, $section) = @_;
    #
    $pdata->{found_data}->{$section} = FALSE;
    #
    printf $log_fh "\n%d: Loading Name-Value Section: %s\n", __LINE__, $section
	if ($verbose >= MAXVERBOSE);
    #
    # Escape the section tag's first character for use in a regex
    # (presumably the tag begins with a metacharacter such as '[' -- TODO confirm).
    my $re_section = '\\' . $section;
    # Range (flip-flop) operator: keep lines from the section header line
    # up to and including the first blank line after it.
    @{$pdata->{raw}->{$section}} =
	grep /^${re_section}\s*$/ .. /^\s*$/, @{$pdata->{data}};
    #
    # printf $log_fh "<%s>\n", join("\n", @{$pdata->{raw}->{$section}});
    #
    # Header line + trailing blank alone => no payload; drop the section.
    if (scalar(@{$pdata->{raw}->{$section}}) <= 2)
    {
	# $pdata->{$section} = {};
	delete $pdata->{$section};
	printf $log_fh "%d: No data found.\n", __LINE__
	    if ($verbose >= MAXVERBOSE);
	return 0;
    }
    #
    # Strip the section header line and the trailing blank line.
    shift @{$pdata->{raw}->{$section}};
    pop @{$pdata->{raw}->{$section}};
    #
    printf $log_fh "%d: Section Lines: %d\n", __LINE__, scalar(@{$pdata->{raw}->{$section}})
	if ($verbose >= MAXVERBOSE);
    #
    # Each remaining line is "name = value"; split on the first '=' only
    # (limit 2) so values may themselves contain '='.
    %{$pdata->{$section}->{data}} =
	map { split /\s*=\s*/, $_, 2 } @{$pdata->{raw}->{$section}};
    printf $log_fh "%d: Number of Keys: %d\n", __LINE__, scalar(keys %{$pdata->{$section}->{data}})
	if ($verbose >= MAXVERBOSE);
    #
    $pdata->{found_data}->{$section} = TRUE;
    #
    return 1;
}
#
sub split_quoted_string
{
    # Split a record on single spaces, honoring double-quoted substrings:
    # spaces inside quotes are kept, the quote characters themselves are
    # dropped. Consecutive separator spaces yield empty tokens; a trailing
    # empty token is not emitted. Returns the list of tokens.
    # (Removed the unused $istart/$iend locals from the original.)
    my $rec = shift;
    #
    my @tokens = ();
    my $token = '';
    my $in_string = 0;
    #
    foreach my $c (split //, $rec)
    {
        if ($in_string)
        {
            # inside quotes: a quote closes the string, anything else
            # (including spaces) is part of the token
            if ($c eq '"')
            {
                $in_string = 0;
            }
            else
            {
                $token .= $c;
            }
        }
        elsif ($c eq '"')
        {
            $in_string = 1;
        }
        elsif ($c eq ' ')
        {
            # separator: finish the current token (possibly empty)
            push(@tokens, $token);
            $token = '';
        }
        else
        {
            $token .= $c;
        }
    }
    #
    # flush the final token unless it is empty
    push(@tokens, $token) if (length($token) > 0);
    #
    return @tokens;
}
#
sub load_list
{
    # Extract a list-type section from the already-loaded file lines:
    # a header line naming the columns followed by data records. Records
    # are parsed with split_quoted_string() and stored as hash refs in
    # @{$pdata->{$section}->{data}} (in reverse file order, via unshift).
    # Returns 1 when the section held data, 0 otherwise; sets
    # $pdata->{found_data}->{$section} accordingly.
    my ($pdata, $section) = @_;
    #
    printf $log_fh "\n%d: Loading List Section: %s\n", __LINE__, $section
	if ($verbose >= MAXVERBOSE);
    #
    $pdata->{found_data}->{$section} = FALSE;
    #
    # Escape the section tag's first character for use in a regex
    # (presumably the tag begins with a metacharacter such as '[' -- TODO confirm).
    my $re_section = '\\' . $section;
    # Range (flip-flop) operator: keep lines from the section header line
    # up to and including the first blank line after it.
    @{$pdata->{raw}->{$section}} =
	grep /^${re_section}\s*$/ .. /^\s*$/, @{$pdata->{data}};
    #
    # printf $log_fh "<%s>\n", join("\n", @{$pdata->{raw}->{$section}});
    #
    # Section header + column header + trailing blank alone => no payload.
    if (scalar(@{$pdata->{raw}->{$section}}) <= 3)
    {
	# $pdata->{$section} = {};
	delete $pdata->{$section};
	printf $log_fh "%d: No data found.\n", __LINE__
	    if ($verbose >= MAXVERBOSE);
	return 0;
    }
    # Strip the section header line and the trailing blank line, then take
    # the column-name header; column names are single-space separated.
    shift @{$pdata->{raw}->{$section}};
    pop @{$pdata->{raw}->{$section}};
    $pdata->{$section}->{header} = shift @{$pdata->{raw}->{$section}};
    @{$pdata->{$section}->{column_names}} =
	split / /, $pdata->{$section}->{header};
    my $number_columns = scalar(@{$pdata->{$section}->{column_names}});
    #
    @{$pdata->{$section}->{data}} = ();
    #
    printf $log_fh "%d: Section Lines: %d\n", __LINE__, scalar(@{$pdata->{raw}->{$section}})
	if ($verbose >= MAXVERBOSE);
    # printf $log_fh "Column Names: %d\n", $number_columns;
    foreach my $record (@{$pdata->{raw}->{$section}})
    {
	# printf $log_fh "\nRECORD: %s\n", $record;
	#
	# printf $log_fh "\nRECORD (original): %s\n", $record;
	# $record =~ s/"\s+"\s/"" /g;
	# $record =~ s/"\s+"\s*$/""/g;
	# printf $log_fh "\nRECORD (final): %s\n", $record;
	# my @tokens = split / /, $record;
	#
	# Quote-aware tokenization; see split_quoted_string().
	my @tokens = split_quoted_string($record);
	my $number_tokens = scalar(@tokens);
	printf $log_fh "%d: Number of tokens in record: %d\n", __LINE__, $number_tokens
	    if ($verbose >= MAXVERBOSE);
	#
	# Only records whose token count matches the column count are kept;
	# the tokens are zipped with the column names into a hash.
	if ($number_tokens == $number_columns)
	{
	    my %data = ();
	    @data{@{$pdata->{$section}->{column_names}}} = @tokens;
	    my $data_size = scalar(keys %data);
	    # printf $log_fh "Current Data Size: %d\n", $data_size;
	    unshift @{$pdata->{$section}->{data}}, \%data;
	    printf $log_fh "%d: Current Number of Records: %d\n", __LINE__, scalar(@{$pdata->{$section}->{data}})
		if ($verbose >= MAXVERBOSE);
	}
	else
	{
	    printf $log_fh "%d: SKIPPING RECORD - NUMBER TOKENS (%d) != NUMBER COLUMNS (%d)\n", __LINE__, $number_tokens, $number_columns;
	}
    }
    #
    $pdata->{found_data}->{$section} = TRUE;
    #
    return 1;
}
#
########################################################################
########################################################################
#
# process U01 files.
#
sub export_u01_count_data
{
    # Write the accumulated COUNT-section totals from the global %totals
    # hash to CSV files under ${export_dir}: overall totals, per machine,
    # per machine+lane, and the same three groupings broken down by
    # product. $pdb is accepted but unused here; the data source is %totals.
    my ($pdb) = @_;
    #
    ###############################################################
    #
    my $section = COUNT;
    #
    printf $log_fh "\n%d: Export Total Data For %s:\n", __LINE__, $section;
    #
    my $first_time = TRUE;
    #
    my $outfnm = "${export_dir}/COUNT_TOTALS.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open(my $outfh, ">" , $outfnm) || die $!;
    #
    # header row: one column per counter name, in sorted order
    foreach my $key (sort keys %{$totals{$section}{totals}})
    {
        if ($first_time == TRUE)
        {
            printf $outfh "%s", $key;
            $first_time = FALSE;
        }
        else
        {
            printf $outfh ",%s", $key;
        }
    }
    printf $outfh "\n";
    #
    # single data row: counter values in the same sorted key order
    $first_time = TRUE;
    foreach my $key (sort keys %{$totals{$section}{totals}})
    {
        if ($first_time == TRUE)
        {
            printf $outfh "%d", $totals{$section}{totals}{$key};
            $first_time = FALSE;
        }
        else
        {
            printf $outfh ",%d", $totals{$section}{totals}{$key};
        }
    }
    printf $outfh "\n";
    close($outfh);
    #
    $section = COUNT;
    #
    printf $log_fh "\n%d: Export Data For %s by Machine:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    #
    $outfnm = "${export_dir}/COUNT_BY_MACHINE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine}})
    {
        # header is emitted once, using the first machine's key set
        # (assumes all machines share the same counter keys -- TODO confirm)
        if ($first_time == TRUE)
        {
            printf $outfh "machine";
            foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
            {
                printf $outfh ",%s", $key;
            }
            printf $outfh "\n";
            $first_time = FALSE;
        }
        #
        printf $outfh "%s", $machine;
        foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
        {
            printf $outfh ",%d", $totals{$section}{by_machine}{$machine}{$key};
        }
        printf $outfh "\n";
    }
    close($outfh);
    #
    $section = COUNT;
    #
    printf $log_fh "\n%d: Export Data For %s by Machine and Lane:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/COUNT_BY_MACHINE_LANE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}})
    {
        foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}{$machine}})
        {
            # header emitted once, from the first machine/lane key set
            if ($first_time == TRUE)
            {
                printf $outfh "machine,lane";
                foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
                {
                    printf $outfh ",%s", $key;
                }
                printf $outfh "\n";
                $first_time = FALSE;
            }
            #
            printf $outfh "%s,%s", $machine, $lane;
            foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
            {
                printf $outfh ",%d", $totals{$section}{by_machine_lane}{$machine}{$lane}{$key};
            }
            printf $outfh "\n";
        }
    }
    close($outfh);
    #
    ###############################################################
    #
    # per-product variants of the three groupings above
    #
    $section = COUNT;
    #
    printf $log_fh "\n%d: Export Total Data For %s by Product:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/COUNT_TOTALS_BY_PRODUCT.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        if ($first_time == TRUE)
        {
            printf $outfh "product";
            foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
            {
                printf $outfh ",%s", $key;
            }
            printf $outfh "\n";
            $first_time = FALSE;
        }
        #
        printf $outfh "%s", $product;
        foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
        {
            printf $outfh ",%d", $totals{$section}{by_product}{$product}{totals}{$key};
        }
        printf $outfh "\n";
    }
    close($outfh);
    #
    $section = COUNT;
    #
    printf $log_fh "\n%d: Export Data For %s by Product and Machine:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/COUNT_BY_PRODUCT_MACHINE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine}})
        {
            if ($first_time == TRUE)
            {
                printf $outfh "product,machine";
                foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
                {
                    printf $outfh ",%s", $key;
                }
                printf $outfh "\n";
                $first_time = FALSE;
            }
            #
            printf $outfh "%s,%s", $product, $machine;
            foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
            {
                printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key};
            }
            printf $outfh "\n";
        }
    }
    close($outfh);
    #
    $section = COUNT;
    #
    printf $log_fh "\n%d: Export Data For %s by Product, Machine and Lane:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/COUNT_BY_PRODUCT_MACHINE_LANE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}})
        {
            foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}})
            {
                if ($first_time == TRUE)
                {
                    printf $outfh "product,machine,lane";
                    foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
                    {
                        printf $outfh ",%s", $key;
                    }
                    printf $outfh "\n";
                    $first_time = FALSE;
                }
                #
                printf $outfh "%s,%s,%s", $product, $machine, $lane;
                foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
                {
                    printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key};
                }
                printf $outfh "\n";
            }
        }
    }
    close($outfh);
}
#
sub export_u01_time_data
{
    # Write the accumulated TIME-section totals from the global %totals
    # hash to CSV files under ${export_dir}: overall totals, per machine,
    # per machine+lane, and the same groupings broken down by product.
    # TIME values are printed with %s (they are stored as strings, unlike
    # the %d counters in export_u01_count_data). $pdb is accepted but
    # unused here; the data source is %totals.
    my ($pdb) = @_;
    #
    ###############################################################
    #
    my $section = TIME;
    #
    printf $log_fh "\n%d: Export Total Data For %s:\n", __LINE__, $section;
    #
    my $first_time = TRUE;
    #
    my $outfnm = "${export_dir}/TIME_TOTALS.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open(my $outfh, ">" , $outfnm) || die $!;
    #
    # header row: one column per key, in sorted order
    foreach my $key (sort keys %{$totals{$section}{totals}})
    {
        if ($first_time == TRUE)
        {
            printf $outfh "%s", $key;
            $first_time = FALSE;
        }
        else
        {
            printf $outfh ",%s", $key;
        }
    }
    printf $outfh "\n";
    #
    # single data row: values in the same sorted key order
    $first_time = TRUE;
    foreach my $key (sort keys %{$totals{$section}{totals}})
    {
        if ($first_time == TRUE)
        {
            printf $outfh "%s", $totals{$section}{totals}{$key};
            $first_time = FALSE;
        }
        else
        {
            printf $outfh ",%s", $totals{$section}{totals}{$key};
        }
    }
    printf $outfh "\n";
    close($outfh);
    #
    ###############################################################
    #
    $section = TIME;
    #
    printf $log_fh "\n%d: Export Data For %s by Machine:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    #
    $outfnm = "${export_dir}/TIME_BY_MACHINE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine}})
    {
        # header emitted once, using the first machine's key set
        if ($first_time == TRUE)
        {
            printf $outfh "machine";
            foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
            {
                printf $outfh ",%s", $key;
            }
            printf $outfh "\n";
            $first_time = FALSE;
        }
        #
        printf $outfh "%s", $machine;
        foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
        {
            printf $outfh ",%s", $totals{$section}{by_machine}{$machine}{$key};
        }
        # BUG FIX: the original ended this statement with a comma instead
        # of a semicolon (legal Perl -- trailing comma in the arg list --
        # but clearly unintended).
        printf $outfh "\n";
    }
    close($outfh);
    #
    $section = TIME;
    #
    printf $log_fh "\n%d: Export Data For %s by Machine and Lane:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/TIME_BY_MACHINE_LANE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}})
    {
        foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}{$machine}})
        {
            if ($first_time == TRUE)
            {
                printf $outfh "machine,lane";
                foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
                {
                    printf $outfh ",%s", $key;
                }
                printf $outfh "\n";
                $first_time = FALSE;
            }
            #
            printf $outfh "%s,%s", $machine, $lane;
            foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
            {
                printf $outfh ",%s", $totals{$section}{by_machine_lane}{$machine}{$lane}{$key};
            }
            printf $outfh "\n";
        }
    }
    close($outfh);
    #
    ###############################################################
    #
    # per-product variants of the groupings above
    #
    $section = TIME;
    #
    printf $log_fh "\n%d: Export Total Data For %s by Product:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/TIME_TOTALS_BY_PRODUCT.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        if ($first_time == TRUE)
        {
            printf $outfh "product";
            foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
            {
                printf $outfh ",%s", $key;
            }
            printf $outfh "\n";
            $first_time = FALSE;
        }
        #
        printf $outfh "%s", $product;
        foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
        {
            printf $outfh ",%s", $totals{$section}{by_product}{$product}{totals}{$key};
        }
        printf $outfh "\n";
    }
    close($outfh);
    #
    ###############################################################
    #
    $section = TIME;
    #
    printf $log_fh "\n%d: Export Data For %s by Product and Machine:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/TIME_BY_PRODUCT_MACHINE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine}})
        {
            if ($first_time == TRUE)
            {
                printf $outfh "product,machine";
                foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
                {
                    printf $outfh ",%s", $key;
                }
                printf $outfh "\n";
                $first_time = FALSE;
            }
            #
            printf $outfh "%s,%s", $product, $machine;
            foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
            {
                printf $outfh ",%s", $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key};
            }
            printf $outfh "\n";
        }
    }
    close($outfh);
    #
    printf $log_fh "\n%d: Export Data For %s by Product, Machine and Lane:\n", __LINE__, $section;
    #
    $first_time = TRUE;
    $outfnm = "${export_dir}/TIME_BY_PRODUCT_MACHINE_LANE.csv";
    printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
    open($outfh, ">" , $outfnm) || die $!;
    foreach my $product (sort keys %{$totals{$section}{by_product}})
    {
        foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}})
        {
            foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}})
            {
                if ($first_time == TRUE)
                {
                    printf $outfh "product,machine,lane";
                    foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
                    {
                        printf $outfh ",%s", $key;
                    }
                    printf $outfh "\n";
                    $first_time = FALSE;
                }
                #
                # BUG FIX: the original format string was "%s,s,%s", which
                # printed a literal 's' for the machine column and silently
                # dropped $lane; the header has three columns.
                printf $outfh "%s,%s,%s", $product, $machine, $lane;
                foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
                {
                    printf $outfh ",%s", $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key};
                }
                printf $outfh "\n";
            }
        }
    }
    close($outfh);
}
#
sub export_u01_feeder_data
{
my ($pdb) = @_;
#
###############################################################
#
my $section = MOUNTPICKUPFEEDER;
#
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, FAdd, FSAdd, ReelID:\n", __LINE__, $section;
#
my $outfnm = "${export_dir}/FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open(my $outfh, ">" , $outfnm) || die $!;
foreach my $pcol (@feeder_export_cols)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}})
{
foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}})
{
foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}})
{
foreach my $reelid (sort keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}})
{
printf $outfh "%s,%s,%s,%s,%s,%s", $machine, $lane, $stage, $fadd, $fsadd, $reelid;
foreach my $col (@feeder_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
close($outfh);
#
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, FAdd, FSAdd:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
foreach my $pcol (@feeder_export_cols2)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}})
{
foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}})
{
foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}})
{
printf $outfh "%s,%s,%s,%s,%s", $machine, $lane, $stage, $fadd, $fsadd;
foreach my $col (@feeder_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col};
}
printf $outfh "\n";
}
}
}
}
}
close($outfh);
#
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, TableNo:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/FEEDER_BY_MACHINE_LANE_STAGE_TABLE_NO.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
foreach my $pcol (@feeder_export_cols3)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}})
{
foreach my $table_no (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}})
{
printf $outfh "%s,%s,%s,%s", $machine, $lane, $stage, $table_no;
foreach my $col (@feeder_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col};
}
printf $outfh "\n";
}
}
}
}
close($outfh);
#
###############################################################
#
$section = MOUNTPICKUPFEEDER;
#
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, FAdd, FSAdd, ReelID:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@feeder_export_cols)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}})
{
foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}})
{
foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}})
{
foreach my $reelid (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}})
{
printf $outfh "%s,%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $fadd, $fsadd, $reelid;
foreach my $col (@feeder_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
}
close($outfh);
#
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, FAdd, FSAdd:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@feeder_export_cols2)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}})
{
foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}})
{
foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}})
{
printf $outfh "%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $fadd, $fsadd;
foreach my $col (@feeder_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
close($outfh);
#
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, TableNo:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_TABLE_NO.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@feeder_export_cols3)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}})
{
foreach my $table_no (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}})
{
printf $outfh "%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $table_no;
foreach my $col (@feeder_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col};
}
printf $outfh "\n";
}
}
}
}
}
close($outfh);
}
#
#
# Export MOUNTPICKUPNOZZLE totals from the global %totals accumulator to
# four CSV files under $export_dir: by machine/lane/stage/nhadd/ncadd,
# with and without the blkserial breakdown, each produced once overall
# and once broken out by product.
# NOTE(review): $pdb is accepted for signature consistency with the other
# exporters but is not referenced in this sub.
#
sub export_u01_nozzle_data
{
my ($pdb) = @_;
#
###############################################################
#
my $section = MOUNTPICKUPNOZZLE;
#
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, NHAdd, NCAdd, Blkserial:\n", __LINE__, $section;
#
my $outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open(my $outfh, ">" , $outfnm) || die $!;
# header row: each column spec supplies its own printf format and name
foreach my $pcol (@nozzle_export_cols)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
# one CSV row per (machine, lane, stage, nhadd, ncadd, blkserial);
# numeric keys sorted numerically, blkserial sorted as a string
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}})
{
foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}})
{
foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}})
{
foreach my $blkserial (sort keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}})
{
printf $outfh "%s,%s,%s,%s,%s,%s",
$machine, $lane, $stage, $nhadd, $ncadd, $blkserial;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d",
$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
close($outfh);
#
# second file: same totals rolled up without the blkserial dimension
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, NHAdd, NCAdd:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
foreach my $pcol (@nozzle_export_cols2)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}})
{
foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}})
{
foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}})
{
printf $outfh "%s,%s,%s,%s,%s", $machine, $lane, $stage, $nhadd, $ncadd;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col};
}
printf $outfh "\n";
}
}
}
}
}
close($outfh);
#
###############################################################
#
# product-dependent exports follow; $section is unchanged, this
# reassignment is a no-op kept for symmetry with the block above
$section = MOUNTPICKUPNOZZLE;
#
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, NHAdd, NCAdd, Blkserial:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@nozzle_export_cols)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}})
{
foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}})
{
foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}})
{
foreach my $blkserial (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}})
{
printf $outfh "%s,%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $nhadd, $ncadd, $blkserial;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
}
close($outfh);
#
# fourth file: per-product, without the blkserial dimension
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, NHAdd, NCAdd:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@nozzle_export_cols2)
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}})
{
foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}})
{
foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}})
{
printf $outfh "%s,%s,%s,%s,%s,%s",
$product, $machine, $lane, $stage, $nhadd, $ncadd;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
close($outfh);
}
#
#
# Export MOUNTPICKUPNOZZLE totals to CSV for one configurable key pair.
# $nmkey1/$nmkey2 name the two grouping keys (e.g. nhadd/ncadd) used in
# log messages and file names; $label selects the matching accumulator
# branch in %totals and the matching column lists in
# %nozzle_export_cols_new / %nozzle_export_cols2_new.  Four files are
# written under $export_dir: with and without the blkserial breakdown,
# each overall and broken out by product.
#
sub export_u01_nozzle_data_keys
{
my ($pdb, $nmkey1, $nmkey2, $label) = @_;
#
# upper-cased key names are embedded in the generated file names.
# (was: tr/[a-z]/[A-Z]/ — the brackets were literal characters in the
# transliteration sets, a classic Perl gotcha; uc() is the intended
# operation.  The unused upper-cased $label variable was dropped.)
my $NMKEY1 = uc($nmkey1);
my $NMKEY2 = uc($nmkey2);
#
###############################################################
#
my $section = MOUNTPICKUPNOZZLE;
#
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, $nmkey1, $nmkey2, Blkserial:\n", __LINE__, $section;
#
my $outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}_BLKSERIAL.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open(my $outfh, ">" , $outfnm) || die $!;
# header row: each column spec supplies its own printf format and name
foreach my $pcol (@{$nozzle_export_cols_new{$label}})
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
# one CSV row per (machine, lane, stage, key1, key2, blkserial)
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}})
{
foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}})
{
foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}})
{
foreach my $blkserial (sort keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}})
{
printf $outfh "%s,%s,%s,%s,%s,%s",
$machine, $lane, $stage, $key1, $key2, $blkserial;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d",
$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
close($outfh);
#
# second file: same totals rolled up without the blkserial dimension
printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, $nmkey1, $nmkey2:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
foreach my $pcol (@{$nozzle_export_cols2_new{$label}})
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}})
{
foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}})
{
foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}})
{
printf $outfh "%s,%s,%s,%s,%s", $machine, $lane, $stage, $key1, $key2;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d", $totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col};
}
printf $outfh "\n";
}
}
}
}
}
close($outfh);
#
###############################################################
#
# product-dependent exports follow (note the different %totals branch:
# by_product comes before $label here)
$section = MOUNTPICKUPNOZZLE;
#
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, $nmkey1, $nmkey2, Blkserial:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}_BLKSERIAL.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@{$nozzle_export_cols_new{$label}})
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}})
{
foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}})
{
foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}})
{
foreach my $blkserial (sort keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}})
{
printf $outfh "%s,%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $key1, $key2, $blkserial;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
}
close($outfh);
#
# fourth file: per-product, without the blkserial dimension
printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, ${nmkey1}, ${nmkey2}:\n", __LINE__, $section;
#
$outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}.csv";
printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
open($outfh, ">" , $outfnm) || die $!;
printf $outfh "product,";
foreach my $pcol (@{$nozzle_export_cols2_new{$label}})
{
printf $outfh $pcol->{format}, $pcol->{name};
}
printf $outfh "\n";
#
foreach my $product (sort keys %{$totals{$section}{by_product}})
{
foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}})
{
foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}})
{
foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}})
{
foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}})
{
foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}})
{
printf $outfh "%s,%s,%s,%s,%s,%s",
$product, $machine, $lane, $stage, $key1, $key2;
foreach my $col (@nozzle_count_cols)
{
printf $outfh ",%d", $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col};
}
printf $outfh "\n";
}
}
}
}
}
}
close($outfh);
}
#
#
# Run the keyed nozzle export once per (key1, key2, label) combination,
# in the same order as before: nhadd/ncadd, head/nhadd, head/ncadd.
#
sub export_u01_nozzle_data_new
{
my ($pdb) = @_;
#
my @key_combos = (
[ NZ_KEY_NHADD, NZ_KEY_NCADD, NZ_LABEL_NHADD_NCADD ],
[ NZ_KEY_HEAD,  NZ_KEY_NHADD, NZ_LABEL_HEAD_NHADD ],
[ NZ_KEY_HEAD,  NZ_KEY_NCADD, NZ_LABEL_HEAD_NCADD ],
);
#
foreach my $combo (@key_combos)
{
export_u01_nozzle_data_keys($pdb, @{$combo});
}
}
#
#
# Emit every U01 export: counts, times, feeders, then nozzles.  The
# nozzle exporter comes in two flavors; the PROC_OPT_USEOLDNZ bit in
# $proc_options selects the legacy one.
#
sub export_u01_data
{
my ($pdb) = @_;
#
export_u01_count_data($pdb);
export_u01_time_data($pdb);
export_u01_feeder_data($pdb);
#
my $nozzle_exporter = (($proc_options & PROC_OPT_USEOLDNZ) != 0)
? \&export_u01_nozzle_data
: \&export_u01_nozzle_data_new;
$nozzle_exporter->($pdb);
}
#
######################################################################
#
# high-level u01 file audit functions
#
#
# Compute per-key deltas for a name/value U01 section (count or time)
# by subtracting the cached values from the previous file of this
# (machine, lane, stage) from the current file's values.  Negative
# deltas (counter went backwards) are kept when PROC_OPT_USENEGDELTS is
# set, otherwise clamped to zero.  Dies if a key is missing from the
# cache — name/value sections are expected to carry a stable key set.
#
sub calculate_u01_name_value_delta
{
my ($pdb, $pu01, $section) = @_;
#
my $filename = $pu01->{file_name};
#
my $machine = $pu01->{mach_no};
my $lane = $pu01->{lane};
my $stage = $pu01->{stage};
my $output_no = $pu01->{output_no};
#
foreach my $key (keys %{$pu01->{$section}->{data}})
{
my $delta = 0;
#
if (exists($pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$key}))
{
$delta =
$pu01->{$section}->{data}->{$key} -
$pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$key};
#
if ($delta >= 0)
{
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = $delta;
}
elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
{
printf $log_fh "%d: WARNING: [%s] using NEGATIVE delta for %s key %s: %d\n", __LINE__, $filename, $section, $key, $delta if ($verbose >= MINVERBOSE);
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = $delta;
}
else
{
# note: the log line reports the discarded negative value;
# zero is what actually gets stored
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = 0;
printf $log_fh "%d: WARNING: [%s] setting NEGATIVE delta (%d) for %s key %s to ZERO\n", __LINE__, $filename, $delta, $section, $key if ($verbose >= MINVERBOSE);
}
#
printf $log_fh "%d: %s: %s = %d\n", __LINE__, $section, $key, $delta if ($verbose >= MAXVERBOSE);
}
else
{
printf $log_fh "%d: ERROR: [%s] %s key %s NOT found in cache. Ignoring counts (%d).\n", __LINE__, $filename, $section, $key, $pu01->{$section}->{data}->{$key};
# fixed typo in the die message ("found it cache" -> "found in cache")
die "ERROR: [$filename] $section key $key NOT found in cache. Stopped";
}
}
}
#
#
# Snapshot all name/value pairs of this section from the parsed U01 file
# into the per-(machine, lane, stage) cache, overwriting any keys already
# present, so the next file's counters can be diffed against them.
#
sub copy_u01_name_value_cache
{
my ($pdb, $pu01, $section) = @_;
#
my $machine = $pu01->{mach_no};
my $lane    = $pu01->{lane};
my $stage   = $pu01->{stage};
#
my $data = $pu01->{$section}->{data};
#
while (my ($name, $value) = each %{$data})
{
$pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$name} = $value;
}
}
#
#
# Replace the per-(machine, lane, stage) delta record wholesale with the
# raw values from this U01 file — used when no meaningful previous
# baseline exists (e.g. after a change-over or a counter reset).
#
sub copy_u01_name_value_delta
{
my ($pdb, $pu01, $section) = @_;
#
my $machine = $pu01->{mach_no};
my $lane    = $pu01->{lane};
my $stage   = $pu01->{stage};
#
delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
#
foreach my $name (keys %{$pu01->{$section}->{data}})
{
my $value = $pu01->{$section}->{data}->{$name};
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$name} = $value;
printf $log_fh "%d: %s: %s = %d\n", __LINE__, $section, $name, $value
if ($verbose >= MAXVERBOSE);
}
}
#
#
# Add $delta into the accumulator slot $pslot->{$key}, initializing the
# slot on first use.  Private helper for tabulate_u01_name_value_delta.
#
sub _accumulate_total
{
my ($pslot, $key, $delta) = @_;
#
if (exists($pslot->{$key}))
{
$pslot->{$key} += $delta;
}
else
{
$pslot->{$key} = $delta;
}
}
#
# Fold the per-(machine, lane, stage) deltas for one U01 section into the
# global %totals accumulator, once per roll-up granularity (overall, by
# machine, by machine/lane, by machine/lane/stage), both overall and
# broken out by product.  (The original repeated the same exists/+=/=
# pattern eight times; it is factored into _accumulate_total above.
# \%{...} deliberately autovivifies each bucket, exactly as the original
# exists() probes did.)
#
sub tabulate_u01_name_value_delta
{
my ($pdb, $pu01, $section) = @_;
#
my $filename = $pu01->{file_name};
#
my $machine = $pu01->{mach_no};
my $lane = $pu01->{lane};
my $stage = $pu01->{stage};
my $output_no = $pu01->{output_no};
#
my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
#
foreach my $key (keys %{$pu01->{$section}->{data}})
{
my $delta = $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
#
# product dependent totals
#
_accumulate_total(\%{$totals{$section}{by_product}{$product}{totals}}, $key, $delta);
printf $log_fh "%d: %s %s %s total=%d\n", __LINE__, $product, $section, $key, $totals{$section}{by_product}{$product}{totals}{$key} if ($verbose >= MAXVERBOSE);
#
_accumulate_total(\%{$totals{$section}{by_product}{$product}{by_machine}{$machine}}, $key, $delta);
printf $log_fh "%d: %s %s %s %s total=%d\n", __LINE__, $product, $section, $machine, $key, $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key} if ($verbose >= MAXVERBOSE);
#
_accumulate_total(\%{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}}, $key, $delta);
printf $log_fh "%d: %s %s %s %s %s total=%d\n", __LINE__, $product, $section, $machine, $lane, $key, $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key} if ($verbose >= MAXVERBOSE);
#
_accumulate_total(\%{$totals{$section}{by_product}{$product}{by_machine_lane_stage}{$machine}{$lane}{$stage}}, $key, $delta);
printf $log_fh "%d: %s %s %s %s %s %s total=%d\n", __LINE__, $product, $section, $machine, $lane, $stage, $key, $totals{$section}{by_product}{$product}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key} if ($verbose >= MAXVERBOSE);
#
# product independent totals
#
_accumulate_total(\%{$totals{$section}{totals}}, $key, $delta);
_accumulate_total(\%{$totals{$section}{by_machine}{$machine}}, $key, $delta);
_accumulate_total(\%{$totals{$section}{by_machine_lane}{$machine}{$lane}}, $key, $delta);
_accumulate_total(\%{$totals{$section}{by_machine_lane_stage}{$machine}{$lane}{$stage}}, $key, $delta);
}
}
#
#
# Drive the per-section delta state machine for one name/value section of
# a U01 file.  Depending on the file's output type and the cached state
# for this (machine, lane, stage), this baselines the cache, computes and
# tabulates deltas against it, or marks the counters as reset.
# States used here: DELTA (cache is valid, diff the next file against
# it), RESET (counters were cleared, take the next file's values as-is),
# BASELINE (re-seed the cache from the next file without tabulating).
#
sub audit_u01_name_value
{
my ($pdb, $pu01, $section) = @_;
#
my $filename = $pu01->{file_name};
my $machine = $pu01->{mach_no};
my $lane = $pu01->{lane};
my $stage = $pu01->{stage};
my $output_no = $pu01->{output_no};
#
# optionally skip COUNT sections of "timer not running" (reset-12)
# files entirely
return if ((($proc_options & PROC_OPT_IGNRESET12) != 0) &&
($output_no == TIMER_NOT_RUNNING) &&
($section eq COUNT));
#
my $mjsid = '';
my $lotname = '';
my $lotnumber = 0;
#
my $change_over = $pdb->{change_over}{u01}{$machine}{$lane}{$stage};
printf $log_fh "%d: Change Over: %s\n", __LINE__, $change_over if ($verbose >= MAXVERBOSE);
#
# fills $mjsid/$lotname/$lotnumber by reference; values are only used
# for their side effects here (none of them are read below)
get_product_info($pu01, \$mjsid, \$lotname, \$lotnumber);
#
printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
if ($verbose >= MAXVERBOSE);
#
if ($verbose >= MAXVERBOSE)
{
printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(keys %{$pu01->{$section}->{data}});
}
#
# output 3,4,5,12 U01 files have both Time and Count sections.
# these output types can all be treated the same.
#
if (($output_no == PROD_COMPLETE) ||
($output_no == PROD_COMPLETE_LATER) ||
($output_no == DETECT_CHANGE) ||
($output_no == TIMER_NOT_RUNNING))
{
if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
{
#
# first file of any of these types to be processed.
# no baseline exists yet: seed the cache and start diffing
# from the next file.
#
printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
if ($verbose >= MAXVERBOSE);
#
delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
copy_u01_name_value_cache($pdb, $pu01, $section);
#
$pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
}
else
{
my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
#
printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
#
# a change-over takes precedence over the cached state: take the
# file's values as the delta and re-seed the cache
if ($change_over == TRUE)
{
copy_u01_name_value_delta($pdb, $pu01, $section);
tabulate_u01_name_value_delta($pdb, $pu01, $section);
copy_u01_name_value_cache($pdb, $pu01, $section);
#
$pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
}
elsif ($state eq DELTA)
{
# normal path: diff against cache, tabulate, refresh cache
calculate_u01_name_value_delta($pdb, $pu01, $section);
tabulate_u01_name_value_delta($pdb, $pu01, $section);
copy_u01_name_value_cache($pdb, $pu01, $section);
#
$pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
}
elsif ($state eq RESET)
{
# counters were cleared: file's values ARE the delta
copy_u01_name_value_delta($pdb, $pu01, $section);
tabulate_u01_name_value_delta($pdb, $pu01, $section);
copy_u01_name_value_cache($pdb, $pu01, $section);
#
$pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
}
elsif ($state eq BASELINE)
{
# re-seed the cache only; nothing is tabulated for this file
delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
copy_u01_name_value_cache($pdb, $pu01, $section);
#
$pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
}
else
{
die "ERROR: unknown $section state: $state. Stopped";
}
# NOTE(review): $state still holds the ENTRY state here (every
# branch above stores the new state directly in the hash), so
# this line logs the entry state under an "EXIT STATE" label —
# confirm whether that is intended.
printf $log_fh "%d: EXIT STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
}
}
elsif (($output_no == MANUAL_CLEAR) ||
($output_no == AUTO_CLEAR))
{
#
# reset files have no data. they indicate the machine
# and counters were all reset to zero.
#
# NOTE(review): $state is undef if no file for this
# (machine, lane, stage) was seen before the reset file
my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
#
delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
#
$pdb->{$section}->{$machine}{$lane}{$stage}{state} = RESET;
# NOTE(review): logs the pre-reset state, not RESET — see above
printf $log_fh "%d: EXIT STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
}
else
{
die "ERROR: unknown output type: $output_no. Stopped";
}
#
return;
}
#
######################################################################
#
# routines for feeder section
#
#
# Compute per-feeder deltas for the MOUNTPICKUPFEEDER section against the
# cached rows from the previous U01 file of this (machine, lane, stage).
# Rows are keyed by feeder address (FAdd) and sub-address (FSAdd).  New
# feeders (not in the cache) and feeders whose reel id changed are taken
# at face value; otherwise the counter columns are diffed, with negative
# deltas kept or clamped to zero per PROC_OPT_USENEGDELTS.
#
sub calculate_u01_feeder_delta
{
my ($pdb, $pu01) = @_;
#
my $section = MOUNTPICKUPFEEDER;
#
my $filename = $pu01->{file_name};
my $machine = $pu01->{mach_no};
my $lane = $pu01->{lane};
my $stage = $pu01->{stage};
my $output_no = $pu01->{output_no};
#
my $pcols = $pu01->{$section}->{column_names};
#
delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
#
foreach my $prow (@{$pu01->{$section}->{data}})
{
my $fadd = $prow->{FAdd};
my $fsadd = $prow->{FSAdd};
my $reelid = $prow->{ReelID};
#
# tray parts are flagged by the two digits at offset -4 of the
# feeder address being non-zero; reel-id changes are ignored for
# tray parts below
my $is_tray = substr($fadd, -4, 2);
if ($is_tray > 0)
{
$is_tray = TRUE;
printf $log_fh "%d: [%s] %s IS tray part (%s) fadd: %s, fsadd: %s\n", __LINE__, $filename, $section, $is_tray, $fadd, $fsadd
if ($verbose >= MAXVERBOSE);
}
else
{
$is_tray = FALSE;
printf $log_fh "%d: [%s] %s IS NOT tray part (%s) fadd: %s, fsadd: %s\n", __LINE__, $filename, $section, $is_tray, $fadd, $fsadd
if ($verbose >= MAXVERBOSE);
}
#
if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}))
{
#
# unlike name-value (count,time) sections, it is possible
# to get new entries which have not been seen before. for
# example, new reelids or new feeders may not be in the
# previous u01 file, but appear as new. in those cases,
# take the counts as is.
#
printf $log_fh "%d: WARNING: [%s] %s FAdd %s, FSAdd %s NOT found in cache. Taking all counts as is.\n", __LINE__, $filename, $section, $fadd, $fsadd if ($verbose >= MINVERBOSE);
foreach my $col (@{$pcols})
{
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
}
}
else
{
my $cache_reelid = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}{ReelID};
# fixed garbled subscript: the originating file name is stored
# under the file_name key next to the cached row (written by
# copy_u01_feeder_cache); the key had been mangled here
my $cache_filename = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{file_name};
if (($reelid eq $cache_reelid) || ($is_tray == TRUE))
{
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{ReelID} = $reelid;
#
foreach my $col (@feeder_count_cols)
{
my $u01_value = $prow->{$col};
my $cache_value = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}{$col};
#
my $delta = $u01_value - $cache_value;
#
if ($delta >= 0)
{
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $delta;
}
elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
{
printf $log_fh "%d: WARNING: [%s] [%s] %s FAdd %s, FSAdd %s using NEGATIVE delta for key %s: %d\n", __LINE__, $filename, $cache_filename, $section, $fadd, $fsadd, $col, $delta if ($verbose >= MINVERBOSE);
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $delta;
}
else
{
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = 0;
printf $log_fh "%d: WARNING: [%s] [%s] %s FAdd %s, FSAdd %s setting NEGATIVE delta (%d) for key %s to ZERO; current value %d, cache value %d\n", __LINE__, $filename, $cache_filename, $section, $fadd, $fsadd, $delta, $col, $u01_value, $cache_value if ($verbose >= MINVERBOSE);
}
}
}
else
{
# reel changed on a non-tray feeder: counters belong to a new
# reel, so take the row's values as-is instead of diffing
printf $log_fh "%d: WARNING: [%s] %s FAdd %s, FSAdd %s REELID CHANGED: CACHED %s, CURRENT U01 %s\n", __LINE__, $filename, $section, $fadd, $fsadd, $cache_reelid, $reelid if ($verbose >= MINVERBOSE);
#
delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data};
#
foreach my $col (@{$pcols})
{
$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
}
}
}
}
}
#
#
# Snapshot the MOUNTPICKUPFEEDER rows of this U01 file into the cache for
# this (machine, lane, stage), keyed by FAdd/FSAdd.  Alongside each row,
# record the caller-supplied state and the name of the file the data came
# from (the file name is read back by calculate_u01_feeder_delta for its
# warning messages).
#
sub copy_u01_feeder_cache
{
my ($pdb, $pu01, $state) = @_;
#
my $section = MOUNTPICKUPFEEDER;
#
my $filename = $pu01->{file_name};
my $machine = $pu01->{mach_no};
my $lane = $pu01->{lane};
my $stage = $pu01->{stage};
my $output_no = $pu01->{output_no};
#
my $pcols = $pu01->{$section}->{column_names};
#
foreach my $prow (@{$pu01->{$section}->{data}})
{
my $fadd = $prow->{FAdd};
my $fsadd = $prow->{FSAdd};
#
foreach my $col (@{$pcols})
{
$pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
}
#
$pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{state} = $state;
# fixed garbled subscript: store the originating file name under the
# file_name key (read back by calculate_u01_feeder_delta)
$pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{file_name} = $filename;
}
}
#
#
# Replace the per-(machine, lane, stage) feeder delta record with the raw
# feeder rows from this U01 file, keyed by feeder address (FAdd) and
# sub-address (FSAdd) — used when the counters restart from a clean
# baseline and the file's values ARE the delta.
#
sub copy_u01_feeder_delta
{
my ($pdb, $pu01) = @_;
#
my $section = MOUNTPICKUPFEEDER;
#
my $machine = $pu01->{mach_no};
my $lane    = $pu01->{lane};
my $stage   = $pu01->{stage};
#
my @columns = @{$pu01->{$section}->{column_names}};
#
delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
#
foreach my $row (@{$pu01->{$section}->{data}})
{
my ($fadd, $fsadd) = @{$row}{qw(FAdd FSAdd)};
#
# copy every declared column for this row in one slice assignment
@{$pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}}{@columns} =
@{$row}{@columns};
}
}
#
#
# fold the current MOUNTPICKUPFEEDER delta for this (machine, lane,
# stage) into the global %totals, both product-independent and
# per-product, at three granularities:
#   - per feeder + reel  (fadd, fsadd, reelid)
#   - per feeder         (fadd, fsadd)
#   - per feeder table   (table_no = fadd / 10000)
#
sub tabulate_u01_feeder_delta
{
    my ($pdb, $pu01) = @_;
    #
    my $filename = $pu01->{file_name};
    #
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    my $section = MOUNTPICKUPFEEDER;
    #
    my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
    #
    # add one delta record into one totals bucket: accumulate a column
    # when it is already present, otherwise seed it with the delta
    # value (this mirrors the original exists/else pattern and avoids
    # uninitialized-value warnings).
    my $accumulate = sub {
        my ($pbucket, $pdata) = @_;
        foreach my $col (@feeder_count_cols)
        {
            if (exists($pbucket->{$col}))
            {
                $pbucket->{$col} += $pdata->{$col};
            }
            else
            {
                $pbucket->{$col} = $pdata->{$col};
            }
        }
    };
    #
    my $pdeltas = \%{ $pdb->{$section}{$machine}{$lane}{$stage}{delta} };
    #
    foreach my $fadd (sort { $a <=> $b } keys %{$pdeltas})
    {
        my $table_no = int($fadd/10000); # truncate to the feeder table number
        #
        foreach my $fsadd (sort { $a <=> $b } keys %{$pdeltas->{$fadd}})
        {
            my $pdata = $pdeltas->{$fadd}{$fsadd}{data};
            my $reelid = $pdata->{ReelID};
            #
            # product-independent totals
            #
            $accumulate->(\%{ $totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no} }, $pdata);
            #
            # product-dependent totals
            #
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no} }, $pdata);
        }
    }
}
#
#
# audit the MOUNTPICKUPFEEDER section of one U01 file, driving a small
# per-(machine, lane, stage) state machine over the states RESET,
# BASELINE and DELTA:
#
#   - production-complete outputs seed the counter cache or produce a
#     delta against it, which is then tabulated into %totals;
#   - detect-change outputs do the same, except an unknown entry state
#     only establishes a BASELINE (no tabulation yet);
#   - manual/auto clear outputs reset the state machine and drop the
#     cached counters;
#   - "timer not running" outputs carry no feeder data and are skipped.
#
# parameters:
#   $pdb  - ref to the audit database hash
#   $pu01 - ref to the parsed u01 file object
#
sub audit_u01_feeders
{
    my ($pdb, $pu01) = @_;
    #
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    my $section = MOUNTPICKUPFEEDER;
    my $filename = $pu01->{file_name};
    #
    set_list_section_column_names(LNB_U01_FILE_TYPE, $pu01, $section);
    #
    printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
        if ($verbose >= MAXVERBOSE);
    #
    if ($verbose >= MAXVERBOSE)
    {
        printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
        printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
        printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
        printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
        printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
        # BUGFIX: defined(@{...}) on an aggregate is a fatal error on
        # modern perls (>= 5.22); test the underlying array ref instead.
        printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pu01->{$section}->{data}}) if (defined($pu01->{$section}->{data}));
    }
    #
    # check if the file has a feeder data section.
    #
    if ($output_no == TIMER_NOT_RUNNING)
    {
        printf $log_fh "%d: No Feeder data in Output=%d U01 files. Skipping.\n", __LINE__, $output_no if ($verbose >= MAXVERBOSE);
        return;
    }
    elsif (($output_no == PROD_COMPLETE) ||
           ($output_no == PROD_COMPLETE_LATER))
    {
        if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
        {
            printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
                if ($verbose >= MAXVERBOSE);
            #
            # no prior state: seed the cache from this file's counters.
            delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq RESET)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # counters were cleared: the raw counts ARE the delta.
            copy_u01_feeder_delta($pdb, $pu01);
            tabulate_u01_feeder_delta($pdb, $pu01);
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq DELTA)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # normal case: delta = current counters - cached counters.
            calculate_u01_feeder_delta($pdb, $pu01);
            tabulate_u01_feeder_delta($pdb, $pu01);
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq BASELINE)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # baseline only: re-seed the cache, nothing to tabulate yet.
            delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
            #
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        else
        {
            my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
            die "ERROR: unknown $section state: $state. Stopped";
        }
    }
    elsif ($output_no == DETECT_CHANGE)
    {
        if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
        {
            printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
                if ($verbose >= MAXVERBOSE);
            #
            # no prior state: establish a baseline only.
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = BASELINE;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq RESET)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            copy_u01_feeder_delta($pdb, $pu01);
            tabulate_u01_feeder_delta($pdb, $pu01);
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq DELTA)
        {
            # BUGFIX: __LINE__ was passed twice here, shifting the
            # arguments so the state was logged as a line number.
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            calculate_u01_feeder_delta($pdb, $pu01);
            tabulate_u01_feeder_delta($pdb, $pu01);
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq BASELINE)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            #
            # NOTE(review): unlike the PROD_COMPLETE baseline branch,
            # this does not delete the old cache first — presumably a
            # deliberate merge of the baseline; confirm.
            copy_u01_feeder_cache($pdb, $pu01, DELTA);
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        else
        {
            my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
            die "ERROR: unknown $section state: $state. Stopped";
        }
    }
    elsif (($output_no == MANUAL_CLEAR) ||
           ($output_no == AUTO_CLEAR))
    {
        # BUGFIX: the format started with "%D", which is not a valid
        # sprintf conversion in perl; it must be "%d".
        printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
            $pdb->{$section}->{$machine}{$lane}{$stage}{state}
            if ($verbose >= MAXVERBOSE);
        # counters were cleared on the machine: drop the cache and
        # remember that the next file's counts are a fresh delta.
        $pdb->{$section}->{$machine}{$lane}{$stage}{state} = RESET;
        delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
    }
    else
    {
        die "ERROR: unknown $section output type: $output_no. Stopped";
    }
    #
    printf $log_fh "%d: EXIT STATE: %s\n", __LINE__,
        $pdb->{$section}->{$machine}{$lane}{$stage}{state}
        if ($verbose >= MAXVERBOSE);
    #
    return;
}
#
######################################################################
#
# routines for nozzle section
#
# Compute per-nozzle count deltas for the MOUNTPICKUPNOZZLE section of
# one U01 file against the cached counters for the same
# (machine, lane, stage). Rows are keyed by nozzle holder address
# (NHAdd) and nozzle carriage address (NCAdd). Per row:
#   - no cache entry: the raw counts are taken as the delta;
#   - cache entry with the same BLKSerial: delta = current - cached,
#     where negative deltas are kept or clamped to zero depending on
#     the PROC_OPT_USENEGDELTS processing option;
#   - BLKSerial changed (nozzle block swapped): the cached counters no
#     longer apply, so the raw counts are again taken as the delta.
# Results go to ...{delta}{NHAdd}{NCAdd}{data}; any previous delta for
# this (machine, lane, stage) is discarded first.
sub calculate_u01_nozzle_delta
{
    my ($pdb, $pu01) = @_;
    #
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $filename = $pu01->{file_name};
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    #
    my $pcols = $pu01->{$section}->{column_names};
    #
    # drop any stale delta before rebuilding it from this file.
    delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
    #
    foreach my $prow (@{$pu01->{$section}->{data}})
    {
        my $nhadd = $prow->{NHAdd};
        my $ncadd = $prow->{NCAdd};
        my $blkserial = $prow->{BLKSerial};
        #
        # no cached counters for this nozzle position: raw counts = delta.
        if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}))
        {
            printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s NOT found in cache. Taking all counts as is.\n", __LINE__, $filename, $section, $nhadd, $ncadd if ($verbose >= MINVERBOSE);
            foreach my $col (@{$pcols})
            {
                $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $prow->{$col};
            }
        }
        else
        {
            my $cache_blkserial = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}{BLKSerial};
            if ($blkserial eq $cache_blkserial)
            {
                # same nozzle block as last time: subtract cached counts.
                $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{BLKSerial} = $blkserial;
                #
                foreach my $col (@nozzle_count_cols)
                {
                    my $u01_value = $prow->{$col};
                    my $cache_value = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}{$col};
                    #
                    my $delta = $u01_value - $cache_value;
                    #
                    if ($delta >= 0)
                    {
                        $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $delta;
                    }
                    elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
                    {
                        # option says keep negative deltas as-is.
                        printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s using NEGATIVE delta for key %s: %d\n", __LINE__, $filename, $section, $nhadd, $ncadd, $col, $delta if ($verbose >= MINVERBOSE);
                        $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $delta;
                    }
                    else
                    {
                        # default: clamp an impossible negative delta to zero.
                        $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = 0;
                        printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s setting NEGATIVE delta (%d) for key %s to ZERO\n", __LINE__, $filename, $section, $nhadd, $ncadd, $delta, $col if ($verbose >= MINVERBOSE);
                    }
                }
            }
            else
            {
                # nozzle block was swapped: cache does not apply, take raw counts.
                printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s BLKSERIAL CHANGED: CACHED %s, CURRENT U01 %s\n", __LINE__, $filename, $section, $nhadd, $ncadd, $cache_blkserial, $blkserial if ($verbose >= MINVERBOSE);
                #
                delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data};
                #
                foreach my $col (@{$pcols})
                {
                    $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $prow->{$col};
                }
            }
        }
    }
}
#
#
# cache the raw MOUNTPICKUPNOZZLE counters of one U01 file for the next
# delta calculation, keyed by (NHAdd, NCAdd), and tag each entry with
# the caller-supplied state marker.
#
sub copy_u01_nozzle_cache
{
    my ($pdb, $pu01, $state) = @_;
    #
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $filename = $pu01->{file_name};
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    #
    my $pcols = $pu01->{$section}->{column_names};
    #
    foreach my $prow (@{ $pu01->{$section}->{data} })
    {
        my ($nhadd, $ncadd) = @{$prow}{qw(NHAdd NCAdd)};
        #
        my $pentry = \%{ $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd} };
        #
        # copy all reported columns in one slice, then record the state.
        @{ $pentry->{data} }{ @{$pcols} } = @{$prow}{ @{$pcols} };
        $pentry->{state} = $state;
    }
}
#
#
# take the raw MOUNTPICKUPNOZZLE counters of one U01 file verbatim as
# the delta for this (machine, lane, stage), replacing any previous
# delta. used when no usable counter cache exists (e.g. right after a
# counter reset).
#
sub copy_u01_nozzle_delta
{
    my ($pdb, $pu01) = @_;
    #
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $filename = $pu01->{file_name};
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    #
    my $pcols = $pu01->{$section}->{column_names};
    #
    # drop any stale delta; it is rebuilt below from this file.
    delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
    #
    foreach my $prow (@{ $pu01->{$section}->{data} })
    {
        my ($nhadd, $ncadd) = @{$prow}{qw(NHAdd NCAdd)};
        #
        # copy every reported column in one hash-slice assignment.
        @{ $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data} }{ @{$pcols} } =
            @{$prow}{ @{$pcols} };
    }
}
#
#
# fold the current MOUNTPICKUPNOZZLE delta for this (machine, lane,
# stage) into the global %totals, both product-independent and
# per-product, at two granularities:
#   - per nozzle position + block  (nhadd, ncadd, blkserial)
#   - per nozzle position          (nhadd, ncadd)
#
sub tabulate_u01_nozzle_delta
{
    my ($pdb, $pu01) = @_;
    #
    my $filename = $pu01->{file_name};
    #
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
    #
    # add one delta record into one totals bucket: accumulate a column
    # when it is already present, otherwise seed it with the delta value.
    my $accumulate = sub {
        my ($pbucket, $pdata) = @_;
        foreach my $col (@nozzle_count_cols)
        {
            if (exists($pbucket->{$col}))
            {
                $pbucket->{$col} += $pdata->{$col};
            }
            else
            {
                $pbucket->{$col} = $pdata->{$col};
            }
        }
    };
    #
    my $pdeltas = \%{ $pdb->{$section}{$machine}{$lane}{$stage}{delta} };
    #
    foreach my $nhadd (sort { $a <=> $b } keys %{$pdeltas})
    {
        foreach my $ncadd (sort { $a <=> $b } keys %{$pdeltas->{$nhadd}})
        {
            my $pdata = $pdeltas->{$nhadd}{$ncadd}{data};
            my $blkserial = $pdata->{BLKSerial};
            #
            # product-independent totals
            #
            $accumulate->(\%{ $totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd} }, $pdata);
            #
            # product-dependent totals
            #
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd} }, $pdata);
        }
    }
}
#
#
# audit the MOUNTPICKUPNOZZLE section of one U01 file (legacy keying by
# NHAdd/NCAdd), driving a per-(machine, lane, stage) state machine over
# RESET, BASELINE and DELTA:
#
#   - production-complete outputs seed the counter cache or produce a
#     delta against it, which is then tabulated into %totals;
#   - detect-change and "timer not running" outputs carry no nozzle
#     data and are skipped;
#   - manual/auto clear outputs reset the state machine and drop the
#     cached counters.
#
# parameters:
#   $pdb  - ref to the audit database hash
#   $pu01 - ref to the parsed u01 file object
#
sub audit_u01_nozzles
{
    my ($pdb, $pu01) = @_;
    #
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    my $section = MOUNTPICKUPNOZZLE;
    my $filename = $pu01->{file_name};
    #
    set_list_section_column_names(LNB_U01_FILE_TYPE, $pu01, $section);
    #
    printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
        if ($verbose >= MAXVERBOSE);
    #
    if ($verbose >= MAXVERBOSE)
    {
        printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
        printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
        printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
        printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
        printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
        # BUGFIX: defined(@{...}) on an aggregate is a fatal error on
        # modern perls (>= 5.22); test the underlying array ref instead.
        printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pu01->{$section}->{data}}) if (defined($pu01->{$section}->{data}));
    }
    #
    # check if the file has a nozzle data section.
    #
    if (($output_no == DETECT_CHANGE) ||
        ($output_no == TIMER_NOT_RUNNING))
    {
        printf $log_fh "%d: No Nozzle data in Output=%d U01 files. Skipping.\n", __LINE__, $output_no if ($verbose >= MAXVERBOSE);
        return;
    }
    elsif (($output_no == PROD_COMPLETE) ||
           ($output_no == PROD_COMPLETE_LATER))
    {
        if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
        {
            printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
                if ($verbose >= MAXVERBOSE);
            #
            # no prior state: seed the cache from this file's counters.
            delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
            copy_u01_nozzle_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq RESET)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # counters were cleared: the raw counts ARE the delta.
            copy_u01_nozzle_delta($pdb, $pu01);
            tabulate_u01_nozzle_delta($pdb, $pu01);
            copy_u01_nozzle_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq DELTA)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # normal case: delta = current counters - cached counters.
            calculate_u01_nozzle_delta($pdb, $pu01);
            tabulate_u01_nozzle_delta($pdb, $pu01);
            copy_u01_nozzle_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq BASELINE)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # baseline only: re-seed the cache, nothing to tabulate yet.
            delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
            copy_u01_nozzle_cache($pdb, $pu01, DELTA);
            #
            $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        else
        {
            my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
            die "ERROR: unknown $section state: $state. Stopped";
        }
    }
    elsif (($output_no == MANUAL_CLEAR) ||
           ($output_no == AUTO_CLEAR))
    {
        printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
            $pdb->{$section}->{$machine}{$lane}{$stage}{state}
            if ($verbose >= MAXVERBOSE);
        # counters were cleared on the machine: drop the cache and
        # remember that the next file's counts are a fresh delta.
        $pdb->{$section}->{$machine}{$lane}{$stage}{state} = RESET;
        delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
    }
    else
    {
        die "ERROR: unknown $section output type: $output_no. Stopped";
    }
    #
    printf $log_fh "%d: EXIT STATE: %s\n", __LINE__,
        $pdb->{$section}->{$machine}{$lane}{$stage}{state}
        if ($verbose >= MAXVERBOSE);
    #
    return;
}
#
######################################################################
#
# routines for nozzle section
#
#
# generic-key version of calculate_u01_nozzle_delta: compute per-nozzle
# count deltas for the MOUNTPICKUPNOZZLE section against the cached
# counters, with the two key columns ($nmkey1, $nmkey2) and the storage
# label supplied by the caller. per row:
#   - no cache entry: the raw counts are taken as the delta;
#   - cache entry with the same BLKSerial: delta = current - cached,
#     negative deltas kept or clamped to zero per PROC_OPT_USENEGDELTS;
#   - BLKSerial changed (nozzle block swapped): raw counts again taken
#     as the delta.
#
sub calculate_u01_nozzle_delta_keys
{
    my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
    #
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $filename = $pu01->{file_name};
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    #
    my $pcols = $pu01->{$section}->{column_names};
    #
    # drop any stale delta before rebuilding it from this file.
    delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta};
    #
    foreach my $prow (@{$pu01->{$section}->{data}})
    {
        my $key1 = $prow->{$nmkey1};
        my $key2 = $prow->{$nmkey2};
        my $blkserial = $prow->{BLKSerial};
        #
        if ( ! exists($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}))
        {
            # BUGFIX: the first key name in this warning was $nmkey2;
            # it must be $nmkey1 to match the value it labels.
            printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s NOT found in cache. Taking all counts as is.\n", __LINE__, $filename, $section, $key1, $key2 if ($verbose >= MINVERBOSE);
            foreach my $col (@{$pcols})
            {
                $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $prow->{$col};
            }
        }
        else
        {
            my $cache_blkserial = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}{BLKSerial};
            if ($blkserial eq $cache_blkserial)
            {
                # same nozzle block as last time: subtract cached counts.
                $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{BLKSerial} = $blkserial;
                #
                foreach my $col (@nozzle_count_cols)
                {
                    my $u01_value = $prow->{$col};
                    my $cache_value = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}{$col};
                    #
                    my $delta = $u01_value - $cache_value;
                    #
                    if ($delta >= 0)
                    {
                        $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $delta;
                    }
                    elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
                    {
                        # option says keep negative deltas as-is.
                        printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s using NEGATIVE delta for key %s: %d\n", __LINE__, $filename, $section, $key1, $key2, $col, $delta if ($verbose >= MINVERBOSE);
                        $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $delta;
                    }
                    else
                    {
                        # default: clamp an impossible negative delta to zero.
                        $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = 0;
                        printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s setting NEGATIVE delta (%d) for key %s to ZERO\n", __LINE__, $filename, $section, $key1, $key2, $delta, $col if ($verbose >= MINVERBOSE);
                    }
                }
            }
            else
            {
                # nozzle block swapped: cache does not apply, take raw counts.
                printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s BLKSERIAL CHANGED: CACHED %s, CURRENT U01 %s\n", __LINE__, $filename, $section, $key1, $key2, $cache_blkserial, $blkserial if ($verbose >= MINVERBOSE);
                #
                delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data};
                #
                foreach my $col (@{$pcols})
                {
                    $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $prow->{$col};
                }
            }
        }
    }
}
#
#
# generic-key version of copy_u01_nozzle_cache: cache the raw nozzle
# counters of one U01 file under the caller-supplied label, keyed by
# the two caller-supplied key columns, and tag each entry with the
# caller-supplied state marker.
#
sub copy_u01_nozzle_cache_keys
{
    my ($pdb, $pu01, $state, $nmkey1, $nmkey2, $label) = @_;
    #
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $filename = $pu01->{file_name};
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    #
    my $pcols = $pu01->{$section}->{column_names};
    #
    foreach my $prow (@{ $pu01->{$section}->{data} })
    {
        my ($key1, $key2) = @{$prow}{$nmkey1, $nmkey2};
        #
        my $pentry = \%{ $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2} };
        #
        # copy all reported columns in one slice, then record the state.
        @{ $pentry->{data} }{ @{$pcols} } = @{$prow}{ @{$pcols} };
        $pentry->{state} = $state;
    }
}
#
#
# generic-key version of copy_u01_nozzle_delta: take the raw nozzle
# counters of one U01 file verbatim as the delta under the
# caller-supplied label, replacing any previous delta.
#
sub copy_u01_nozzle_delta_keys
{
    my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
    #
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $filename = $pu01->{file_name};
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    #
    my $pcols = $pu01->{$section}->{column_names};
    #
    # drop any stale delta; it is rebuilt below from this file.
    delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta};
    #
    foreach my $prow (@{ $pu01->{$section}->{data} })
    {
        my ($key1, $key2) = @{$prow}{$nmkey1, $nmkey2};
        #
        # copy every reported column in one hash-slice assignment.
        @{ $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data} }{ @{$pcols} } =
            @{$prow}{ @{$pcols} };
    }
}
#
#
# generic-key version of tabulate_u01_nozzle_delta: fold the current
# labeled nozzle delta into the global %totals, product-independent and
# per-product, at two granularities:
#   - per (key1, key2, blkserial)
#   - per (key1, key2)
#
sub tabulate_u01_nozzle_delta_keys
{
    my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
    #
    my $filename = $pu01->{file_name};
    #
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    my $section = MOUNTPICKUPNOZZLE;
    #
    my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
    #
    # add one delta record into one totals bucket: accumulate a column
    # when it is already present, otherwise seed it with the delta value.
    my $accumulate = sub {
        my ($pbucket, $pdata) = @_;
        foreach my $col (@nozzle_count_cols)
        {
            if (exists($pbucket->{$col}))
            {
                $pbucket->{$col} += $pdata->{$col};
            }
            else
            {
                $pbucket->{$col} = $pdata->{$col};
            }
        }
    };
    #
    my $pdeltas = \%{ $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta} };
    #
    foreach my $key1 (sort { $a <=> $b } keys %{$pdeltas})
    {
        foreach my $key2 (sort { $a <=> $b } keys %{$pdeltas->{$key1}})
        {
            my $pdata = $pdeltas->{$key1}{$key2}{data};
            my $blkserial = $pdata->{BLKSerial};
            #
            # product-independent totals
            #
            $accumulate->(\%{ $totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial} }, $pdata);
            $accumulate->(\%{ $totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2} }, $pdata);
            #
            # product-dependent totals
            #
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial} }, $pdata);
            $accumulate->(\%{ $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2} }, $pdata);
        }
    }
}
#
#
# generic-key version of audit_u01_nozzles: drive the per-(machine,
# lane, stage) nozzle state machine (RESET / BASELINE / DELTA) for the
# key pairing ($nmkey1, $nmkey2), storing all state under $label so
# several pairings can coexist.
#
# NOTE(review): unlike audit_u01_nozzles, this does not call
# set_list_section_column_names — presumably the column names set by an
# earlier pass (or load_list defaults) suffice; confirm.
#
sub audit_u01_nozzles_keys
{
    my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
    #
    my $machine = $pu01->{mach_no};
    my $lane = $pu01->{lane};
    my $stage = $pu01->{stage};
    my $output_no = $pu01->{output_no};
    my $section = MOUNTPICKUPNOZZLE;
    my $filename = $pu01->{file_name};
    #
    printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
        if ($verbose >= MAXVERBOSE);
    #
    if ($verbose >= MAXVERBOSE)
    {
        printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
        printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
        printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
        printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
        printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
        # defined(@{...}) on an aggregate is a fatal error on modern
        # perls (>= 5.22); test the underlying array ref instead.
        printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pu01->{$section}->{data}}) if (defined($pu01->{$section}->{data}));
    }
    #
    # check if the file has a nozzle data section.
    #
    if (($output_no == DETECT_CHANGE) ||
        ($output_no == TIMER_NOT_RUNNING))
    {
        printf $log_fh "%d: No Nozzle data in Output=%d U01 files. Skipping.\n", __LINE__, $output_no if ($verbose >= MAXVERBOSE);
        return;
    }
    elsif (($output_no == PROD_COMPLETE) ||
           ($output_no == PROD_COMPLETE_LATER))
    {
        if ( ! exists($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}))
        {
            printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
                if ($verbose >= MAXVERBOSE);
            #
            # no prior state: seed the cache from this file's counters.
            delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache};
            copy_u01_nozzle_cache_keys(
                $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
            #
            $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} eq RESET)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # counters were cleared: the raw counts ARE the delta.
            copy_u01_nozzle_delta_keys(
                $pdb, $pu01, $nmkey1, $nmkey2, $label);
            tabulate_u01_nozzle_delta_keys(
                $pdb, $pu01, $nmkey1, $nmkey2, $label);
            copy_u01_nozzle_cache_keys(
                $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
            #
            $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} eq DELTA)
        {
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # normal case: delta = current counters - cached counters.
            calculate_u01_nozzle_delta_keys(
                $pdb, $pu01, $nmkey1, $nmkey2, $label);
            tabulate_u01_nozzle_delta_keys(
                $pdb, $pu01, $nmkey1, $nmkey2, $label);
            copy_u01_nozzle_cache_keys(
                $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
            #
            $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        elsif ($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} eq BASELINE)
        {
            # BUGFIX: this printf looked up the state WITHOUT {$label},
            # so it logged the wrong (usually undefined) state.
            printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
                $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
                if ($verbose >= MAXVERBOSE);
            # baseline only: re-seed the cache, nothing to tabulate yet.
            delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache};
            copy_u01_nozzle_cache_keys(
                $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
            #
            $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
        }
        else
        {
            my $state = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state};
            die "ERROR: unknown $section state: $state. Stopped";
        }
    }
    elsif (($output_no == MANUAL_CLEAR) ||
           ($output_no == AUTO_CLEAR))
    {
        printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
            $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
            if ($verbose >= MAXVERBOSE);
        # counters were cleared on the machine: drop the cache and
        # remember that the next file's counts are a fresh delta.
        $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = RESET;
        delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache};
    }
    else
    {
        die "ERROR: unknown $section output type: $output_no. Stopped";
    }
    #
    printf $log_fh "%d: EXIT STATE: %s\n", __LINE__,
        $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
        if ($verbose >= MAXVERBOSE);
    #
    return;
}
#
#
# new-style nozzle audit: run the keyed nozzle audit once per key
# pairing of interest, each stored under its own label.
#
sub audit_u01_nozzles_new
{
    my ($pdb, $pu01) = @_;
    #
    my @pairings = (
        [NZ_KEY_NHADD, NZ_KEY_NCADD, NZ_LABEL_NHADD_NCADD],
        [NZ_KEY_HEAD,  NZ_KEY_NHADD, NZ_LABEL_HEAD_NHADD],
        [NZ_KEY_HEAD,  NZ_KEY_NCADD, NZ_LABEL_HEAD_NCADD],
    );
    #
    audit_u01_nozzles_keys($pdb, $pu01, @{$_}) for (@pairings);
}
#
#####################################################################
#
# high-level audit functions for u01 files.
#
#
# audit one loaded U01 file: record product info, audit the COUNT and
# TIME name/value sections, the feeder section, and the nozzle section
# (old or new style, selected by PROC_OPT_USEOLDNZ).
#
sub audit_u01_file
{
    my ($pdb, $pu01) = @_;
    #
    my $output_no = $pu01->{output_no};
    #
    # optionally ignore "timer not running" (output 12) files entirely.
    if (($output_no == TIMER_NOT_RUNNING) &&
        (($proc_options & PROC_OPT_IGNALL12) != 0))
    {
        return;
    }
    #
    set_product_info($pdb, $pu01, LNB_U01_FILE_TYPE);
    #
    audit_u01_name_value($pdb, $pu01, $_) for (COUNT, TIME);
    audit_u01_feeders($pdb, $pu01);
    #
    # old vs new nozzle tabulation is selected by a processing option.
    if (($proc_options & PROC_OPT_USEOLDNZ) != 0)
    {
        audit_u01_nozzles($pdb, $pu01);
    }
    else
    {
        audit_u01_nozzles_new($pdb, $pu01);
    }
    #
    return;
}
#
#
# parse all sections of interest out of one raw U01 file object:
# name/value sections first, then the list sections, then the
# inspection-data name/value section.
#
sub load_u01_sections
{
    my ($pu01) = @_;
    #
    load_name_value($pu01, $_)
        for (INDEX, INFORMATION, TIME, CYCLETIME, COUNT);
    #
    load_list($pu01, $_)
        for (DISPENSER, MOUNTPICKUPFEEDER, MOUNTPICKUPNOZZLE);
    #
    load_name_value($pu01, INSPECTIONDATA);
}
#
#
# audit pass over all U01 files: load each file, parse its sections,
# and feed it through the audit state machines into $pdb.
#
sub audit_u01_files
{
    my ($pu01s, $pdb) = @_;
    #
    printf $log_fh "\n%d: Audit U01 files:\n", __LINE__;
    #
    foreach my $pfile (@{$pu01s})
    {
        printf $log_fh "\n%d: Audit U01: %s\n", __LINE__, $pfile->{file_name} if ($verbose >= MIDVERBOSE);
        #
        # skip files that fail to load or parse.
        next if (load($pfile) == 0);
        #
        load_u01_sections($pfile);
        audit_u01_file($pdb, $pfile);
    }
    #
    return;
}
#
########################################################################
########################################################################
#
# process U01 files for csv export.
#
#
# export every prepared U01 section as csv, keyed per machine:
# name/value sections (TIME, CYCLE_TIME, COUNT), the list sections
# (DISPENSER, MOUNT_PICKUP_FEEDER, MOUNT_PICKUP_NOZZLE), and finally
# the INSPECTION_DATA name/value section.
#
sub export_u01_data_as_csv
{
    my ($pdb) = @_;
    #
    # [section constant, exported csv name] pairs, in output order.
    export_name_value_section_as_csv($_->[0],
                                     LNB_U01_FILE_TYPE,
                                     $_->[1],
                                     'machine',
                                     TRUE)
        for ([TIME,      'TIME'],
             [CYCLETIME, 'CYCLE_TIME'],
             [COUNT,     'COUNT']);
    #
    export_list_section_as_csv($_->[0],
                               LNB_U01_FILE_TYPE,
                               $_->[1],
                               'machine',
                               TRUE)
        for ([DISPENSER,         'DISPENSER'],
             [MOUNTPICKUPFEEDER, 'MOUNT_PICKUP_FEEDER'],
             [MOUNTPICKUPNOZZLE, 'MOUNT_PICKUP_NOZZLE']);
    #
    export_name_value_section_as_csv(INSPECTIONDATA,
                                     LNB_U01_FILE_TYPE,
                                     'INSPECTION_DATA',
                                     'machine',
                                     TRUE);
}
#
sub prepare_u01_file
{
    my ($pdb, $pu01) = @_;
    #
    set_product_info($pdb, $pu01, LNB_U01_FILE_TYPE);
    #
    # table of (preparer, section) pairs, in the same order the
    # sections are later exported to csv.
    #
    my @specs = (
        [ \&prepare_name_value_section, TIME() ],
        [ \&prepare_name_value_section, CYCLETIME() ],
        [ \&prepare_name_value_section, COUNT() ],
        [ \&prepare_list_section,       DISPENSER() ],
        [ \&prepare_list_section,       MOUNTPICKUPFEEDER() ],
        [ \&prepare_list_section,       MOUNTPICKUPNOZZLE() ],
        [ \&prepare_name_value_section, INSPECTIONDATA() ],
    );
    #
    foreach my $spec (@specs)
    {
        my ($preparer, $section) = @{$spec};
        $preparer->($pdb, $pu01, LNB_U01_FILE_TYPE, $section, TRUE);
    }
    #
    return;
}
#
sub prepare_u01_files
{
    my ($pu01s, $pdb) = @_;
    #
    printf $log_fh "\n%d: Audit U01 files:\n", __LINE__;
    #
    foreach my $pfile (@{$pu01s})
    {
        printf $log_fh "\n%d: Audit u01: %s\n", __LINE__, $pfile->{file_name}
            if ($verbose >= MIDVERBOSE);
        #
        # skip any file that fails to load.
        #
        next if (load($pfile) == 0);
        #
        load_u01_sections($pfile);
        prepare_u01_file($pdb, $pfile);
    }
    #
    return;
}
#
sub process_u01_files
{
    my ($pu01s) = @_;
    #
    # nothing to do for an empty file list.
    #
    unless (scalar(@{$pu01s}) > 0)
    {
        printf $log_fh "%d: No U01 files to process. Returning.\n\n", __LINE__;
        return;
    }
    #
    # first pass: audit the files and export the audit data.
    #
    my %db = ();
    audit_u01_files($pu01s, \%db);
    export_u01_data(\%db);
    #
    # second pass: re-load and reshape the data for csv export.
    #
    my %csv_db = ();
    prepare_u01_files($pu01s, \%csv_db);
    export_u01_data_as_csv(\%csv_db);
    #
    return;
}
#
########################################################################
########################################################################
#
# process U03 files.
#
sub export_u03_data_as_csv
{
    my ($pdb) = @_;
    #
    # every U03 csv export is a list section keyed by 'machine' with a
    # TRUE trailing flag; table is in the original output order.
    #
    my @specs = (
        [ MOUNTQUALITYTRACE(), 'MOUNT_QUALITY_TRACE' ],
        [ MOUNTLATESTREEL(),   'MOUNT_LATEST_REEL' ],
        [ MOUNTEXCHANGEREEL(), 'MOUNT_EXCHANGE_REEL' ],
    );
    #
    foreach my $spec (@specs)
    {
        my ($section, $label) = @{$spec};
        export_list_section_as_csv($section,
                                   LNB_U03_FILE_TYPE(),
                                   $label,
                                   'machine',
                                   TRUE);
    }
}
#
sub prepare_u03_file
{
    my ($pdb, $pu03) = @_;
    #
    set_product_info($pdb, $pu03, LNB_U03_FILE_TYPE);
    #
    # all three U03 csv sections are list sections; prepare them in
    # the same order they are exported.
    #
    foreach my $section (MOUNTQUALITYTRACE(), MOUNTLATESTREEL(), MOUNTEXCHANGEREEL())
    {
        prepare_list_section($pdb,
                             $pu03,
                             LNB_U03_FILE_TYPE,
                             $section,
                             TRUE);
    }
    #
    return;
}
#
sub load_u03_sections
{
    my ($pu03) = @_;
    #
    # header sections are simple name/value pairs.
    #
    load_name_value($pu03, $_) for (INDEX, INFORMATION);
    #
    # everything else in a U03 file is a list section.
    #
    load_list($pu03, $_) for (BRECG,
                              BRECGCALC,
                              ELAPSETIMERECOG,
                              SBOARD,
                              HEIGHTCORRECT,
                              MOUNTQUALITYTRACE,
                              MOUNTLATESTREEL,
                              MOUNTEXCHANGEREEL);
}
#
sub prepare_u03_files
{
    my ($pu03s, $pdb) = @_;
    #
    printf $log_fh "\n%d: Audit U03 files:\n", __LINE__;
    #
    foreach my $pfile (@{$pu03s})
    {
        printf $log_fh "\n%d: Audit u03: %s\n", __LINE__, $pfile->{file_name}
            if ($verbose >= MIDVERBOSE);
        #
        # skip any file that fails to load.
        #
        next if (load($pfile) == 0);
        #
        load_u03_sections($pfile);
        prepare_u03_file($pdb, $pfile);
    }
    #
    return;
}
#
sub process_u03_files
{
    my ($pu03s) = @_;
    #
    # nothing to do for an empty file list.
    #
    unless (scalar(@{$pu03s}) > 0)
    {
        printf $log_fh "\n%d: No U03 files to process. Returning.\n\n", __LINE__;
        return;
    }
    #
    # reshape the data and export it as csv.
    #
    my %csv_db = ();
    prepare_u03_files($pu03s, \%csv_db);
    export_u03_data_as_csv(\%csv_db);
    #
    return;
}
#
########################################################################
########################################################################
#
# process MPR files.
#
sub export_mpr_data_as_csv
{
    my ($pdb) = @_;
    #
    # every MPR csv export is a list section keyed by 'sp'; note the
    # trailing flag is FALSE here, unlike the U01/U03 exports.
    #
    my @specs = (
        [ TIMEDATASP(),      'TIME_DATA_SP' ],
        [ COUNTDATASP(),     'COUNT_DATA_SP' ],
        [ COUNTDATASP2(),    'COUNT_DATA_SP2' ],
        [ TRACEDATASP(),     'TRACE_DATA_SP' ],
        [ TRACEDATASP_2(),   'TRACE_DATA_SP_2' ],
        [ ISPINFODATA(),     'ISP_INFO_DATA' ],
        [ MASKISPINFODATA(), 'MASK_ISP_INFO_DATA' ],
    );
    #
    foreach my $spec (@specs)
    {
        my ($section, $label) = @{$spec};
        export_list_section_as_csv($section,
                                   LNB_MPR_FILE_TYPE(),
                                   $label,
                                   'sp',
                                   FALSE);
    }
}
#
sub prepare_mpr_file
{
    my ($pdb, $pmpr) = @_;
    #
    # NOTE(review): unlike prepare_u01_file/prepare_u03_file, there is
    # no set_product_info() call here -- confirm that is intentional.
    #
    # all MPR csv sections are list sections with a FALSE trailing
    # flag; prepare them in the same order they are exported.
    #
    foreach my $section (TIMEDATASP(),
                         COUNTDATASP(),
                         COUNTDATASP2(),
                         TRACEDATASP(),
                         TRACEDATASP_2(),
                         ISPINFODATA(),
                         MASKISPINFODATA())
    {
        prepare_list_section($pdb,
                             $pmpr,
                             LNB_MPR_FILE_TYPE,
                             $section,
                             FALSE);
    }
    #
    return;
}
#
sub load_mpr_sections
{
    my ($pmpr) = @_;
    #
    # header sections are simple name/value pairs.
    #
    load_name_value($pmpr, $_) for (INDEX(), INFORMATION());
    #
    # everything else in an MPR file is a list section.
    #
    load_list($pmpr, $_) for (TIMEDATASP(),
                              COUNTDATASP(),
                              COUNTDATASP2(),
                              TRACEDATASP(),
                              TRACEDATASP_2(),
                              ISPINFODATA(),
                              MASKISPINFODATA());
}
#
sub prepare_mpr_files
{
    my ($pmprs, $pdb) = @_;
    #
    printf $log_fh "\n%d: Audit MPR files:\n", __LINE__;
    #
    foreach my $pfile (@{$pmprs})
    {
        printf $log_fh "\n%d: Audit mpr: %s\n", __LINE__, $pfile->{file_name}
            if ($verbose >= MIDVERBOSE);
        #
        # skip any file that fails to load.
        #
        next if (load($pfile) == 0);
        #
        load_mpr_sections($pfile);
        prepare_mpr_file($pdb, $pfile);
    }
    #
    return;
}
#
sub process_mpr_files
{
    my ($pmprs) = @_;
    #
    # nothing to do for an empty file list.
    #
    unless (scalar(@{$pmprs}) > 0)
    {
        printf $log_fh "\n%d: No MPR files to process. Returning.\n\n", __LINE__;
        return;
    }
    #
    # reshape the data and export it as csv.
    #
    my %csv_db = ();
    prepare_mpr_files($pmprs, \%csv_db);
    export_mpr_data_as_csv(\%csv_db);
    #
    return;
}
#
########################################################################
########################################################################
#
# start main execution.
#
#
# parse and validate command-line options.
#
my %opts;
if (getopts('?HhwWv:t:l:o:d:', \%opts) != 1)
{
    short_usage($cmd);
    exit 2;
}
#
# BUG FIX: the original code iterated "foreach my $opt (%opts)", which
# walks BOTH the keys and the values of the hash in list context; an
# option VALUE that happened to equal an option letter (e.g. "-o W")
# would be processed a second time as if it were an option.  iterate
# over the keys only.
#
foreach my $opt (keys %opts)
{
    if (($opt eq "h") or ($opt eq "?"))
    {
        short_usage($cmd);
        exit 0;
    }
    elsif ($opt eq "H")
    {
        long_usage($cmd);
        exit 0;
    }
    elsif ($opt eq "w")
    {
        $verbose = MINVERBOSE;
    }
    elsif ($opt eq "W")
    {
        $verbose = MIDVERBOSE;
    }
    elsif ($opt eq "v")
    {
        # accept either a numeric level (0-3) or a named level.
        if ($opts{$opt} =~ m/^[0123]$/)
        {
            $verbose = $opts{$opt};
        }
        elsif (exists($verbose_levels{$opts{$opt}}))
        {
            $verbose = $verbose_levels{$opts{$opt}};
        }
        else
        {
            printf $log_fh "\n%d: Invalid verbose level: $opts{$opt}\n", __LINE__;
            short_usage($cmd);
            exit 2;
        }
    }
    elsif ($opt eq "t")
    {
        # restrict processing to one file type: u01, u03 or mpr.
        # (lc() replaces the old "tr/[A-Z]/[a-z]/", which also mapped
        # the literal bracket characters.)
        $file_type = lc($opts{$opt});
        if ($file_type !~ m/^(u01|u03|mpr)$/i)
        {
            printf $log_fh "\n%d: Invalid file type: $opts{$opt}\n", __LINE__;
            short_usage($cmd);
            exit 2;
        }
    }
    elsif ($opt eq "l")
    {
        # redirect logging to a file.  use a lexical filehandle instead
        # of the old "local *FH" glob: the local() restored *FH as soon
        # as this block exited, leaving $log_fh pointing at a glob whose
        # lifetime was not guaranteed.
        $logfile = $opts{$opt};
        open(my $fh, '>', $logfile) or die $!;
        $log_fh = $fh;
        printf $log_fh "\n%d: Log File: %s\n", __LINE__, $logfile;
    }
    elsif ($opt eq "o")
    {
        # set a processing option bit from the allowed-options table.
        my $option = uc($opts{$opt});
        if (exists($allowed_proc_options{$option}))
        {
            $proc_options |= $allowed_proc_options{$option};
        }
        else
        {
            printf $log_fh "\n%d: Invalid option type: $opts{$opt}\n", __LINE__;
            short_usage($cmd);
            exit 2;
        }
    }
    elsif ($opt eq "d")
    {
        # create the export directory if it does not already exist.
        $export_dir = $opts{$opt};
        mkpath($export_dir) unless ( -d $export_dir );
        printf $log_fh "\n%d: Export directory: %s\n", __LINE__, $export_dir;
    }
}
#
# at least one directory to scan is required.
#
if (scalar(@ARGV) == 0)
{
    printf $log_fh "%d: No directories given.\n", __LINE__;
    short_usage($cmd);
    exit 2;
}
#
printf $log_fh "\n%d: Scan directories for U01, U03 and MPR files: \n\n", __LINE__;
#
my @u01_files = ();
my @u03_files = ();
my @mpr_files = ();
#
get_all_files($file_type,
              \@ARGV,
              \@u01_files,
              \@u03_files,
              \@mpr_files);
#
printf $log_fh "%d: Number of U01 files: %d\n", __LINE__, scalar(@u01_files);
printf $log_fh "%d: Number of U03 files: %d\n", __LINE__, scalar(@u03_files);
printf $log_fh "%d: Number of MPR files: %d\n\n", __LINE__, scalar(@mpr_files);
#
process_u01_files(\@u01_files);
process_u03_files(\@u03_files);
process_mpr_files(\@mpr_files);
#
printf $log_fh "\n%d: All Done\n", __LINE__;
#
exit 0;
| ombt/analytics | sql/1505041213-new-lnb2csv/lnb2csv.pl | Perl | mit | 169,600 |
#
# (c) Jan Gehring <jan.gehring@gmail.com>
#
# vim: set ts=2 sw=2 tw=0:
# vim: set expandtab:
package Rex::Service::SuSE;

use strict;
use warnings;

our $VERSION = '0.56.1'; # VERSION

use Rex::Commands::Run;
use Rex::Helper::Run;
use Rex::Logger;

use base qw(Rex::Service::Base);

# Service-control backend for SuSE: drives the SysV init scripts under
# /etc/rc.d and uses chkconfig to toggle boot-time activation.
sub new {
  my $that  = shift;
  my $class = ref($that) || $that;

  my $self = $class->SUPER::new(@_);
  bless( $self, $class );

  # printf-style command templates; the base class fills %s with the
  # service name (and, for "action", the requested action string).
  my %commands = (
    start        => '/etc/rc.d/%s start >/dev/null',
    restart      => '/etc/rc.d/%s restart >/dev/null',
    stop         => '/etc/rc.d/%s stop >/dev/null',
    reload       => '/etc/rc.d/%s reload >/dev/null',
    status       => '/etc/rc.d/%s status >/dev/null',
    ensure_stop  => 'chkconfig %s off',
    ensure_start => 'chkconfig %s on',
    action       => '/etc/rc.d/%s %s >/dev/null',
  );

  $self->{commands} = \%commands;

  return $self;
}

1;
| gitpan/Rex | lib/Rex/Service/SuSE.pm | Perl | apache-2.0 | 867 |
# GRNOC::TSDS::Writer::Worker - consumes TSDS update messages from a
# RabbitMQ queue and writes data, aggregate, and event documents into
# MongoDB, using Redis for document locking and memcached to cache
# document-existence checks.
package GRNOC::TSDS::Writer::Worker;

use Moo;
use Types::Standard qw( Str Int HashRef Object Maybe );

use GRNOC::TSDS::DataType;
use GRNOC::TSDS::Constants;
use GRNOC::TSDS::AggregateDocument;
use GRNOC::TSDS::DataDocument;
use GRNOC::TSDS::EventDocument;
use GRNOC::TSDS::Writer::AggregateMessage;
use GRNOC::TSDS::Writer::DataMessage;
use GRNOC::TSDS::Writer::EventMessage;
use GRNOC::TSDS::RedisLock;

use MongoDB;
use Net::AMQP::RabbitMQ;
use Cache::Memcached::Fast;
use Tie::IxHash;
use JSON::XS;
use Math::Round qw( nlowmult );
use Time::HiRes qw( time );
use Try::Tiny;

use Data::Dumper;

### constants ###

# memcached TTLs (seconds) for cached document-existence flags
use constant DATA_CACHE_EXPIRATION => 60 * 60;
use constant AGGREGATE_CACHE_EXPIRATION => 60 * 60;
use constant MEASUREMENT_CACHE_EXPIRATION => 60 * 60;

# rabbit consumer tuning: prefetch window and recv() timeout (ms)
use constant QUEUE_PREFETCH_COUNT => 20;
use constant QUEUE_FETCH_TIMEOUT => 10 * 1000;
use constant RECONNECT_TIMEOUT => 10;

### required attributes ###

# config: GRNOC config object supplying mongo/memcache/rabbit settings
has config => ( is => 'ro',
                required => 1 );

has logger => ( is => 'ro',
                required => 1 );

# queue: name of the rabbit queue this worker consumes from
has queue => ( is => 'ro',
               required => 1 );

### internal attributes ###

# is_running: checked by _consume_loop(); cleared by stop()
has is_running => ( is => 'rwp',
                    default => 0 );

# data_types: cache of data type name => GRNOC::TSDS::DataType object
has data_types => ( is => 'rwp',
                    default => sub { {} } );

# connection handles, populated by start()
has mongo_rw => ( is => 'rwp' );

has rabbit => ( is => 'rwp' );

has redislock => ( is => 'rwp' );

has memcache => ( is => 'rwp' );

has locker => ( is => 'rwp' );

has json => ( is => 'rwp' );
### public methods ###
# start()
#
# Bring the worker up: set the running flag and process title, install
# signal handlers, connect to MongoDB (read/write user), Redis (via
# RedisLock), memcached and RabbitMQ, then enter the consume loop.
# Returns the value of _consume_loop() (0 on clean shutdown).  Dies if
# the MongoDB connection cannot be established.
sub start {

    my ( $self ) = @_;

    my $queue = $self->queue;

    $self->logger->debug( "Starting." );

    # flag that we're running
    $self->_set_is_running( 1 );

    # change our process name
    $0 = "tsds_writer ($queue) [worker]";

    # setup signal handlers
    $SIG{'TERM'} = sub {

        $self->logger->info( "Received SIG TERM." );
        $self->stop();
    };

    $SIG{'HUP'} = sub {

        $self->logger->info( "Received SIG HUP." );
    };

    # create JSON object
    my $json = JSON::XS->new();

    $self->_set_json( $json );

    # connect to mongo
    my $mongo_host = $self->config->get( '/config/mongo/@host' );
    my $mongo_port = $self->config->get( '/config/mongo/@port' );
    my $rw_user = $self->config->get( "/config/mongo/readwrite" );

    $self->logger->debug( "Connecting to MongoDB as readwrite on $mongo_host:$mongo_port." );

    my $mongo;
    try {
        $mongo = MongoDB::MongoClient->new( host => "$mongo_host:$mongo_port",
                                            username => $rw_user->{'user'},
                                            password => $rw_user->{'password'} );
    }
    catch {
        # a worker without mongo is useless -- log and die
        $self->logger->error( "Error connecting to MongoDB: $_" );
        die( "Error connecting to MongoDB: $_" );
    };

    $self->_set_mongo_rw( $mongo );

    $self->_set_redislock( GRNOC::TSDS::RedisLock->new( config => $self->config ) );

    # connect to memcache
    my $memcache_host = $self->config->get( '/config/memcache/@host' );
    my $memcache_port = $self->config->get( '/config/memcache/@port' );

    $self->logger->debug( "Connecting to memcached $memcache_host:$memcache_port." );

    my $memcache = Cache::Memcached::Fast->new( {'servers' => [{'address' => "$memcache_host:$memcache_port", 'weight' => 1}]} );

    $self->_set_memcache( $memcache );

    # connect to rabbit
    $self->_rabbit_connect();

    $self->logger->debug( 'Starting RabbitMQ consume loop.' );

    # continually consume messages from rabbit queue, making sure we have to acknowledge them
    return $self->_consume_loop();
}
# stop()
#
# Request a graceful shutdown: clear the running flag, which
# _consume_loop() checks at the top of every iteration.
sub stop {

    my ( $self ) = @_;

    $self->logger->debug( 'Stopping.' );

    $self->_set_is_running( 0 );
}
### private methods ###
# _consume_loop()
#
# Main worker loop.  Repeatedly receives one rabbit message, JSON
# decodes it, and hands the contained array of updates to
# _consume_messages().  Delivery semantics:
#   - malformed JSON or non-array body  => reject, do NOT requeue
#   - processing failure                => reject, requeue for retry
#   - success                           => ack
# Any rabbit I/O failure triggers a reconnect.  Returns 0 once stop()
# has cleared the running flag.
sub _consume_loop {

    my ( $self ) = @_;

    while ( 1 ) {

        # have we been told to stop?
        if ( !$self->is_running ) {

            $self->logger->debug( 'Exiting consume loop.' );
            return 0;
        }

        # receive the next rabbit message
        my $rabbit_message;

        try {
            $rabbit_message = $self->rabbit->recv( QUEUE_FETCH_TIMEOUT );
        }
        catch {
            $self->logger->error( "Error receiving rabbit message: $_" );

            # reconnect to rabbit since we had a failure
            $self->_rabbit_connect();
        };

        # didn't get a message?
        if ( !$rabbit_message ) {

            $self->logger->debug( 'No message received.' );

            # re-enter loop to retrieve the next message
            next;
        }

        # try to JSON decode the messages
        my $messages;

        try {
            $messages = $self->json->decode( $rabbit_message->{'body'} );
        }
        catch {
            $self->logger->error( "Unable to JSON decode message: $_" );
        };

        if ( !$messages ) {

            try {
                # reject the message and do NOT requeue it since its malformed JSON
                $self->rabbit->reject( 1, $rabbit_message->{'delivery_tag'}, 0 );
            }
            catch {
                $self->logger->error( "Unable to reject rabbit message: $_" );

                # reconnect to rabbit since we had a failure
                $self->_rabbit_connect();
            };
        }

        # retrieve the next message from rabbit if we couldn't decode this one
        next if ( !$messages );

        # make sure its an array (ref) of messages
        if ( ref( $messages ) ne 'ARRAY' ) {

            $self->logger->error( "Message body must be an array." );

            try {
                # reject the message and do NOT requeue since its not properly formed
                $self->rabbit->reject( 1, $rabbit_message->{'delivery_tag'}, 0 );
            }
            catch {
                $self->logger->error( "Unable to reject rabbit message: $_" );

                # reconnect to rabbit since we had a failure
                $self->_rabbit_connect();
            };

            next;
        }

        my $num_messages = @$messages;
        $self->logger->debug( "Processing message containing $num_messages updates." );

        # time the batch for the debug log below
        my $t1 = time();

        my $success = $self->_consume_messages( $messages );

        my $t2 = time();
        my $delta = $t2 - $t1;

        $self->logger->debug( "Processed $num_messages updates in $delta seconds." );

        # didn't successfully consume the messages, so reject but requeue the entire message to try again
        if ( !$success ) {

            $self->logger->debug( "Rejecting rabbit message, requeueing." );

            try {
                $self->rabbit->reject( 1, $rabbit_message->{'delivery_tag'}, 1 );
            }
            catch {
                $self->logger->error( "Unable to reject rabbit message: $_" );

                # reconnect to rabbit since we had a failure
                $self->_rabbit_connect();
            };
        }

        # successfully consumed message, acknowledge it to rabbit
        else {

            $self->logger->debug( "Acknowledging successful message." );

            try {
                $self->rabbit->ack( 1, $rabbit_message->{'delivery_tag'} );
            }
            catch {
                $self->logger->error( "Unable to acknowledge rabbit message: $_" );

                # reconnect to rabbit since we had a failure
                $self->_rabbit_connect();
            };
        }
    }
}
# _consume_messages( \@messages )
#
# Validate and dispatch every update contained in one decoded rabbit
# message body.  Each update hash is sorted into one of three buckets
# by its 'type' field: "<name>.event", "<name>.aggregate", or plain
# "<name>" (high-resolution data).  Unknown data types are skipped;
# a failure to refresh the data type cache from mongo aborts the whole
# batch so it can be requeued.  After bucketing, the three
# _process_*_messages() helpers accumulate mongo bulk operations which
# are then executed, and all redis locks acquired along the way are
# released (on success or failure).  Returns 1 on success, 0 if the
# batch should be requeued.
sub _consume_messages {

    my ( $self, $messages ) = @_;

    # gather all messages to process
    my $data_to_process = [];
    my $events_to_process = [];
    my $aggregates_to_process = [];

    # keep track and build up all of the bulk operations we'll want to do at the end
    my $bulk_creates = {};
    my $bulk_updates = {};
    my $acquired_locks = [];

    # handle every TSDS message that came within the rabbit message
    foreach my $message ( @$messages ) {

        # make sure message is an object/hash (ref)
        if ( ref( $message ) ne 'HASH' ) {

            $self->logger->error( "Messages must be an object/hash of data, skipping." );
            next;
        }

        my $type = $message->{'type'};
        my $time = $message->{'time'};
        my $interval = $message->{'interval'};
        my $values = $message->{'values'};
        my $meta = $message->{'meta'};
        my $affected = $message->{'affected'};
        my $text = $message->{'text'};
        my $start = $message->{'start'};
        my $end = $message->{'end'};
        my $event_type = $message->{'event_type'};
        my $identifier = $message->{'identifier'};

        # make sure a type was specified
        if ( !defined( $type ) ) {

            $self->logger->error( "No type specified, skipping message." );
            next;
        }

        # does it appear to be an aggregate or event message?
        if ( $type =~ /^(.+)\.(aggregate|event)$/ ) {

            my $data_type_name = $1;
            my $message_type = $2;

            my $data_type = $self->data_types->{$data_type_name};

            # we haven't seen this data type before, re-fetch them
            if ( !$data_type ) {

                my $success = 1;

                # this involves communicating to mongodb which may fail
                try {
                    $self->_fetch_data_types();
                }
                # requeue the message to try again later if mongo communication fails
                catch {
                    $self->logger->error( "Unable to fetch data types from MongoDB." );
                    $success = 0;
                };

                # dont bother handling any more of the messages in this rabbit message
                return 0 if !$success;

                $data_type = $self->data_types->{$data_type_name};
            }

            # detect unknown data type, ignore it
            if ( !$data_type ) {

                $self->logger->warn( "Unknown data type '$data_type_name', skipping." );
                next;
            }

            # was it an event?
            if ( $message_type eq "event" ) {

                my $event_message;

                try {
                    $event_message = GRNOC::TSDS::Writer::EventMessage->new( data_type => $data_type,
                                                                             affected => $affected,
                                                                             text => $text,
                                                                             start => $start,
                                                                             end => $end,
                                                                             identifier => $identifier,
                                                                             type => $event_type );
                }
                catch {
                    $self->logger->error( $_ );
                };

                # include this to our list of events to process if it was valid
                push( @$events_to_process, $event_message ) if $event_message;
            }

            # was it an aggregate?
            elsif ( $message_type eq "aggregate" ) {

                my $aggregate_message;

                try {
                    $aggregate_message = GRNOC::TSDS::Writer::AggregateMessage->new( data_type => $data_type,
                                                                                     time => $time,
                                                                                     interval => $interval,
                                                                                     values => $values,
                                                                                     meta => $meta );
                }
                catch {
                    $self->logger->error( $_ );
                };

                # include this to our list of aggregates to process if it was valid
                push( @$aggregates_to_process, $aggregate_message ) if $aggregate_message;
            }
        }

        # must be a data message
        else {

            my $data_type = $self->data_types->{$type};

            # we haven't seen this data type before, re-fetch them
            if ( !$data_type ) {

                my $success = 1;

                # this involves communicating to mongodb, which may fail
                try {
                    $self->_fetch_data_types();
                }
                # requeue the message to try again later if mongo communication fails
                catch {
                    $self->logger->error( "Unable to fetch data types from MongoDB." );
                    $success = 0;
                };

                # dont bother handling any more of the messages in this rabbit message
                return 0 if !$success;

                $data_type = $self->data_types->{$type};
            }

            # detected unknown data type, ignore it
            if ( !$data_type ) {

                $self->logger->warn( "Unknown data type '$type', skipping." );
                next;
            }

            my $data_message;

            try {
                $data_message = GRNOC::TSDS::Writer::DataMessage->new( data_type => $data_type,
                                                                       time => $time,
                                                                       interval => $interval,
                                                                       values => $values,
                                                                       meta => $meta );
            }
            catch {
                $self->logger->error( $_ );

                # release any outstanding locks
                $self->_release_locks( $acquired_locks );
            };

            # include this to our list of data to process if it was valid
            push( @$data_to_process, $data_message ) if $data_message;
        }
    }

    # process all of the data points and events within this message
    my $success = 1;

    try {

        # at least one aggregate to process
        if ( @$aggregates_to_process > 0 ) {

            $self->logger->debug( "Processing " . @$aggregates_to_process . " aggregate messages." );

            $self->_process_aggregate_messages( messages => $aggregates_to_process,
                                                bulk_creates => $bulk_creates,
                                                bulk_updates => $bulk_updates,
                                                acquired_locks => $acquired_locks );
        }

        # at least one high res data to process
        if ( @$data_to_process > 0 ) {

            $self->logger->debug( "Processing " . @$data_to_process . " data messages." );

            $self->_process_data_messages( messages => $data_to_process,
                                           bulk_creates => $bulk_creates,
                                           bulk_updates => $bulk_updates,
                                           acquired_locks => $acquired_locks );
        }

        # at least one event to process
        if ( @$events_to_process > 0 ) {

            $self->logger->debug( "Processing " . @$events_to_process . " event messages." );

            $self->_process_event_messages( messages => $events_to_process,
                                            bulk_creates => $bulk_creates,
                                            bulk_updates => $bulk_updates,
                                            acquired_locks => $acquired_locks );
        }

        # perform all (most, except for data type changes..) create and update operations in bulk
        $self->_process_bulks( $bulk_creates );
        $self->_process_bulks( $bulk_updates );

        # release all the locks we're acquired for the docs we're changing
        $self->_release_locks( $acquired_locks );
    }

    catch {

        $self->logger->error( "Error processing messages: $_" );

        # release any outstanding locks
        $self->_release_locks( $acquired_locks );

        $success = 0;
    };

    return $success;
}
# _release_locks( \@locks )
#
# Release every redis lock acquired while processing the current batch.
sub _release_locks {

    my ( $self, $locks ) = @_;

    $self->redislock->unlock( $_ ) for @$locks;
}
# _process_bulks( \%bulks )
#
# Execute every accumulated mongo bulk operation.  $bulks is keyed as
# { database_name => { collection_name => bulk_op } }.  Dies on any
# write or write-concern error so the whole rabbit message gets
# rejected and requeued by the caller.
sub _process_bulks {

    my ( $self, $bulks ) = @_;

    foreach my $db_name ( keys %$bulks ) {

        foreach my $collection_name ( keys %{ $bulks->{$db_name} } ) {

            my $bulk_op = $bulks->{$db_name}{$collection_name};

            $self->logger->debug( "Executing bulk query for $db_name - $collection_name." );

            my $result = $bulk_op->execute();

            my $num_errors = $result->count_writeErrors() + $result->count_writeConcernErrors();

            # throw an exception so this entire message will get requeued
            die( "bulk update failed: " . $result->last_errmsg() ) if ( $num_errors > 0 );
        }
    }
}
# _process_event_messages( messages => \@, bulk_creates => \%,
#                          bulk_updates => \%, acquired_locks => \@ )
#
# Group every event message into its containing event document (keyed
# by data type name / event type / document start / document end, with
# document boundaries aligned to EVENT_DOCUMENT_DURATION), then hand
# each distinct document to _process_event_document().  Documents are
# deleted from the grouping hash as they are processed to free memory.
sub _process_event_messages {

    my ( $self, %args ) = @_;

    my $messages = $args{'messages'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # all unique documents we're handling (and their corresponding events)
    my $unique_documents = {};

    # handle every message
    foreach my $message ( @$messages ) {

        my $data_type = $message->data_type;
        my $start = $message->start;
        my $end = $message->end;
        my $affected = $message->affected;
        my $text = $message->text;
        my $type = $message->type;
        my $event = $message->event;

        # determine proper start and end time of document
        my $doc_start = nlowmult( EVENT_DOCUMENT_DURATION, $start );
        my $doc_end = $doc_start + EVENT_DOCUMENT_DURATION;

        # determine the document that this event would belong within
        my $document = GRNOC::TSDS::EventDocument->new( data_type => $data_type,
                                                        start => $doc_start,
                                                        end => $doc_end,
                                                        type => $type );

        # mark the document for this event if one hasn't been set already
        my $unique_doc = $unique_documents->{$data_type->name}{$type}{$document->start}{$document->end};

        # we've never handled an event for this document before
        if ( !$unique_doc ) {

            # mark it as being a new unique document we need to handle
            $unique_documents->{$data_type->name}{$type}{$document->start}{$document->end} = $document;
            $unique_doc = $unique_documents->{$data_type->name}{$type}{$document->start}{$document->end};
        }

        # add this as another event to update/set in the document
        $unique_doc->add_event( $event );
    }

    # handle every distinct document that we'll need to update
    my @data_types = keys( %$unique_documents );

    foreach my $data_type ( sort @data_types ) {

        my @types = keys( %{$unique_documents->{$data_type}} );

        foreach my $type ( sort @types ) {

            my @starts = keys( %{$unique_documents->{$data_type}{$type}} );

            foreach my $start ( sort { $a <=> $b } @starts ) {

                my @ends = keys( %{$unique_documents->{$data_type}{$type}{$start}} );

                foreach my $end ( sort { $a <=> $b } @ends ) {

                    my $document = $unique_documents->{$data_type}{$type}{$start}{$end};

                    # process this event document, including all events contained within it
                    $self->_process_event_document( document => $document,
                                                    bulk_creates => $bulk_creates,
                                                    bulk_updates => $bulk_updates,
                                                    acquired_locks => $acquired_locks );

                    # all done with this document, remove it so we don't hold onto its memory
                    delete( $unique_documents->{$data_type}{$type}{$start}{$end} );
                }
            }
        }
    }
}
# _process_data_messages( messages => \@, bulk_creates => \%,
#                         bulk_updates => \%, acquired_locks => \@ )
#
# Handle high-resolution data messages in timestamp order:
#   1. group data points into their containing data documents
#      (boundaries aligned to interval * HIGH_RESOLUTION_DOCUMENT_SIZE),
#   2. track each distinct measurement, creating measurement metadata
#      documents for identifiers not found in memcache,
#   3. record each distinct value type so the data type's metadata can
#      be updated,
#   4. dispatch every distinct document to _process_data_document().
sub _process_data_messages {

    my ( $self, %args ) = @_;

    my $messages = $args{'messages'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # all unique value types we're handling per each data type
    my $unique_data_types = {};
    my $unique_value_types = {};

    # all unique measurements we're handling
    my $unique_measurements = {};

    # all unique documents we're handling (and their corresponding data points)
    my $unique_documents = {};

    # handle every message sent, ordered by their timestamp in ascending order
    foreach my $message ( sort { $a->time <=> $b->time } @$messages ) {

        my $data_type = $message->data_type;
        my $measurement_identifier = $message->measurement_identifier;
        my $interval = $message->interval;
        my $data_points;

        # this is lazily built so it might fail validation
        try {
            $data_points = $message->data_points;
        }
        catch {
            $self->logger->error( "Error building data points for message: $_" );
        };

        next if (! defined $data_points);

        my $time = $message->time;
        my $meta = $message->meta;

        # mark this data type as being found
        $unique_data_types->{$data_type->name} = $data_type;

        # have we handled this measurement already?
        my $unique_measurement = $unique_measurements->{$data_type->name}{$measurement_identifier};

        if ( $unique_measurement ) {

            # keep the older start time, just update its meta data with the latest
            $unique_measurements->{$data_type->name}{$measurement_identifier}{'meta'} = $meta;
        }

        # never seen this measurement before
        else {

            # mark this measurement as being found, and include its meta data and start time
            $unique_measurements->{$data_type->name}{$measurement_identifier} = {'meta' => $meta,
                                                                                 'start' => $time,
                                                                                 'interval' => $interval};
        }

        # determine proper start and end time of document
        my $doc_length = $interval * HIGH_RESOLUTION_DOCUMENT_SIZE;
        my $start = nlowmult( $doc_length, $time );
        my $end = $start + $doc_length;

        # determine the document that this message would belong within
        my $document = GRNOC::TSDS::DataDocument->new( data_type => $data_type,
                                                       measurement_identifier => $measurement_identifier,
                                                       interval => $interval,
                                                       start => $start,
                                                       end => $end );

        # mark the document for this data point if one hasn't been set already
        my $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};

        # we've never handled a data point for this document before
        if ( !$unique_doc ) {

            # mark it as being a new unique document we need to handle
            $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end} = $document;
            $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};
        }

        # handle every data point that was included in this message
        foreach my $data_point ( @$data_points ) {

            my $value_type = $data_point->value_type;

            # add this as another data point to update/set in the document
            $unique_doc->add_data_point( $data_point );

            # mark this value type as being found
            $unique_value_types->{$data_type->name}{$value_type} = 1;
        }
    }

    # get cache ids for all unique measurements we'll ask about
    my @measurement_cache_ids;

    my @data_types = keys( %$unique_measurements );

    foreach my $data_type ( @data_types ) {

        my @measurement_identifiers = keys( %{$unique_measurements->{$data_type}} );

        foreach my $measurement_identifier ( @measurement_identifiers ) {

            my $cache_id = $self->redislock->get_cache_id( type => $data_type,
                                                           collection => 'measurements',
                                                           identifier => $measurement_identifier );

            push( @measurement_cache_ids, $cache_id );
        }
    }

    if ( @measurement_cache_ids ) {

        # grab measurements from our cache
        my $measurement_cache_results = $self->memcache->get_multi( @measurement_cache_ids );

        # potentially create new measurement entries that we've never seen before
        # NOTE: iteration order here must match the order the cache ids
        # were pushed above, since they are shifted off in sequence
        @data_types = keys( %$unique_measurements );

        foreach my $data_type ( sort @data_types ) {

            my @measurement_identifiers = keys( %{$unique_measurements->{$data_type}} );

            foreach my $measurement_identifier ( sort @measurement_identifiers ) {

                my $cache_id = shift( @measurement_cache_ids );

                # this measurement exists in our cache, dont bother creating it
                next if ( $measurement_cache_results->{$cache_id} );

                # potentially create a new entry unless someone else beats us to it
                my $meta = $unique_measurements->{$data_type}{$measurement_identifier}{'meta'};
                my $start = $unique_measurements->{$data_type}{$measurement_identifier}{'start'};
                my $interval = $unique_measurements->{$data_type}{$measurement_identifier}{'interval'};

                $self->_create_measurement_document( identifier => $measurement_identifier,
                                                     data_type => $unique_data_types->{$data_type},
                                                     meta => $meta,
                                                     start => $start,
                                                     interval => $interval,
                                                     bulk_creates => $bulk_creates,
                                                     acquired_locks => $acquired_locks );
            }
        }
    }

    # potentially update the metadata value types for every distinct one found
    @data_types = keys( %$unique_value_types );

    foreach my $data_type ( @data_types ) {

        my @value_types = keys( %{$unique_value_types->{$data_type}} );

        $self->_update_metadata_value_types( data_type => $unique_data_types->{$data_type},
                                             value_types => \@value_types );
    }

    # handle every distinct document that we'll need to update
    @data_types = keys( %$unique_documents );

    foreach my $data_type ( sort @data_types ) {

        my @measurement_identifiers = sort keys( %{$unique_documents->{$data_type}} );

        foreach my $measurement_identifier ( sort @measurement_identifiers ) {

            my @starts = keys( %{$unique_documents->{$data_type}{$measurement_identifier}} );

            foreach my $start ( sort { $a <=> $b } @starts ) {

                my @ends = keys( %{$unique_documents->{$data_type}{$measurement_identifier}{$start}} );

                foreach my $end ( sort { $a <=> $b } @ends ) {

                    my $document = $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end};

                    # process this data document, including all data points contained within it
                    $self->_process_data_document( document => $document,
                                                   bulk_creates => $bulk_creates,
                                                   bulk_updates => $bulk_updates,
                                                   acquired_locks => $acquired_locks );

                    # all done with this document, remove it so we don't hold onto its memory
                    delete( $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end} );
                }
            }
        }
    }
}
# _process_aggregate_messages( messages => \@, bulk_creates => \%,
#                              bulk_updates => \%, acquired_locks => \@ )
#
# Group aggregate points into their containing aggregate documents
# (boundaries aligned to interval * AGGREGATE_DOCUMENT_SIZE) in
# timestamp order, then dispatch every distinct document to
# _process_aggregate_document().
sub _process_aggregate_messages {

    my ( $self, %args ) = @_;

    my $messages = $args{'messages'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # all unique documents we're handling (and their corresponding data points)
    my $unique_documents = {};

    # handle every message sent, ordered by their timestamp in ascending order
    foreach my $message ( sort { $a->time <=> $b->time } @$messages ) {

        my $data_type = $message->data_type;
        my $measurement_identifier = $message->measurement_identifier;
        my $interval = $message->interval;
        my $time = $message->time;
        my $meta = $message->meta;

        # This is lazily built so it might actually fail type validation
        # when we invoke it for the first time
        my $aggregate_points;

        try {
            $aggregate_points = $message->aggregate_points;
        }
        catch {
            $self->logger->error( "Error processing aggregate update - bad data format: $_" );
        };

        next if (! defined $aggregate_points);

        # determine proper start and end time of document
        my $doc_length = $interval * AGGREGATE_DOCUMENT_SIZE;
        my $start = nlowmult( $doc_length, $time );
        my $end = $start + $doc_length;

        # determine the document that this message would belong within
        my $document = GRNOC::TSDS::AggregateDocument->new( data_type => $data_type,
                                                            measurement_identifier => $measurement_identifier,
                                                            interval => $interval,
                                                            start => $start,
                                                            end => $end );

        # mark the document for this data point if one hasn't been set already
        my $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};

        # we've never handled a data point for this document before
        if ( !$unique_doc ) {

            # mark it as being a new unique document we need to handle
            $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end} = $document;
            $unique_doc = $unique_documents->{$data_type->name}{$measurement_identifier}{$document->start}{$document->end};
        }

        # handle every aggregate point that was included in this message
        foreach my $aggregate_point ( @$aggregate_points ) {

            my $value_type = $aggregate_point->value_type;

            # add this as another data point to update/set in the document
            $unique_doc->add_aggregate_point( $aggregate_point );
        }
    }

    # handle every distinct document that we'll need to update
    my @data_types = keys( %$unique_documents );

    foreach my $data_type ( sort @data_types ) {

        my @measurement_identifiers = keys( %{$unique_documents->{$data_type}} );

        foreach my $measurement_identifier ( sort @measurement_identifiers ) {

            my @starts = keys( %{$unique_documents->{$data_type}{$measurement_identifier}} );

            foreach my $start ( sort { $a <=> $b } @starts ) {

                my @ends = keys( %{$unique_documents->{$data_type}{$measurement_identifier}{$start}} );

                foreach my $end ( sort { $a <=> $b } @ends ) {

                    my $document = $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end};

                    # process this aggregate document, including all aggregate points contained within it
                    $self->_process_aggregate_document( document => $document,
                                                        bulk_creates => $bulk_creates,
                                                        bulk_updates => $bulk_updates,
                                                        acquired_locks => $acquired_locks );

                    # all done with this document, remove it so we don't hold onto its memory
                    delete( $unique_documents->{$data_type}{$measurement_identifier}{$start}{$end} );
                }
            }
        }
    }
}
# _process_event_document( document => $doc, bulk_creates => \%,
#                          bulk_updates => \%, acquired_locks => \@ )
#
# Write one event document.  Takes a redis lock on the document (dying
# if it can't be acquired, which requeues the rabbit message), then
# consults memcache/mongo to decide between update and create:
#   - cached or found in mongo  => merge into the existing document
#     via _update_event_document(), refreshing the cache entry
#   - not found anywhere        => queue a create on the data type's
#     'event' collection bulk op (cache is deliberately NOT set here,
#     since the bulk op may still fail)
sub _process_event_document {

    my ( $self, %args ) = @_;

    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    my $data_type = $document->data_type;
    my $data_type_name = $data_type->name;
    my $type = $document->type;
    my $start = $document->start;
    my $end = $document->end;

    $self->logger->debug( "Processing event document $data_type_name / $type / $start / $end." );

    # get lock for this event document
    my $lock = $self->redislock->lock( type => $data_type_name,
                                       collection => 'event',
                                       identifier => $type,
                                       start => $start,
                                       end => $end ) or die "Unable to lock event document $data_type_name / $type / $start / $end, requeueing";

    push( @$acquired_locks, $lock );

    my $cache_id = $self->redislock->get_cache_id( type => $data_type_name,
                                                   collection => 'event',
                                                   identifier => $type,
                                                   start => $start,
                                                   end => $end );

    # its already in our cache, seen it before
    if ( my $cached = $self->memcache->get( $cache_id ) ) {

        $self->logger->debug( 'Found document in cache, updating.' );

        # retrieve the full old document from mongo
        my $old_doc = GRNOC::TSDS::EventDocument->new( data_type => $data_type,
                                                       type => $type,
                                                       start => $start,
                                                       end => $end )->fetch();

        # update it and its events accordingly
        $self->_update_event_document( new_document => $document,
                                       old_document => $old_doc,
                                       bulk_updates => $bulk_updates,
                                       acquired_locks => $acquired_locks );

        # re-cache existing entry found in cache
        $self->memcache->set( $cache_id,
                              1,
                              DATA_CACHE_EXPIRATION );
    }

    # not in cache, we'll have to query mongo to see if its there
    else {

        $self->logger->debug( 'Document not found in cache.' );

        # retrieve the full old document from mongo
        my $old_doc = GRNOC::TSDS::EventDocument->new( data_type => $data_type,
                                                       type => $type,
                                                       start => $start,
                                                       end => $end )->fetch();

        # document exists in mongo, so we'll need to update it
        if ( $old_doc ) {

            # we found it in the database, set our cache accordingly to mark that it exists
            $self->memcache->set( $cache_id,
                                  1,
                                  DATA_CACHE_EXPIRATION );

            $self->logger->debug( 'Document exists in mongo, updating.' );

            # update it and its events accordingly
            $self->_update_event_document( new_document => $document,
                                           old_document => $old_doc,
                                           bulk_updates => $bulk_updates,
                                           acquired_locks => $acquired_locks );
        }

        # doesn't exist in mongo, we'll need to create it along with its data points we added to it
        else {

            $self->logger->debug( 'Document does not exist in mongo, creating.' );

            my $bulk = $bulk_creates->{$data_type_name}{'event'};

            # haven't initialized a bulk op for this data type + collection yet
            if ( !defined( $bulk ) ) {

                my $collection = $data_type->database->get_collection( 'event' );
                $bulk = $collection->initialize_unordered_bulk_op();
                $bulk_creates->{$data_type_name}{'event'} = $bulk;
            }

            $document->create( bulk => $bulk );
        }

        # dont update memcache here, because it might fail during the bulk op; we'll find it and update cache later
    }

    $self->logger->debug( "Finished processing event document $data_type_name / $type / $start / $end." );
}
# _process_data_document
#
# Process one incoming high-resolution data document: take its redis lock,
# determine (memcache first, then mongo) whether a document for the same
# (data type, measurement identifier, start, end) already exists, and either
# merge the new data points / value types into it or create it from scratch.
#
# Named args:
#   document       - GRNOC::TSDS::DataDocument carrying the new data points
#   bulk_creates   - hashref of lazily-initialized bulk insert ops
#   bulk_updates   - hashref of lazily-initialized bulk update ops
#   acquired_locks - arrayref acquired redis locks are pushed onto
#
# Dies if the lock cannot be acquired, causing the message to be requeued.
sub _process_data_document {

    my ( $self, %args ) = @_;

    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # NOTE: unlike the event/aggregate handlers, $data_type here holds the
    # data type *name*, not the object
    my $data_type = $document->data_type->name;
    my $measurement_identifier = $document->measurement_identifier;
    my $start = $document->start;
    my $end = $document->end;
    my %new_value_types = %{$document->value_types};

    $self->logger->debug( "Processing data document $data_type / $measurement_identifier / $start / $end." );

    # get lock for this data document
    my $lock = $self->redislock->lock( type => $data_type,
                                       collection => 'data',
                                       identifier => $measurement_identifier,
                                       start => $start,
                                       end => $end ) or die "Can't lock data document for $data_type / $measurement_identifier / $start / $end";

    push( @$acquired_locks, $lock );

    my $cache_id = $self->redislock->get_cache_id( type => $data_type,
                                                   collection => 'data',
                                                   identifier => $measurement_identifier,
                                                   start => $start,
                                                   end => $end );

    # its already in our cache, seen it before
    if ( my $cached = $self->memcache->get( $cache_id ) ) {

        $self->logger->debug( 'Found document in cache, updating.' );

        my $old_value_types = $cached->{'value_types'};

        # update existing document along with its new data points
        ( $document, my $added_value_types ) = $self->_update_data_document( document => $document,
                                                                             old_value_types => $old_value_types,
                                                                             new_value_types => \%new_value_types,
                                                                             bulk_updates => $bulk_updates,
                                                                             acquired_locks => $acquired_locks );

        # will this update add a new value type?
        if ( @$added_value_types > 0 ) {

            # invalidate the cache entry so we fetch it from the db later and verify they were properly added during the bulk op
            $self->memcache->delete( $cache_id );
        }

        # maintain/update existing cache entry
        else {

            $self->memcache->set( $cache_id,
                                  {'value_types' => $document->value_types},
                                  DATA_CACHE_EXPIRATION );
        }
    }

    # not in cache, we'll have to query mongo to see if its there
    else {

        $self->logger->debug( 'Document not found in cache.' );

        # retrieve the full updated doc from mongo
        my $live_doc = $document->fetch();

        # document exists in mongo, so we'll need to update it
        if ( $live_doc ) {

            # update our cache with the doc info we found in the db
            $self->memcache->set( $cache_id,
                                  {'value_types' => $live_doc->value_types},
                                  DATA_CACHE_EXPIRATION );

            $self->logger->debug( 'Document exists in mongo, updating.' );

            # update existing document along with its new data points
            ( $document, my $added_value_types ) = $self->_update_data_document( document => $document,
                                                                                 old_value_types => $live_doc->value_types,
                                                                                 new_value_types => \%new_value_types,
                                                                                 bulk_updates => $bulk_updates,
                                                                                 acquired_locks => $acquired_locks );

            # will this update add a new value type?
            if ( @$added_value_types > 0 ) {

                # invalidate the cache entry so we fetch it from the db again later and verify they were properly added during the bulk op
                $self->memcache->delete( $cache_id );
            }
        }

        # doesn't exist in mongo, we'll need to create it along with the data points provided, and
        # make sure there are no overlaps with other docs due to interval change, etc.
        else {

            $self->logger->debug( 'Document does not exist in mongo, creating.' );

            $document = $self->_create_data_document( document => $document,
                                                      bulk_creates => $bulk_creates,
                                                      acquired_locks => $acquired_locks );
        }
    }

    $self->logger->debug( "Finished processing document $data_type / $measurement_identifier / $start / $end." );
}
# _process_aggregate_document
#
# Process one incoming aggregate (downsampled) data document. Same structure
# as _process_data_document, but aggregates live in per-interval collections
# named "data_<interval>" and use the longer AGGREGATE_CACHE_EXPIRATION.
#
# Named args:
#   document       - GRNOC::TSDS::AggregateDocument carrying aggregate points
#   bulk_creates   - hashref of lazily-initialized bulk insert ops
#   bulk_updates   - hashref of lazily-initialized bulk update ops
#   acquired_locks - arrayref acquired redis locks are pushed onto
#
# Dies if the lock cannot be acquired, causing the message to be requeued.
sub _process_aggregate_document {

    my ( $self, %args ) = @_;

    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    my $data_type = $document->data_type;
    my $data_type_name = $data_type->name;
    my $measurement_identifier = $document->measurement_identifier;
    my $start = $document->start;
    my $end = $document->end;
    my $interval = $document->interval;
    my %new_value_types = %{$document->value_types};

    $self->logger->debug( "Processing aggregate document $data_type_name - $interval / $measurement_identifier / $start / $end." );

    # get lock for this aggregate document
    my $lock = $self->redislock->lock( type => $data_type_name,
                                       collection => "data_$interval",
                                       identifier => $measurement_identifier,
                                       start => $start,
                                       end => $end ) or die "Can't lock aggregate data doc for $data_type_name - $interval / $measurement_identifier / $start / $end.";

    push( @$acquired_locks, $lock );

    my $cache_id = $self->redislock->get_cache_id( type => $data_type_name,
                                                   collection => "data_$interval",
                                                   identifier => $measurement_identifier,
                                                   start => $start,
                                                   end => $end );

    # its already in our cache, seen it before
    if ( my $cached = $self->memcache->get( $cache_id ) ) {

        $self->logger->debug( 'Found document in cache, updating.' );

        my $old_value_types = $cached->{'value_types'};

        # update existing document along with its new data points
        ( $document, my $added_value_types ) = $self->_update_aggregate_document( document => $document,
                                                                                  old_value_types => $old_value_types,
                                                                                  new_value_types => \%new_value_types,
                                                                                  bulk_updates => $bulk_updates,
                                                                                  acquired_locks => $acquired_locks );

        # will this update add a new value type?
        if ( @$added_value_types > 0 ) {

            # invalidate the cache entry so we fetch it from the db later and verify they were properly added during the bulk op
            $self->memcache->delete( $cache_id );
        }

        # maintain/update existing cache entry
        else {

            $self->memcache->set( $cache_id,
                                  {'value_types' => $document->value_types},
                                  AGGREGATE_CACHE_EXPIRATION );
        }
    }

    # not in cache, we'll have to query mongo to see if its there
    else {

        $self->logger->debug( 'Document not found in cache.' );

        # retrieve the full updated doc from mongo
        my $live_doc = $document->fetch();

        # document exists in mongo, so we'll need to update it
        if ( $live_doc ) {

            # update our cache with the doc info we found in the db
            $self->memcache->set( $cache_id,
                                  {'value_types' => $live_doc->value_types},
                                  AGGREGATE_CACHE_EXPIRATION );

            $self->logger->debug( 'Document exists in mongo, updating.' );

            # update existing document along with its new data points
            ( $document, my $added_value_types ) = $self->_update_aggregate_document( document => $document,
                                                                                     old_value_types => $live_doc->value_types,
                                                                                     new_value_types => \%new_value_types,
                                                                                     bulk_updates => $bulk_updates,
                                                                                     acquired_locks => $acquired_locks );

            # will this update add a new value type?
            if ( @$added_value_types > 0 ) {

                # invalidate the cache entry so we fetch it from the db again later and verify they were properly added during the bulk op
                $self->memcache->delete( $cache_id );
            }
        }

        # doesn't exist in mongo, we'll need to create it along with the aggregate points provided
        else {

            $self->logger->debug( 'Document does not exist in mongo, creating.' );

            my $bulk = $bulk_creates->{$data_type_name}{'data_' . $document->interval};

            # haven't initialized a bulk op for this data type + collection yet
            if ( !defined( $bulk ) ) {

                my $collection = $data_type->database->get_collection( 'data_' . $document->interval );
                $bulk = $collection->initialize_unordered_bulk_op();
                $bulk_creates->{$data_type_name}{'data_' . $document->interval} = $bulk;
            }

            $document = $document->create( bulk => $bulk );
        }
    }

    $self->logger->debug( "Finished processing aggregate document $data_type_name - $interval / $measurement_identifier / $start / $end." );
}
# _update_event_document
#
# Merge the events of a new event document into an existing one and queue the
# combined document as a bulk update. Events are considered the same if they
# share both start time and identifier; a new event replaces the old one.
#
# Named args:
#   new_document   - GRNOC::TSDS::EventDocument with freshly received events
#   old_document   - previously stored document covering the same range
#   bulk_updates   - hashref of lazily-initialized bulk update ops
#   acquired_locks - accepted for interface symmetry; not used here
sub _update_event_document {

    my ( $self, %args ) = @_;

    my $old_document   = $args{'old_document'};
    my $new_document   = $args{'new_document'};
    my $bulk_updates   = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # Merge both event lists keyed on (start, identifier).  Old events are
    # inserted first so any new event with the same key overwrites it.
    my %merged;

    foreach my $event ( @{ $old_document->events }, @{ $new_document->events } ) {

        $merged{ $event->start }{ $event->identifier } = $event;
    }

    # Flatten the merged index back into a single event list.
    my @combined;

    foreach my $by_identifier ( values %merged ) {

        push( @combined, values %$by_identifier );
    }

    my $data_type = $new_document->data_type;

    # Lazily initialize the unordered bulk op for this data type's event
    # collection on first use.
    my $bulk = $bulk_updates->{ $data_type->name }{'event'};

    if ( !defined( $bulk ) ) {

        $bulk = $data_type->database->get_collection( 'event' )->initialize_unordered_bulk_op();
        $bulk_updates->{ $data_type->name }{'event'} = $bulk;
    }

    # Store the merged event set on the new document and queue the update.
    $new_document->events( \@combined );
    $new_document->update( bulk => $bulk );
}
# _create_data_document
#
# Insert a brand new data document, first finding any existing documents whose
# [start, end) range overlaps the new one.  Overlaps occur when a
# measurement's interval changes, because the interval determines a document's
# time span.  Data points from overlapping documents are rebinned into new
# documents sized for the new interval, all resulting documents are queued for
# bulk creation, and the old overlapping documents are deleted from mongo and
# from the cache.
#
# Named args:
#   document       - GRNOC::TSDS::DataDocument to create
#   bulk_creates   - hashref of lazily-initialized bulk insert ops
#   acquired_locks - arrayref; locks taken on overlapping docs are pushed on
#
# Returns the document that was passed in.
sub _create_data_document {

    my ( $self, %args ) = @_;

    my $document = $args{'document'};
    my $bulk_creates = $args{'bulk_creates'};
    my $acquired_locks = $args{'acquired_locks'};

    # before we insert this new document, we will want to check for existing documents which
    # may have overlapping data with this new one. this can happen if there was an interval
    # change, since that affects the start .. end range of the document

    my $data_type = $document->data_type;
    my $identifier = $document->measurement_identifier;
    my $start = $document->start;
    my $end = $document->end;
    my $interval = $document->interval;

    $self->logger->debug( "Creating new data document $identifier / $start / $end." );

    # two ranges [s1,e1) and [s2,e2) intersect iff s1 < e2 && e1 > s2
    # help from http://eli.thegreenplace.net/2008/08/15/intersection-of-1d-segments
    my $query = Tie::IxHash->new( 'identifier' => $identifier,
                                  'start' => {'$lt' => $end},
                                  'end' => {'$gt' => $start} );

    # get this document's data collection
    my $data_collection = $data_type->database->get_collection( 'data' );

    $self->logger->debug( 'Finding existing overlapping data documents before creation.' );

    # the ids of the overlaps we found
    my @overlap_ids;

    # the cache ids of the overlaps we found
    my @overlap_cache_ids;

    # unique documents that the data points, after altering their interval, will belong in
    my $unique_documents = {};

    # add this new document as one of the unique documents that will need to get created
    $unique_documents->{$identifier}{$start}{$end} = $document;

    # specify index hint to address occasional performance problems executing this query
    my $overlaps = $data_collection->find( $query )->hint( 'identifier_1_start_1_end_1' )->fields( {'interval' => 1,
                                                                                                    'start' => 1,
                                                                                                    'end' => 1} );

    # handle every existing overlapping doc, if any
    while ( my $overlap = $overlaps->next ) {

        my $id = $overlap->{'_id'};
        my $overlap_interval = $overlap->{'interval'};
        my $overlap_start = $overlap->{'start'};
        my $overlap_end = $overlap->{'end'};

        # keep this as one of the docs that will need removed later
        push( @overlap_ids, $id );

        # determine cache id for this doc
        my $cache_id = $self->redislock->get_cache_id( type => $data_type->name,
                                                       collection => 'data',
                                                       identifier => $identifier,
                                                       start => $overlap_start,
                                                       end => $overlap_end );
        push( @overlap_cache_ids, $cache_id );

        # grab lock for this doc
        my $lock = $self->redislock->lock( type => $data_type->name,
                                           collection => 'data',
                                           identifier => $identifier,
                                           start => $overlap_start,
                                           end => $overlap_end ) or die "Can't lock overlapping data doc for $identifier";

        push( @$acquired_locks, $lock );

        $self->logger->debug( "Found overlapping data document with interval: $overlap_interval start: $overlap_start end: $overlap_end." );

        # create object representation of this duplicate doc
        my $overlap_doc = GRNOC::TSDS::DataDocument->new( data_type => $data_type,
                                                          measurement_identifier => $identifier,
                                                          interval => $overlap_interval,
                                                          start => $overlap_start,
                                                          end => $overlap_end );

        # fetch entire doc to grab its data points
        $overlap_doc->fetch( data => 1 );

        # handle every data point in this overlapping doc
        my $data_points = $overlap_doc->data_points;

        foreach my $data_point ( @$data_points ) {

            # set the *new* interval we'll be using for this data point
            $data_point->interval( $interval );

            # determine proper start and end time of *new* document
            my $doc_length = $interval * HIGH_RESOLUTION_DOCUMENT_SIZE;
            my $new_start = nlowmult( $doc_length, $data_point->time );
            my $new_end = $new_start + $doc_length;

            # determine the *new* document that this message would belong within
            my $new_document = GRNOC::TSDS::DataDocument->new( data_type => $data_type,
                                                               measurement_identifier => $identifier,
                                                               interval => $interval,
                                                               start => $new_start,
                                                               end => $new_end );

            # mark the document for this data point if one hasn't been set already
            my $unique_doc = $unique_documents->{$identifier}{$new_start}{$new_end};

            # we've never handled a data point for this document before
            if ( !$unique_doc ) {

                # mark it as being a new unique document we need to handle
                $unique_documents->{$identifier}{$new_start}{$new_end} = $new_document;
                $unique_doc = $unique_documents->{$identifier}{$new_start}{$new_end};
            }

            # add this as another data point to update/set in the document, if needed
            $unique_doc->add_data_point( $data_point ) if ( defined $data_point->value );
        }
    }

    # process all new documents that get created as a result of splitting the old document up
    my @measurement_identifiers = keys( %$unique_documents );

    foreach my $measurement_identifier ( @measurement_identifiers ) {

        my @starts = keys( %{$unique_documents->{$measurement_identifier}} );

        foreach my $start ( @starts ) {

            my @ends = keys( %{$unique_documents->{$measurement_identifier}{$start}} );

            foreach my $end ( @ends ) {

                my $unique_document = $unique_documents->{$measurement_identifier}{$start}{$end};

                my $bulk = $bulk_creates->{$data_type->name}{'data'};

                # haven't initialized a bulk op for this data type + collection yet
                if ( !defined( $bulk ) ) {

                    $bulk = $data_collection->initialize_unordered_bulk_op();
                    $bulk_creates->{$data_type->name}{'data'} = $bulk;
                }

                $self->logger->debug( "Creating new data document $measurement_identifier / $start / $end." );

                $unique_document->create( bulk => $bulk );
            }
        }
    }

    # remove all old documents that are getting replaced with new docs
    if ( @overlap_ids > 0 ) {

        # first remove from mongo
        $data_collection->delete_many( {'_id' => {'$in' => \@overlap_ids}} );

        # also must remove them from our cache since they should no longer exist
        $self->memcache->delete_multi( @overlap_cache_ids );
    }

    return $document;
}
# _update_data_document
#
# Add any not-yet-present value types to an existing data document and queue
# the document (with its new data points) as a bulk update.
#
# Named args:
#   document        - GRNOC::TSDS::DataDocument being updated
#   old_value_types - hashref of value types already present in the stored doc
#   new_value_types - hashref of value types present in the incoming data
#   bulk_updates    - hashref of lazily-initialized bulk update ops
#   acquired_locks  - accepted for interface symmetry; not used here
#
# Returns ( $document, \@value_types_to_add ) so the caller can decide
# whether its cache entry needs invalidating.
sub _update_data_document {

    my ( $self, %args ) = @_;

    my $document = $args{'document'};
    my $old_value_types = $args{'old_value_types'};
    my $new_value_types = $args{'new_value_types'};
    my $bulk_updates = $args{'bulk_updates'};
    my $acquired_locks = $args{'acquired_locks'};

    # determine which incoming value types the stored document doesn't have yet
    # (the dead "@old_value_types" local from the prior version was removed)
    my @value_types_to_add = grep { !$old_value_types->{$_} } keys( %$new_value_types );

    # did we find at least one new value type not in the doc?
    if ( @value_types_to_add ) {

        $self->logger->debug( "Adding new value types " . join( ',', @value_types_to_add ) . " to document." );
        $document->add_value_types( \@value_types_to_add );
    }

    my $data_type = $document->data_type;
    my $collection_name = 'data';

    my $bulk = $bulk_updates->{$data_type->name}{$collection_name};

    # haven't initialized a bulk op for this data type + collection yet
    if ( !defined( $bulk ) ) {

        my $collection = $data_type->database->get_collection( $collection_name );
        $bulk = $collection->initialize_unordered_bulk_op();
        $bulk_updates->{$data_type->name}{$collection_name} = $bulk;
    }

    $document->update( bulk => $bulk );

    return ( $document, \@value_types_to_add );
}
# _update_aggregate_document
#
# Add any not-yet-present value types to an existing aggregate document and
# queue the document as a bulk update against its per-interval collection.
#
# Named args:
#   document        - GRNOC::TSDS::AggregateDocument being updated
#   old_value_types - hashref of value types already present in the stored doc
#   bulk_updates    - hashref of lazily-initialized bulk update ops
#   acquired_locks  - accepted for interface symmetry; not used here
#
# NOTE(review): callers also pass a new_value_types arg; this sub has always
# read the equivalent set from $document->value_types instead.
#
# Returns ( $document, \@value_types_to_add ).
sub _update_aggregate_document {

    my ( $self, %args ) = @_;

    my $document        = $args{'document'};
    my $old_value_types = $args{'old_value_types'};
    my $bulk_updates    = $args{'bulk_updates'};
    my $acquired_locks  = $args{'acquired_locks'};

    # value types on this document that the stored document doesn't know yet
    my @value_types_to_add = grep { !$old_value_types->{$_} } keys( %{ $document->value_types } );

    if ( @value_types_to_add ) {

        $self->logger->debug( "Adding new value types " . join( ',', @value_types_to_add ) . " to document." );
        $document->add_value_types( \@value_types_to_add );
    }

    my $data_type       = $document->data_type;
    my $collection_name = 'data_' . $document->interval;

    # lazily initialize the bulk op for this data type + aggregate collection
    my $bulk = $bulk_updates->{ $data_type->name }{ $collection_name };

    if ( !defined( $bulk ) ) {

        $bulk = $data_type->database->get_collection( $collection_name )->initialize_unordered_bulk_op();
        $bulk_updates->{ $data_type->name }{ $collection_name } = $bulk;
    }

    # queue the update and report which value types were added
    $document->update( bulk => $bulk );

    return ( $document, \@value_types_to_add );
}
# _update_metadata_value_types
#
# Ensure every supplied value type is registered both in our in-memory data
# type map and in the data type's single mongo metadata document.  Memcache is
# used as a fast "already registered" check so the metadata document is only
# locked and queried when a never-before-seen value type shows up.  New value
# types get their own name as both description and units.
#
# Named args:
#   data_type   - GRNOC::TSDS::DataType the value types belong to
#   value_types - arrayref of value type names seen in incoming data
#
# Dies (after releasing the lock) if no metadata document exists.
sub _update_metadata_value_types {

    my ( $self, %args ) = @_;

    my $data_type = $args{'data_type'};
    my $new_value_types = $args{'value_types'};

    # determine all the cache ids for all these metadata value types
    my @cache_ids;

    foreach my $new_value_type ( @$new_value_types ) {

        # include this value type in its data type entry
        $self->data_types->{$data_type->name}->value_types->{$new_value_type} = {'description' => $new_value_type,
                                                                                 'units' => $new_value_type};

        my $cache_id = $self->redislock->get_cache_id( type => $data_type->name,
                                                       collection => 'metadata',
                                                       identifier => $new_value_type );

        push( @cache_ids, $cache_id );
    }

    # consult our cache to see if any of them don't exist yet
    my $cache_results = $self->memcache->get_multi( @cache_ids );

    my $found_missing = 0;

    foreach my $cache_id ( @cache_ids ) {

        # cache hit
        next if ( $cache_results->{$cache_id} );

        # found a value type we've never seen before
        $found_missing = 1;
        last;
    }

    # no new value types found to update
    return if ( !$found_missing );

    # get metadata collection for this data type
    my $metadata_collection = $data_type->database->get_collection( 'metadata' );

    # get lock for this metadata document
    my $lock = $self->redislock->lock( type => $data_type->name,
                                       collection => 'metadata' ) or die "Can't lock metadata for " . $data_type->name;

    # grab the current metadata document
    my $doc = $metadata_collection->find_one( {}, {'values' => 1} );

    # error if there is none present
    if ( !$doc ) {

        $self->redislock->unlock( $lock );
        die( 'No metadata document found for database ' . $data_type->name . '.' );
    }

    my $updates = {};

    # find any new value types
    foreach my $new_value_type ( @$new_value_types ) {

        # skip it if it already exists
        next if ( exists( $doc->{'values'}{$new_value_type} ) );

        $self->logger->debug( "Adding new value type $new_value_type to database " . $data_type->name . "." );

        # found a new one that needs to be added
        $updates->{"values.$new_value_type"} = {'description' => $new_value_type,
                                                'units' => $new_value_type};
    }

    # is there at least one update to perform?
    if ( keys( %$updates ) > 0 ) {

        # update the single metadata document with all new value types found
        $metadata_collection->update_one( {},
                                          {'$set' => $updates} );
    }

    # mark all value types in our cache
    my @multi = map { [$_ => 1] } @cache_ids;
    $self->memcache->set_multi( @multi );

    # all done, release our lock on this metadata document
    $self->redislock->unlock( $lock );
}
# _create_measurement_document
#
# Ensure an active measurement document (end == undef) exists for the given
# identifier in this data type's measurements collection, creating one with
# only the required metadata fields if necessary, then cache the fact that it
# exists so subsequent messages skip the database check.
#
# Named args:
#   identifier     - measurement identifier
#   data_type      - GRNOC::TSDS::DataType the measurement belongs to
#   meta           - hashref of metadata fields supplied with the data
#   start          - epoch time the measurement was first seen
#   interval       - collection interval of the measurement, in seconds
#   bulk_creates   - accepted for interface symmetry; not used here
#   acquired_locks - arrayref the acquired redis lock is pushed onto
sub _create_measurement_document {

    my ( $self, %args ) = @_;

    my $identifier = $args{'identifier'};
    my $data_type = $args{'data_type'};
    my $meta = $args{'meta'};
    my $start = $args{'start'};
    my $interval = $args{'interval'};
    my $bulk_creates = $args{'bulk_creates'};
    my $acquired_locks = $args{'acquired_locks'};

    $self->logger->debug( "Measurement $identifier in database " . $data_type->name . " not found in cache." );

    # get lock for this measurement identifier
    my $lock = $self->redislock->lock( type => $data_type->name,
                                       collection => 'measurements',
                                       identifier => $identifier ) or die "Can't lock measurements for $identifier";

    push( @$acquired_locks, $lock );

    # get measurement collection for this data type
    my $measurement_collection = $data_type->database->get_collection( 'measurements' );

    # see if it exists in the database (and is active)
    my $query = Tie::IxHash->new( identifier => $identifier,
                                  end => undef );

    my $exists = $measurement_collection->count( $query );

    # doesn't exist yet
    if ( !$exists ) {

        $self->logger->debug( "Active measurement $identifier not found in database " . $data_type->name . ", adding." );

        my $metadata_fields = $data_type->metadata_fields;

        # "+ 0" forces the timestamps to be stored as numbers, not strings
        my $fields = Tie::IxHash->new( identifier => $identifier,
                                       start => $start + 0,
                                       end => undef,
                                       last_updated => $start + 0 );

        while ( my ( $field, $value ) = each( %$meta ) ) {

            # skip it if its not a required meta field for this data type, the writer should only ever set those
            next if ( !$metadata_fields->{$field}{'required'} );

            $fields->Push( $field => $value );
        }

        # create it
        $measurement_collection->insert_one( $fields );
    }

    # mark it in our known cache so no one ever tries to add it again
    my $cache_id = $self->redislock->get_cache_id( type => $data_type->name,
                                                   collection => 'measurements',
                                                   identifier => $identifier );

    my $cache_duration = MEASUREMENT_CACHE_EXPIRATION;

    # use longer cache duration for measurements not submitted often
    $cache_duration = $interval * 2 if ( $interval * 2 > $cache_duration );

    # BUG FIX: previously passed $interval * 2 directly, ignoring the
    # $cache_duration floor computed above, which gave short-interval
    # measurements an expiration shorter than MEASUREMENT_CACHE_EXPIRATION
    $self->memcache->set( $cache_id, 1, $cache_duration );
}
# _fetch_data_types
#
# Rebuild the in-memory map of known data types from mongo.  Every database is
# included except those listed under /config/ignore-databases/database and
# those whose names begin with an underscore.  Databases that fail to load as
# a GRNOC::TSDS::DataType are logged and skipped.
sub _fetch_data_types {

    my ( $self ) = @_;

    $self->logger->debug( 'Getting data types.' );

    my $data_types = {};

    # determine databases to ignore
    my $ignore_databases = {};

    # temporarily force the config getter into list mode so a single
    # configured <database> entry still comes back as an array
    $self->config->{'force_array'} = 1;
    my @ignore_databases = $self->config->get( '/config/ignore-databases/database' );
    $self->config->{'force_array'} = 0;

    foreach my $database ( @ignore_databases ) {

        $database = $database->[0];

        $self->logger->debug( "Ignoring database '$database'." );

        $ignore_databases->{$database} = 1;
    }

    # grab all database names in mongo
    my @database_names = $self->mongo_rw->database_names();

    foreach my $database ( @database_names ) {

        # skip it if its marked to be ignored
        next if ( $ignore_databases->{$database} || $database =~ /^_/ );

        $self->logger->debug( "Storing data type for database $database." );

        my $data_type;

        try {

            $data_type = GRNOC::TSDS::DataType->new( name => $database,
                                                     database => $self->mongo_rw->get_database( $database ) );
        }

        catch {

            $self->logger->warn( $_ );
        };

        next if !$data_type;

        # store this as one of our known data types
        $data_types->{$database} = $data_type;
    }

    # update the list of known data types
    $self->_set_data_types( $data_types );
}
# _rabbit_connect
#
# Connect to RabbitMQ and begin consuming from this worker's queue, retrying
# forever with a fixed backoff on failure.  On success the connected client is
# stored via _set_rabbit().
sub _rabbit_connect {

    my ( $self ) = @_;

    my $rabbit_host  = $self->config->get( '/config/rabbit/@host' );
    my $rabbit_port  = $self->config->get( '/config/rabbit/@port' );
    my $rabbit_queue = $self->queue;

    my $connected = 0;

    until ( $connected ) {

        $self->logger->info( "Connecting to RabbitMQ $rabbit_host:$rabbit_port." );

        try {

            my $rabbit = Net::AMQP::RabbitMQ->new();

            $rabbit->connect( $rabbit_host, {'port' => $rabbit_port} );
            $rabbit->channel_open( 1 );
            $rabbit->queue_declare( 1, $rabbit_queue, {'auto_delete' => 0} );

            # bound the number of unacknowledged messages delivered at once
            $rabbit->basic_qos( 1, { prefetch_count => QUEUE_PREFETCH_COUNT } );
            $rabbit->consume( 1, $rabbit_queue, {'no_ack' => 0} );

            $self->_set_rabbit( $rabbit );

            $connected = 1;
        }

        catch {

            $self->logger->error( "Error connecting to RabbitMQ: $_" );
        };

        # loop condition ends the retry loop once connected
        next if $connected;

        $self->logger->info( "Reconnecting after " . RECONNECT_TIMEOUT . " seconds..." );
        sleep( RECONNECT_TIMEOUT );
    }

    return;
}
1;
| daldoyle/tsds-services | lib/GRNOC/TSDS/Writer/Worker.pm | Perl | apache-2.0 | 67,730 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::dell::idrac::snmp::plugin;

use strict;
use warnings;

use base qw(centreon::plugins::script_snmp);

# Plugin entry point for monitoring Dell iDrac devices over SNMP.
# Registers the modes this plugin supports with the centreon framework.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    # map of user-facing mode names to the modules implementing them
    my %modes = (
        'global-status' => 'hardware::server::dell::idrac::snmp::mode::globalstatus',
        'hardware'      => 'hardware::server::dell::idrac::snmp::mode::hardware',
    );

    # assign into the existing hash (rather than replacing the reference) to
    # preserve the original aliasing behavior with the parent class
    %{$self->{modes}} = %modes;

    return $self;
}

1;
__END__
=head1 PLUGIN DESCRIPTION
Check Dell iDrac in SNMP.
=cut
| wilfriedcomte/centreon-plugins | hardware/server/dell/idrac/snmp/plugin.pm | Perl | apache-2.0 | 1,409 |
# Auto-generated Paws shape class; see the POD below for usage details.
package Paws::Greengrass::Function;
  use Moose;
  # ARN of the Lambda function.
  has FunctionArn => (is => 'ro', isa => 'Str');
  # Configuration settings for the function.
  has FunctionConfiguration => (is => 'ro', isa => 'Paws::Greengrass::FunctionConfiguration');
  # Id of the function in this version.
  has Id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Greengrass::Function
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::Greengrass::Function object:
$service_obj->Method(Att1 => { FunctionArn => $value, ..., Id => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::Greengrass::Function object:
$result = $service_obj->Method(...);
$result->Att1->FunctionArn
=head1 DESCRIPTION
Information on function
=head1 ATTRIBUTES
=head2 FunctionArn => Str
Arn of the Lambda function.
=head2 FunctionConfiguration => L<Paws::Greengrass::FunctionConfiguration>
Configuration of the function
=head2 Id => Str
Id of the function in this version.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::Greengrass>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Greengrass/Function.pm | Perl | apache-2.0 | 1,552 |
#!/usr/bin/perl

# Generate a simple length-prefixed CSV test file.
#
# Usage: mksimplecsv.pl <record-count> <output-file>
#
# Each record is "<sequence>,<random quoted string>\n", and each record is
# preceded by its byte length packed as a 32-bit big-endian integer ('N').

use warnings;
use strict;

# character set random strings are drawn from
my @srcstr = split //, ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_';

srand(time());

# pre-computed pool of random values that intrand() cycles through
my $currand = 0;
my @myrands = map rand(), 1..(53 * 53);

my ($count, $outfile) = @ARGV;
die "Usage: $0 <record-count> <output-file>\n"
    unless defined $count && defined $outfile;

# intrand($n): pseudo-random integer in [0, $n), drawn from the fixed pool
sub intrand
{
    $currand = 0 if $currand > $#myrands;
    return int($_[0] * $myrands[$currand++]);
}

# rand_string($maxlen): double-quoted random string, at least 10 chars long
sub rand_string {
    my $len = shift;
    $len = intrand($len);
    $len += 10 if ($len < 10);
    # BUG FIX: was intrand(63), which could never select the final '_'
    # character of the 64-element @srcstr
    return '"' . join('', map $srcstr[intrand(scalar @srcstr)], 1..$len) . '"';
}

# lexical filehandle + checked open/close instead of the old bareword OUTF
open(my $outf, '>', $outfile) or die "Can't open output file: $!";
binmode $outf;

for (1..$count)
{
    my $rec = join(',', $_, rand_string(100)) . "\n";
    print {$outf} pack('N', length($rec)), $rec;
}

close($outf) or die "Can't close output file: $!";
| renodino/rdadapter | scripts/mksimplecsv.pl | Perl | apache-2.0 | 717 |
# Auto-generated SOAP::WSDL simple type for the AdWords v201402 RM service.
# Represents the membership status of a user list; see POD below.
package Google::Ads::AdWords::v201402::UserListMembershipStatus;
use strict;
use warnings;

# XML namespace this simple type belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/rm/v201402'};

# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);

1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
UserListMembershipStatus from the namespace https://adwords.google.com/api/adwords/rm/v201402.
Membership status of the user list. This status indicates whether a user list can accumulate more users and may be targeted to.
This class is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like its base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201402/UserListMembershipStatus.pm | Perl | apache-2.0 | 1,214 |
# Auto-generated Paws request class for the Glue GetClassifier API call;
# see the POD below for usage details.
package Paws::Glue::GetClassifier;
  use Moose;
  # Name of the Classifier to retrieve (required).
  has Name => (is => 'ro', isa => 'Str', required => 1);

  use MooseX::ClassAttribute;

  # wiring used by Paws to dispatch the call and shape its response
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'GetClassifier');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Glue::GetClassifierResponse');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Glue::GetClassifier - Arguments for method GetClassifier on Paws::Glue
=head1 DESCRIPTION
This class represents the parameters used for calling the method GetClassifier on the
AWS Glue service. Use the attributes of this class
as arguments to method GetClassifier.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to GetClassifier.
As an example:
$service_obj->GetClassifier(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> Name => Str
Name of the C<Classifier> to retrieve.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method GetClassifier in L<Paws::Glue>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Glue/GetClassifier.pm | Perl | apache-2.0 | 1,554 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Pipeline::SeqFetcher::Mfetch -
=head1 SYNOPSIS
my $obj = Bio::EnsEMBL::Pipeline::SeqFetcher::Mfetch->new(
-executable => $exe
);
my $seq = $obj->get_Seq_by_acc($acc);
=head1 DESCRIPTION
Object to retrieve sequences as Bio::Seq, using mfetch.
=head1 METHODS
=head1 APPENDIX
The rest of the documentation details each of the object methods.
Internal methods are usually preceded with a _
Method Bio::EnsEMBL::Root::_rearrange is deprecated.
use warnings ;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
rearrange(order, list); #instead
=cut
# Let the code begin...
package Bio::EnsEMBL::Pipeline::SeqFetcher::Mfetch;
use strict;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::DB::RandomAccessI;
use Bio::Seq;
use Time::HiRes qw ( gettimeofday ) ;
use vars qw(@ISA);   # NOTE(review): 'use vars' is obsolete; 'our @ISA' is the modern equivalent
$|=0;                # leave STDOUT buffered (0 is already Perl's default)
@ISA = qw(Bio::DB::RandomAccessI);   # implements the Bio::DB::RandomAccessI fetch interface
# Construct a new Mfetch sequence fetcher.
# Named args (via rearrange): -EXECUTABLE (path to mfetch, defaults to
# 'mfetch' found on $PATH) and -OPTIONS (extra mfetch command-line options).
sub new {
    my ($class, @args) = @_;
    my $self = bless {}, $class;

    my ($exe, $options) = rearrange(['EXECUTABLE', 'OPTIONS'], @args);

    $exe = 'mfetch' unless defined $exe;
    $self->executable($exe);

    if (defined $options) {
        $options =~ s/^\s+//g;    # strip leading whitespace before storing
        $self->options($options);
    }

    # Per-accession cache of Bio::Seq objects built by get_Seq_by_acc().
    $self->{_seq_cache} = {};

    return $self;
}
=head2 executable
Title : executable
Usage : $self->executable('/path/to/executable');
Function: Get/set for the path to the executable being used by the module. If not set, the executable is looked for in $PATH.
Returns : string
Args : string
=cut
# Get/set the path to the mfetch executable. When unset, mfetch is
# expected to be found on $PATH.
sub executable {
    my ($self, $exe) = @_;
    $self->{'_exe'} = $exe if defined $exe;
    return $self->{'_exe'};
}
=head2 options
Title : options
Usage : $self->options('tc');
Function: Get/set for options to mfetch
Returns : string
Args : string
=cut
# Get/set mfetch command-line options. Successive truthy values are
# APPENDED (space-separated) to whatever was stored before; a falsy
# previous value is simply overwritten.
sub options {
    my ($self, $options) = @_;
    if ($options) {
        $self->{'_options'} = $self->{_options}
            ? $self->{_options} . " " . $options
            : $options;
    }
    return $self->{'_options'};
}
=head2 get_Seq_by_acc
Title : get_Seq_by_acc
Usage : $self->get_Seq_by_acc($accession);
Function: Does the sequence retrieval via mfetch
Returns : Bio::Seq
Args :
=cut
# Fetch a single sequence by accession via "mfetch -v fasta" and return it
# as a Bio::Seq. Results are memoised in $self->{_seq_cache}. Throws when
# no accession is given, when the pipe to mfetch fails, or when mfetch
# returns "no match" (no Bio::Seq could be built).
sub get_Seq_by_acc {
    my ($self, $acc) = @_;

    if (!defined($acc)) {
        throw("No accession input");
    }

    # Return the cached Bio::Seq if we have fetched this accession before.
    if ( defined ( $self->{_seq_cache}{$acc})) {
        return $self->{_seq_cache}{$acc};
    }

    # Sequence not cached, needs to be fetched.
    my $seqstr;
    my $seq;
    my $mfetch = $self->executable;
    # The option for fetching a sequence only by mfetch is: mfetch -v fasta
    my $options = $self->options;
    # Ensure user-supplied options carry a leading dash.
    if (defined($options)) { $options = '-' . $options unless $options =~ /^-/; }

    my $command = "$mfetch -v fasta ";
    if (defined $options){
        $command .= "$options ";
    }
    $command .= $acc;

    print STDERR "$command\n" if $self->{verbose};

    # Read fasta output, skipping the ">" header line(s) and concatenating
    # the sequence lines into one string.
    open(IN,"$command |") or throw("Error opening pipe to mfetch for accession [$acc]: $mfetch");
    while (my $line=<IN>){
        chomp($line) ;
        next if $line=~m/>/;
        $seqstr.=$line;
    }
    close IN or throw("Error running mfetch for accession [$acc]: \n$mfetch");
    # NOTE(review): chomp on an undef $seqstr (no output at all) warns.
    chomp($seqstr);

    # Build the Bio::Seq unless mfetch reported "no match"; any constructor
    # failure is reported but handled via the throw below.
    eval{
        if(defined $seqstr && $seqstr ne "no match") {
            $seq = new Bio::Seq('-seq' => $seqstr,
                                '-accession_number' => $acc,
                                '-display_id' => $acc);
        }
    };
    if($@){
        print STDERR "$@\n";
    }
    throw("Could not mfetch sequence for [$acc]\n") unless defined $seq;

    $self->{_seq_cache}{$acc}=$seq;
    return $seq;
}
=head2 get_Entry_Fields_no_mfetch
Title : get_Entry_Fields_no_mfetch
Usage : $self->get_Entry_Fields_no_mfetch(\@accessions,\@fields,"/nfs/ensembl/amonida/uniprot_sprot_summary.dat","/nfs/ensembl/amonida/uniprot_trembl_summary.dat");
: $self->get_Entry_Fields_no_mfetch("Q5RFX5", \@fields,"/nfs/ensembl/amonida/uniprot_sprot_summary.dat","/nfs/ensembl/amonida/uniprot_trembl_summary.dat");
Arg [0] : $self
arg [1] : ACC as string ( Q5RFX5 ) OR arrayref to array of acc
arg [2] : arrayref to fields which you want to receive
\@field = qw( pe taxon acc ) ;
arg [3] : Filepath to uniprot dumped file containing Swiss-Prot protein fields.
arg [4] : Filepath to uniprot dumped file containing TrEMBL protein fields.
Function: Does the retrieval of different files like Taxonomy_id or PE level via uniprot dumped files.
Returns : arrayref to array of hashes for each acc.?
Args :
=cut
# File-based (no mfetch) variant of get_Entry_Fields. Strips any trailing
# ".version" from each accession and delegates to
# get_Entry_Fields_BatchFetch_no_mfetch, which scans the dumped
# Swiss-Prot and TrEMBL summary files.
sub get_Entry_Fields_no_mfetch {
    my ($self, $sprot_file, $trembl_file, $acc_to_fetch, $fields) = @_;

    print "Fields to get : " . join(" ", @$fields) . "\n" if $self->{verbose};

    if (ref($acc_to_fetch) =~ m/ARRAY/) {
        print "BatchFetch fields to get : " . join(" ", @$fields) . "\n" if $self->{verbose};
        # Drop version suffixes ("P12345.2" -> "P12345") before the lookup.
        my @bare_accs = map { (my $bare = $_) =~ s/\..*//; $bare } @$acc_to_fetch;
        return $self->get_Entry_Fields_BatchFetch_no_mfetch($sprot_file, $trembl_file, \@bare_accs, $fields);
    }
    # NOTE(review): a non-arrayref accession silently falls through and
    # returns nothing — confirm whether scalar input should be supported.
}
=head2 get_Entry_Fields
Title : get_Entry_Fields
Usage : $self->get_Entry_Fields(\@accessions,\@fields);
: $self->get_Entry_Fields("Q5RFX5", \@fields);
Arg [0] : $self
arg [1] : ACC as string ( Q5RFX5 ) OR arrayref to array of acc
arg [2] : arrayref to fields which you want to receive
\@field = qw( pe taxon acc ) ;
Function: Does the retrieval of different files like Taxonomy_id or PE level via mfetch,
either for one acc or in batch mode.
Returns : arrayref to array of hashes for each acc.?
Args :
=cut
# Fetch selected entry fields (e.g. "pe", "taxon", "acc") for one accession
# via mfetch, or delegate to get_Entry_Fields_BatchFetch when an arrayref
# of accessions is given.
#
# Returns [ \%all_entries, \@entries_not_found ] where %all_entries maps
# accession -> { FIELD => "value string" }.
#
# Change vs. previous revision: removed an unreachable "if (0) { ... }"
# debug block and stale commented-out pipe-handling code; no runtime
# behaviour is affected.
sub get_Entry_Fields {
    my ($self, $acc_to_fetch,$fields) = @_;

    print "Fields to get : " . join ( " " , @$fields )."\n" if $self->{verbose};

    # Arrayref input -> batch mode.
    if ( ref($acc_to_fetch)=~m/ARRAY/ ) {
        print "BatchFetch fields to get : " . join ( " " , @$fields )."\n" if $self->{verbose};
        return $self->get_Entry_Fields_BatchFetch($acc_to_fetch,$fields);
    }

    if (!defined($acc_to_fetch)) {
        throw("No accession input");
    }

    print " try to fetch $acc_to_fetch\n" if $self->{verbose};

    my $command ;
    my %all_entries;
    my @entries_not_found;

    my $cmd_prefix = $self->_make_mfetch_prefix_command($fields);
    $command = $cmd_prefix ." " . $acc_to_fetch;

    # Normalise the accession for use as a hash key — strips the "acc:",
    # "%" and quote characters of a wildcard query like mfetch -i "acc:AJFLD%".
    my $acc_format = $acc_to_fetch;
    $acc_format=~s/acc://g;
    $acc_format=~s/\%//g;
    $acc_format=~s/\"//g;

    print "cmd: $command\n" if $self->{verbose};
    my @lines = @{$self->_mfetch_command($command)} ;

    my %entry;
    for my $line ( @lines ) {
        chomp($line) ;
        # Only one entry is being parsed, so a "no match" means we are done.
        if ( $line =~m/no match/ ) {
            print "no entry found for $acc_to_fetch with $command\n" if $self->{verbose};
            push @entries_not_found, $acc_to_fetch;
            return [\%all_entries , \@entries_not_found ] ;
        }
        my @l = split /\s+/, $line;
        my $key_field = shift @l ;
        # The result can repeat a field identifier (e.g. several AC lines);
        # extra AC numbers are reported but everything is concatenated below.
        if ( $key_field =~/AC/) {
            if ( scalar(@l) > 1 ) {
                warning ("more than one AC number returned : " . join(" ", @l) . " - we ignore the others " . scalar(@l) . " objects\n") ;
            }
        }
        $entry{$key_field}.= join(" ", @l);
        print "populating $key_field ... ".join(" ", @l) . "\n" if $self->{verbose};
    }

    # Populate the result hash keyed by the normalised accession.
    for my $field ( keys %entry ) {
        $all_entries{$acc_format}{$field}=$entry{$field};
    }

    return [\%all_entries , \@entries_not_found ] ;
}
# Run an external command and capture its standard output.
# Returns an arrayref of raw output lines (newlines preserved); throws
# when the pipe cannot be opened or the command exits with an error.
sub _mfetch_command {
    my ($self, $command) = @_;
    open(my $pipe, "$command |") or throw("Error opening pipe to mfetch for command $command ");
    my @output = <$pipe>;
    close $pipe or throw("Error running mfetch for command $command ");
    return \@output;
}
=head2 get_Seq_by_acc_wildcard
Title : get_Seq_by_acc_wildcard
Usage : $self->get_Seq_by_acc_wildcard($accession);
Function: Does the sequence retrieval via mfetch
Returns : string
Args :
=cut
# Fetch fasta output for all accessions matching "acc:<ACC>%" (version
# suffix stripped, wildcard appended). Returns an arrayref of raw fasta
# lines, or an empty arrayref when mfetch reports "no match".
#
# Fixes vs. previous revision:
#  - _make_mfetch_prefix_command is now passed [] (an array ref) rather
#    than \[] (a ref to an array ref). The latter failed the ARRAY check
#    inside the helper; [] is also what get_Seq_BatchFetch passes.
#  - removed the unused $not_found variable.
sub get_Seq_by_acc_wildcard {
    my ($self, $acc) = @_;

    $acc =~ s/\..*//g;    # chop version off

    # Make sure fasta output is requested exactly once.
    my $options = $self->options;
    unless ($options =~ m/-v fasta/) {
        $self->options("-v fasta");
    }

    my $cmd_prefix = $self->_make_mfetch_prefix_command([]);
    $cmd_prefix .= " -i \"acc:$acc\%\"";

    my @entry;
    my @lines = @{ $self->_mfetch_command($cmd_prefix) };
    chomp(@lines);
    for (@lines) {
        if (/no match/) {
            return \@entry;    # empty arrayref signals "nothing found"
        }
    }
    return \@lines;
}
# Build a position -> accession lookup so that results coming back in
# submission order can be matched to the accession that produced them.
sub build_acc_index {
    my ($acc) = @_;
    my %index_of;
    @index_of{ 0 .. $#{$acc} } = @$acc;
    return \%index_of;
}
# Scan a dumped uniprot summary file for entries whose AC line contains one
# of the requested accessions, and return the matching field lines.
# An artificial DT line (Swiss-Prot or TrEMBL flavour, chosen from the
# file NAME) is injected just before each entry's PE line. Assumes every
# entry's field block ends with a PE line — TODO confirm for all dumps.
sub get_Entry_Fields_file {
    my ($filename,@acc) = @_;
    my @lines = ();
    my $acc_found = 0;                      # 1 while inside a wanted entry
    my %acc_hash = map { $_ => 1 } @acc;    # wanted accessions for O(1) lookup
    print("Starting get_Entry_Fields_file $filename, num acc=".scalar(@acc)."\n");
    # NOTE(review): 2-arg open without $! in the die message; 3-arg open
    # with error detail would be safer.
    open(FILE,$filename) || die "Could not open file $filename";
    while (my $line=<FILE>) {
        if ($acc_found == 1) {
            if ($line =~ /^PE/) { # end of fields for the accession found, last line to store for the current accession
                # Add the DT field artificially, derived from the filename.
                if ($filename =~ /uniprot_sprot/) {
                    push(@lines,"DT 00-AAA-0000, integrated into UniProtKB/Swiss-Prot.");
                } else {
                    push(@lines,"DT 00-AAA-0000, integrated into UniProtKB/TrEMBL.");
                }
                $acc_found = 0;
            }
            push(@lines,$line);
        } else {
            if ($line =~ /^AC/) {
                my $first_AC_line = $line;
                my @acc_line_fields = split /\s+/,$line;
                shift(@acc_line_fields); # remove AC field
                my $line_after_AC_line;
                # Collapse consecutive AC continuation lines into one list,
                # remembering the first non-AC line so it is not lost.
                while ($line=<FILE>) {
                    if ($line !~ /^AC/) {
                        $line_after_AC_line = $line;
                        last;
                    } else { # add more accessions from additional AC lines to our AC line
                        my @more_acc_line_fields = split /\s+/,$line;
                        shift(@more_acc_line_fields); # remove AC field
                        push(@acc_line_fields,@more_acc_line_fields);
                    }
                }
                # Keep only the accessions that were actually requested, not
                # every accession on a line where at least one matched.
                my @acc_found = ();
                foreach my $an_acc (@acc_line_fields) {
                    $an_acc =~ s/;//; # remove trailing ';'
                    if (exists $acc_hash{$an_acc}) {
                        push(@acc_found,$an_acc);
                        $acc_found = 1;
                    }
                }
                if ($acc_found == 1) {
                    my $acc_str = join(" ",@acc_found);
                    push(@lines,"AC $acc_str\n");
                    push(@lines,$line_after_AC_line);
                }
            }
        }
    }
    close(FILE);
    return @lines;
}
# Batch, file-based variant of get_Entry_Fields_BatchFetch: instead of
# running mfetch, entry fields are pulled from dumped Swiss-Prot and TrEMBL
# summary files via get_Entry_Fields_file, re-ordered to match the query
# order, then parsed by the same AC/PE state machine as the mfetch version.
# Returns [ \%all_entries, \@entries_not_found ].
sub get_Entry_Fields_BatchFetch_no_mfetch {
    my ($self,$sprot_file,$trembl_file,$acc,$fields) = @_;

    unless ( ref($acc) =~m/ARRAY/ ) {
        throw("if you use batchfetching you have to submit the acc-numbers in an array-reference\n");
    }

    my $cmd_prefix = $self->_make_mfetch_prefix_command($fields);
    # Maps position-in-query -> accession; the parser below walks this by
    # $entry_number to attribute each parsed entry to its accession.
    my %acc_index = %{build_acc_index($acc)};

    my @acc_to_fetch = @$acc;
    my @fetch_strings;
    push @fetch_strings,join( " ",@acc_to_fetch); # everything is fetched in one string
    print "got " . scalar(@fetch_strings) . " jobs to run \n";

    my $command ;
    my @entries_not_found;
    my $last_acc;
    my %last_entry ;
    my $entry_number = 0;
    my @lines ;           # accumulated, re-ordered field lines for parsing

    # Data fetching + parsing state.
    my %no_match_index;   # positions recorded as "no match"
    my $last_field ="";
    my ( $little_hash, $all_entries ) ;   # current entry / all parsed entries
    my $new_entry = 0;

    STRING: for my $acc_string ( @fetch_strings ) {
        $command = $cmd_prefix ." " . $acc_string;
        my @nr_acc = split /\s+/, $acc_string ;
        print $entry_number . " / " . keys (%acc_index) . " entries fetched\n" ;

        my $t0 = gettimeofday() ;
        print "starting no_mfetch\n" ;
        my @lines_sprot = get_Entry_Fields_file($sprot_file,@nr_acc);
        print ("sprot finished, num of lines_sprot:".scalar(@lines_sprot)."\n");
        my @lines_trembl = get_Entry_Fields_file($trembl_file,@nr_acc);
        print ("trembl finished, num of lines_trembl:".scalar(@lines_trembl)."\n");
        my @unsorted_lines = ();
        push(@unsorted_lines,@lines_sprot);
        push(@unsorted_lines,@lines_trembl);
        print("SORTING NOW!\n");
        my $is_found = 0;
        # Re-order lines so accessions appear in the same order as in
        # $acc_string; a query accession with no AC line gets "no match".
        # NOTE(review): this inner scan is O(#accs * #lines).
        ONE_ACC: foreach my $one_acc (@nr_acc) {
            foreach my $one_line (@unsorted_lines) {
                if ($is_found == 0) {
                    if ( ($one_line =~ /^AC/) and ($one_line =~ /$one_acc/) ) {
                        push(@lines,"AC $one_acc");
                        $is_found = 1;
                    } else {
                        ;
                    }
                } else {
                    push(@lines,$one_line);
                    if ($one_line =~ /^PE/) { # end of fields for this accession
                        $is_found = 0;
                        next ONE_ACC;
                    }
                }
            } # end foreach one_line
            push(@lines,"no match\n");
        } # end foreach one_acc
        my $t1 = gettimeofday() ;
        my $delta_t = $t1 - $t0 ;
        print "time for no_mfetch : $delta_t\n" ;

        # Parse the re-ordered lines. State machine: AC starts a new entry;
        # a finished entry is flushed to %$all_entries when the next AC (or
        # a "no match") is seen; "no match" lines advance $entry_number.
        LINE: for my $line ( @lines ) {
            print "PARSING : $line\n" if $self->verbose();
            chomp($line) ;
            if ( $line =~m/no match/ ) {
                print "line contains \"no match\" \n" if $self->{verbose} ;
                $last_field = "";
                # If we have an entry in memory, store it first.
                if (scalar(keys %$little_hash) > 0 ) {
                    print " have no_match now, but a full entry in memory for ". $acc_index{$entry_number} . "-- so let's try and store it.\n" if $self->{verbose};
                    my $query_acc = $acc_index{$entry_number};
                    $all_entries = _add_information_to_big_hash($all_entries, $little_hash,$query_acc );
                    undef $little_hash;
                    $entry_number++ ;
                    print "stored and entry incremented : $entry_number $acc_index{$entry_number} NO_MATCH \n" ;
                    print "NEW adding $acc_index{$entry_number} $entry_number to the list of no_matches \n" if $self->{verbose};
                    push @entries_not_found, $acc_index{$entry_number};
                    $no_match_index{$entry_number}=1;
                    next LINE;
                }
                print "no match for $entry_number -- $acc_index{$entry_number}\n" if $self->{verbose};;
                if ( exists $no_match_index{$entry_number} ) {
                    $entry_number++ ;
                }
                $no_match_index{$entry_number}=1;
                print "adding $acc_index{$entry_number} $entry_number to the list of no_matches \n" if $self->{verbose};
                push @entries_not_found, $acc_index{$entry_number};
                $entry_number++ ;
                next LINE;
            }
            my @l = split /\s+/, $line;
            my $field = shift @l ;
            # An AC field marks the start of an entry unless the previous
            # line was also AC (multiple consecutive AC lines per entry).
            if ($field =~m/AC/ ) {
                if ( $last_field =~m/AC/) {
                    $new_entry = 0 ;
                } else {
                    print "\nnew entry found ...\n" if $self->{verbose} ;
                    $new_entry = 1;
                    $last_field = $field ;
                }
            }
            if ( $new_entry == 1 ) {
                if (scalar(keys %$little_hash) > 0 ) { # a full, unstored entry is in memory
                    if ( $field =~/AC/ ) { # a new entry starts, so flush the previous one
                        print " NEW ENTRY STARTS\n" if $self->{verbose} ;
                        # Skip positions already recorded as "no match".
                        while ( exists $no_match_index{$entry_number} ) {
                            print $acc_index{$entry_number} . " is recorded as NO MATCH - checking next ...\n" if $self->{verbose} ;
                            $entry_number++ ;
                        }
                        my $query_acc ;
                        $query_acc = $acc_index{$entry_number};
                        $all_entries = _add_information_to_big_hash($all_entries, $little_hash,$query_acc );
                        undef $little_hash;
                        $entry_number++;
                    }
                }
                elsif ( exists $no_match_index{$entry_number} ) {
                    warning("entry with number $entry_number ( $acc_index{$entry_number} ) was recorded as no_match -incrementing entry ... \n");
                    $entry_number++;
                }
            }
            # Accumulate the field value on the current entry.
            # NOTE(review): repeated fields are concatenated with no
            # separator between successive lines — confirm intended.
            $$little_hash{$field}.=join (" " , @l);
            $last_field = $field ;
        } # next LINE
    } # next STRING - fetch next round

    # Flush the final entry.
    $all_entries = _add_information_to_big_hash($all_entries, $little_hash,$acc_index{$entry_number} );

    if ( $self->{verbose} ) {
        for my $key ( keys %{$all_entries} ) {
            print "KEY $key\n" ;
            my %tmp = %{$$all_entries{$key}} ;
            for ( keys %tmp ) {
                print "\t\t$_ --> $tmp{$_}\n";
            }
            print "\n" ;
        }
    }
    # Combine both results.
    return [$all_entries , \@entries_not_found ] ;
}
=head2 get_Entry_Fields_BatchFetch
Title : batch_fetch
Usage : $self->batch_retrieval(@accession_list);
Function: Retrieves multiple sequences via mfetch
Returns : reference to a list of Bio::Seq objects
Args : array of accession strings
=cut
# Batch retrieval of entry fields via mfetch, 1000 accessions per call.
# The output is parsed by an AC/PE state machine identical to the one in
# get_Entry_Fields_BatchFetch_no_mfetch; entries are attributed to query
# accessions by position ($entry_number into %acc_index).
# Returns [ \%all_entries, \@entries_not_found ].
sub get_Entry_Fields_BatchFetch {
    my ($self, $acc,$fields) = @_;

    unless ( ref($acc) =~m/ARRAY/ ) {
        throw("if you use batchfetching you have to submit the acc-numbers in an array-reference\n");
    }

    # NOTE: mfetch does currently not work for fetching with wildcards;
    # use get_Entry_Fields() for that.
    my $cmd_prefix = $self->_make_mfetch_prefix_command($fields);
    my %acc_index = %{build_acc_index($acc)};

    # Fetch in batches of 1000 accessions per mfetch invocation.
    my @fetch_strings = @{make_fetch_strings($acc, 1000 )};
    print "got " . scalar(@fetch_strings) . " jobs to run \n";

    my $command ;
    my @entries_not_found;
    my $last_acc;
    my %last_entry ;
    my $entry_number = 0;
    my @lines ;   # NOTE(review): shadowed by the per-batch @lines below

    # Data fetching + parsing state.
    my %no_match_index;   # positions recorded as "no match"
    my $last_field ="";
    my ( $little_hash, $all_entries ) ;   # current entry / all parsed entries
    my $new_entry = 0;

    STRING: for my $acc_string ( @fetch_strings ) {
        $command = $cmd_prefix ." " . $acc_string;
        my @nr_acc = split /\s+/, $acc_string ;
        print $entry_number . " / " . keys (%acc_index) . " entries fetched\n" ;
        if ( $self->{verbose} ) {
            print "\n\n$command \n\n" ;
        }
        my $t0 = gettimeofday() ;
        print "starting mfetch\n" ;
        my @lines = @{$self->_mfetch_command($command)} ;
        my $t1 = gettimeofday() ;
        my $delta_t = $t1 - $t0 ;
        print "time for mfetch : $delta_t\n" ;

        # Parse the batch output. AC starts a new entry; a finished entry is
        # flushed when the next AC (or a "no match") is seen; "no match"
        # lines advance $entry_number past absent accessions.
        LINE: for my $line ( @lines ) {
            print "PARSING : $line\n" if $self->verbose();
            chomp($line) ;
            if ( $line =~m/no match/ ) {
                print "line contains \"no match\" \n" if $self->{verbose} ;
                $last_field = "";
                # If we have an entry in memory, store it first.
                if (scalar(keys %$little_hash) > 0 ) {
                    print " have no_match now, but a full entry in memory for ". $acc_index{$entry_number} . "-- so let's try and store it.\n" if $self->{verbose};
                    my $query_acc = $acc_index{$entry_number};
                    $all_entries = _add_information_to_big_hash($all_entries, $little_hash,$query_acc );
                    undef $little_hash;
                    $entry_number++ ;
                    print "stored and entry incremented : $entry_number $acc_index{$entry_number} NO_MATCH \n" ;
                    print "NEW adding $acc_index{$entry_number} $entry_number to the list of no_matches \n" if $self->{verbose};
                    push @entries_not_found, $acc_index{$entry_number};
                    $no_match_index{$entry_number}=1;
                    next LINE;
                }
                print "no match for $entry_number -- $acc_index{$entry_number}\n" if $self->{verbose};;
                if ( exists $no_match_index{$entry_number} ) {
                    $entry_number++ ;
                }
                $no_match_index{$entry_number}=1;
                print "adding $acc_index{$entry_number} $entry_number to the list of no_matches \n" if $self->{verbose};
                push @entries_not_found, $acc_index{$entry_number};
                $entry_number++ ;
                next LINE;
            }
            my @l = split /\s+/, $line;
            my $field = shift @l ;
            # An AC field marks the start of an entry unless the previous
            # line was also AC (multiple consecutive AC lines per entry).
            if ($field =~m/AC/ ) {
                if ( $last_field =~m/AC/) {
                    $new_entry = 0 ;
                } else {
                    print "\nnew entry found ...\n" if $self->{verbose} ;
                    $new_entry = 1;
                    $last_field = $field ;
                }
            }
            if ( $new_entry == 1 ) {
                if (scalar(keys %$little_hash) > 0 ) { # a full, unstored entry is in memory
                    if ( $field =~/AC/ ) { # a new entry starts, so flush the previous one
                        print " NEW ENTRY STARTS\n" if $self->{verbose} ;
                        # Skip positions already recorded as "no match".
                        while ( exists $no_match_index{$entry_number} ) {
                            print $acc_index{$entry_number} . " is recorded as NO MATCH - checking next ...\n" if $self->{verbose} ;
                            $entry_number++ ;
                        }
                        my $query_acc ;
                        $query_acc = $acc_index{$entry_number};
                        $all_entries = _add_information_to_big_hash($all_entries, $little_hash,$query_acc );
                        undef $little_hash;
                        $entry_number++;
                    }
                } elsif ( exists $no_match_index{$entry_number} ) {
                    warning("entry with number $entry_number ( $acc_index{$entry_number} ) was recorded as no_match -incrementing entry ... \n");
                    $entry_number++;
                }
            }
            # Accumulate the field value on the current entry.
            # NOTE(review): repeated fields are concatenated with no
            # separator between successive lines — confirm intended.
            $$little_hash{$field}.=join (" " , @l);
            $last_field = $field ;
        } # next LINE
    } # next STRING - fetch next round

    # Flush the final entry.
    $all_entries = _add_information_to_big_hash($all_entries, $little_hash,$acc_index{$entry_number} );

    if ( $self->{verbose} ) {
        for my $key ( keys %{$all_entries} ) {
            print "KEY $key\n" ;
            my %tmp = %{$$all_entries{$key}} ;
            for ( keys %tmp ) {
                print "\t\t$_ --> $tmp{$_}\n";
            }
            print "\n" ;
        }
    }
    # Combine both results.
    return [$all_entries , \@entries_not_found ] ;
}
# Batch-fetch fasta sequence lines for an arrayref of accessions, 500 per
# mfetch call. Returns [ \@clean, \@entries_not_found ] where @clean holds
# the raw (chomped) fasta lines for all found entries.
sub get_Seq_BatchFetch {
    my ($self, $acc ) = @_;

    unless ( ref($acc) =~m/ARRAY/ ) {
        throw("if you use batchfetching you have to submit the acc-numbers in an array-reference\n");
    }

    # NOTE: mfetch does currently not work for fetching with wildcards;
    # use get_Entry_Fields() for that.
    # Ensure fasta output is requested.
    # NOTE(review): warns when no options were ever set ($options undef).
    my $options = $self->options ;
    unless ( $options=~m/-v fasta/ ) {
        $self->options("-v fasta") ;
    }

    my $cmd_prefix = $self->_make_mfetch_prefix_command([]) ;
    my %acc_index = %{build_acc_index($acc)};

    # Fetch in batches of 500.
    my @fetch_strings = @{make_fetch_strings($acc, 500 )};

    my (@clean , @entries_not_found, @lines ) ;
    my ($command) ;
    my $entry_number = 0;

    # Data fetching + parsing.
    STRING: for my $acc_string ( @fetch_strings ) {
        $command = $cmd_prefix ." " . $acc_string;
        my @nr_acc = split /\s+/, $acc_string ;
        # Data retrieval.
        my @lines = @{$self->_mfetch_command($command)} ;
        LINE: for my $line ( @lines ) {
            chomp($line) ;
            if ( $line =~m/no match/ ) {
                # NOTE(review): $entry_number is incremented BEFORE the
                # lookup, and is never advanced for matched entries, so the
                # reported accession can be off — a correct mapping would
                # need the fasta headers to be parsed. Flagged, not fixed,
                # to avoid changing downstream expectations.
                $entry_number++;
                print "no match for $acc_index{$entry_number}\n" ;
                push @entries_not_found , $acc_index{$entry_number};
                next LINE;
            } else {
                push @clean, $line ;
            }
        }
    } # fetch next round ..

    for ( @entries_not_found ) {
        print "no entry found for : $_\n" if $self->{verbose};
    }
    return [\@clean, \@entries_not_found ] ;
}
# Merge one parsed entry (hashref of FIELD => "value") into the result hash
# under $query_acc, after checking that $query_acc matches one of the AC
# numbers the entry actually carries. Throws on a mismatch; reports (but
# keeps the first copy of) a duplicate accession.
sub _add_information_to_big_hash {
    my ($all, $little, $query_acc) = @_ ;

    # $$little{AC} looks like "Q7PYN8; Q01FQ6;" — strip ';' and split.
    my $acc_string = $$little{AC};
    $acc_string =~s/\;//g;
    my @accession_numbers = split /\s+/, $acc_string ;

    # Consistency check: the query acc used in the mfetch query should also
    # be in the AC field of the returned entry. Substring match so a
    # versioned query ("Q123.1") still matches the bare acc ("Q123").
    my $found ;
    for my $returned_acc ( @accession_numbers ) {
        if ($query_acc =~m/$returned_acc/) {
            $found = 1;
        }
    }
    unless ( $found ) {
        throw( " the acc $query_acc can't be found in the query returned by mfetch [ " . join (" ", @accession_numbers) . " ]\n");
    }

    unless ( exists $$all{$query_acc} ) {
        $$all{$query_acc} = $little;
    }else {
        # We already have an entry for this acc — check the entries agree.
        print "Warning ! The acc. you like to add has already been indexed ...\n" ;
        for my $lk ( keys %$little) {
            # NOTE(review): regex match rather than 'eq' — field values
            # containing regex metacharacters could mis-compare; only the
            # warning output is affected, not the stored data.
            if ( $$little{$lk}=~/$$all{$query_acc}{$lk}/ ) {
            } else {
                warning( "POSSIBLE DATA INCONSITENCY !!!\n" );
                print "NEW : $lk --> $$little{$lk}\n" ;
                print "OLD : $lk --> $$all{$query_acc}{$lk}\n";
            }
        }
        print "\n\n\n" ;
    }
    return $all;
}
# Partition a list of accessions into space-joined strings of at most
# $size accessions each, ready to be appended to an mfetch command line.
sub make_fetch_strings {
    my ($acc_array, $size) = @_;

    my @remaining = @$acc_array;
    my @fetch_strings;
    if (scalar(@remaining) > $size) {
        # Chop off $size accessions at a time until the list is empty.
        push @fetch_strings, join(" ", splice(@remaining, 0, $size)) while @remaining;
    }
    else {
        # Everything fits into a single string.
        push @fetch_strings, join(" ", @remaining);
    }
    return \@fetch_strings;
}
# Get/set the verbose flag.
#
# Bug fix: the previous version only ever SET the flag and returned
# nothing useful as a getter, so every call site written as
# "... if $self->verbose()" was permanently false (other sites work
# around this by reading $self->{verbose} directly). The accessor now
# returns the stored value, making both styles equivalent.
sub verbose {
    my ($self, $arg) = @_;
    if (defined $arg) {
        $self->{verbose} = $arg;
    }
    return $self->{verbose};
}
# Assemble the constant prefix of an mfetch command line:
#   "<executable> [-f \"acc <fields> \"] [<options>] "
# $f is an arrayref of bare field names (e.g. ['pe', 'taxon']); 'acc' is
# always forced to the front of the field list exactly once. The stored
# options string gains a leading '-' if the caller left it off.
#
# Fix vs. previous revision: the inner "my $f" shadowed the parameter $f;
# renamed to $field_str for clarity (behaviour unchanged).
sub _make_mfetch_prefix_command {
    my ($self, $f) = @_;

    my $mfetch  = $self->executable;
    my $options = $self->options;
    if (defined($options)) {
        unless ($options =~ /^-/) {
            $options = '-' . $options;
        }
    }

    my $command = "$mfetch ";

    # Case 1: fetch selected entry fields with the -f flag, i.e.
    #   mfetch -f "acc Taxon " ABCDE890.1
    # Callers should submit only field names, NOT "-f Taxon".
    if (defined $f && ref($f) =~ m/ARRAY/) {
        my @fields = @$f;
        if (scalar(@fields) > 0) {
            my $field_str = join(" ", @fields);
            # Remove a -f flag if the user submitted one anyway.
            $field_str =~ s/-f//g;
            # Put 'acc' at the beginning of the string, removing any
            # redundant occurrence first so it appears exactly once.
            if ($field_str =~ m/acc/) {
                $field_str =~ s/acc//g;
            }
            $field_str = "acc " . $field_str;
            $command .= " -f \"$field_str \" ";
        }
    }

    if (defined $options) {
        $command .= "$options ";
    }
    if ($self->verbose) {
        print "PREFIX COMMAND $command\n"
    }
    return $command;
}
1;
| Ensembl/ensembl-pipeline | modules/Bio/EnsEMBL/Pipeline/SeqFetcher/Mfetch.pm | Perl | apache-2.0 | 30,301 |
#!/usr/bin/perl
use strict;
use Email::MIME;
use Message;
use Encode qw(decode);
use Data::Dumper;
# Parse the sample message and dump the extracted attachment structure.
# Uses arrow method-call syntax instead of the ambiguous indirect object
# syntax ("new MessageExtract::Message(...)").
my $msg  = MessageExtract::Message->new(slurp("test_data/attachment.eml"));
my $data = $msg->graceful_extract("attachment0");
print Dumper($data);
##
# Slurp
#
# Read a file all at once.
#
# @param {string} filename the filename to open
# @return {string} contents of the file
# @throws die on error (the message now includes the OS error, $!)
sub slurp {
    my ($filename) = @_;
    open my $fh, "<", $filename
        or die("Unable to open $filename: $!\n");
    my $content = do {
        local $/;    # slurp mode: read to end-of-file in one go
        <$fh>;
    };
    close $fh;
    return $content;
}
| active911/MessageExtract | mytest.pl | Perl | apache-2.0 | 627 |
package VMOMI::VirtualDeviceConnectInfo;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Ancestor chain consulted by the VMOMI (de)serialisation machinery.
our @class_ancestors = (
    'DynamicData',
);

# Member descriptor tuples — presumably [name, type, ?, optional-flag];
# confirm against the VMOMI base class. 'status' carries no explicit type.
our @class_members = (
    ['startConnected', 'boolean', 0, ],
    ['allowGuestControl', 'boolean', 0, ],
    ['connected', 'boolean', 0, ],
    ['status', undef, 0, 1],
);

# Return this class's ancestor list.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return the inherited members followed by the members declared here.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
=pod
=head1 NAME
Bio::EnsEMBL::Hive::PipeConfig::HiveGeneric_conf
=head1 SYNOPSIS
# Example 1: specifying only the mandatory option:
init_pipeline.pl Bio::EnsEMBL::Hive::PipeConfig::HiveGeneric_conf -password <mypass>
# Example 2: specifying the mandatory options as well as overriding some defaults:
init_pipeline.pl Bio::EnsEMBL::Hive::PipeConfig::HiveGeneric_conf -host <myhost> -dbname <mydbname> -password <mypass>
=head1 DESCRIPTION
Generic configuration module for all Hive pipelines with loader functionality.
All other Hive PipeConfig modules should inherit from this module and will probably need to redefine some or all of the following interface methods:
* default_options: returns a hash of (possibly multilevel) defaults for the options on which depend the rest of the configuration
* pipeline_create_commands: returns a list of strings that will be executed as system commands needed to create and set up the pipeline database
* pipeline_wide_parameters: returns a hash of pipeline-wide parameter names and their values
* resource_classes: returns a hash of resource class definitions
* pipeline_analyses: returns a list of hash structures that define analysis objects bundled with definitions of corresponding jobs, rules and resources
* beekeeper_extra_cmdline_options returns a string with command line options that you want to be passed to the beekeeper.pl
When defining anything except the keys of default_options() a call to $self->o('myoption') can be used.
This call means "substitute this call for the value of 'myoption' at the time of configuring the pipeline".
All option names mentioned in $self->o() calls within the five interface methods above can be given non-default values from the command line.
Please make sure you have studied the pipeline configuration examples in Bio::EnsEMBL::Hive::PipeConfig before creating your own PipeConfig modules.
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2022] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
=head1 CONTACT
Please subscribe to the Hive mailing list: http://listserver.ebi.ac.uk/mailman/listinfo/ehive-users to discuss Hive-related questions or to be notified of our updates
=cut
package Bio::EnsEMBL::Hive::PipeConfig::HiveGeneric_conf;
use strict;
use warnings;
use Exporter 'import';
our @EXPORT = qw(WHEN ELSE INPUT_PLUS);
use Scalar::Util qw(looks_like_number);
use Bio::EnsEMBL::Hive;
use Bio::EnsEMBL::Hive::Utils ('stringify', 'join_command_args', 'whoami');
use Bio::EnsEMBL::Hive::Utils::PCL;
use Bio::EnsEMBL::Hive::Utils::URL;
use Bio::EnsEMBL::Hive::DBSQL::SqlSchemaAdaptor;
use Bio::EnsEMBL::Hive::DBSQL::AnalysisJobAdaptor;
use Bio::EnsEMBL::Hive::Analysis;
use Bio::EnsEMBL::Hive::AnalysisCtrlRule;
use Bio::EnsEMBL::Hive::DataflowRule;
use Bio::EnsEMBL::Hive::AnalysisStats;
use Bio::EnsEMBL::Hive::AnalysisJob;
use Bio::EnsEMBL::Hive::ResourceClass;
use Bio::EnsEMBL::Hive::ResourceDescription;
use Bio::EnsEMBL::Hive::Valley;
use base ('Bio::EnsEMBL::Hive::DependentOptions');
# ---------------------------[the following methods will be overridden by specific pipelines]-------------------------
=head2 default_options
Description : Interface method that should return a hash of option_name->default_option_value pairs.
Please see existing PipeConfig modules for examples.
=cut
sub default_options {
    my ($self) = @_;

    # Returns the hash of option_name => default_value pairs for every pipeline.
    # NOTE: values given as $self->o('xxx') are NOT resolved here - they are
    # placeholders substituted later by the two-pass DependentOptions machinery,
    # which is why an option may legally "default to itself" (forcing the user
    # to supply it on the command line).
    return {
        'hive_root_dir'         => $ENV{'EHIVE_ROOT_DIR'}, # this value is set up automatically if this code is run by init_pipeline.pl

        'hive_driver'           => 'mysql',
        'host'                  => $ENV{'EHIVE_HOST'} || 'localhost',   # BEWARE that 'localhost' for mysql driver usually means a UNIX socket, not a TCPIP socket!
                                                                        # If you need to connect to TCPIP socket, set  -host => '127.0.0.1' instead.

        'port'                  => $ENV{'EHIVE_PORT'},                  # or remain undef, which means default for the driver
        'user'                  => $ENV{'EHIVE_USER'} // $self->o('user'),      # self-referential default: must be supplied if the env var is unset
        'password'              => $ENV{'EHIVE_PASS'} // $self->o('password'),  # people will have to make an effort NOT to insert it into config files like .bashrc etc
        'dbowner'               => $ENV{'EHIVE_USER'} || whoami() || $self->o('dbowner'),   # although it is very unlikely that the current user has no name

        'hive_use_triggers'     => 0,       # there have been a few cases of big pipelines misbehaving with triggers on, let's keep the default off.
        'hive_use_param_stack'  => 0,       # do not reconstruct the calling stack of parameters by default (yet)
        'hive_auto_rebalance_semaphores' => 0,  # do not attempt to rebalance semaphores periodically by default
        'hive_default_max_retry_count'   => 3,  # default value for the max_retry_count parameter of each analysis
        'hive_force_init'       => 0,       # setting it to 1 will drop the database prior to creation (use with care!)
        'hive_no_init'          => 0,       # setting it to 1 will skip pipeline_create_commands (useful for topping up)
        'hive_debug_init'       => 0,       # setting it to 1 will make init_pipeline.pl tell everything it's doing

        'pipeline_name'         => $self->default_pipeline_name(),

        # Connection parameters of the hive database itself, assembled from the scalar options above:
        'pipeline_db'   => {
            -driver => $self->o('hive_driver'),
            -host   => $self->o('host'),
            -port   => $self->o('port'),
            -user   => $self->o('user'),
            -pass   => $self->o('password'),
            -dbname => $self->o('dbowner').'_'.$self->o('pipeline_name'),
        },
    };
}
=head2 pipeline_create_commands
Description : Interface method that should return a list of command lines to be run in order to create and set up the pipeline database.
Please see existing PipeConfig modules for examples.
=cut
sub pipeline_create_commands {
    my $self = shift @_;

    # This method runs twice during option processing: in the first pass the
    # pipeline_url still contains the '#:subst' placeholder, so URL parsing
    # and driver-specific branches are skipped until the second pass.
    my $pipeline_url    = $self->pipeline_url();
    my $second_pass     = $pipeline_url!~ /^#:subst/;

    my $parsed_url      = $second_pass && (Bio::EnsEMBL::Hive::Utils::URL::parse( $pipeline_url ) || die "Could not parse the '$pipeline_url' as the database URL");
    my $driver          = $second_pass ? $parsed_url->{'driver'} : '';
    my $hive_force_init = $self->o('hive_force_init');

    # Will insert two keys: "hive_all_base_tables" and "hive_all_views"
    my $hive_tables_sql = 'INSERT INTO hive_meta SELECT CONCAT("hive_all_", REPLACE(LOWER(TABLE_TYPE), " ", "_"), "s"), GROUP_CONCAT(TABLE_NAME) FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = "%s" GROUP BY TABLE_TYPE';

    # Each element is a shell command; empty lists () drop an entry entirely.
    return [
        $hive_force_init ? $self->db_cmd('DROP DATABASE IF EXISTS') : (),
        $self->db_cmd('CREATE DATABASE'),

            # we got table definitions for all drivers:
        $self->db_cmd().' <'.$self->o('hive_root_dir').'/sql/tables.'.$driver,

            # auto-sync'ing triggers are off by default:
        $self->o('hive_use_triggers') ? ( $self->db_cmd().' <'.$self->o('hive_root_dir').'/sql/triggers.'.$driver ) : (),

            # FOREIGN KEY constraints cannot be defined in sqlite separately from table definitions, so they are off there:
        ($driver ne 'sqlite') ? ( $self->db_cmd().' <'.$self->o('hive_root_dir').'/sql/foreign_keys.sql' ) : (),

            # we got procedure definitions for all drivers:
        $self->db_cmd().' <'.$self->o('hive_root_dir').'/sql/procedures.'.$driver,

            # list of all tables and views (MySQL only)
        ($driver eq 'mysql' ? ($self->db_cmd(sprintf($hive_tables_sql, $parsed_url->{'dbname'}))) : ()),

            # when the database was created
        $self->db_cmd(q{INSERT INTO hive_meta (meta_key, meta_value) VALUES ('creation_timestamp', CURRENT_TIMESTAMP)}),
    ];
}
=head2 pipeline_wide_parameters
Description : Interface method that should return a hash of pipeline_wide_parameter_name->pipeline_wide_parameter_value pairs.
The value doesn't have to be a scalar, can be any Perl structure now (will be stringified and de-stringified automagically).
Please see existing PipeConfig modules for examples.
=cut
sub pipeline_wide_parameters {
    my ($self) = @_;

    # The base class defines no pipeline-wide parameters.  Concrete PipeConfigs
    # usually override this and return e.g. { 'variable1' => 'value1', ... };
    # values may be arbitrary Perl structures (stringified automatically).
    return {};
}
=head2 resource_classes
Description : Interface method that should return a hash of resource_description_id->resource_description_hash.
Please see existing PipeConfig modules for examples.
=cut
sub resource_classes {
    my ($self) = @_;

    # Map of resource-class name => per-meadow submission arguments.
    # (The old numeric "-rc_id => { -desc => ... }" declaration style is no
    # longer supported; names such as 'default' and 'urgent' are used instead.)
    my %classes = (
        'default' => { 'LSF' => '' },
        'urgent'  => { 'LSF' => '-q yesterday' },
    );
    return \%classes;
}
=head2 pipeline_analyses
Description : Interface method that should return a list of hashes that define analysis bundled with corresponding jobs, dataflow and analysis_ctrl rules and resource_id.
Please see existing PipeConfig modules for examples.
=cut
sub pipeline_analyses {
    my ($self) = @_;

    # The base class defines no analyses.  Concrete PipeConfigs override this
    # and return a list of hashes bundling each analysis with its jobs,
    # dataflow/control rules and resource-class name.
    my @analyses = ();
    return \@analyses;
}
=head2 beekeeper_extra_cmdline_options
Description : Interface method that should return a string with extra parameters that you want to be passed to beekeeper.pl
=cut
sub beekeeper_extra_cmdline_options {
    my ($self) = @_;

    # No extra beekeeper.pl options by default; subclasses may override this
    # to return e.g. '-sleep 2 -can_respecialize'.
    my $extra_options = '';
    return $extra_options;
}
# ---------------------------------[now comes the interfacing stuff - feel free to call but not to modify]--------------------
sub hive_meta_table {
    my ($self) = @_;

    # Key/value pairs to be stored in the pipeline database's "hive_meta" table
    # at initialization time.
    my %meta = (
        'hive_sql_schema_version'           => Bio::EnsEMBL::Hive::DBSQL::SqlSchemaAdaptor->get_code_sql_schema_version(),
        'hive_pipeline_name'                => $self->o('pipeline_name'),
        'hive_use_param_stack'              => $self->o('hive_use_param_stack'),
        'hive_auto_rebalance_semaphores'    => $self->o('hive_auto_rebalance_semaphores'),
        'hive_default_max_retry_count'      => $self->o('hive_default_max_retry_count'),
    );

    return \%meta;
}
sub pre_options {
    my $self = shift @_;

    # Command-line options that must be parsed BEFORE the main two-pass
    # option-substitution run (Getopt::Long specs mapped to empty defaults).
    my %pre = (
        'help!'         => '',
        'pipeline_url'  => '',
        'pipeline_name' => '',
    );
    return \%pre;
}
=head2 dbconn_2_url
Description : A convenience method used to stringify a connection-parameters hash into a 'pipeline_url' that beekeeper.pl will understand
=cut
sub dbconn_2_url {
    my ($self, $db_conn, $with_db) = @_;

    $with_db //= 1;     # include the database name in the URL unless told otherwise

    # Both options are fetched unconditionally so that the o() substitution
    # machinery registers them regardless of the driver:
    my $driver = $self->o($db_conn, '-driver');
    my $port   = $self->o($db_conn,'-port');

    my $url_prefix;
    if ($driver eq 'sqlite') {
        # SQLite URLs carry no credentials or host information:
        $url_prefix = $driver.':///';
    } else {
        my $credentials = $self->o($db_conn,'-user').':'.$self->o($db_conn,'-pass');
        my $location    = $self->o($db_conn,'-host').($port ? ':'.$port : '');
        $url_prefix     = $driver.'://'.$credentials.'@'.$location.'/';
    }

    return $url_prefix . ($with_db ? $self->o($db_conn,'-dbname') : '');
}
sub pipeline_url {
    my $self = shift @_;

    my $explicit_url = $self->root()->{'pipeline_url'};

    # Fall back to assembling the URL from 'pipeline_db'; calling dbconn_2_url
    # also forces vivification of the whole 'pipeline_db' structure (used in run())
    return $explicit_url || $self->dbconn_2_url('pipeline_db', 1);
}
=head2 db_cmd
Description : Returns a db_cmd.pl-based command line that should execute by any supported driver (mysql/pgsql/sqlite)
=cut
sub db_cmd {
    my ($self, $sql_command, $db_url) = @_;

    $db_url //= $self->pipeline_url();
    my $script_path = $self->o('hive_root_dir').'/scripts/db_cmd.pl';

    my $cmdline = "$script_path -url '$db_url'";
    if ($sql_command) {
        # Escape embedded single quotes so the SQL survives shell quoting:
        (my $escaped_sql = $sql_command) =~ s/'/'\\''/g;
        $cmdline .= " -sql '$escaped_sql'";
    }
    return $cmdline;
}
sub print_debug {
    my ($self, @messages) = @_;

    # Only emit output when the pipeline is being initialized in debug mode:
    print @messages if $self->o('hive_debug_init');
}
sub process_pipeline_name {
    my ($self, $raw_name) = @_;

    # Normalize a pipeline name: CamelCase becomes Camel_Case, whitespace and
    # slashes become underscores, and the whole thing is lower-cased.
    $raw_name =~ s/([[:lower:]])([[:upper:]])/${1}_${2}/g;
    $raw_name =~ s/[\s\/]/_/g;

    return lc $raw_name;
}
sub default_pipeline_name {
    my $self = shift @_;

    my $name = ref($self);  # the full class name of this PipeConfig object
    $name =~ s/^.*:://;     # keep only the last component of the package path
    $name =~ s/_conf$//;    # drop the conventional '_conf' suffix if present

    return $name;
}
=head2 process_options
Description : The method that does all the parameter parsing magic.
It is two-pass through the interface methods: first pass collects the options, second is intelligent substitution.
Caller : init_pipeline.pl or any other script that will drive this module.
Note : You can override parsing the command line bit by providing a hash as the argument to this method.
This hash should contain definitions of all the parameters you would otherwise be providing from the command line.
Useful if you are creating batches of hive pipelines using a script.
=cut
sub process_options {
    my ($self, $include_pcc_use_case) = @_;

    # Entry point of the two-pass option-parsing magic (see POD above).
    # First the "pre" options are pulled off the command line so that
    # pipeline_url is known before any substitution happens.

    # pre-patch definitely_used_options:
    $self->{'_extra_options'} = $self->load_cmdline_options( $self->pre_options() );
    $self->root()->{'pipeline_url'} = $self->{'_extra_options'}{'pipeline_url'};

    # The "use cases" are the interface methods whose o() calls must be
    # collected (pass 1) and substituted (pass 2) by DependentOptions:
    my @use_cases = ( 'pipeline_wide_parameters', 'resource_classes', 'pipeline_analyses', 'beekeeper_extra_cmdline_options', 'hive_meta_table', 'print_debug' );
    if($include_pcc_use_case) {
        unshift @use_cases, 'overridable_pipeline_create_commands';
        push @use_cases, 'useful_commands_legend';
    }
    $self->use_cases( \@use_cases );

    $self->SUPER::process_options();    # performs the actual two-pass resolution

    # post-processing: normalize the pipeline name (and the dbname derived
    # from it, when present):
    $self->root()->{'pipeline_name'} = $self->process_pipeline_name( $self->root()->{'pipeline_name'} );
    $self->root()->{'pipeline_db'}{'-dbname'} &&= $self->process_pipeline_name( $self->root()->{'pipeline_db'}{'-dbname'} );    # may be used to construct $self->pipeline_url()
}
sub overridable_pipeline_create_commands {
    my $self = shift @_;

    # Always evaluate pipeline_create_commands() first (its o() calls take part
    # in option substitution), but suppress the result in "no init" mode:
    my $create_commands = $self->pipeline_create_commands();

    return $self->o('hive_no_init') ? [] : $create_commands;
}
sub is_analysis_topup {
    my ($self) = @_;

    # "Top-up" mode: the database already exists and we are only adding
    # new analyses to it (pipeline_create_commands are skipped).
    return $self->o('hive_no_init');
}
sub run_pipeline_create_commands {
    my $self = shift @_;

    # Execute each database-creation command in order, dying on the first failure.
    foreach my $cmd (@{$self->overridable_pipeline_create_commands}) {

        # Commands may be given either as a plain string or as an arrayref of
        # arguments; join_command_args() normalizes both into one shell string.
        # Its first return value (a flag saying whether the input was an array)
        # is not needed here, so it is discarded via an undef slot.
        (undef, $cmd) = join_command_args($cmd);

        $self->print_debug( "$cmd\n" );
        if(my $retval = system($cmd)) {     # system() returns 0 on success
            die "Return value = $retval, possibly an error running $cmd\n";
        }
    }
    $self->print_debug( "\n" );
}
=head2 add_objects_from_config
Description : The method that uses the Hive/EnsEMBL API to actually create all the analyses, jobs, dataflow and control rules and resource descriptions.
Caller : init_pipeline.pl or any other script that will drive this module.
=cut
sub add_objects_from_config {
    my $self     = shift @_;
    my $pipeline = shift @_;    # a Bio::EnsEMBL::Hive::HivePipeline to populate

    # Populates the pipeline object with everything the PipeConfig declares, in
    # dependency order: (1) hive_meta entries, (2) pipeline-wide parameters,
    # (3) resource classes/descriptions, (4) analyses + their seed jobs,
    # (5) control and dataflow rules, then (6) initial blocking of analyses.

    $self->print_debug( "Adding hive_meta table entries ...\n" );
    my $new_meta_entries = $self->hive_meta_table();
    while( my ($meta_key, $meta_value) = each %$new_meta_entries ) {
        $pipeline->add_new_or_update( 'MetaParameters', $self->o('hive_debug_init'),
            'meta_key'      => $meta_key,
            'meta_value'    => $meta_value,
        );
    }
    $self->print_debug( "Done.\n\n" );

    $self->print_debug( "Adding pipeline-wide parameters ...\n" );
    my $new_pwp_entries = $self->pipeline_wide_parameters();
    while( my ($param_name, $param_value) = each %$new_pwp_entries ) {
        $pipeline->add_new_or_update( 'PipelineWideParameters', $self->o('hive_debug_init'),
            'param_name'    => $param_name,
            'param_value'   => stringify($param_value),     # arbitrary structures are stored as strings
        );
    }
    $self->print_debug( "Done.\n\n" );

    $self->print_debug( "Adding Resources ...\n" );
    my $resource_classes_hash = $self->resource_classes;
    unless( exists $resource_classes_hash->{'default'} ) {
        warn "\tNB:'default' resource class is not in the database (did you forget to inherit from SUPER::resource_classes ?) - creating it for you\n";
        $resource_classes_hash->{'default'} = {};
    }
    my @resource_classes_order = sort { ($b eq 'default') or -($a eq 'default') or ($a cmp $b) } keys %$resource_classes_hash;   # put 'default' to the front
    my %cached_resource_classes = map {$_->name => $_} $pipeline->collection_of('ResourceClass')->list();
    foreach my $rc_name (@resource_classes_order) {
        if($rc_name=~/^\d+$/) {
            die "-rc_id syntax is no longer supported, please use the new resource notation (-rc_name)";
        }

        my ($resource_class) = $pipeline->add_new_or_update( 'ResourceClass',   # NB: add_new_or_update returns a list
            'name'  => $rc_name,
        );
        $cached_resource_classes{$rc_name} = $resource_class;

        # Each meadow (LSF, LOCAL, ...) may carry either a scalar (submission
        # args only) or a 2-element array [submission args, worker args]:
        while( my($meadow_type, $resource_param_list) = each %{ $resource_classes_hash->{$rc_name} } ) {
            $resource_param_list = [ $resource_param_list ] unless(ref($resource_param_list));   # expecting either a scalar or a 2-element array

            my ($resource_description) = $pipeline->add_new_or_update( 'ResourceDescription', $self->o('hive_debug_init'),   # NB: add_new_or_update returns a list
                'resource_class'        => $resource_class,
                'meadow_type'           => $meadow_type,
                'submission_cmd_args'   => $resource_param_list->[0],
                'worker_cmd_args'       => $resource_param_list->[1],
            );
        }
    }
    $self->print_debug( "Done.\n\n" );

    my $amh = Bio::EnsEMBL::Hive::Valley->new()->available_meadow_hash();   # used only to warn about unregistered meadows

    my %seen_logic_name = ();
    my %analyses_by_logic_name = map {$_->logic_name => $_} $pipeline->collection_of('Analysis')->list();

    $self->print_debug( "Adding Analyses ...\n" );
    foreach my $aha (@{$self->pipeline_analyses}) {
        my %aha_copy = %$aha;
        my ($logic_name, $module, $parameters_hash, $comment, $tags, $input_ids, $blocked, $batch_size, $hive_capacity, $failed_job_tolerance,
                $max_retry_count, $can_be_empty, $rc_id, $rc_name, $priority, $meadow_type, $analysis_capacity, $language, $wait_for, $flow_into)
            = delete @aha_copy{qw(-logic_name -module -parameters -comment -tags -input_ids -blocked -batch_size -hive_capacity -failed_job_tolerance
                    -max_retry_count -can_be_empty -rc_id -rc_name -priority -meadow_type -analysis_capacity -language -wait_for -flow_into)};   # slicing a hash reference

        # Anything left in %aha_copy after the delete-slice is a typo or an
        # unsupported attribute - fail loudly:
        my @unparsed_attribs = keys %aha_copy;
        if(@unparsed_attribs) {
            die "Could not parse the following analysis attributes: ".join(', ',@unparsed_attribs);
        }

        # Validate the logic_name (mandatory, no reserved characters, non-numeric):
        if( not $logic_name ) {
            die "'-logic_name' must be defined in every analysis";
        } elsif( $logic_name =~ /[+\-\%\.,]/ ) {
            die "Characters + - % . , are no longer allowed to be a part of an Analysis name. Please rename Analysis '$logic_name' and try again.\n";
        } elsif( looks_like_number($logic_name) ) {
            die "Numeric Analysis names are not allowed because they may clash with dbIDs. Please rename Analysis '$logic_name' and try again.\n";
        }

        if($seen_logic_name{$logic_name}++) {
            die "an entry with -logic_name '$logic_name' appears at least twice in the same configuration file, probably a typo";
        }

        if($rc_id) {
            die "(-rc_id => $rc_id) syntax is deprecated, please use (-rc_name => 'your_resource_class_name')";
        }

        my $analysis = $analyses_by_logic_name{$logic_name};    # the analysis with this logic_name may have already been stored in the db
        my $stats;
        if( $analysis ) {
            warn "Skipping creation of already existing analysis '$logic_name'.\n";
            next;
        } else {
            $rc_name ||= 'default';
            my $resource_class = $cached_resource_classes{$rc_name}
                or die "Could not find local resource with name '$rc_name', please check that resource_classes() method of your PipeConfig either contains or inherits it from the parent class";

            if ($meadow_type and not exists $amh->{$meadow_type}) {
                warn "The meadow '$meadow_type' is currently not registered (analysis '$logic_name')\n";
            }

            $parameters_hash ||= {};    # in case nothing was given
            die "'-parameters' has to be a hash" unless(ref($parameters_hash) eq 'HASH');

            ($analysis) = $pipeline->add_new_or_update( 'Analysis', $self->o('hive_debug_init'),   # NB: add_new_or_update returns a list
                'logic_name'            => $logic_name,
                'module'                => $module,
                'language'              => $language,
                'parameters'            => $parameters_hash,
                'comment'               => $comment,
                'tags'                  => ( (ref($tags) eq 'ARRAY') ? join(',', @$tags) : $tags ),
                'resource_class'        => $resource_class,
                'failed_job_tolerance'  => $failed_job_tolerance,
                'max_retry_count'       => $max_retry_count,
                'can_be_empty'          => $can_be_empty,
                'priority'              => $priority,
                'meadow_type'           => $meadow_type,
                'analysis_capacity'     => $analysis_capacity,
                'hive_capacity'         => $hive_capacity,
                'batch_size'            => $batch_size,
            );
            $analysis->get_compiled_module_name();  # check if it compiles and is named correctly

            ($stats) = $pipeline->add_new_or_update( 'AnalysisStats', $self->o('hive_debug_init'),   # NB: add_new_or_update returns a list
                'analysis'              => $analysis,
                'status'                => $blocked ? 'BLOCKED' : 'EMPTY',  # be careful, as this "soft" way of blocking may be accidentally unblocked by deep sync
                'total_job_count'       => 0,
                'semaphored_job_count'  => 0,
                'ready_job_count'       => 0,
                'done_job_count'        => 0,
                'failed_job_count'      => 0,
                'num_running_workers'   => 0,
                'sync_lock'             => 0,
            );
        }

        # Keep a link to the analysis object to speed up the creation of control and dataflow rules
        $analyses_by_logic_name{$logic_name} = $analysis;

        # now create the corresponding jobs (if there are any):
        if($input_ids) {
            push @{ $analysis->jobs_collection }, map { Bio::EnsEMBL::Hive::AnalysisJob->new(
                'prev_job'      => undef,           # these jobs are created by the initialization script, not by another job
                'analysis'      => $analysis,
                'input_id'      => $_,              # input_ids are now centrally stringified in the AnalysisJob itself
            ) } @$input_ids;

            unless( $pipeline->hive_use_triggers() ) {
                $stats->recalculate_from_job_counts( { 'READY' => scalar(@$input_ids) } );
            }
        }
    }
    $self->print_debug( "Done.\n\n" );

    # Rules are added in a second loop so that forward references between
    # analyses (wait_for/flow_into naming a later analysis) resolve correctly:
    $self->print_debug( "Adding Control and Dataflow Rules ...\n" );
    foreach my $aha (@{$self->pipeline_analyses}) {
        my ($logic_name, $wait_for, $flow_into)
            = @{$aha}{qw(-logic_name -wait_for -flow_into)};   # slicing a hash reference

        my $analysis = $analyses_by_logic_name{$logic_name};

        if($wait_for) {
            Bio::EnsEMBL::Hive::Utils::PCL::parse_wait_for($pipeline, $analysis, $wait_for, $self->o('hive_debug_init'));
        }

        if($flow_into) {
            Bio::EnsEMBL::Hive::Utils::PCL::parse_flow_into($pipeline, $analysis, $flow_into, $self->o('hive_debug_init'));
        }
    }
    $self->print_debug( "Done.\n\n" );

    # Block the analyses that should be blocked
    $self->print_debug( "Blocking the analyses that should be ...\n" );
    foreach my $stats ($pipeline->collection_of('AnalysisStats')->list()) {
        $stats->check_blocking_control_rules('no_die');
        $stats->determine_status();
    }
    $self->print_debug( "Done.\n\n" );
}
sub useful_commands_legend {
    my $self = shift @_;

    # Returns a multi-line cheat-sheet string that init_pipeline.pl prints
    # after successful initialization, with the concrete pipeline URL, name and
    # extra beekeeper options already substituted in.

    my $pipeline_url = $self->pipeline_url();
    # Wrap the URL in double quotes for copy-pasting, unless already quoted:
    unless ($pipeline_url =~ /^[\'\"]/) {
        $pipeline_url = '"' . $pipeline_url . '"';
    }
    my $pipeline_name = $self->o('pipeline_name');
    my $extra_cmdline = $self->beekeeper_extra_cmdline_options();

    my @output_lines = (
        '','',
        '# ' . '-' x 22 . '[Useful commands]' . '-' x 22,
        '',
        " # It is convenient to store the pipeline url in a variable:",
        "\texport EHIVE_URL=$pipeline_url\t\t\t# bash version",
        "(OR)",
        "\tsetenv EHIVE_URL $pipeline_url\t\t\t# [t]csh version",
        '',
        " # Add a new job to the pipeline (usually done once before running, but pipeline can be \"topped-up\" at any time) :",
        "\tseed_pipeline.pl -url $pipeline_url -logic_name <analysis_name> -input_id <param_hash>",
        '',
        " # At any moment during or after execution you can request a pipeline diagram in an image file (desired format is set via extension) :",
        "\tgenerate_graph.pl -url $pipeline_url -out $pipeline_name.png",
        '',
        " # Synchronize the Hive (to display fresh statistics about all analyses):",
        "\tbeekeeper.pl -url $pipeline_url -sync",
        '',
        " # Depending on the Meadow the pipeline is running on, you may be able to collect actual resource usage statistics :",
        "\tload_resource_usage.pl -url $pipeline_url",
        '',
        " # After having run load_resource_usage.pl, you can request a resource usage timeline in an image file (desired format is set via extension) :",
        "\tgenerate_timeline.pl -url $pipeline_url -out timeline_$pipeline_name.png",
        '',
        " # Peek into your pipeline database with a database client (useful to have open while the pipeline is running) :",
        "\tdb_cmd.pl -url $pipeline_url",
        '',
        " # Run the pipeline (can be interrupted and restarted) :",
        "\tbeekeeper.pl -url $pipeline_url $extra_cmdline -loop\t\t# run in looped automatic mode (a scheduling step performed every minute)",
        "(OR)",
        "\tbeekeeper.pl -url $pipeline_url $extra_cmdline -run \t\t# run one scheduling step of the pipeline and exit (useful for debugging/learning)",
        "(OR)",
        "\trunWorker.pl -url $pipeline_url $extra_cmdline \t\t# run exactly one Worker locally (useful for debugging/learning)",
        '',
    );

    return join("\n", @output_lines);
}
1;
| Ensembl/ensembl-hive | modules/Bio/EnsEMBL/Hive/PipeConfig/HiveGeneric_conf.pm | Perl | apache-2.0 | 28,273 |
#!/usr/bin/env perl

package Lego::Part::Transfer::Example;

use base qw(Lego::Part::Transfer);
use strict;
use warnings;

# Toy transfer class: resolves a known (color, design_id) pair to its element id.
sub design2element {
    my ($self, $part) = @_;

    $self->_check_part($part);

    # The only mapping this example knows about is the red design '3002':
    if ($part->color eq 'red' && $part->design_id eq '3002') {
        $part->element_id('300221');
    }

    return;
}

package main;

use strict;
use warnings;

use Lego::Part;
use Lego::Part::Action;

# The part whose element id we want to resolve:
my $brick = Lego::Part->new(
    'color'     => 'red',
    'design_id' => '3002',
);

# Action object that drives transfer classes:
my $action = Lego::Part::Action->new;

# Our example transfer class from above:
my $transfer = Lego::Part::Transfer::Example->new;

# Resolve and store the element id on the part:
$action->load_element_id($transfer, $brick);

# Show the result:
print 'Color: '.$brick->color."\n";
print 'Design ID: '.$brick->design_id."\n";
print 'Element ID: '.$brick->element_id."\n";

# Output:
# Color: red
# Design ID: 3002
# Element ID: 300221
=head1 NAME
GridmapNavSimul - GUI robot simulation within a gridmap
=head1 SYNOPSIS
GridmapNavSimul
=head1 DESCRIPTION
A GUI application for simulation of robot motion within a simulated environment defined by an
occupancy grid map. The program simulates noisy odometry and laser measurements and generates
a rawlog which can then be used as the input of SLAM algorithms. A ground truth file is also
generated.
The robot is controlled with a joystick or the cursor arrow keys.
=head1 BUGS
Please report bugs at https://github.com/MRPT/mrpt/issues
=head1 SEE ALSO
The application wiki page at https://www.mrpt.org/Applications
=head1 AUTHORS
B<GridmapNavSimul> is part of the Mobile Robot Programming Toolkit (MRPT), and was originally
written by the MAPIR laboratory (University of Malaga).
This manual page was written by Jose Luis Blanco <joseluisblancoc@gmail.com>.
=head1 COPYRIGHT
This program is free software; you can redistribute it and/or modify it
under the terms of the BSD License.
On Debian GNU/Linux systems, the complete text of the BSD License can be
found in `/usr/share/common-licenses/BSD'.
=cut
| MRPT/mrpt | doc/man-pages/pod/GridmapNavSimul.pod | Perl | bsd-3-clause | 1,138 |
#!perl
# Copyright 2018 Jeffrey Kegler
# This file is part of Marpa::R2.  Marpa::R2 is free software: you can
# redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# Marpa::R2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser
# General Public License along with Marpa::R2.  If not, see
# http://www.gnu.org/licenses/.

# Print every Perl source file (.pl/.pm/.t) listed in ../MANIFEST, skipping
# files in the %exclude set.  Used to build the file list for perlcritic runs.

use 5.010001;
use strict;
use warnings;
use English qw( -no_match_vars );
use Fatal qw( open close );

my %exclude = map { ( $_, 1 ) } qw();

# NOTE: 'open' is fatalized above, so a failure dies on its own; the explicit
# message replaces the original call to Marpa::R2::exception(), which was
# never loaded here and would itself have died with "Undefined subroutine".
open my $manifest, '<', '../MANIFEST'
    or die "open of ../MANIFEST failed: $ERRNO";

FILE: while ( my $file = <$manifest> ) {
    chomp $file;
    $file =~ s/\s*[#].*\z//xms;    # strip trailing comments
    next FILE if $exclude{$file};
    my ($ext) = $file =~ / [.] ([^.]+) \z /xms;

    # Skip files without an extension.  (The original used the now-removed
    # given/when construct, and its "when (undef)" branch was tested against
    # lc($ext) - which is never undef after lc() - so it could never match.)
    next FILE if !defined $ext;

    my $lc_ext = lc $ext;
    if ( $lc_ext eq 'pl' or $lc_ext eq 'pm' or $lc_ext eq 't' ) {
        say $file or die "Cannot say: $ERRNO";
    }
} ## end while ( my $file = <$manifest> )

close $manifest;
###########################################################################
#
# This file is partially auto-generated by the DateTime::Locale generator
# tools (v0.10). This code generator comes with the DateTime::Locale
# distribution in the tools/ directory, and is called generate-modules.
#
# This file was generated from the CLDR JSON locale data. See the LICENSE.cldr
# file included in this distribution for license details.
#
# Do not edit this file directly unless you are sure the part you are editing
# is not created by the generator.
#
###########################################################################
=pod
=encoding UTF-8
=head1 NAME
DateTime::Locale::hr_BA - Locale data examples for the hr-BA locale.
=head1 DESCRIPTION
This pod file contains examples of the locale data available for the
Croatian Bosnia & Herzegovina locale.
=head2 Days
=head3 Wide (format)
ponedjeljak
utorak
srijeda
četvrtak
petak
subota
nedjelja
=head3 Abbreviated (format)
pon
uto
sri
čet
pet
sub
ned
=head3 Narrow (format)
P
U
S
Č
P
S
N
=head3 Wide (stand-alone)
ponedjeljak
utorak
srijeda
četvrtak
petak
subota
nedjelja
=head3 Abbreviated (stand-alone)
pon
uto
sri
čet
pet
sub
ned
=head3 Narrow (stand-alone)
p
u
s
č
p
s
n
=head2 Months
=head3 Wide (format)
siječnja
veljače
ožujka
travnja
svibnja
lipnja
srpnja
kolovoza
rujna
listopada
studenoga
prosinca
=head3 Abbreviated (format)
sij
velj
ožu
tra
svi
lip
srp
kol
ruj
lis
stu
pro
=head3 Narrow (format)
1.
2.
3.
4.
5.
6.
7.
8.
9.
10.
11.
12.
=head3 Wide (stand-alone)
siječanj
veljača
ožujak
travanj
svibanj
lipanj
srpanj
kolovoz
rujan
listopad
studeni
prosinac
=head3 Abbreviated (stand-alone)
sij
velj
ožu
tra
svi
lip
srp
kol
ruj
lis
stu
pro
=head3 Narrow (stand-alone)
1.
2.
3.
4.
5.
6.
7.
8.
9.
10.
11.
12.
=head2 Quarters
=head3 Wide (format)
1. kvartal
2. kvartal
3. kvartal
4. kvartal
=head3 Abbreviated (format)
1kv
2kv
3kv
4kv
=head3 Narrow (format)
1.
2.
3.
4.
=head3 Wide (stand-alone)
1. kvartal
2. kvartal
3. kvartal
4. kvartal
=head3 Abbreviated (stand-alone)
1kv
2kv
3kv
4kv
=head3 Narrow (stand-alone)
1.
2.
3.
4.
=head2 Eras
=head3 Wide (format)
prije Krista
poslije Krista
=head3 Abbreviated (format)
pr. Kr.
p. Kr.
=head3 Narrow (format)
pr.n.e.
AD
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = utorak, 5. veljače 2008.
1995-12-22T09:05:02 = petak, 22. prosinca 1995.
-0010-09-15T04:44:23 = subota, 15. rujna -10.
=head3 Long
2008-02-05T18:30:30 = 5. veljače 2008.
1995-12-22T09:05:02 = 22. prosinca 1995.
-0010-09-15T04:44:23 = 15. rujna -10.
=head3 Medium
2008-02-05T18:30:30 = 5. velj 2008.
1995-12-22T09:05:02 = 22. pro 1995.
-0010-09-15T04:44:23 = 15. ruj -10.
=head3 Short
2008-02-05T18:30:30 = 05.02.2008.
1995-12-22T09:05:02 = 22.12.1995.
-0010-09-15T04:44:23 = 15.09.-10.
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Short
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = utorak, 5. veljače 2008. u 18:30:30 UTC
1995-12-22T09:05:02 = petak, 22. prosinca 1995. u 09:05:02 UTC
-0010-09-15T04:44:23 = subota, 15. rujna -10. u 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 5. veljače 2008. u 18:30:30 UTC
1995-12-22T09:05:02 = 22. prosinca 1995. u 09:05:02 UTC
-0010-09-15T04:44:23 = 15. rujna -10. u 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 5. velj 2008. 18:30:30
1995-12-22T09:05:02 = 22. pro 1995. 09:05:02
-0010-09-15T04:44:23 = 15. ruj -10. 04:44:23
=head3 Short
2008-02-05T18:30:30 = 05.02.2008. 18:30
1995-12-22T09:05:02 = 22.12.1995. 09:05
-0010-09-15T04:44:23 = 15.09.-10. 04:44
=head2 Available Formats
=head3 E (ccc)
2008-02-05T18:30:30 = uto
1995-12-22T09:05:02 = pet
-0010-09-15T04:44:23 = sub
=head3 EHm (E HH:mm)
2008-02-05T18:30:30 = uto 18:30
1995-12-22T09:05:02 = pet 09:05
-0010-09-15T04:44:23 = sub 04:44
=head3 EHms (E HH:mm:ss)
2008-02-05T18:30:30 = uto 18:30:30
1995-12-22T09:05:02 = pet 09:05:02
-0010-09-15T04:44:23 = sub 04:44:23
=head3 Ed (E, d.)
2008-02-05T18:30:30 = uto, 5.
1995-12-22T09:05:02 = pet, 22.
-0010-09-15T04:44:23 = sub, 15.
=head3 Ehm (E h:mm a)
2008-02-05T18:30:30 = uto 6:30 PM
1995-12-22T09:05:02 = pet 9:05 AM
-0010-09-15T04:44:23 = sub 4:44 AM
=head3 Ehms (E h:mm:ss a)
2008-02-05T18:30:30 = uto 6:30:30 PM
1995-12-22T09:05:02 = pet 9:05:02 AM
-0010-09-15T04:44:23 = sub 4:44:23 AM
=head3 Gy (y. G)
2008-02-05T18:30:30 = 2008. p. Kr.
1995-12-22T09:05:02 = 1995. p. Kr.
-0010-09-15T04:44:23 = -10. pr. Kr.
=head3 GyMMM (LLL y. G)
2008-02-05T18:30:30 = velj 2008. p. Kr.
1995-12-22T09:05:02 = pro 1995. p. Kr.
-0010-09-15T04:44:23 = ruj -10. pr. Kr.
=head3 GyMMMEd (E, d. MMM y. G)
2008-02-05T18:30:30 = uto, 5. velj 2008. p. Kr.
1995-12-22T09:05:02 = pet, 22. pro 1995. p. Kr.
-0010-09-15T04:44:23 = sub, 15. ruj -10. pr. Kr.
=head3 GyMMMd (d. MMM y. G)
2008-02-05T18:30:30 = 5. velj 2008. p. Kr.
1995-12-22T09:05:02 = 22. pro 1995. p. Kr.
-0010-09-15T04:44:23 = 15. ruj -10. pr. Kr.
=head3 H (HH)
2008-02-05T18:30:30 = 18
1995-12-22T09:05:02 = 09
-0010-09-15T04:44:23 = 04
=head3 Hm (HH:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head3 Hms (HH:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Hmsv (HH:mm:ss v)
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Hmv (HH:mm v)
2008-02-05T18:30:30 = 18:30 UTC
1995-12-22T09:05:02 = 09:05 UTC
-0010-09-15T04:44:23 = 04:44 UTC
=head3 M (L.)
2008-02-05T18:30:30 = 2.
1995-12-22T09:05:02 = 12.
-0010-09-15T04:44:23 = 9.
=head3 MEd (E, dd.MM.)
2008-02-05T18:30:30 = uto, 05.02.
1995-12-22T09:05:02 = pet, 22.12.
-0010-09-15T04:44:23 = sub, 15.09.
=head3 MMM (LLL)
2008-02-05T18:30:30 = velj
1995-12-22T09:05:02 = pro
-0010-09-15T04:44:23 = ruj
=head3 MMMEd (E, d. MMM)
2008-02-05T18:30:30 = uto, 5. velj
1995-12-22T09:05:02 = pet, 22. pro
-0010-09-15T04:44:23 = sub, 15. ruj
=head3 MMMMEd (E, d. MMMM)
2008-02-05T18:30:30 = uto, 5. veljače
1995-12-22T09:05:02 = pet, 22. prosinca
-0010-09-15T04:44:23 = sub, 15. rujna
=head3 MMMMd (d. MMMM)
2008-02-05T18:30:30 = 5. veljače
1995-12-22T09:05:02 = 22. prosinca
-0010-09-15T04:44:23 = 15. rujna
=head3 MMMd (d. MMM)
2008-02-05T18:30:30 = 5. velj
1995-12-22T09:05:02 = 22. pro
-0010-09-15T04:44:23 = 15. ruj
=head3 MMdd (dd. MM.)
2008-02-05T18:30:30 = 05. 02.
1995-12-22T09:05:02 = 22. 12.
-0010-09-15T04:44:23 = 15. 09.
=head3 Md (dd.MM.)
2008-02-05T18:30:30 = 05.02.
1995-12-22T09:05:02 = 22.12.
-0010-09-15T04:44:23 = 15.09.
=head3 d (d.)
2008-02-05T18:30:30 = 5.
1995-12-22T09:05:02 = 22.
-0010-09-15T04:44:23 = 15.
=head3 h (h a)
2008-02-05T18:30:30 = 6 PM
1995-12-22T09:05:02 = 9 AM
-0010-09-15T04:44:23 = 4 AM
=head3 hm (hh:mm a)
2008-02-05T18:30:30 = 06:30 PM
1995-12-22T09:05:02 = 09:05 AM
-0010-09-15T04:44:23 = 04:44 AM
=head3 hms (hh:mm:ss a)
2008-02-05T18:30:30 = 06:30:30 PM
1995-12-22T09:05:02 = 09:05:02 AM
-0010-09-15T04:44:23 = 04:44:23 AM
=head3 hmsv (h:mm:ss a v)
2008-02-05T18:30:30 = 6:30:30 PM UTC
1995-12-22T09:05:02 = 9:05:02 AM UTC
-0010-09-15T04:44:23 = 4:44:23 AM UTC
=head3 hmv (h:mm a v)
2008-02-05T18:30:30 = 6:30 PM UTC
1995-12-22T09:05:02 = 9:05 AM UTC
-0010-09-15T04:44:23 = 4:44 AM UTC
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y.)
2008-02-05T18:30:30 = 2008.
1995-12-22T09:05:02 = 1995.
-0010-09-15T04:44:23 = -10.
=head3 yM (MM.y.)
2008-02-05T18:30:30 = 02.2008.
1995-12-22T09:05:02 = 12.1995.
-0010-09-15T04:44:23 = 09.-10.
=head3 yMEd (E, dd.MM.y.)
2008-02-05T18:30:30 = uto, 05.02.2008.
1995-12-22T09:05:02 = pet, 22.12.1995.
-0010-09-15T04:44:23 = sub, 15.09.-10.
=head3 yMM (MM. y.)
2008-02-05T18:30:30 = 02. 2008.
1995-12-22T09:05:02 = 12. 1995.
-0010-09-15T04:44:23 = 09. -10.
=head3 yMMM (LLL y.)
2008-02-05T18:30:30 = velj 2008.
1995-12-22T09:05:02 = pro 1995.
-0010-09-15T04:44:23 = ruj -10.
=head3 yMMMEd (E, d. MMM y.)
2008-02-05T18:30:30 = uto, 5. velj 2008.
1995-12-22T09:05:02 = pet, 22. pro 1995.
-0010-09-15T04:44:23 = sub, 15. ruj -10.
=head3 yMMMM (LLLL y.)
2008-02-05T18:30:30 = veljača 2008.
1995-12-22T09:05:02 = prosinac 1995.
-0010-09-15T04:44:23 = rujan -10.
=head3 yMMMd (d. MMM y.)
2008-02-05T18:30:30 = 5. velj 2008.
1995-12-22T09:05:02 = 22. pro 1995.
-0010-09-15T04:44:23 = 15. ruj -10.
=head3 yMd (dd.MM.y.)
2008-02-05T18:30:30 = 05.02.2008.
1995-12-22T09:05:02 = 22.12.1995.
-0010-09-15T04:44:23 = 15.09.-10.
=head3 yQQQ (QQQ y.)
2008-02-05T18:30:30 = 1kv 2008.
1995-12-22T09:05:02 = 4kv 1995.
-0010-09-15T04:44:23 = 3kv -10.
=head3 yQQQQ (QQQQ y.)
2008-02-05T18:30:30 = 1. kvartal 2008.
1995-12-22T09:05:02 = 4. kvartal 1995.
-0010-09-15T04:44:23 = 3. kvartal -10.
=head2 Miscellaneous
=head3 Prefers 24 hour time?
Yes
=head3 Local first day of the week
1 (ponedjeljak)
=head1 SUPPORT
See L<DateTime::Locale>.
=cut
| jkb78/extrajnm | local/lib/perl5/DateTime/Locale/hr_BA.pod | Perl | mit | 10,097 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Compara::RunnableDB::MercatorPecan::Pecan
=cut
=head1 SYNOPSIS
=cut
=head1 DESCRIPTION
This module acts as a layer between the Hive system and the Bio::EnsEMBL::Analysis::Runnable::Pecan
module since the ensembl-analysis API does not know about ensembl-compara.
Pecan wants the files to be provided in the same order as in the tree string. This module starts
by getting all the DnaFragRegions of the SyntenyRegion and then uses them to edit the tree (some
nodes must be removed and other ones must be duplicated in order to cope with deletions and
duplications). The _build_tree_string method numbers the sequences in order and changes the
order of the dnafrag_regions array accordingly. Last, the _dump_fasta() method dumps the sequences
according to the tree_string order.
Supported keys:
'synteny_region_id' => <number>
The region to be aligned by Pecan, defined as a SyntenyRegion in the database. Obligatory
'mlss_id' => <number>
The MethodLinkSpeciesSet for the resulting Pecan alignment. Obligatory
'tree_file' => <newick_tree>
The path to the file containing the species tree in NEWICK format. Leaves names should be the genome_db_ids
'java_options' => <options>
Options used to run Java, ie: '-server -Xmx1000M'
'exonerate' => <path>
Path to exonerate
'max_block_size' => <number>
Split blocks longer than this size
'trim' => <string> (testing)
Option to use only part of the SyntenyRegion. For instance, trim=>{from_905394=>125100925,from_2046355=>126902742,to_1045566=>139208434}
will use the region for DnaFrag 905394 from position 125100925 only,
the region for DnaFrag 2046355 from position 126902742 only and
the region for DnaFrag 1045566 to position 139208434 only
'do_transactions' => <0|1>
Use transactions. Default is yes.
=head1 APPENDIX
The rest of the documentation details each of the object methods.
Internal methods are usually preceded with a _
=cut
package Bio::EnsEMBL::Compara::RunnableDB::MercatorPecan::Pecan;
use strict;
use Bio::EnsEMBL::Utils::Exception qw(throw);
use Bio::EnsEMBL::Utils::SqlHelper;
use Bio::EnsEMBL::Analysis::Runnable::Pecan;
use Bio::EnsEMBL::Analysis::Runnable::Ortheus;
use Bio::EnsEMBL::Compara::DnaFragRegion;
use Bio::EnsEMBL::Compara::Graph::NewickParser;
use Bio::EnsEMBL::Compara::NestedSet;
use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');
sub param_defaults {
    # Default parameter values for this runnable.  Entries marked "local"
    # are placeholders that get populated while the job executes.
    my %defaults = (
        'do_transactions'     => 1,     # wrap database writes in a transaction by default
        'trim'                => undef, # optional hard-trim specification (testing only)
        'species_order'       => undef, # local
        'species_tree'        => undef, # local
        'tree_file'           => undef, # local
        'species_tree_string' => undef, # local
        'species_tree_file'   => undef, # local
        'fasta_files'         => undef, # local
    );
    return \%defaults;
}
=head2 fetch_input
Title : fetch_input
Usage : $self->fetch_input
Function: Fetches input data for repeatmasker from the database
Returns : none
Args : none
=cut
# Fetch everything the Pecan run needs: the DnaFragRegions of the requested
# SyntenyRegion, the species tree (re-sorted to match them), and the FASTA
# dumps of their sequences.  If a leaf maps to more than one region (a
# duplication), the species tree cannot be used as-is and Ortheus is invoked
# to estimate a tree instead.
sub fetch_input {
    my( $self) = @_;
    #set default to 0. Run Ortheus to create the tree if a duplication is found
    $self->param('found_a_duplication', 0);
    #Check that mlss_id has been defined
    $self->param_required('mlss_id');
    ## Store DnaFragRegions corresponding to the SyntenyRegion in $self->dnafrag_regions(). At this point the
    ## DnaFragRegions are in random order
    $self->_load_DnaFragRegions($self->param('synteny_region_id'));
    if ($self->param('dnafrag_regions')) {
        ## Get the tree string by taking into account duplications and deletions. Resort dnafrag_regions
        ## in order to match the name of the sequences in the tree string (seq1, seq2...)
        if ($self->get_species_tree and $self->param('dnafrag_regions')) {
            $self->_build_tree_string;
        }
        ## Dumps fasta files for the DnaFragRegions. Fasta files order must match the entries in the
        ## newick tree. The order of the files will match the order of sequences in the tree_string.
        $self->_dump_fasta;
        #if have duplications, run Ortheus.py with -y option to create a tree
        if ($self->param('found_a_duplication')) {
            $self->_run_ortheus();
        }
    } else {
        throw("Cannot start Pecan job because some information is missing");
    }
    return 1;
}
# Run the Pecan aligner on the FASTA files prepared by fetch_input().
# On an out-of-memory style failure the 'more_heap' flag is set so that
# write_output() can re-dispatch the job to an analysis with more memory;
# any other failure is re-thrown.
sub run
{
    my $self = shift;
    my $fake_analysis = Bio::EnsEMBL::Analysis->new;
    #Check whether can see exonerate to try to prevent errors in java where the autoloader doesn't seem to always work
    $self->require_executable('exonerate');
    # Pecan can run for a long time; drop the DB connection meanwhile.
    $self->compara_dba->dbc->disconnect_when_inactive(1);
    my $runnable = new Bio::EnsEMBL::Analysis::Runnable::Pecan(
        -workdir => $self->worker_temp_directory,
        -fasta_files => $self->param('fasta_files'),
        -tree_string => $self->param('pecan_tree_string'),
        -analysis => $fake_analysis,
        -parameters => $self->param('java_options'),
        -exonerate => $self->param('exonerate'),
        -jar_file => $self->param('jar_file'),
    );
    $self->param('runnable', $runnable);
    $self->param('more_heap', 0);
    # NOTE: the trailing "1;" guarantees the eval returns true unless
    # run_analysis() dies.  Without it, a run_analysis() that merely returned
    # a false value would be misreported as "Pecan execution failed" with an
    # empty $@.
    eval {
        $runnable->run_analysis;
        1;
    } or do {
        if ($@ =~ /Java heap space/ ||
            $@ =~ /GC overhead limit exceeded/ ||
            $@ =~ /Cannot allocate memory/ ||
            $@ =~ /OutOfMemoryError/ ) {
            print "Failed due to insufficient heap space or memory\n";
            $self->param('more_heap', 1);
        } else {
            throw("Pecan execution failed $@\n");
        }
    };
    $self->compara_dba->dbc->disconnect_when_inactive(0);
}
# Store the alignment results, or, if the run failed for lack of memory,
# dataflow the same input to the next (bigger-memory) analysis on branch 2.
# Writes are wrapped in a transaction when 'do_transactions' is set.
sub write_output {
    my ($self) = @_;
    #If job failed due to insufficient heap space, flow into new analysis
    if ($self->param('more_heap')) {
        #Flow to next memory.
        my $num_jobs = $self->dataflow_output_id($self->input_id,2);
        #Check if any jobs created (if none, then know that no flow was defined on this branch ie got to last pecan_mem(
        if (@$num_jobs == 0) {
            throw("Pecan ". $self->input_job->analysis->logic_name . " still failed due to insufficient heap space");
        }
        #Don't want to flow to gerp jobs here
        $self->input_job->autoflow(0);
    } else {
        #Job succeeded, write output
        if ($self->param('do_transactions')) {
            my $compara_conn = $self->compara_dba->dbc;
            my $compara_helper = Bio::EnsEMBL::Utils::SqlHelper->new(-DB_CONNECTION => $compara_conn);
            $compara_helper->transaction(-CALLBACK => sub {
                $self->_write_output;
            });
        } else {
            $self->_write_output;
        }
    }
}
# Store every GenomicAlignBlock produced by the Pecan runnable.  Optionally
# hard-trims the block (testing only, 'trim' param), then either stores the
# block as-is or, when it exceeds 'max_block_size', splits it into chunks
# that share a group_id; chunks left with fewer than 2 GenomicAligns are
# trimmed away via _trim_gab_left/_trim_gab_right.  Each stored block is
# dataflowed to GERP.
sub _write_output {
    my ($self) = @_;
    # If Ortheus estimated a tree for this region, persist it in the meta table.
    if ($self->param('runnable')->{tree_to_save}) {
        my $meta_container = $self->compara_dba->get_MetaContainer;
        $meta_container->store_key_value("synteny_region_tree_".$self->param('synteny_region_id'),
            $self->param('runnable')->{tree_to_save});
    }
    my $mlssa = $self->compara_dba->get_MethodLinkSpeciesSetAdaptor;
    my $mlss = $mlssa->fetch_by_dbID($self->param('mlss_id'));
    my $gaba = $self->compara_dba->get_GenomicAlignBlockAdaptor;
    my $gaa = $self->compara_dba->get_GenomicAlignAdaptor;
    foreach my $gab (@{$self->param('runnable')->output}) {
        # Attach adaptors/MLSS to each GenomicAlign and derive the block
        # length from the first aligned sequence if it is not set yet.
        foreach my $ga (@{$gab->genomic_align_array}) {
            $ga->adaptor($gaa);
            $ga->method_link_species_set($mlss);
            $ga->visible(1);
            unless (defined $gab->length) {
                $gab->length(length($ga->aligned_sequence));
            }
        }
        $gab->adaptor($gaba);
        $gab->method_link_species_set($mlss);
        my $group;
        ## Hard trim condition (testing, this is intended for one single GAB only)
        #$gab->_print(\*STDERR);
        if ($self->param('trim')) {
            $gab = $self->_hard_trim_gab($gab);
        }
        #$gab->_print(\*STDERR);
        # Split block if it is too long and store as groups
        # Remove any blocks which contain only 1 genomic align and trim the 2
        # neighbouring blocks
        if ($self->param('max_block_size') and $gab->length > $self->param('max_block_size')) {
            my $gab_array = undef;
            my $find_next = 0;
            for (my $start = 1; $start <= $gab->length; $start += $self->param('max_block_size')) {
                my $split_gab = $gab->restrict_between_alignment_positions(
                    $start, $start + $self->param('max_block_size') - 1, 1);
                #less than 2 genomic_aligns
                if (@{$split_gab->get_all_GenomicAligns()} < 2) {
                    #set find_next flag to remember to trim the block to the right if it has more than 2 genomic_aligns
                    $find_next = 1;
                    #trim the previous block
                    my $prev_gab = pop @$gab_array;
                    my $trim_gab = _trim_gab_right($prev_gab);
                    #check it has at least 2 genomic_aligns, otherwise try again
                    while (@{$trim_gab->get_all_GenomicAligns()} < 2) {
                        $prev_gab = pop @$gab_array;
                        $trim_gab = _trim_gab_right($prev_gab);
                    }
                    #add trimmed block to array
                    if ($trim_gab) {
                        push @$gab_array, $trim_gab;
                    }
                } else {
                    #more than 2 genomic_aligns
                    push @$gab_array, $split_gab;
                    #but may be to the right of a gab with only 1 ga and
                    #therefore needs to be trimmed
                    if ($find_next) {
                        my $next_gab = pop @$gab_array;
                        my $trim_gab = _trim_gab_left($next_gab);
                        if (@{$trim_gab->get_all_GenomicAligns()} >= 2) {
                            push @$gab_array, $trim_gab;
                            $find_next = 0;
                        }
                    }
                }
            }
            #store the first block to get the dbID which is used to create the
            #group_id.
            my $first_block = shift @$gab_array;
            $gaba->store($first_block);
            my $group_id = $first_block->dbID;
            $gaba->store_group_id($first_block, $group_id);
            $self->_write_gerp_dataflow($first_block, $mlss);
            #store the rest of the genomic_align_blocks
            foreach my $this_gab (@$gab_array) {
                $this_gab->group_id($group_id);
                $gaba->store($this_gab);
                $self->_write_gerp_dataflow($this_gab, $mlss);
            }
        } else {
            # Block is small enough: store it whole.
            $gaba->store($gab);
            $self->_write_gerp_dataflow($gab, $mlss);
        }
    }
    return 1;
}
#trim genomic align block from the left hand edge to first position having at
#least 2 genomic aligns which overlap
#trim genomic align block from the left hand edge to first position having at
#least 2 genomic aligns which overlap
# Recursive helper: inspects the FIRST cigar element of each GenomicAlign,
# finds the shortest leading deletion/gap, and restricts the block past it
# until at least two sequences overlap at the left edge.  Returns the
# (possibly restricted) block, a zero-length block if nothing overlaps,
# or undef if called with undef.
sub _trim_gab_left {
    my ($gab) = @_;
    if (!defined($gab)) {
        return undef;
    }
    my $align_length = $gab->length;
    my $gas = $gab->get_all_GenomicAligns();
    my $d_length;
    my $m_length;
    my $min_d_length = $align_length;
    my $found_min = 0;
    #take first element in cigar string for each genomic_align and if it is a
    #match, it must extend to the start of the block. Find the shortest delete.
    #If the shortest delete and the match are the same length, there is no
    #overlap between them so restrict to the end of the delete and try again.
    #If the delete is shorter than the match, there must be an overlap.
    foreach my $ga (@$gas) {
        # Leading cigar element; a missing count means 1 (e.g. "M" == "1M").
        my ($cigLength, $cigType) = ( $ga->cigar_line =~ /^(\d*)([GMD])/ );
        $cigLength = 1 unless ($cigLength =~ /^\d+$/);
        if ($cigType eq "D" or $cigType eq "G") {
            $d_length = $cigLength;
            if ($d_length < $min_d_length) {
                $min_d_length = $d_length;
            }
        } else {
            $m_length = $cigLength;
            $found_min++;
        }
    }
    #if more than one alignment filled to the left edge, no need to restrict
    if ($found_min > 1) {
        return $gab;
    }
    my $new_gab = ($gab->restrict_between_alignment_positions(
        $min_d_length+1, $align_length, 1));
    #no overlapping genomic_aligns
    if ($new_gab->length == 0) {
        return $new_gab;
    }
    #if delete length is less than match length then must have sequence overlap
    if ($min_d_length < $m_length) {
        return $new_gab;
    }
    #otherwise try again with restricted gab
    return _trim_gab_left($new_gab);
}
#trim genomic align block from the right hand edge to first position having at
#least 2 genomic aligns which overlap
#trim genomic align block from the right hand edge to first position having at
#least 2 genomic aligns which overlap
# Mirror image of _trim_gab_left: works on the LAST cigar element of each
# GenomicAlign and restricts the block from the right until at least two
# sequences overlap at the right edge.  Returns the (possibly restricted)
# block, a zero-length block if nothing overlaps, or undef for undef input.
sub _trim_gab_right {
    my ($gab) = @_;
    if (!defined($gab)) {
        return undef;
    }
    my $align_length = $gab->length;
    my $max_pos = 0;
    my $gas = $gab->get_all_GenomicAligns();
    my $found_max = 0;
    my $d_length;
    my $m_length;
    my $min_d_length = $align_length;
    #take last element in cigar string for each genomic_align and if it is a
    #match, it must extend to the end of the block. Find the shortest delete.
    #If the shortest delete and the match are the same length, there is no
    #overlap between them so restrict to the end of the delete and try again.
    #If the delete is shorter than the match, there must be an overlap.
    foreach my $ga (@$gas) {
        # Trailing cigar element; a missing count means 1 (e.g. "M" == "1M").
        my ($cigLength, $cigType) = ( $ga->cigar_line =~ /(\d*)([GMD])$/ );
        $cigLength = 1 unless ($cigLength =~ /^\d+$/);
        if ($cigType eq "D" or $cigType eq "G") {
            $d_length =$cigLength;
            if ($d_length < $min_d_length) {
                $min_d_length = $d_length;
            }
        } else {
            $m_length = $cigLength;
            $found_max++;
        }
    }
    #if more than one alignment filled the right edge, no need to restrict
    if ($found_max > 1) {
        return $gab;
    }
    my $new_gab = $gab->restrict_between_alignment_positions(1, $align_length - $min_d_length, 1);
    #no overlapping genomic_aligns
    if ($new_gab->length == 0) {
        return $new_gab;
    }
    #if delete length is less than match length then must have sequence overlap
    if ($min_d_length < $m_length) {
        return $new_gab;
    }
    #otherwise try again with restricted gab
    return _trim_gab_right($new_gab);
}
# Hard-trim a GenomicAlignBlock according to the 'trim' parameter, a hash of
# "from_<dnafrag_id>" / "to_<dnafrag_id>" => position entries (testing only,
# intended for a single GAB).  Validates that every entry matches a
# GenomicAlign in the block, converts the dnafrag coordinates to alignment
# coordinates, restricts the block, and verifies the result.  Dies on any
# mismatch.
#
# NOTE: the original implementation iterated the trim hash with
# "while (each %$trim)" and bailed out with "last".  Exiting an each() loop
# early leaves the hash's internal iterator mid-stream, so the next each()
# over the same hash (in particular the per-GenomicAlign check loop at the
# end) resumed from the wrong key.  Iterating over keys() instead gives each
# pass its own fresh list.
sub _hard_trim_gab {
    my ($self, $gab) = @_;
    my $trim = $self->param('trim');
    die "Wrong trim argument" if (!%$trim);
    die "Wrong number of keys in trim argument" if (keys %$trim != @{$gab->get_all_GenomicAligns()});
    ## Check that trim hash matches current GAB
    my $match;
    foreach my $key (keys %$trim) {
        my $value = $trim->{$key};
        my ($opt, $dnafrag_id) = $key =~ m/(\w+)_(\d+)/;
        $match = 0;
        foreach my $this_ga (@{$gab->get_all_GenomicAligns()}) {
            if ($this_ga->dnafrag_id == $dnafrag_id and $this_ga->dnafrag_start <= $value and
                $this_ga->dnafrag_end >= $value) {
                $match = 1;
                last;
            }
        }
        if (!$match) {
            last;
        }
    }
    die "Trim argument does not match current GAB" if (!$match);
    ## Get the right trimming coordinates
    print "Trying to trim this GAB... ", join("; ", map {$_." => ".$trim->{$_}} keys %$trim), "\n";
    my $final_start = $gab->length;
    my $final_end = 1;
    foreach my $key (keys %$trim) {
        my $value = $trim->{$key};
        my ($opt, $dnafrag_id) = $key =~ m/(\w+)_(\d+)/;
        my $ref_ga = undef;
        foreach my $this_ga (@{$gab->get_all_GenomicAligns()}) {
            if ($this_ga->dnafrag_id == $dnafrag_id and $this_ga->dnafrag_start <= $value and
                $this_ga->dnafrag_end >= $value) {
                $ref_ga = $this_ga;
                last;
            }
        }
        if ($ref_ga) {
            my ($tmp_gab, $start, $end);
            if ($opt eq "from") {
                ($tmp_gab, $start, $end) = $gab->restrict_between_reference_positions($value, undef, $ref_ga);
            } elsif ($opt eq "to") {
                ($tmp_gab, $start, $end) = $gab->restrict_between_reference_positions(undef, $value, $ref_ga);
                # Coordinates come back relative to the restricted end; flip
                # them so they refer to the original alignment.
                my $tmp_start = $gab->length - $end + 1;
                my $tmp_end = $gab->length - $start + 1;
                $start = $tmp_start;
                $end = $tmp_end;
            } else {
                die;
            }
            ## Need to use the smallest start and largest end as the GAB may start with a gap for
            ## some of the GAs
            if ($start < $final_start) {
                $final_start = $start;
            }
            if ($end > $final_end) {
                $final_end = $end;
            }
            print " DNAFRAG $dnafrag_id : $start -- $end (alignment coordinates)\n";
        }
    }
    print " RESTRICT: $final_start -- $final_end (1 -- ", $gab->length, ")\n";
    $gab = $gab->restrict_between_alignment_positions($final_start, $final_end);
    ## Check result: every remaining GenomicAlign must sit exactly on one of
    ## the requested from/to boundaries.
    foreach my $this_ga (@{$gab->get_all_GenomicAligns()}) {
        my $check = 0;
        foreach my $key (keys %$trim) {
            my $value = $trim->{$key};
            my ($opt, $dnafrag_id) = $key =~ m/(\w+)_(\d+)/;
            if ($dnafrag_id == $this_ga->dnafrag_id) {
                if ($opt eq "from" and $this_ga->dnafrag_start == $value) {
                    $check = 1;
                } elsif ($opt eq "to" and $this_ga->dnafrag_end == $value) {
                    $check = 1;
                } else {
                    last;
                }
            }
        }
        die("Cannot trim this GAB as requested\n") if (!$check);
    }
    print "GAB trimmed as requested\n\n";
    return $gab;
}
# Dataflow one stored GenomicAlignBlock to the GERP analysis on branch 1.
# $mlss is accepted for historical reasons (see the commented-out code) but
# only the block's dbID is actually passed on.
sub _write_gerp_dataflow {
    my ($self, $gab, $mlss) = @_;
    # my $species_set = "[";
    # my $genome_db_set = $mlss->species_set_obj->genome_dbs;
    # foreach my $genome_db (@$genome_db_set) {
    # $species_set .= $genome_db->dbID . ",";
    # }
    # $species_set .= "]";
    # my $output_id = "{genomic_align_block_id=>" . $gab->dbID . ",species_set=>" . $species_set;
    my $output_id = { genomic_align_block_id => $gab->dbID };
    $self->dataflow_output_id($output_id,1);
}
##########################################
#
# getter/setter methods
#
##########################################
sub add_fasta_files {
    # Append one FASTA file path to the list accumulated in the
    # 'fasta_files' parameter.
    my ($self, $path) = @_;
    my $current = $self->param('fasta_files');
    push @{$current}, $path;
    $self->param('fasta_files', $current);
}
sub add_species_order {
    # Append one genome_db_id to the list accumulated in the
    # 'species_order' parameter (parallel to 'fasta_files').
    my ($self, $genome_db_id) = @_;
    my $current = $self->param('species_order');
    push @{$current}, $genome_db_id;
    $self->param('species_order', $current);
}
# Return (and cache in the 'species_tree' param) the species tree as a
# NestedSet.  The newick string comes from, in order of preference: the
# cached param, a 'tree_file' path, or the mlss_tag table.  Leaf names are
# converted from species names to genome_db_ids, and the method throws if
# any genome_db is missing from the tree.  Returns undef if no tree string
# can be found.
sub get_species_tree {
    my $self = shift;
    my $newick_species_tree;
    if (defined($self->param('species_tree'))) {
        return $self->param('species_tree');
    } elsif ($self->param('tree_file')) {
        #open via a file (not currently used in the pipeline)
        # Lexical 3-arg open (the original used a 2-arg bareword open).
        my $tree_file = $self->param('tree_file');
        open(my $tree_fh, '<', $tree_file) or throw("Cannot open file ".$tree_file);
        $newick_species_tree = join("", <$tree_fh>);
        close($tree_fh);
    } else {
        #get from mlss_tag table
        $newick_species_tree = $self->get_species_tree_string;
    }
    if (!defined($newick_species_tree)) {
        return undef;
    }
    # Strip surrounding whitespace and embedded newlines before parsing.
    $newick_species_tree =~ s/^\s*//;
    $newick_species_tree =~ s/\s*$//;
    $newick_species_tree =~ s/[\r\n]//g;
    my $species_tree =
        Bio::EnsEMBL::Compara::Graph::NewickParser::parse_newick_into_tree($newick_species_tree);
    #if the tree leaves are species names, need to convert these into genome_db_ids
    my $genome_dbs = $self->compara_dba->get_GenomeDBAdaptor->fetch_all();
    my %leaf_name;
    my %leaf_check;
    foreach my $genome_db (@$genome_dbs) {
        my $name = $genome_db->name;
        $name =~ tr/ /_/;
        # NOTE(review): keys are stored with the genome_db's original case but
        # looked up with lc() below — assumes genome_db names are already
        # lower-case; verify against the genome_db table.
        $leaf_name{$name} = $genome_db->dbID;
        if ($name ne "Ancestral_sequences" and $name ne "ancestral_sequences") {
            $leaf_check{$genome_db->dbID} = 2;
        }
    }
    foreach my $leaf (@{$species_tree->get_all_leaves}) {
        #check have names rather than genome_db_ids
        if ($leaf->name =~ /\D+/) {
            $leaf->name($leaf_name{lc($leaf->name)});
        }
        $leaf_check{lc($leaf->name)}++;
    }
    #Check have one instance in the tree of each genome_db in the database
    #Don't worry about having extra elements in the tree that aren't in the
    #genome_db table because these will be removed later
    foreach my $name (keys %leaf_check) {
        if ($leaf_check{$name} == 2) {
            throw("Unable to find genome_db_id $name in species_tree\n");
        }
    }
    $self->param('species_tree', $species_tree);
    return $self->param('species_tree');
}
=head2 _load_DnaFragRegions
Arg [1]    : int synteny_region_id
Example    : $self->_load_DnaFragRegions();
Description: Gets the list of DnaFragRegions for this
synteny_region_id. Resulting DnaFragRegions are
stored using the dnafrag_regions getter/setter.
Returntype : listref of Bio::EnsEMBL::Compara::DnaFragRegion objects
Exception :
Warning :
=cut
# Fetch the DnaFragRegions of the given SyntenyRegion and store them (in
# arbitrary order) in the 'dnafrag_regions' parameter.  Returns an empty
# arrayref — and leaves the parameter untouched — when no dbID is supplied.
sub _load_DnaFragRegions {
    my ($self, $synteny_region_id) = @_;
    my $dnafrag_regions = [];
    # Fail if dbID has not been provided
    return $dnafrag_regions if (!$synteny_region_id);
    my $sra = $self->compara_dba->get_SyntenyRegionAdaptor;
    # Use the argument rather than re-reading the 'synteny_region_id' param
    # (the original did the latter, silently ignoring what the caller passed).
    my $sr = $sra->fetch_by_dbID($synteny_region_id);
    my $regions = $sr->regions();
    foreach my $dfr (@$regions) {
        push(@{$dnafrag_regions}, $dfr);
    }
    $self->param('dnafrag_regions', $dnafrag_regions);
}
=head2 _dump_fasta
Arg [1] : -none-
Example : $self->_dump_fasta();
Description: Dumps FASTA files in the order given by the tree
string (needed by Pecan). Resulting file names are
stored using the fasta_files getter/setter
Returntype : 1
Exception :
Warning :
=cut
# Dump one FASTA file per DnaFragRegion, in the order dictated by the Pecan
# tree string (seq1, seq2, ...), or in input order if no tree string is set.
# Sequences are soft-masked; non-ACTGNX characters are replaced by N.  File
# paths and genome_db_ids are accumulated via add_fasta_files() /
# add_species_order().  Returns 1.
sub _dump_fasta {
    my $self = shift;
    my $all_dnafrag_regions = $self->param('dnafrag_regions');
    ## Dump FASTA files in the order given by the tree string (needed by Pecan)
    my @seqs;
    if ($self->param('pecan_tree_string')) {
        @seqs = ($self->param('pecan_tree_string') =~ /seq(\d+)/g);
    } else {
        @seqs = (1..scalar(@$all_dnafrag_regions));
    }
    foreach my $seq_id (@seqs) {
        my $dfr = $all_dnafrag_regions->[$seq_id-1];
        my $file = $self->worker_temp_directory . "/seq" . $seq_id . ".fa";
        # Lexical 3-arg open with "or": the original
        #   open F, ">$file" || throw(...)
        # could never throw because || bound to the filename string, and it
        # used a bareword handle with a 2-arg open.
        open(my $fh, '>', $file) or throw("Couldn't open $file");
        print $fh ">DnaFrag", $dfr->dnafrag_id, "|", $dfr->dnafrag->name, ".",
            $dfr->dnafrag_start, "-", $dfr->dnafrag_end, ":", $dfr->dnafrag_strand,"\n";
        my $slice = $dfr->slice;
        throw("Cannot get slice for DnaFragRegion in DnaFrag #".$dfr->dnafrag_id) if (!$slice);
        my $seq = $slice->get_repeatmasked_seq(undef, 1)->seq;
        if ($seq =~ /[^ACTGactgNnXx]/) {
            print STDERR $slice->name, " contains at least one non-ACTGactgNnXx character. These have been replaced by N's\n";
            $seq =~ s/[^ACTGactgNnXx]/N/g;
        }
        # Wrap the sequence at 80 columns.
        $seq =~ s/(.{80})/$1\n/g;
        chomp $seq;
        print $fh $seq,"\n";
        close($fh) or throw("Couldn't close $file");
        $self->add_fasta_files($file);
        $self->add_species_order($dfr->dnafrag->genome_db_id);
    }
    return 1;
}
=head2 _build_tree_string
Arg [1] : -none-
Example : $self->_build_tree_string();
Description: This method sets the tree_string using the original
species tree and the set of DnaFragRegions. The
tree is edited by the _update_tree method which
resorts the DnaFragRegions (see _update_tree elsewhere
in this document)
Returntype : -none-
Exception :
Warning :
=cut
# Build the newick string Pecan expects ('pecan_tree_string' param) from a
# copy of the species tree, after _update_tree has renamed/pruned the leaves
# to match the DnaFragRegions.  Does nothing when the tree is unavailable or
# when _update_tree bailed out because of a duplication.
sub _build_tree_string {
    my $self = shift;
    my $tree = $self->get_species_tree->copy;
    return if (!$tree);
    $tree = $self->_update_tree($tree);
    #if duplications found, $tree will not be defined
    return if (!$tree);
    my $tree_string = $tree->newick_format('simple');
    # Remove quotes around node labels
    $tree_string =~ s/"(seq\d+)"/$1/g;
    # Remove branch length if 0
    $tree_string =~ s/\:0\.0+(\D)/$1/g;
    $tree_string =~ s/\:0([^\.\d])/$1/g;
    $tree->release_tree;
    $self->param('pecan_tree_string', $tree_string);
}
=head2 _update_tree
Arg [1] : Bio::EnsEMBL::Compara::NestedSet $tree_root
Example    : $self->_update_tree($tree);
Description: This method updates the tree by removing or
duplicating the leaves according to the original
tree and the set of DnaFragRegions. The tree nodes
will be renamed seq1, seq2, seq3 and so on and the
DnaFragRegions will be resorted in order to match
the names of the nodes (the first DnaFragRegion will
correspond to seq1, the second to seq2 and so on).
Returntype : Bio::EnsEMBL::Compara::NestedSet (a tree)
Exception :
Warning :
=cut
# Rename each tree leaf to seqN (matching the order of the FASTA dumps),
# prune leaves that have no DnaFragRegion, and re-sort 'dnafrag_regions'
# accordingly.  If any leaf maps to more than one region (a duplication),
# set 'found_a_duplication' and return undef — Ortheus will be used to
# estimate the tree instead.  Throws if leaf and region counts disagree
# after the update.
#
# The original carried an unreachable foreach after the unconditional
# "return;" in the duplication branch (flagged by its own comment as "no
# longer used"); that dead code has been removed.
sub _update_tree {
    my $self = shift;
    my $tree = shift;
    my $all_dnafrag_regions = $self->param('dnafrag_regions');
    my $ordered_dnafrag_regions = [];
    my $idx = 1;
    my $all_leaves = $tree->get_all_leaves;
    foreach my $this_leaf (@$all_leaves) {
        my $these_dnafrag_regions = [];
        ## Look for DnaFragRegions belonging to this genome_db_id
        foreach my $this_dnafrag_region (@$all_dnafrag_regions) {
            if ($this_dnafrag_region->dnafrag->genome_db_id == $this_leaf->name) {
                push (@$these_dnafrag_regions, $this_dnafrag_region);
            }
        }
        if (@$these_dnafrag_regions == 1) {
            ## If only 1 has been found...
            $this_leaf->name("seq".$idx++);
            push(@$ordered_dnafrag_regions, $these_dnafrag_regions->[0]);
        } elsif (@$these_dnafrag_regions > 1) {
            ## If more than 1 has been found (a duplication), give up and let
            ## Ortheus estimate a better tree.
            $self->param('found_a_duplication', 1);
            return;
        } else {
            ## If none has been found, prune the leaf.
            $this_leaf->disavow_parent;
            $tree = $tree->minimize_tree;
        }
    }
    $self->param('dnafrag_regions', $ordered_dnafrag_regions);
    if (scalar(@$all_dnafrag_regions) != scalar(@$ordered_dnafrag_regions) or
        scalar(@$all_dnafrag_regions) != scalar(@{$tree->get_all_leaves})) {
        throw("Tree has a wrong number of leaves after updating the node names");
    }
    # Collapse a root left with a single child.
    if ($tree->get_child_count == 1) {
        my $child = $tree->children->[0];
        $child->parent->merge_children($child);
        $child->disavow_parent;
    }
    return $tree;
}
# Run Ortheus with -y (tree estimation only, no final alignment) when a
# duplication prevented the species tree from being used directly.  On
# success, overwrite 'pecan_tree_string' and re-order 'fasta_files' to match
# the estimated tree; throw if Ortheus produced no tree file.
sub _run_ortheus {
    my ($self) = @_;
    my $fake_analysis = Bio::EnsEMBL::Analysis->new;
    #run Ortheus.py without running MAKE_FINAL_ALIGNMENT ie OrtheusC
    my $options = " -y";
    my $ortheus_runnable = new Bio::EnsEMBL::Analysis::Runnable::Ortheus(
        -workdir => $self->worker_temp_directory,
        -fasta_files => $self->param('fasta_files'),
        -species_tree => $self->get_species_tree->newick_format('simple'),
        -species_order => $self->param('species_order'),
        -analysis => $fake_analysis,
        -parameters => $self->param('java_options'),
        -options => $options,
    );
    $ortheus_runnable->run_analysis;
    # Ortheus writes "<newick>\n<space-separated fasta files>\n".
    my $tree_file = $self->worker_temp_directory . "/output.$$.tree";
    if (-e $tree_file) {
        ## Ortheus estimated the tree. Overwrite the order of the fasta files and get the tree
        # Lexical 3-arg open (the original used a 2-arg bareword open).
        open(my $tree_fh, '<', $tree_file) or throw("Could not open tree file <$tree_file>");
        my ($newick, $files) = <$tree_fh>;
        close($tree_fh);
        $newick =~ s/[\r\n]+$//;
        $self->param('pecan_tree_string', $newick);
        $files =~ s/[\r\n]+$//;
        my $all_files = [split(" ", $files)];
        #store ordered fasta_files
        $self->param('fasta_files', $all_files);
        print STDOUT "**NEWICK: $newick\nFILES: ", join(" -- ", @$all_files), "\n";
    } else {
        throw("Ortheus was unable to create a tree");
    }
}
1;
| kumarsaurabh20/ensembl-compara | modules/Bio/EnsEMBL/Compara/RunnableDB/MercatorPecan/Pecan.pm | Perl | apache-2.0 | 27,804 |
# Generated by SOAP::WSDL from the AdWords v201409 WSDL — do not edit by
# hand.  Models the <mutate> request element of ConversionTrackerService;
# its single child, 'operations', is a list of ConversionTrackerOperation.
package Google::Ads::AdWords::v201409::ConversionTrackerService::mutate;
use strict;
use warnings;
{ # BLOCK to scope variables
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' }
__PACKAGE__->__set_name('mutate');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
    SOAP::WSDL::XSD::Typelib::Element
    Google::Ads::SOAP::Typelib::ComplexType
);
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out attribute storage; generates the get_operations accessor.
my %operations_of :ATTR(:get<operations>);
__PACKAGE__->_factory(
    [ qw( operations
    ) ],
    {
        'operations' => \%operations_of,
    },
    {
        'operations' => 'Google::Ads::AdWords::v201409::ConversionTrackerOperation',
    },
    {
        'operations' => 'operations',
    }
);
} # end BLOCK
} # end of BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::ConversionTrackerService::mutate
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
mutate from the namespace https://adwords.google.com/api/adwords/cm/v201409.
Applies the list of mutate operations such as adding or updating conversion trackers. <p class="note"><b>Note:</b> {@link ConversionTrackerOperation} does not support the <code>REMOVE</code> operator. In order to 'disable' a conversion type, send a <code>SET</code> operation for the conversion tracker with the <code>status</code> property set to <code>DISABLED</code></p> @param operations A list of mutate operations to perform. @return The list of the conversion trackers as they appear after mutation, in the same order as they appeared in the list of operations. @throws com.google.ads.api.services.common.error.ApiException if problems occurred while updating the data.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * operations
$element->set_operations($data);
$element->get_operations();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201409::ConversionTrackerService::mutate->new($data);
Constructor. The following data structure may be passed to new():
{
operations => $a_reference_to, # see Google::Ads::AdWords::v201409::ConversionTrackerOperation
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/ConversionTrackerService/mutate.pm | Perl | apache-2.0 | 2,537 |
#!/usr/bin/perl -w
#
# Copyright 2012, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example adds multiple keywords to an ad group. To get ad groups run
# basic_operations/get_ad_groups.pl.
#
# Tags: AdGroupCriterionService.mutate
# Author: David Torres <api.davidtorres@gmail.com>
use strict;
use lib "../../../lib";
use Google::Ads::AdWords::Client;
use Google::Ads::AdWords::Logging;
use Google::Ads::AdWords::v201406::AdGroupBidModifierOperation;
use Google::Ads::AdWords::v201406::AdGroupBidModifier;
use Google::Ads::AdWords::v201406::Platform;
use Cwd qw(abs_path);
use constant BID_MODIFIER => 1.5;
# Replace with valid values of your account.
# The ad group the mobile bid modifier will be attached to.
my $ad_group_id = "INSERT_AD_GROUP_ID_HERE";
# Example main subroutine.
# Example main subroutine.
# Attach a mobile bid modifier (BID_MODIFIER) to the given ad group via an
# ADD mutate on AdGroupBidModifierService, then print the result.
# Returns 1.
sub add_ad_group_bid_modifier {
    my $client = shift;
    my $ad_group_id = shift;
    # Create mobile platform. The ID can be found in the documentation.
    # https://developers.google.com/adwords/api/docs/appendix/platforms
    my $mobile = Google::Ads::AdWords::v201406::Platform->new({
        id => 30001
    });
    # Create the bid modifier.
    my $modifier = Google::Ads::AdWords::v201406::AdGroupBidModifier->new({
        adGroupId => $ad_group_id,
        criterion => $mobile,
        bidModifier => BID_MODIFIER
    });
    # Create ADD operation.
    my $operation =
        Google::Ads::AdWords::v201406::AdGroupBidModifierOperation->new({
            operator => "ADD",
            operand => $modifier
        });
    # Update campaign criteria.
    my $result = $client->AdGroupBidModifierService()->mutate({
        operations => [$operation]
    });
    # Display campaign criteria.
    if ($result->get_value()) {
        foreach my $modifier (@{$result->get_value()}) {
            printf "Ad Group ID '%s', criterion ID '%s', " .
                "and type '%s' was modified with bid %.2f.\n",
                $modifier->get_adGroupId(),
                $modifier->get_criterion()->get_id(),
                $modifier->get_criterion()->get_type(),
                $modifier->get_bidModifier();
        }
    } else {
        print "No ad group bid modifier was added.\n";
    }
    return 1;
}
# Don't run the example if the file is being included.
# (A top-level "return" is legal in a file loaded with require/do, so this
# lets the example be pulled in as a library without executing.)
if (abs_path($0) ne abs_path(__FILE__)) {
    return 1;
}
# Log SOAP XML request, response and API errors.
Google::Ads::AdWords::Logging::enable_all_logging();
# Get AdWords Client, credentials will be read from ~/adwords.properties.
my $client = Google::Ads::AdWords::Client->new({version => "v201406"});
# By default examples are set to die on any server returned fault.
$client->set_die_on_faults(1);
# Call the example
add_ad_group_bid_modifier($client, $ad_group_id);
| gitpan/Google-Ads-AdWords-Client | examples/v201406/advanced_operations/add_ad_group_bid_modifier.pl | Perl | apache-2.0 | 3,111 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Thin Perl stub for the XS-backed Lucy::Plan::Architecture class; loading
# Lucy binds the actual implementation.
package Lucy::Plan::Architecture;
use Lucy;
our $VERSION = '0.004000';
# Numify via eval so development-release version strings normalize.
$VERSION = eval $VERSION;
1;
__END__
| kidaa/lucy | perl/lib/Lucy/Plan/Architecture.pm | Perl | apache-2.0 | 894 |
#
# @@@ START COPYRIGHT @@@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @@@ END COPYRIGHT @@@
#
use strict;
use Exporter ();
use sqconfigdb;
package sqnameserver;
# set g_debugFlag to 1 to print detailed debugging info
my $g_debugFlag = 0;
my @g_nodeNames;    # hostnames collected from "nodes = ..." statements
my $g_ok;           # 1 until the current statement has been echoed by displayStmt
my $errors = 0;     # running count of parse/validation errors
my $stmt;           # raw text of the statement currently being parsed
# Display persist configuration statement if not already displayed.
# $_[0] aliases the caller's "not yet displayed" flag ($g_ok) and is
# cleared in place after the statement is printed, so the statement is
# echoed at most once.  Every call also counts one error.
sub displayStmt
{
    $errors++;
    if ($_[0] == 1)
    {
        print "For \"$stmt\":\n";
        # Set flag that statement has been displayed
        $_[0] = 0;
    }
}
# Consume a comma (and surrounding whitespace) from the input string.
# Returns (1, remainder) on success, or (0, '') after reporting an
# error against the current statement.
sub parseComma {
    my ($s) = @_;
    if ($s =~ /(,\s*)/) {
        $s =~ s:$1::;
        return (1, $s);
    } else {
        displayStmt($g_ok);
        print "  Error: Expecting ',', but saw $s\n"; #T
        return (0, '');
    }
}
# Verify that only optional whitespace remains on the line.
# Returns 1 when the remainder is blank, 0 after reporting an error.
sub parseEnd {
    my ($s) = @_;
    if ($s =~ /^\s*?$/) {
        return 1;
    } else {
        displayStmt($g_ok);
        print "  Error: Expecting <eoln>, but saw $s\n"; #T
        return 0;
    }
}
# Consume an '=' (plus trailing whitespace) from the input string.
# Returns (1, remainder) on success, or (0, '') after reporting an
# error against the current statement.
sub parseEq {
    my ($s) = @_;
    if ($s =~ /(=\s*)/) {
        $s =~ s:$1::;
        return (1, $s);
    } else {
        displayStmt($g_ok);
        print "  Error: Expecting '=', but saw $s\n"; #T
        return (0, '');
    }
}
# Parse one configuration statement.  Recognized forms:
#   - comment lines beginning with '#' and blank lines (ignored)
#   - "nodes = host1, host2, ..." -- each hostname is pushed onto
#     @g_nodeNames for later insertion by validateNameserver()
# Anything else is reported as an invalid keyword.
sub parseStatement {
    my ($s) = @_;
    if ($g_debugFlag) {
        print "stmt: $s\n";
    }
    if ($s =~ /^#/) {
    } elsif ($s =~ /^\s*$/) {
    } elsif ($s =~ /(nodes)\s*/) {
        my $k = $1;
        $s =~ s:$k\s*::;
        my $eq;
        ($eq, $s) = parseEq($s);
        if ($eq) {
            # Consume "hostname," pairs while a comma follows the name.
            while ($s =~ /([A-Za-z0-9.\-]+)(\s*,\s*)/) {
                my $nodeName = $1;
                $s =~ s:$nodeName$2::;
                push(@g_nodeNames, $nodeName);
            }
            # The final hostname carries no trailing comma.
            if ($s =~ /([A-Za-z0-9.\-]+)/) {
                my $nodeName = $1;
                $s =~ s:$nodeName::;
                push(@g_nodeNames, $nodeName);
                parseEnd($s);
            } else {
                displayStmt($g_ok);
                print "  Error: Expecting <hostname> e.g. n054, but saw $s\n"; #T
            }
        }
    } else {
        displayStmt($g_ok);
        my $k = $s;
        if ($s =~ /^([A-Za-z_]+)/) {
            $k = $1;
        }
        print "  Error: Invalid keyword $k, expecting nodes\n"; #T
    }
}
# If parsing produced no errors, replace the name-server node list in
# the configuration database (sqconfigdb) with the hostnames collected
# in @g_nodeNames.  Returns the accumulated error count (0 = success).
sub validateNameserver
{
    if ($errors == 0) {
        sqconfigdb::delDbNameServerData();
        my $nodeName;
        foreach $nodeName (@g_nodeNames) {
            sqconfigdb::addDbNameServer( $nodeName );
        }
    }
    return $errors;
}
# Entry point called once per input line; reads the line from $_.
# Returns 1 once any error has occurred.  NOTE(review): when there are
# no errors the sub falls off the failed 'if' and returns an undefined
# value -- callers should treat any true return as failure.
sub parseStmt
{
    $stmt = $_;
    chomp($stmt);
    $g_ok = 1;
    parseStatement($stmt);
    if ($errors != 0) { # Had errors
        return 1;
    }
}
# Below is to return true; this is required when this module is referenced via a "use" statement in another module
# (if we had variables defined and assigned in addition to functions, we would not need to include this implicit return)
1;
| apache/incubator-trafodion | core/sqf/sql/scripts/sqnameserver.pm | Perl | apache-2.0 | 3,664 |
#!/usr/bin/perl -w
# Ad-hoc test driver for CoGe::Algos::KsCalc: aligns two Arabidopsis
# genes (At1g10850 vs At1g60630), prints the alignment and the raw vs
# gap-stripped sequence lengths, then dumps the Ks calculation result.
use strict;
use CoGe::Algos::KsCalc;
use Data::Dumper;
use File::Temp;
my $TEMPDIR = "/tmp/";
my $object = CoGe::Algos::KsCalc->new ();
$object->version(5); # NOTE(review): presumably the CoGe dataset version -- confirm
$object->name1("At1g10850");
$object->name2("At1g60630");
$object->palign();   # protein alignment
$object->print_align();
print "\n\n";
# raw length : gapless length, for both proteins and both DNA seqs
print length($object->prot1),":", length($object->gaplessP1),"\n";
print length($object->prot2),":", length($object->gaplessP2),"\n";
print length($object->dna1),":", length($object->gaplessD1),"\n";
print length($object->dna2),":", length($object->gaplessD2),"\n";
print "\n\n";
print $object->dna1,"\n";
#print $object->gaplessP1,"\n";
#print $object->gaplessP2,"\n";
#print $object->gaplessD1,"\n";
#print $object->gaplessD2,"\n";
print $object->phylip_align;
#$object->print_align(seq1=>$object->gaplessD1, seq2=>$object->gaplessD2);
#print Dumper $object;
# Temp file for the alignment fed to KsCalc; UNLINK=>0 keeps it on disk
# for inspection after the run.  (Indirect "new File::Temp" syntax kept
# as-is; File::Temp->new(...) would be the modern form.)
my $aln = new File::Temp ( TEMPLATE=>'Ks__XXXXX',
			   DIR=>$TEMPDIR,
			   SUFFIX=>'.aln',
			   UNLINK=>0);
print Dumper $object->KsCalc($aln);
| asherkhb/coge | modules/Algos/KsCalc/scripts/test.pl | Perl | bsd-2-clause | 1,003 |
# The package name is split across two lines to hide this module from
# the PAUSE/CPAN indexer (standard Date::Manip convention).
package #
Date::Manip::Offset::off036;
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Fri Nov 21 11:03:44 EST 2014
# Data version: tzdata2014j
# Code version: tzcode2014j
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }
our ($Offset,%Offset);
END {
undef $Offset;
undef %Offset;
}
# Auto-generated offset data: $Offset is this file's UTC offset string;
# %Offset maps a usage index to the zones that ever used this offset.
$Offset = '+01:34:52';
%Offset = (
0 => [
'europe/athens',
],
);
1;
| nriley/Pester | Source/Manip/Offset/off036.pm | Perl | bsd-2-clause | 851 |
#!/usr/bin/perl -w
# Copy feature names/annotations between syntenic gene pairs listed in
# a DAGChainer "aligncoords" file (as produced by SynMap).  For each
# pair, the feature on the -source side donates its names and
# annotations to the other (sink) feature via map_annotation().
use strict;
use CoGeX;
use Getopt::Long;
use vars qw($DEBUG $file $coge $GO $source @skip_names);
GetOptions(
	   "file|f=s"=>\$file,
	   "debug"=>\$DEBUG,
	   "go"=>\$GO,
	   "source=i"=>\$source,
	   "skip_name=s"=>\@skip_names,
	   );
# NOTE(review): HOST/PORT/USER/PASSWORD are deployment placeholders and
# must be filled in before this script can connect.
my $connstr = 'dbi:mysql:dbname=coge;host=HOST;port=PORT';
$coge = CoGeX->connect($connstr, 'USER', 'PASSWORD' );
unless (-r $file)
  {
    print qq{
Usage: $0 <dag chainer aligncoords file> -go
OPTIONS:
 -f         dag chainer aligncoords file (usually generated by SynMap)
 -go        set to actually add data to database  (DEFAULT undefined)
 -debug     debugging messages (DEFAULT undefined)
 -source    set to 1 or 2 to specify if the first or second feature in each column is the source
            for annotations.  The source feature's annotations will be copied to the other
            feature (the sink feature).  (DEFAULT 0.  This means that there is no default
            source specified an an error will be thrown.)
 -skip_name set this to some string and it will be used in a regex /^skip_name/
            to skip a type of feature names.  For example /^NP/ and /^YP/ are
            skipped by default.  Add as many as you like by:
            -skip_name name1 -skip_name name2
};
    exit;
  }
$source = 0 unless $source;
print "NO SOURCE SPECIFIED!  No annotation mappings will happen.\n" unless $source;
print "go option is set to 0.  No annotation mappings will happen.\n" unless $GO;
open (IN, $file);
while (<IN>)
  {
    chomp;
    next unless $_;
    next if /^#/;
    # Tab-delimited DAGChainer row; columns 1 and 5 hold "||"-packed
    # feature descriptors, columns 0 and 4 hold "a<dsgid>_<chr>" tags.
    my (@line) = split /\t/;
    my (@seq1) = split/\|\|/, $line[1];
    my (@seq2) = split/\|\|/, $line[5];
    my ($dsgid1, $chr1) = $line[0] =~/a(\d+)_(.+)/;
    my ($dsgid2, $chr2) = $line[4] =~/a(\d+)_(.+)/;
    # Field 6 of each descriptor is the CoGe feature database id.
    my ($fid1) = $seq1[6];
    my ($fid2) = $seq2[6];
    my $feat1 = $coge->resultset('Feature')->find($fid1);
    my $feat2 = $coge->resultset('Feature')->find($fid2);
    if ($DEBUG)
      {
	print "ANNOTATIONS:\n";
	print $feat1->annotation_pretty_print."\n\n";
	print $feat2->annotation_pretty_print."\n";
	print "\n-------------------\n";
      }
    my ($source_feat, $sink_feat);
    ($source_feat, $sink_feat) = ($feat1, $feat2) if $source == 1;
    ($source_feat, $sink_feat) = ($feat2, $feat1) if $source == 2;
    print "SOURCE FEATURE IS $source\n" if $DEBUG && $source;
    map_annotation(source=>$source_feat, sink=>$sink_feat) if ($source_feat && $sink_feat);
    print "\n-------------------\n" if $DEBUG;
  }
close IN;
# map_annotation(source => $feat, sink => $feat)
# Copy feature names and annotations from the source feature to the
# sink feature, and tag the sink with a "Homology Mapping" annotation
# linking back to the source's organism/dataset.  Database writes only
# happen when the global $GO flag is set; names matching /^NP/, /^YP/
# or any user-supplied @skip_names prefix are not copied.
sub map_annotation
  {
    my %opts = @_;
    my $source = $opts{source}; #mapping annotations from
    my $sink = $opts{sink}; #mapping annotations to
    print "Cloning annotations from ", $source->id, " to ", $sink->id,"\n" if $GO;
    #creating source mapping annotation
    my $anno_type = $coge->resultset('AnnotationType')->find_or_create({name=>"Homology Mapping"});
    my $mapping_anno = "<span class='link' onclick=window.open('OrganismView.pl?oid=".$source->organism->id."')>".$source->organism->name."</span>";
    $mapping_anno .= " <span class='link' onclick=window.open('OrganismView.pl?dsid=".$source->dataset->id."')>"."(v".$source->dataset->version.")</span>";
    $sink->add_to_annotations({annotation=>$mapping_anno,
			       annotation_type_id=>$anno_type->id}) if $GO;
    #end source mapping annotation
#    print $mapping_anno,"\n";
    # Labeled loop so the inner skip-list scan can skip this name.
  name:    foreach my $name ($source->feature_names)
      {
	next if $name->name =~ /^NP/;
	next if $name->name =~ /^YP/;
	foreach my $item (@skip_names)
	  {
	    next name if $name->name =~ /^$item/;
	  }
	print "NAME: ", $name->name, "\n" if $DEBUG;# && $name =~ /_/;
	$sink->add_to_feature_names({name=>$name->name,
				     description=>$name->description}) if $GO;
      }
    foreach my $anno ($source->annotations)
      {
	print "ANNO: ",$anno->annotation,"\n" if $DEBUG;
	$sink->add_to_annotations({annotation=>$anno->annotation,
				   annotation_type_id=>$anno->annotation_type->id}) if $GO;
      }
  }
| asherkhb/coge | scripts/diags/add_annotations_using_dag_file.pl | Perl | bsd-2-clause | 4,010 |
#! /usr/bin/env perl
# DAGMan PRE script for the retry-B test node.
# Usage: <script> <retry-count>
# Exits 0 (success) when the retry count is positive, recording the
# count in job_dagman_retry-B-nodeA.retry; exits 1 on the first attempt
# (retry == 0 or missing) so that DAGMan retries the node.
use strict;
use warnings;

my $retry = $ARGV[0];
# Numeric comparison: the original used the string operator 'gt', which
# compares "$retry" lexically against "0" and misbehaves for
# non-numeric or oddly formatted values.
if (defined $retry && $retry > 0) {
	print "PRE script succeeds\n";
	my $file = "job_dagman_retry-B-nodeA.retry";
	# Three-arg open with a lexical handle; 2-arg bareword open is
	# fragile and unchecked close can hide write errors.
	open(my $out, '>', $file) or die "Can't open $file\n";
	print $out "$retry\n";
	close($out) or die "Can't close $file\n";
} else {
	print "PRE script fails\n";
	exit 1;
}
| djw8605/htcondor | src/condor_tests/job_dagman_retry-B-nodeA-pre.pl | Perl | apache-2.0 | 270 |
package File::Spec::Win32;
use strict;
use vars qw(@ISA $VERSION);
require File::Spec::Unix;
$VERSION = '3.39_02';
$VERSION =~ tr/_//;
@ISA = qw(File::Spec::Unix);
# Some regexes we use for path splitting
my $DRIVE_RX = '[a-zA-Z]:';
my $UNC_RX = '(?:\\\\\\\\|//)[^\\\\/]+[\\\\/][^\\\\/]+';
my $VOL_RX = "(?:$DRIVE_RX|$UNC_RX)";
=head1 NAME
File::Spec::Win32 - methods for Win32 file specs
=head1 SYNOPSIS
require File::Spec::Win32; # Done internally by File::Spec if needed
=head1 DESCRIPTION
See File::Spec::Unix for a documentation of the methods provided
there. This package overrides the implementation of these methods, not
the semantics.
=over 4
=item devnull
Returns a string representation of the null device.
=cut
# Name of the null device on Win32 systems.
sub devnull { 'nul' }
sub rootdir { '\\' }
=item tmpdir
Returns a string representation of the first existing directory
from the following list:
$ENV{TMPDIR}
$ENV{TEMP}
$ENV{TMP}
SYS:/temp
C:\system\temp
C:/temp
/tmp
/
The SYS:/temp is preferred in Novell NetWare and the C:\system\temp
for Symbian (the File::Spec::Win32 is used also for those platforms).
Since Perl 5.8.0, if running under taint mode, and if the environment
variables are tainted, they are not used.
=cut
# Cache: resolved once per process, reused on later calls.
my $tmpdir;
sub tmpdir {
    # Return the cached value if we already resolved one.
    return $tmpdir if defined $tmpdir;
    # _tmpdir() (inherited from File::Spec::Unix) returns the first
    # existing, usable directory from the candidate list below.
    $tmpdir = $_[0]->_tmpdir( map( $ENV{$_}, qw(TMPDIR TEMP TMP) ),
			      'SYS:/temp',
			      'C:\system\temp',
			      'C:/temp',
			      '/tmp',
			      '/' );
}
=item case_tolerant
MSWin32 case-tolerance depends on GetVolumeInformation() $ouFsFlags == FS_CASE_SENSITIVE,
indicating the case significance when comparing file specifications.
Since XP FS_CASE_SENSITIVE is effectively disabled for the NT subsubsystem.
See http://cygwin.com/ml/cygwin/2007-07/msg00891.html
Default: 1
=cut
# Query the filesystem of $drive for FS_CASE_SENSITIVE; returns 0 when
# the volume is case-sensitive, 1 otherwise (including when
# Win32API::File is unavailable -- Win32 defaults to case-tolerant).
# NOTE(review): when invoked as a method (File::Spec->case_tolerant),
# shift consumes the *invocant*, so $drive becomes the class name and
# GetVolumeInformation fails, yielding the default 1 -- confirm whether
# callers ever pass a real drive positionally.
sub case_tolerant {
  eval { require Win32API::File; } or return 1;
  my $drive = shift || "C:";
  my $osFsType = "\0"x256;
  my $osVolName = "\0"x256;
  my $ouFsFlags = 0;
  Win32API::File::GetVolumeInformation($drive, $osVolName, 256, [], [], $ouFsFlags, $osFsType, 256 );
  if ($ouFsFlags & Win32API::File::FS_CASE_SENSITIVE()) { return 0; }
  else { return 1; }
}
=item file_name_is_absolute
As of right now, this returns 2 if the path is absolute with a
volume, 1 if it's absolute with no volume, 0 otherwise.
=cut
# Returns 2 when the path is absolute with a volume (drive letter
# followed by a separator, or a UNC \\host\share path), 1 when it is
# absolute without a volume (leading slash/backslash), 0 otherwise.
# A drive letter with no separator ("C:foo") is drive-relative -> 0.
sub file_name_is_absolute {
    my ($self,$file) = @_;
    if ($file =~ m{^($VOL_RX)}o) {
      my $vol = $1;
      return ($vol =~ m{^$UNC_RX}o ? 2
	      : $file =~ m{^$DRIVE_RX[\\/]}o ? 2
	      : 0);
    }
    return $file =~ m{^[\\/]} ? 1 : 0;
}
=item catfile
Concatenate one or more directory names and a filename to form a
complete path ending with a filename
=cut
# Join directory components and a filename into a single path,
# canonicalizing via _canon_cat.  The leading shift drops the invocant.
sub catfile {
    shift;
    # Legacy / compatibility support
    #
    # An empty first component historically meant "rooted path".
    shift, return _canon_cat( "/", @_ )
	if $_[0] eq "";
    # Compatibility with File::Spec <= 3.26:
    #     catfile('A:', 'foo') should return 'A:\foo'.
    return _canon_cat( ($_[0].'\\'), @_[1..$#_] )
	if $_[0] =~ m{^$DRIVE_RX\z}o;
    return _canon_cat( @_ );
}
# Join directory components into a single path, canonicalizing via
# _canon_cat.  Mirrors catfile() but returns "" for an empty list.
sub catdir {
    shift;
    # Legacy / compatibility support
    #
    return ""
	unless @_;
    # An empty first component historically meant "rooted path".
    shift, return _canon_cat( "/", @_ )
	if $_[0] eq "";
    # Compatibility with File::Spec <= 3.26:
    #     catdir('A:', 'foo') should return 'A:\foo'.
    return _canon_cat( ($_[0].'\\'), @_[1..$#_] )
	if $_[0] =~ m{^$DRIVE_RX\z}o;
    return _canon_cat( @_ );
}
# Return the command search path as a list of directories: splits
# $ENV{PATH} on ';', strips double quotes, drops empty entries, and
# puts the current directory first (DOS-style shells search '.' first).
sub path {
    my @dirs;
    for my $entry (split /;/, $ENV{PATH}) {
        $entry =~ tr/"//d;              # remove embedded double quotes
        push @dirs, $entry if length $entry;
    }
    return ('.', @dirs);
}
=item canonpath
No physical check on the filesystem, but a logical cleanup of a
path. On UNIX eliminated successive slashes and successive "/.".
On Win32 makes
dir1\dir2\dir3\..\..\dir4 -> \dir\dir4 and even
dir1\dir2\dir3\...\dir4 -> \dir\dir4
=cut
# Logical cleanup of a path (no filesystem checks): collapses duplicate
# separators, "." components and "dir\.." pairs via _canon_cat.
sub canonpath {
    # Legacy / compatibility support
    #
    # undef and '' pass through unchanged; some callers rely on this.
    return $_[1] if !defined($_[1]) or $_[1] eq '';
    return _canon_cat( $_[1] );
}
=item splitpath
($volume,$directories,$file) = File::Spec->splitpath( $path );
($volume,$directories,$file) = File::Spec->splitpath( $path,
$no_file );
Splits a path into volume, directory, and filename portions. Assumes that
the last file is a path unless the path ends in '\\', '\\.', '\\..'
or $no_file is true. On Win32 this means that $no_file true makes this return
( $volume, $path, '' ).
Separators accepted are \ and /.
Volumes can be drive letters or UNC sharenames (\\server\share).
The results can be passed to L</catpath> to get back a path equivalent to
(usually identical to) the original path.
=cut
# Split $path into (volume, directory, file).  The volume is a drive
# letter or UNC share matched by $VOL_RX.  With $nofile true the whole
# remainder is treated as the directory part and file is ''; otherwise
# the directory part ends at the last separator (a trailing "." or ".."
# also counts as directory).
sub splitpath {
    my ($self,$path, $nofile) = @_;
    my ($volume,$directory,$file) = ('','','');
    if ( $nofile ) {
	$path =~
	    m{^ ( $VOL_RX ? ) (.*) }sox;
	$volume    = $1;
	$directory = $2;
    }
    else {
	$path =~
	    m{^ ( $VOL_RX ? )
		( (?:.*[\\/](?:\.\.?\Z(?!\n))?)? )
		(.*)
	     }sox;
	$volume    = $1;
	$directory = $2;
	$file      = $3;
    }
    return ($volume,$directory,$file);
}
=item splitdir
The opposite of L<catdir()|File::Spec/catdir>.
@dirs = File::Spec->splitdir( $directories );
$directories must be only the directory portion of the path on systems
that have the concept of a volume or that have path syntax that differentiates
files from directories.
Unlike just splitting the directories on the separator, leading empty and
trailing directory entries can be returned, because these are significant
on some OSs. So,
File::Spec->splitdir( "/a/b/c" );
Yields:
( '', 'a', 'b', '', 'c', '' )
=cut
# Split a directory string on \ or / into its components, preserving
# leading and trailing empty components (which are significant on
# some OSs).
sub splitdir {
    my ($self, $dirs) = @_;
    # split() silently drops trailing empty fields, so a string that
    # ends in a separator needs special handling to keep its final
    # (empty) component.
    if ( $dirs =~ m{[\\/]\Z(?!\n)} ) {
        # Append a placeholder so split() keeps the last field, then
        # turn that placeholder back into the empty component.
        my @parts = split m{[\\/]}, $dirs . 'x';
        $parts[-1] = '';
        return @parts;
    }
    return split m{[\\/]}, $dirs;
}
=item catpath
Takes volume, directory and file portions and returns an entire path. Under
Unix, $volume is ignored, and this is just like catfile(). On other OSs,
the $volume become significant.
=cut
# Reassemble (volume, directory, file) -- the inverse of splitpath().
# Inserts a glue separator between volume/directory for UNC volumes and
# between the combined volume+directory and the file when needed.
sub catpath {
    my ($self,$volume,$directory,$file) = @_;
    # If it's UNC, make sure the glue separator is there, reusing
    # whatever separator is first in the $volume
    my $v;
    $volume .= $v
	if ( (($v) = $volume =~ m@^([\\/])[\\/][^\\/]+[\\/][^\\/]+\Z(?!\n)@s) &&
	     $directory =~ m@^[^\\/]@s
	   ) ;
    $volume .= $directory ;
    # If the volume is not just A:, make sure the glue separator is
    # there, reusing whatever separator is first in the $volume if possible.
    if ( $volume !~ m@^[a-zA-Z]:\Z(?!\n)@s &&
	 $volume =~ m@[^\\/]\Z(?!\n)@      &&
	 $file   =~ m@[^\\/]@
       ) {
	# NOTE(review): if this match fails, $1 may still hold the
	# capture from the UNC match above -- the '\\' fallback is then
	# skipped.  Confirm this is the intended (historical) behavior.
	$volume =~ m@([\\/])@ ;
	my $sep = $1 ? $1 : '\\' ;
	$volume .= $sep ;
    }
    $volume .= $file ;
    return $volume ;
}
# Case-insensitive string equality (Win32 paths are case-tolerant).
# First argument is the (ignored) invocant.
sub _same {
    my (undef, $left, $right) = @_;
    return lc($left) eq lc($right);
}
# Convert $path to an absolute path.  Already-absolute-with-volume
# paths (2) are just canonicalized; absolute-without-volume paths get
# the current drive prepended; relative paths are resolved against
# $base (default: the drive-specific cwd via Cwd::getdcwd, else _cwd).
sub rel2abs {
    my ($self,$path,$base ) = @_;
    my $is_abs = $self->file_name_is_absolute($path);
    # Check for volume (should probably document the '2' thing...)
    return $self->canonpath( $path ) if $is_abs == 2;
    if ($is_abs) {
	# It's missing a volume, add one
	my $vol = ($self->splitpath( $self->_cwd() ))[0];
	return $self->canonpath( $vol . $path );
    }
    if ( !defined( $base ) || $base eq '' ) {
	require Cwd ;
	# Resolve against the cwd of the drive $path names, if any.
	$base = Cwd::getdcwd( ($self->splitpath( $path ))[0] ) if defined &Cwd::getdcwd ;
	$base = $self->_cwd() unless defined $base ;
    }
    elsif ( ! $self->file_name_is_absolute( $base ) ) {
	$base = $self->rel2abs( $base ) ;
    }
    else {
	$base = $self->canonpath( $base ) ;
    }
    my ( $path_directories, $path_file ) =
	($self->splitpath( $path, 1 ))[1,2] ;
    my ( $base_volume, $base_directories ) =
	$self->splitpath( $base, 1 ) ;
    $path = $self->catpath(
			   $base_volume,
			   $self->catdir( $base_directories, $path_directories ),
			   $path_file
			  ) ;
    return $self->canonpath( $path ) ;
}
=back
=head2 Note For File::Spec::Win32 Maintainers
Novell NetWare inherits its File::Spec behaviour from File::Spec::Win32.
=head1 COPYRIGHT
Copyright (c) 2004,2007 by the Perl 5 Porters. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=head1 SEE ALSO
See L<File::Spec> and L<File::Spec::Unix>. This package overrides the
implementation of these methods, not the semantics.
=cut
# _canon_cat(@fragments) -> canonical path string
# Splits the volume (drive letter, UNC \\host\share, or a leading
# slash) off the first fragment, joins everything with '\', then
# collapses duplicate separators, '.' components, '...' shorthand and
# 'dir\..' pairs, and trims stray leading/trailing separators.
sub _canon_cat				# @path -> path
{
    my ($first, @rest) = @_;
    my $volume = $first =~ s{ \A ([A-Za-z]:) ([\\/]?) }{}x	# drive letter
    	       ? ucfirst( $1 ).( $2 ? "\\" : "" )
	       : $first =~ s{ \A (?:\\\\|//) ([^\\/]+)
				 (?: [\\/] ([^\\/]+) )?
				 [\\/]? }{}xs			# UNC volume
    	       ? "\\\\$1".( defined $2 ? "\\$2" : "" )."\\"
	       : $first =~ s{ \A [\\/] }{}x		# root dir
    	       ? "\\"
    	       : "";
    my $path = join "\\", $first, @rest;
    $path =~ tr#\\/#\\\\#s;		# xx/yy --> xx\yy & xx\\yy --> xx\yy
					# xx/././yy --> xx/yy
    $path =~ s{(?:
		(?:\A|\\)		# at begin or after a slash
		\.
		(?:\\\.)*		# and more
		(?:\\|\z) 		# at end or followed by slash
	       )+			# performance boost -- I do not know why
	     }{\\}gx;
    # XXX I do not know whether more dots are supported by the OS supporting
    #     this ... annotation (NetWare or symbian but not MSWin32).
    #     Then .... could easily become ../../.. etc:
    # Replace \.\.\. by (\.\.\.+)  and substitute with
    # { $1 . ".." . "\\.." x (length($2)-2) }gex
	     				# ... --> ../..
    $path =~ s{ (\A|\\)			# at begin or after a slash
    		\.\.\.
		(?=\\|\z) 		# at end or followed by slash
	     }{$1..\\..}gx;
    					# xx\yy\..\zz --> xx\zz
    while ( $path =~ s{(?:
		(?:\A|\\)		# at begin or after a slash
		[^\\]+			# rip this 'yy' off
		\\\.\.
		(?<!\A\.\.\\\.\.)	# do *not* replace ^..\..
		(?<!\\\.\.\\\.\.)	# do *not* replace \..\..
		(?:\\|\z)		# at end or followed by slash
	       )+			# performance boost -- I do not know why
	     }{\\}sx ) {}
    $path =~ s#\A\\##;			# \xx --> xx  NOTE: this is *not* root
    $path =~ s#\\\z##;			# xx\ --> xx
    if ( $volume =~ m#\\\z# )
    {					# <vol>\.. --> <vol>\
	$path =~ s{ \A			# at begin
		    \.\.
		    (?:\\\.\.)*		# and more
		    (?:\\|\z) 		# at end or followed by slash
		 }{}x;
	return $1			# \\HOST\SHARE\ --> \\HOST\SHARE
	    if    $path eq ""
	      and $volume =~ m#\A(\\\\.*)\\\z#s;
    }
    return $path ne "" || $volume ? $volume.$path : ".";
}
1;
| leighpauls/k2cro4 | third_party/perl/perl/lib/File/Spec/Win32.pm | Perl | bsd-3-clause | 11,101 |
# Collation tailoring data for Konkani (kok) consumed by
# Unicode::Collate::Locale.  The ENTRY heredoc is raw DUCET-style
# tailoring data (code points ; collation elements) and must be kept
# byte-for-byte as generated.
+{
   locale_version => 0.88,
   entry => <<'ENTRY', # for DUCET v6.1.0
0902  ; [.1FC1.0020.0002.0902][.FFF1.0000.0000.0000] # DEVANAGARI SIGN ANUSVARA
0901  ; [.1FC1.0021.0002.0901][.FFF1.0000.0000.0901] # DEVANAGARI SIGN CANDRABINDU
0903  ; [.1FC1.0020.0002.0903][.FFF2.0000.0000.0000] # DEVANAGARI SIGN VISARGA
0933  ; [.2002.0020.0002.0933][.FFF1.0000.0000.0000] # DEVANAGARI LETTER LLA
0934  ; [.2002.0020.0002.0933][.FFF1.0000.0000.0000][.0000.00F1.0002.093C] # DEVANAGARI LETTER LLLA
0915 094D 0937 ; [.2002.0020.0002.0915][.FFF2.0000.0000.0000] # <DEVANAGARI LETTER KA, DEVANAGARI SIGN VIRAMA, DEVANAGARI LETTER SSA>
ENTRY
};
| leighpauls/k2cro4 | third_party/perl/perl/lib/Unicode/Collate/Locale/kok.pl | Perl | bsd-3-clause | 654 |
do 'firewall-lib.pl';
@acl_features = ("newchain", "delchain", "policy", "apply", "unapply", "bootup", "setup", "cluster");
# acl_security_form(&options)
# Output HTML for editing security options for the acl module
# $_[0] is a hash ref of the current ACL settings; %text (translations),
# @known_tables, @acl_features and &ui_opt_textbox come from the
# enclosing Webmin environment (firewall-lib.pl / web-lib).
sub acl_security_form
{
# Show editable tables
print "<tr> <td valign=top><b>$text{'acl_tables'}</b></td> <td colspan=3>\n";
local $t;
foreach $t (@known_tables) {
	printf "<input type=checkbox name=%s value=1 %s> %s<br>\n",
		$t, $_[0]->{$t} ? "checked" : "", $text{'index_table_'.$t};
	}
print "</td> </tr>\n";
# Show allowed target types
print "<tr> <td><b>$text{'acl_jumps'}</b></td>\n";
print "<td colspan=3>",&ui_opt_textbox("jumps", $_[0]->{'jumps'}, 40,
				       $text{'acl_jall'}),"</td> </tr>\n";
# Show bootup/apply options -- two yes/no radio pairs per table row,
# alternated via $i (undef on first use; numified to 0 by %).
local ($f, $i);
foreach $f (@acl_features) {
	print "<tr>\n" if ($i%2 == 0);
	print "<td><b>",$text{'acl_'.$f},"</b></td> <td>\n";
	printf "<input type=radio name=%s value=1 %s> %s\n",
		$f, $_[0]->{$f} ? "checked" : "", $text{'yes'};
	printf "<input type=radio name=%s value=0 %s> %s</td>\n",
		$f, $_[0]->{$f} ? "" : "checked", $text{'no'};
	print "</tr>\n" if ($i++%2 == 1);
	}
}
# acl_security_save(&options)
# Store the submitted form values (Webmin global %in) into the options
# hash ref passed as the first argument.
sub acl_security_save
{
my $opts = $_[0];
# Per-table and per-feature checkboxes map straight through.
foreach my $key (@known_tables, @acl_features) {
	$opts->{$key} = $in{$key};
	}
# 'jumps' is free text unless the "allow all" default was selected.
$opts->{'jumps'} = $in{'jumps_def'} ? undef : $in{'jumps'};
}
| rcuvgd/Webmin22.01.2016 | firewall/acl_security.pl | Perl | bsd-3-clause | 1,447 |
#!/usr/local/bin/perl -w
# Maintenance script: renames author 69 to "John Smith".  When an
# author with the new name already exists, author 69's records are
# re-pointed at the existing author; otherwise the name is edited in
# place.  Emits an HTML fragment describing what happened.  All DB
# helpers (getAuthorInfo, replaceAuthor, ...) come from MrsAbbott.
use strict;
use DBI;
use CGI qw(escape unescape);
use lib "/usr/local/www/server/library/cgi-bin";
use MrsAbbott;

my $authorID = '69';
my ($firstName, $lastName) = getAuthorInfo ($authorID);
my $authorName = formatAuthorName ($firstName, $lastName);

my $newFirstName = 'John';
my $newLastName  = 'Smith';
my $newAuthorName = formatAuthorName ($newFirstName, $newLastName);
# Non-zero ID when the new name already belongs to a known author.
my $isKnownID = findAuthorByNames ($newFirstName, $newLastName);
my $rowsAffected;
if ($isKnownID) {
    # Bug fix: the original terminated this print with a comma, which
    # made the following assignment part of print's argument list and
    # leaked the raw row count into the HTML output.
    print "<p>$newAuthorName is known (ID $isKnownID).\n";
    $rowsAffected = replaceAuthor ($authorID, $isKnownID);
    $authorID = $isKnownID;
} else {
    print "<p>$newAuthorName is a new author! ";
    $rowsAffected = editAuthorName ($authorID, $newFirstName, $newLastName);
}
print "Replaced ($rowsAffected rows affected).\n";
print "$authorName changed to <b>$newAuthorName</b>.</p>\n";
package BkxMojo::Account;
use Mojo::Base 'Mojolicious::Controller';
use BkxMojo::Crud;
use MongoDB;
use MongoDB::OID;
use DateTime;
use String::Truncate qw(elide);
use List::MoreUtils qw(uniq);
use Digest::MD5 qw(md5_hex);
use List::Util qw( max min );
# Standard account dashboard
# Loads the logged-in user's document from the 'users' collection and
# renders the dashboard with email, name, website, social handles and
# a gravatar URL (md5 of the lowercased email, 40px).
sub account {
    my $self = shift;
    my $db = $self->db;
    my $users = $db->get_collection( 'users' );
    my %user_details;
    # Session stores the OID as a string; rebuild the object for lookup.
    my $id = MongoDB::OID->new( value => $self->session('user_id') );
    my $user = $users->find({ _id => $id });
    if (my $doc = $user->next) {
        $user_details{'email'} = $doc->{'email'};
        $user_details{'name'} = $doc->{'name'};
        $user_details{'website'} = $doc->{'website'};
        $user_details{'twitter'} = $doc->{'social'}->{'twitter'};
        $user_details{'github'} = $doc->{'social'}->{'github'};
        $user_details{'gravatar'} = "https://secure.gravatar.com/avatar/" . md5_hex( lc( $doc->{'email'} ) ) . "?d=&s=40";
    } else {
        return $self->render_exception("user not found");
    }
    $self->render(user_details => \%user_details);
}
# import bkmrx
# Render the bookmark-import page.
# NOTE(review): an action named 'import' shadows the import() hook Perl
# calls on "use BkxMojo::Account" -- confirm this controller is only
# ever loaded/dispatched by Mojolicious (require), or rename the route.
sub import {
	my $self = shift;
	$self->render( );
}
# edit tags
# Build a per-user tag/count listing via a MongoDB map-reduce over the
# 'bookmarks' collection (output written to the 'temp_tags' collection),
# then render one page of tags sorted by descending count.
# NOTE(review): $tag and $type params are read but unused here; also
# $id_cursor->count is taken after limit/skip -- confirm whether the
# driver's count honors those modifiers as intended.
sub edit_tags {
	my $self = shift;
	my $offset = $self->param('offset') || 0;
	my $tag = $self->param('tag');
	my $type = $self->param('type');
	my $page_size = 10;
	my $db = $self->db;
	my $bkmrx = $db->get_collection( 'bookmarks' );
	my $user_id = $self->session('user_id');
	my $temp_collection = 'temp_tags';
	# Tie::IxHash keeps command keys ordered ('mapreduce' must be first).
	my $cmd = Tie::IxHash->new("mapreduce" => $bkmrx->{'name'},
				"map" => _map_tags(),
				"reduce" => _reduce_tags(),
				"query" => {user_id => $user_id},
				"out" => $temp_collection
				);
	my $result = $db->run_command($cmd);
	die ("Mongo error: $result") unless ref($result) eq 'HASH';
	my $temp_h = $db->get_collection( $temp_collection );
	my $id_cursor = $temp_h->find()->sort({'value.count' => -1})->limit($page_size)->skip($offset);
	my $total_results = $id_cursor->count;
	my %tags;
	# ensure tags come out in the right order
	my $t = tie(%tags, 'Tie::IxHash');
	while (my $doc = $id_cursor->next) {
		$tags{$doc->{'_id'}} = $doc->{'value'}->{'count'};
	}
	my $last_result = min( ( $offset + $page_size ), $total_results );
	my $first_result = min( ( $offset + 1 ), $last_result );
	my $req_path = $self->req->url->path;
	$self->render(
		tags => \%tags,
		first_result => $first_result,
		last_result => $last_result,
		total_results => $total_results,
		pages => $self->paginate($total_results, $offset, $page_size, $req_path) );
}
# main bkmrx page
# Render one page of the user's bookmarks, optionally filtered by ?tag=
# and/or ?source= (e.g. 'twitter', 'github'), newest first.  Builds
# display-friendly (elided) URL/title strings and a unique date list
# for date headers in the template.
sub my_bkmrx {
	my $self = shift;
	my $offset = $self->param('offset') || 0;
	my $query_tag = $self->param('tag') || '';
	my $query_source = $self->param('source') || '';
	my $user_id = $self->session('user_id');
	my $page_size = 10;
	my $db = $self->db;
	my $bkmrx = $db->get_collection( 'bookmarks' );
	my $res;
	# Pick the query shape matching the supplied filters.
	if ($query_tag && $query_source) {
		$res = $bkmrx->find({ user_id => $user_id,
							'meta.tags' => $query_tag,
							'meta.source' => $query_source })->sort({added => -1})->limit($page_size)->skip($offset);
	} elsif ($query_tag) {
		$res = $bkmrx->find({ user_id => $user_id,
							'meta.tags' => $query_tag })->sort({added => -1})->limit($page_size)->skip($offset);
	} elsif ($query_source) {
		$res = $bkmrx->find({ user_id => $user_id,
							'meta.source' => $query_source })->sort({added => -1})->limit($page_size)->skip($offset);
	} else {
		$res = $bkmrx->find({ user_id => $user_id })->sort({added => -1})->limit($page_size)->skip($offset);
	}
	my $total_results = $res->count;
	my (@bkx, @dates);
	while (my $doc = $res->next) {
		my $url = $doc->{'url'};
		# Strip scheme and leading www. for display, then elide.
		my ($disp_url) = $url =~ m{^[hf]tt?ps?://(?:www\.)?(.*)$}i;
		$disp_url = elide($disp_url, 90);
		my $title = $doc->{'meta'}->{'title'};
		my $disp_title = elide($title, 55);
		my $desc = $doc->{'meta'}->{'desc'} || '';
		my $dt = DateTime->from_epoch( epoch => $doc->{'added'} );
		my $added = $dt->day . " " . $dt->month_abbr . " " . $dt->year;
		push(@bkx, {
			b_id => $doc->{'_id'},
			added => $added,
			url => $url,
			disp_url => $disp_url,
			title => $title,
			disp_title => $disp_title,
			tags => $doc->{'meta'}->{'tags'},
			status => $doc->{'meta'}->{'status'},
			desc => $desc,
			source => $doc->{'meta'}->{'source'},
		});
		push(@dates, $added);
	}
	my @uniq_dates = uniq @dates;
	my $last_result = min( ( $offset + $page_size ), $total_results );
	my $first_result = min( ( $offset + 1 ), $last_result );
	my $req_path = $self->req->url->path;
	my $heading = 'my bkmrx';
	if ($query_source eq 'twitter') {
		$heading = '<i class="icon-twitter"></i> your tweets';
	} elsif ($query_source eq 'github') {
		$heading = '<i class="icon-github"></i> your repos';
	}
	my %params = ( tag => $query_tag, source => $query_source );
	$self->render(
		first_result => $first_result,
		last_result => $last_result,
		total_results => $total_results,
		pages => $self->paginate($total_results, $offset, $page_size, $req_path, \%params),
		bkmrx => \@bkx,
		dates => \@uniq_dates,
		heading => $heading,
		source => $query_source,
	);
}
# register user
# Create a new account from the signup form (username/email/pass/pass2),
# start the session, and redirect to the dashboard.  Redirects back to
# /login with a flash message on mismatched passwords or a taken name.
sub register {
	my $self = shift;
	my $username = $self->param('username');
	my $email = $self->param('email');
	my $pass = $self->param('pass');
	my $pass2 = $self->param('pass2');
	# Use string equality for the confirmation check.  The original
	# interpolated the password into a regex (m{^$pass2$}), so any
	# password containing metacharacters such as '(', '*' or '+'
	# could die with a regex compile error or match incorrectly.
	if ($pass ne $pass2) {
		$self->flash(error => "passwords don't match!");
		return $self->redirect_to('/login');
	}
	my $db = $self->db;
	my $users = $db->get_collection( 'users' );
	# search to see if username is unique before inserting
	# NOTE(review): check-then-insert is racy under concurrent
	# signups; a unique index on 'username' would make this safe.
	my $exists = $users->find({ username => $username })->count;
	if ($exists > 0) {
		$self->flash(error => "username already exists - please try again");
		return $self->redirect_to('/login');
	}
	my $user_id = $users->insert({
		username => $username,
		email => $email,
		pass => $self->bcrypt($pass),	# store only the bcrypt hash
		joined => time(),
	});
	# Mark the browser session as logged in.
	$self->session( user_id => $user_id->to_string );
	$self->session( username => $username );
	$self->redirect_to('/me/');
}
# addons page
# Render the browser-addons page; the template embeds a bookmarklet
# that needs this site's host and, for non-standard ports, ":port".
sub addons {
	my $self = shift;
	# alter bookmarklet based on host
	my $host = $self->req->url->base->host;
	my $port = $self->req->url->base->port;
	# Append ":port" only for non-standard ports.  The original test
	# was ($port != 80 || $port != 443), which is true for EVERY
	# port (no number equals both), so ":80"/":443" always leaked
	# into the generated URL.  Also guard against an undefined port.
	if (defined $port && $port != 80 && $port != 443) {
		$port = ":$port";
	} else {
		$port = "";
	}
	$self->render( host => $host, port => $port );
}
# Bookmarklet popup: pre-fills the add-bookmark form with the page the
# user was on.  Redirects to /login when there is no session; flashes a
# 'dupe' message when this user already bookmarked the URL.
# NOTE(review): $title is read from the params but never used here --
# confirm whether the template picks it up elsewhere.
sub bklet {
	my $self = shift;
	return $self->redirect_to('/login') unless $self->session('username');
	my $title = $self->param('title');
	my $url = $self->param('url');
	my $user_id = $self->session('user_id');
	my $db = $self->db;
	my $bkmrx = $db->get_collection('bookmarks');
	my $res = $bkmrx->find({ user_id => $user_id, url => $url });
	if ($res->count > 0) {
		$self->flash(dupe => 'URL already bookmarked!');
	}
	$self->render( display_url => elide($url, 60) );
}
# Render the backup/export page (template chosen from the action name).
sub backup {
	return shift->render();
}
# JavaScript map function (executed inside MongoDB by the map-reduce in
# edit_tags): emits each tag on a bookmark with a count of 1.  The
# string body is server-side JS and must not be altered.
sub _map_tags {
	return "function() {
		this.meta.tags.forEach(function(tag) {
			emit(tag, {count : 1});
		});
	};";
}
# JavaScript reduce function for the tag map-reduce: sums the per-tag
# counts emitted by _map_tags.  The string body is server-side JS and
# must not be altered.
sub _reduce_tags {
	return "function(prev, current) {
		result = {count : 0};
		current.forEach(function(item) {
			result.count += item.count;
		});
		return result;
	};";
}
1; | robhammond/bkmrx | lib/BkxMojo/Account.pm | Perl | mit | 7,271 |
package MEDIA;
use File::Path;
use File::Basename qw();
use Data::Dumper;
use JSON::XS qw(); ## used to parse filters
use lib "/backend/lib";
require ZOOVY;
require ZWEBSITE;
require ZTOOLKIT;
use strict;
use Encode qw();
$MEDIA::DEBUG = 0;	# 1 = emit warn() tracing throughout this module
$MEDIA::CACHE_DIR = "/local/media-cache";	# on-disk cache directory
$MEDIA::max_image_size = 2000;	## Maximum dimension of a scaled image in x or y
@MEDIA::ext = qw(jpg gif jpeg png);	# image extensions handled
@MEDIA::ext2 = qw(pdf swf);	# non-image media extensions handled
$MEDIA::max_name_length = 80;	# longest allowed media file name
# $MEDIA::max_age = 1167963563;
$MEDIA::max_age = 1168049100;	## 2007010518050000
## Jan 8 08:12
# NOTE(review): presumably per-request memo slots for file-id lookups
# -- confirm against the fid helpers further down this module.
$MEDIA::CACHE_FID = undef;
$MEDIA::CACHE_FIDSTR = undef;
## NOTE: webapi uses format dir1|dir2|dir3 -- these functions convert from and to that format!
## Convert a webapi-style path ("dir1|dir2|dir3") to a slash path ("dir1/dir2/dir3").
sub from_webapi {
	my $pwd = shift;
	$pwd =~ tr{|}{/};
	return $pwd;
}
## Convert a slash path ("dir1/dir2/dir3") to the webapi format ("dir1|dir2|dir3").
sub to_webapi {
	my $pwd = shift;
	$pwd =~ tr{/}{|};
	return $pwd;
}
###############################################################################
## load_buffer
##
## Purpose: Gets a file from disk and returns it
## Accepts: A filename and an optional username
## Returns: Undef on failure, the contents of the file on success and the last
## modified time on success
##
###############################################################################
sub load_buffer {
	## Slurp a file from disk.
	## Accepts: $orig_filename, and an optional $USERNAME.  When a username is
	##   given the filename is remapped into that user's IMAGES tree, bucketed
	##   by subdirectory (or by uppercased first letter when no dir is given).
	## Returns: (file contents, mtime) on success, undef on failure.
	my ($orig_filename, $USERNAME) = @_;	# USERNAME is optional
	my $filename = $orig_filename;
	if ((defined $USERNAME) && ($USERNAME ne '')) {
		$filename = '' if not defined $filename;
		my $subdir = undef;
		if (index($filename,'/') >= 0) {
			## split "dir/name" into subdir + bare filename
			$subdir = substr($filename, 0, rindex($filename,'/'));
			$filename = substr($filename, length($subdir)+1);
			if (substr($subdir,0,1) eq '/') { $subdir = substr($subdir,1); }	# remove leading /
		}
		elsif ($filename) {
			## no directory given: bucket by uppercased first letter
			$subdir = uc(substr($filename, 0, 1));
		}
		## NOTE: don't lowercase names here, since this function is also used
		## for parameterized variants (e.g. H120-Bffffff)
		$filename = &ZOOVY::resolve_userpath($USERNAME)."/IMAGES/$subdir/$filename";
		$MEDIA::DEBUG && warn("load_buffer($orig_filename, $USERNAME): Filename changed to $filename");
	}
	## three-arg open with a lexical handle (the old 2-arg "<$filename" form
	## allowed mode characters embedded in the name to change the open mode)
	if (open my $fh, '<', $filename) {
		local $/ = undef;	# slurp mode
		my $buffer = <$fh>;
		my @fileinfo = stat $fh;
		close $fh;
		$MEDIA::DEBUG && warn("load_buffer($orig_filename, $USERNAME): Succeeded loading $filename!");
		return $buffer, $fileinfo[9];	# contents + mtime
	}
	$MEDIA::DEBUG && warn("load_buffer($orig_filename, $USERNAME): Failed open $filename!");
	return undef;
}
##
## /remote/cache
##
sub hashcache {
	## Map (USERNAME, filename) to a path inside the two-level media cache,
	## creating the bucket directory on demand.
	## Returns the full cache-file path, or undef when the directory could
	## not be created.
	my ($USERNAME, $filename) = @_;
	## flatten any path separators so the cache entry is a single file name
	$filename =~ s/[\/\\]+/+/go;
	$USERNAME = uc($USERNAME);
	$filename = "$USERNAME:$filename";
	## two small rolling hashes ($i1,$i2) over the name select the
	## two-level subdirectory, spreading entries across up to 255x255 dirs
	my ($i, $i1, $i2) = (0, 0, 13);
	foreach my $ch (split(//, $filename)) {
		$i++;
		$i1 += (ord($ch)*$i) % 17;
		if ($i1 >= 0xFFF) { $i1 -= 0xFFF; }
		if (($i % 2) == 0) {
			$i2 += (ord($ch)*$i) % 0xFE;
			if ($i2 >= 0xFFF) { $i2 -= 0xFFF; }
		}
	}
	my $basedir = $MEDIA::CACHE_DIR;
	my $dir = sprintf("%s/%02X/%02X", $basedir, $i1 % 0xFF, $i2 % 0xFF);
	if (! -d $dir) {
		## create both levels.  BUGFIX: the original passed the wrong args to
		## the second-level sprintf (one value for two %02X slots) and then
		## *inverted* the success test, warning and returning undef exactly
		## when creation succeeded -- so the cache never worked.
		mkdir(sprintf("%s/%02X", $basedir, $i1 % 0xFF));
		mkdir($dir);
		chmod 0777, sprintf("%s/%02X", $basedir, $i1 % 0xFF);
		chmod 0777, $dir;
		if (! -d $dir) {
			warn "could not create/write to dir: $dir\n";
			$dir = undef;
		}
	}
	if (not defined $dir) {
		return(undef);
	}
	my $cachefile = sprintf("%s/%s", $dir, $filename);
	$MEDIA::DEBUG && print STDERR "CACHEFILE: $dir\n";
	return($cachefile);
}
##
## GetInfo Struct Errors
## err=>0 everything is kosher
## err=>1 serving a blank graphic
## err=>3 could not write original file
## err=>10 database lookup failure
## err=>11 file does not exist on disk
## err=>12 getinfo returned undef to serve image
## err=>50 invalid/unsupported image format.
## err=>98 file appears to be html
## err=>99 file corrupt, too small
## err=>100 image magick error (generic)
## err=>101 image magick could not determine image dimensions
## err=>996 filename must be .PNG .JPG or .GIF
## err=>997 filename must be provided
## err=>998 username not provided
## err=>999 unspecified application error (used by the application to handle unref iref result)
##
###############################################################################
## error_image
##
## Purpose: Returns a 1x1 image in GIF, JPG or PNG format in a certain color
## Accepts: A Color in hex RGB format with each byte being either FF or 00
## Returns: The corresponding image and the file format it was created in
##
###############################################################################
sub error_image {
	## Build a 1x1 "error beacon" image whose color encodes which stage of
	## image processing failed (see the color legend above build_image).
	## Accepts: $color -- hex RGB where each channel is 00 or FF
	##          (anything else falls back to FF0000 red),
	##          $format -- gif/jpg/png (defaults to gif).
	## Returns: (raw image bytes, the format actually used).
	my ($color, $format) = @_;
	$color = uc(&ZTOOLKIT::def($color));
	if ($color !~ m/^(00|FF)(00|FF)(00|FF)$/i) {
		$color = 'FF0000';	## Red by default
	}
	$format = lc(&ZTOOLKIT::def($format, 'gif'));
	my $img = undef;
	if (($format eq 'jpg') or ($format eq 'jpeg')) {
		## pre-baked 1x1 JPEG files (hex-encoded), one per supported color
		if ($color eq 'FFFFFF') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC0000B080001000101011100FFC40014000100000000000000000000000000000003FFC40014100100000000000000000000000000000000FFDA0008010100003F0047FFD9'; }
		elsif ($color eq '000000') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC0000B080001000101011100FFC40014000100000000000000000000000000000003FFC40014100100000000000000000000000000000000FFDA0008010100003F0037FFD9'; }
		elsif ($color eq 'FF0000') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDB004301FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00011080001000103012200021101031101FFC4001500010100000000000000000000000000000002FFC40014100100000000000000000000000000000000FFC4001501010100000000000000000000000000000103FFC40014110100000000000000000000000000000000FFDA000C03010002110311003F0090028FFFD9'; }
		elsif ($color eq 'FFFF00') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDB004301FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00011080001000103012200021101031101FFC4001500010100000000000000000000000000000002FFC40014100100000000000000000000000000000000FFC4001501010100000000000000000000000000000103FFC40014110100000000000000000000000000000000FFDA000C03010002110311003F00B0132FFFD9'; }
		elsif ($color eq '00FF00') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDB004301FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00011080001000103012200021101031101FFC4001500010100000000000000000000000000000001FFC40014100100000000000000000000000000000000FFC40014010100000000000000000000000000000002FFC40014110100000000000000000000000000000000FFDA000C03010002110311003F00A0003FFFD9'; }
		elsif ($color eq '00FFFF') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDB004301FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00011080001000103012200021101031101FFC4001500010100000000000000000000000000000002FFC40014100100000000000000000000000000000000FFC4001501010100000000000000000000000000000103FFC40014110100000000000000000000000000000000FFDA000C03010002110311003F00A00A6FFFD9'; }
		elsif ($color eq '0000FF') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDB004301FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00011080001000103012200021101031101FFC4001500010100000000000000000000000000000002FFC40014100100000000000000000000000000000000FFC4001501010100000000000000000000000000000103FFC40014110100000000000000000000000000000000FFDA000C03010002110311003F0080140FFFD9'; }
		elsif ($color eq 'FF00FF') { $img = 'FFD8FFE000104A46494600010101004800480000FFDB004300FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFDB004301FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFC00011080001000103012200021101031101FFC4001500010100000000000000000000000000000001FFC40014100100000000000000000000000000000000FFC40014010100000000000000000000000000000002FFC40014110100000000000000000000000000000000FFDA000C03010002110311003F008019BFFFD9'; }
		$format = 'jpg';
	}
	elsif ($format eq 'png') {
		## pre-baked 1x1 PNG files (hex-encoded), one per supported color
		if ($color eq 'FFFFFF') { $img = '89504E470D0A1A0A0000000D4948445200000001000000010100000000376EF92400000002624B47440001DD8A13A4000000097048597300000048000000480046C96B3E0000000A49444154789C636C0000008400821E067BAD0000000049454E44AE426082'; }
		elsif ($color eq '000000') { $img = '89504E470D0A1A0A0000000D4948445200000001000000010100000000376EF92400000002624B47440000AA8D2332000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
		elsif ($color eq 'FF0000') { $img = '89504E470D0A1A0A0000000D494844520000000100000001010300000025DB56CA00000003504C5445FF000019E20937000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
		elsif ($color eq 'FFFF00') { $img = '89504E470D0A1A0A0000000D494844520000000100000001010300000025DB56CA00000003504C5445FFFF008AC6F445000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
		elsif ($color eq '00FF00') { $img = '89504E470D0A1A0A0000000D494844520000000100000001010300000025DB56CA00000003504C544500FF00345EC0A8000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
		elsif ($color eq '00FFFF') { $img = '89504E470D0A1A0A0000000D494844520000000100000001010300000025DB56CA00000003504C544500FFFF195C2F25000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
		elsif ($color eq '0000FF') { $img = '89504E470D0A1A0A0000000D494844520000000100000001010300000025DB56CA00000003504C54450000FF8A78D257000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
		elsif ($color eq 'FF00FF') { $img = '89504E470D0A1A0A0000000D494844520000000100000001010300000025DB56CA00000003504C5445FF00FF34E0E6BA000000097048597300000048000000480046C96B3E0000000A49444154789C63640000000400022164AD6A0000000049454E44AE426082'; }
	}
	else {
		## GIF format by default
		## (a 1x1 GIF is simple enough that the color bytes can be spliced
		## directly into the palette section of the template below)
		$format = 'gif';
		$img = '47494638376101000100800000'.$color.'0000002C00000000010001000002024401003B';
	}
	## 'H'.length() packs the full hex string into raw image bytes
	return pack('H'.length($img), $img), $format;
}
###############################################################################
## build_image
##
## Purpose: Calls ImageMagick and creates a new version of an exising image
## Accepts: A filename, the arguments used to create a modified version, and
## an optional file format (if absent, will choose smallest from jpg
## or gif)
## Returns: Contents of the actual image and its file format OR
## Contents of a 1x1 image representing the error by its color, and a
## file format.
##
## Error Image Colors:
## FF0000 - Red - Image read / imagemagick object creation problem
## FFFF00 - Yellow - Imagemagick object corruption problem
## 00FF00 - Green - Scaling / Sampling Problem
## 00FFFF - Aqua - Drawing / Background Color Problem
## FF00FF - Purple - Compositing problem
## 0000FF - Blue - Imagemagick output corruption problem
## White and black are supported but we haven't assigned them to any
## significance yet.
## See error_image()
##
###############################################################################
sub build_image {
	## Render a scaled/recomposed variant of $source_filename according to the
	## parsed argument hash $argsref (keys seen here: W/H target size,
	## B background color, C center-without-scaling, M minimal-scale,
	## P pixel-sample, T transparent PNG, F named filter script) in $format.
	## Returns ($blob, $format, $result); $result->{err}==0 on success, and on
	## failure $blob holds a 1x1 color-coded error image (legend in the header
	## comment above).
	my ($source_filename, $argsref, $format) = @_;
	if ($MEDIA::DEBUG) { print STDERR "build_image ".Dumper($source_filename,$argsref,$format)."\n"; }
	## Make sure format is set
	my $blob = undef;
	my $result = undef;
	$format = lc(&ZTOOLKIT::def($format));
	if ($MEDIA::DEBUG) { print STDERR "source: $source_filename [format:$format]\n"; }
	if (($format ne 'png') && ($format ne "gif") && ($format ne "jpg")) { $format = 'jpg'; }
	if (not -f $source_filename) {
		## crap, okay so basically we are asking for a file we don't got.
		## serve a transparent 1x1 GIF instead of failing outright.
		($blob) = &MEDIA::blankout();
		$format = 'gif';
		$result = { err=>1, errmsg=>"Could not load $source_filename" };
	}
	require Image::Magick;
	my $source_image = Image::Magick->new();
	if (not defined $blob) {
		## Read in the source file or return a red 1x1 image
		$result = &MEDIA::magick_result($source_image->Read($source_filename),"reading $source_filename in build_image()");
		if (defined $result) {
			$blob = &MEDIA::error_image('FF0000', $format);
		}
	}
	my $source_width = -1;
	my $source_height = -1;
	if (not defined $blob) {
		$source_width = $source_image->Get('width');
		$source_height = $source_image->Get('height');
		## Get the height and width or return a yellow 1x1 image
		unless ($source_width && $source_height) {
			$result = { err=>101, errmsg=>"Error getting image dimensions" };
			($blob) = &MEDIA::error_image('FFFF00', $format);
		}
	}
	##
	## SANITY:
	## 	$source_width, $source_height contain the actual image size.
	##		$source_image contains a reference to the actual image
	##		OR $result is defined with an error.
	my ($output_image, $output_image_trans) = (undef,undef);
	if (not defined $result) {
		## resolve the requested output geometry (0 means "not specified")
		my $width = &ZTOOLKIT::def($argsref->{'W'}, 0);
		my $height = &ZTOOLKIT::def($argsref->{'H'}, 0);
		if (($width == 0) && ($height == 0)) {
			## if we have a 0 in height and width then use the actual image size.
			$width = $source_width;
			$height = $source_height;
		}
		elsif (defined($argsref->{'M'}) || ($width==0) || ($height==0)) {
			## If we're in minimal mode, just use the directly scaled size
			($width,$height) = &MEDIA::minsize($source_width,$source_height,$width,$height);
		}
		## If the actual and the desired sizes are the same and there's no bg color,
		## then skip doing all the scaling BS
		if (($source_width == $width) && ($source_height == $height) && (not defined $argsref->{'B'})) {
			## Its OK to base it off the original image, cause we're the same size
			## and we're not forcing a background color
			$output_image = $source_image;
		}
		else {
			## Scale the image...
			my ($x_offset,$y_offset,$scale_width,$scale_height);
			if ($MEDIA::DEBUG) { print STDERR "scaling: ($source_width == $width) && ($source_height == $height)\n"; }
			if (($source_width == $width) && ($source_height == $height)) {
				$x_offset = 0;
				$y_offset = 0;
			}
			elsif (defined $argsref->{'C'}) {
				## 'C' = center the unscaled source on the output canvas
				$x_offset = int(($width - $source_width) / 2);
				$y_offset = int(($height - $source_height) / 2);
			}
			elsif (($scale_width>0) && ($scale_height>0)) {
				## if we already know the size then don't do math again, because we could have rounding erro
				## NOTE(review): $scale_width/$scale_height are still undef here
				## (declared just above) so this branch can never be taken.
				$x_offset = 0;
				$y_offset = 0;
			}
			else {
				## See how much each axis needs to be scaled by
				my $width_ratio = ($width / $source_width);
				my $height_ratio = ($height / $source_height);
				## fudgefactor is the percentage we can be off, this comes into play since
				## there can be rounding issues.
				my ($fudgefactor) = ($source_width>$source_height)?$source_width:$source_height;
				if ( int($width_ratio*$fudgefactor) == int($height_ratio*$fudgefactor) ) {
					## Scale the same on both axes (e.g. a fudge factor of 1000 means both aspect ratios differ by <0.1%)
					$scale_width = $width;	# int($width_ratio * $source_width);
					$scale_height = $height;	# int($height_ratio * $source_height);
					$x_offset = 0;
					$y_offset = 0;
				}
				elsif ($height_ratio >= $width_ratio) {
					## we have to scale more on the width (i.e., it has a smaller
					## value), then use it to scale the image
					$scale_width = int($width_ratio * $source_width);
					$scale_height = int($width_ratio * $source_height);
					$x_offset = 0;
					$y_offset = int(($height - $scale_height) / 2);
				}
				elsif ($height_ratio < $width_ratio) {
					## we have to scale more on the height (i.e., it has a smaller
					## value), then use it to scale the image
					$scale_width = int($height_ratio * $source_width);
					$scale_height = int($height_ratio * $source_height);
					$x_offset = int(($width - $scale_width) / 2);
					$y_offset = 0;
				}
				else {
					## never reached
				}
				if ((abs($scale_width-$width)<=1) && (abs($scale_height-$height)<=1)) {
					## okay, so we got some sort of rounding issue that fudgefactor didn't catch
					$scale_width = $width;
					$scale_height = $height;
				}
			}
			## Okay, we're going to need to do *SOME* scaling.
			if ((defined $scale_width) && (defined $scale_height)) {
				if (defined $argsref->{'P'}) {
					## Pixel-sample scale the image if we have the P flag
					## (looks better for some transparent GIFs)
					## Sample the image or return a green 1x1 image
					$result = &MEDIA::magick_result(
						$source_image->Sample('width' => $scale_width,'height' => $scale_height),
						"sampling $source_filename to $scale_width x $scale_height in build_image()"
						);
				}
				else {
					## Regular scaling
					## Scale the image or return a green 1x1 image
					$result = &MEDIA::magick_result(
						$source_image->Scale('width' => $scale_width,'height' => $scale_height),
						"scaling $source_filename to $scale_width x $scale_height in build_image()"
						);
				}
			}
			#print STDERR Dumper($argsref);
			#print STDERR "FORMAT: $format ARGSREF: $argsref->{'T'}\n";
			# Create the output image (canvas at the final requested geometry)
			$output_image = Image::Magick->new('size' => $width.'x'.$height);
			if (defined $result) {
				## we've already encountered an error.
			}
			elsif (($format eq 'png') && (defined $argsref->{'T'})) {
				## no background stuff will be done if we're asking for a transparency.
				$result = &magick_result(
					$output_image->Read("xc:transparent"),
					"\$output_image->Read() from $source_filename in build_image()");
			}
			elsif (($format eq 'gif') && (not defined $argsref->{'B'})) {
				# We're transparent!
				## Read the source file or return a red 1x1 image
				## NOTE(review): this Read() is called with no argument -- confirm intent.
				if (not defined $result) {
					$result = &magick_result(
						$output_image->Read(),
						"\$output_image->Read() from $source_filename in build_image()");
				}
				if (not defined $result) {
					$result = &magick_result(
						$output_image->Draw('primitive' => 'Matte','method' => 'Replace','points' => '0,0'),
						"\$output_image->Draw(...) from $source_filename in build_image()");
				}
			}
			else {
				# We have a background color, or are outputting to
				# a format that needs a background color
				## Change the background color or return an aqua 1x1 image
				if (not defined $argsref->{'B'}) { $argsref->{'B'} = 'FFFFFF'; }
				$result = &magick_result(
					$output_image->Read('xc:#'.$argsref->{'B'}),
					"\$output_image->Read('xc:#$argsref->{'B'}') from $source_filename in build_image()");
			}
			## Paste the input image over the output image, offset so it is
			## centered on the image
			if (not defined $result) {
				$result = &magick_result(
					$output_image->Composite('compose' => 'over','image' => $source_image,'x' => $x_offset,'y' => $y_offset),
					"compositing $source_image onto \$output_image in build_image()"
					);
			}
		}
		if (defined $argsref->{'F'}) {
			## yippe..we gots an output filter.
			# $output_image = $source_image;
			# $result = &magick_result(
			#	$output_image->Read("xc:transparent"),
			#	"\$output_image->Read() from $source_filename in build_image()");
			#	$result = &magick_result(
			#		$output_image->Flip()
			#		);
			#	$result = &magick_result(
			#		$output_image->Blur('factor'=>50)
			#		);
			#	$result = &magick_result(
			#		$output_image->Mogrify("Blur",'factor'=>50)
			#		);
			#	$result = &magick_result(
			#		$output_image->Mogrify("Emboss")
			#		);
			my ($script) = $argsref->{'F'};
			# print STDERR "SCRIPT: $script\n";
			## strip everything except lowercase letters -- this also blocks
			## path traversal before $script is interpolated into the path below
			$script =~ s/[^a-z]+//g;
			my $jscoder = JSON::XS->new();
			print STDERR "RUNNING: /httpd/static/graphics/imgfilters/$script.txt\n";
			open F, "</httpd/static/graphics/imgfilters/$script.txt";
			## filter scripts operate on a stack of image "layers";
			## layer 0 starts out as the current output image
			my @LAYERS = ($output_image);
			while (<F>) {
				chomp();
				next if ($_ eq '');
				## each line is: <layer-index> TAB <command> TAB <JSON params>
				my ($layer, $cmd,$jsontxt) = split(/[\t]+/, $_, 3);
				next if (substr($cmd,0,1) eq '#');
				print STDERR "RUNNING LINE: $cmd ($jsontxt)\n";
				my %params = ();
				if ($jsontxt ne '') {
					my $paramsref = $jscoder->decode($jsontxt);
					# my $paramsref = JSON::XS::decode_json($jsontxt);
					if (ref($paramsref) eq 'HASH') {
						%params = %{$paramsref};
					}
				}
				print STDERR "RUNNING[$cmd] params...".&ZTOOLKIT::buildparams(\%params)."\n";
				# $params{'factor'} = 100;
				if ($cmd eq 'Set') {
					$LAYERS[$layer]->Set(%params);
				}
				elsif ($cmd eq 'New') {
					$LAYERS[$layer] = Image::Magick->new(%params);
					$LAYERS[$layer]->Read("xc:transparent"); ## always read in a transparent background.
				}
				elsif ($cmd eq 'Composite') {
					## the 'image' param is a layer index; swap in the layer object
					$params{'image'} = $LAYERS[$params{'image'}];
					$LAYERS[$layer]->Composite(%params);
				}
				elsif ($cmd eq 'Montage') {
					## NOTE(review): 'Montage' currently performs a Composite,
					## not a Montage -- confirm whether this is intentional.
					$params{'image'} = $LAYERS[$params{'image'}];
					$LAYERS[$layer]->Composite(%params);
				}
				else {
					## any other command is passed straight through to Mogrify
					$LAYERS[$layer]->Mogrify("$cmd",%params);
				}
			}
			close F;
			## layer 0 is the final composited result
			$output_image = $LAYERS[0];
		}
		#	$output_image->Set(colorspace=>'gray');
		#	$output_image->Quantize();
		#	$output_image->Montage(geometry=>'160x160',gravity=>"North",borderwidth=>10,compose=>"Over",filename=>"/httpd/htdocs/images/zoovysmall.gif");
		# $output_image->Montage(geometry=>'160x160', tile=>'2x2', texture=>'granite:');
		#	$output_image->Draw(pen=>'black', primitive=>'rectangle', points=>'20,20 100,100');
		# Composite compose=>{Over, In, Out, Atop, Xor, Plus, Minus, Add, Subtract, Difference, Bumpmap, Replace, ReplaceRed, ReplaceGreen, ReplaceBlue, ReplaceMatte, Blend, Displace}, image=>image-handle, geometry=>geometry, x=>integer, y=>integer, gravity=>{NorthWest, North, NorthEast, West, Center, East, SouthWest, South, SouthEast}
		#	my $xi = Image::Magick->new();
		#	$xi->Read("/httpd/htdocs/images/zoovysmall.gif");
		#	$output_image->Composite( compose=>'Atop', image=>$xi, x=>0, y=>0, gravity=>'South');
		if (defined $result) { ($blob) = &error_image('FF00FF', $format); }
	}
	if (not defined $blob) {
		## there is a conversion error, going from large JPG to large PNG
		## calling a TRIM puts the JPG into imagemagick format so it doesn't
		## barf
		# if ($format eq 'png') { $output_image->Trim(); }
		if ($MEDIA::DEBUG) { print STDERR "outputting format[$format]\n"; }
		$output_image->Set('magick' => $format);
		$blob = $output_image->ImageToBlob();
		$result = { err=>0 };
	}
	if ((not defined $blob) || (length($blob) == 0)) {
		$result = { err=>100, errmsg=>"build_image($source_filename, $argsref, $format) Zero length or undefined output from ImageToBlob for $format format from $source_filename.\n" };
		($blob) = &error_image('0000FF', $format);
	}
	return ($blob, $format, $result);
}
###############################################################################
## blankout
##
## Purpose: Returns a blank (transparent) 1x1 GIF image as raw bytes.
sub blankout {
	my $hex = "4749463839610100010080FF00C0C0C000000021F90401000000002C000000000100010000010132003B";
	return pack("H" . length($hex), $hex);
}
###############################################################################
## MEDIA::serve_image
##
## Purpose: Looks up an image in the user's image library, and returns a
## version of that image based on the arguments string passed
## Accepts: A username, an image name (with optional .ext) and an argument
## string (see decode_args for more information on the arg str)
## Returns: Undef on failure. On success it returns the location of the file
## for the image just created, a buffer of the image file just
## created, the format of the image, the last modified time. and
## 1 for actual buffer, 2 for link contents buffer, and 0 for error
## processing image but we're returning a colored image buffer
##
###############################################################################
sub serve_image {
	## Resolve a request for "$FQIMG" (image name, optional subdir + extension)
	## plus a sizing/format argument string into raw image bytes, building and
	## caching the variant on first request.  See the header comment above for
	## the full return contract.
	my ($USERNAME, $FQIMG, $req_argstr, %options) = @_;
	$FQIMG =~ s/[^\w\.\/]//gis; # strip undesirable characters
	$FQIMG =~ s/[\/]+/\//g; # convert // to / to avoid attacks
	my $format = '';
	my $collection = $FQIMG;
	## split off a recognized image extension (jpeg is normalized to jpg)
	my $extensions = join('|', @MEDIA::ext, 'jpeg');
	if ($FQIMG =~ m/^(.*?)\.($extensions)/) {
		$collection = $1;
		$format = $2;
		if ($format eq 'jpeg') { $format = 'jpg'; }
	}
	my $result = undef;
	my $userdir = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
	my ($subdir,$image,$ext) = &MEDIA::parse_filename($FQIMG);
	# print STDERR "SERVE_IMAGE: $subdir,$image,$ext\n";
	# load the info about the collection and return a pointer to the structure.
	my ($iref) = &MEDIA::getinfo($USERNAME, $FQIMG,DB=>2);
	if (not defined $iref) { $result = { err=>12, errmsg=>"Internal error [12]" }; }
	#$VAR1 = {
	#	'orig_timestamp' => '1093917322',
	#	'original' => 'bonsai_shipped.jpg',
	#	'ver' => '1.3',
	#	'orig_height' => 480,
	#	'orig_width' => 640,
	#	'created' => '1093918192',
	#	'subs' => {},
	#	'orig_filesize' => 194549
	#	};
	if ($iref->{'err'}>0) {
		## image lookup FAILED (does not exist)
		return(undef);
	}
	my ($argsref, $argstr) = &MEDIA::parse_args($USERNAME, "$subdir/$image", $req_argstr);
	my $orig_format = $iref->{'Format'};
	## argstr '-' is shorthand for "the original, unmodified image"
	if (($argsref->{'H'} == $iref->{'H'}) && ($argsref->{'W'} == $iref->{'W'}) && (not defined $argsref->{'P'})) {
		## okay so we're asking for the same height and width
		$argstr = '-';
		if ($format eq $orig_format) {
			## hmm.. we might want to create some sort of symlink here.
			# symlink ($orig_file, $file)
		}
	}
	##
	## if we were queried for the original then just return it. right away (saves some time)
	##
	my ($filename,$buf,$lastmod) = (undef,undef,undef);
	## CREATE THE IMAGE IF IT DOESN'T EXIST
	if ($orig_format eq '') {
		## GUESS: make sure we've got a valid orig_format (extension)
		if (-f "$userdir/$subdir/$image.jpg") { $orig_format = 'jpg'; warn "guessed orig_format is jpg [requested: $format]"; }
		elsif (-f "$userdir/$subdir/$image.gif") { $orig_format = 'gif'; warn "guessed orig_format is gif [requested: $format]"; }
		elsif (-f "$userdir/$subdir/$image.png") { $orig_format = 'png'; warn "guessed orig_format is png [requested: $format]"; }
	}
	if (($argstr eq '-') && (($format eq '') || ($format eq $orig_format) )) {
		$iref->{'Format'} = $orig_format;
		$filename = "$subdir/$iref->{'ImgName'}.$iref->{'Format'}";
		($buf, $lastmod) = &MEDIA::load_buffer($filename, $USERNAME);
		if ($lastmod>0) {
			$result = $iref; $result->{'err'} = 0;
			return($filename,$buf,$iref->{'Format'},$lastmod,$result);
		}
		else {
			## failed on load original.. hmm.. crap.
			warn("missed on load original $USERNAME:[$filename]\n");
			return($filename,undef,undef,0,undef);
		}
	}
	## if we're asking for a transparency, it should always be a png.
	if (defined $argsref->{'T'}) { $format = 'png'; }
	#if (defined $argsref->{'Z'}) {
	#	$filename = "/httpd/htdocs/images/zoovy_main.gif";
	#	($buf, $lastmod) = &MEDIA::load_buffer($filename);
	#	$result = { err=>0 };
	#	return ($filename,$buf,'gif',time()-3600,$result);
	#	}
	## SEARCH FOR THE INSTANCE OF THE IMAGE (this return an undef buf if the file doesn't exist)
	$filename = "$subdir/$image-$argstr.$format";
	my $cachefile = &MEDIA::hashcache($USERNAME,$filename);
	if ((defined $cachefile) && (-f $cachefile)) {
		($buf, $lastmod) = &MEDIA::load_buffer($cachefile);
		## entries written before $MEDIA::max_age are treated as a cache miss
		if ($lastmod < $MEDIA::max_age) { $lastmod = 0; }
		if (($lastmod > 0)) {
			if ($MEDIA::DEBUG) { print STDERR "RETURNED CACHEFILE $cachefile [$argstr]\n"; }
			$result = $iref; $result->{'err'} = 0;
			return($filename,$buf,$format,$lastmod,$result);
		}
	}
	## SANITY: if we made it here, then we could not find the image and we should try and create
	## it from the original
	## $MEDIA::DEBUG++;
	$filename = "$subdir/$image.$orig_format";
	($buf, $format, $result) = &MEDIA::build_image("$userdir/$filename", $argsref, $format);
	## WRITE OUT THE NEW IMAGE INSTANCE
	$lastmod = $^T-3600;
	if ((defined $result) && ($result->{'err'}==0)) {
		if ($argstr eq '-') {
			$filename = "$subdir/$iref->{'ImgName'}-.$ext";
		}
		else {
			$filename = "$subdir/$iref->{'ImgName'}-$argstr.$ext";
		}
		## best-effort cache write; NOTE(review): $cachefile can be undef
		## when hashcache failed (open then simply fails and we skip caching),
		## and this is a 2-arg bareword open -- candidate for 3-arg cleanup.
		if (open FILE, ">$cachefile") {
			print FILE $buf;
			close FILE;
			chmod(0666, $cachefile);
			chown($ZOOVY::EUID,$ZOOVY::EGID, $cachefile);
		}
		$result = $iref;
		$result->{'err'} = 0;
	}
	else {
		## hmm.. some sort of result occurred.. the file wasn't written, hope we have $buf set
	}
	## NOTE(review): $filename already begins with "$subdir/", so prefixing
	## "$subdir/" again here doubles the subdirectory in the returned path;
	## also $ext is the extension parsed from the request, not the $format
	## actually produced by build_image above -- confirm before relying on
	## these two fields.
	return (
		"$subdir/$filename",
		$buf,
		$ext,
		$lastmod,
		$result
	);
}
###############################################################################
## magick_result
##
## Purpose: Imagemagick can output warnings for just about every operation it
## can do. This is a shortcut function for processing the output and
## putting some useful information to the logs.
## Accepts: An image magick warning and a piece of text describing what we
## tried to do (in case it was a failure and we want to scream)
## Returns: 0 on failure, 1 on success
##
###############################################################################
sub magick_result {
	my ($warning, $operation) = @_;
	## no warning at all (undef or empty string) means the operation succeeded
	## BUGFIX: previously the regex and numeric compare ran on $warning/$errnum
	## before checking defined-ness, spewing "uninitialized value" warnings.
	if ((not defined $warning) || (not $warning)) {
		return(undef);
	}
	## ImageMagick prefixes its messages with a numeric code; >=400 is fatal
	my ($errnum) = ($warning =~ m/(\d+)/);
	if (not defined $errnum) { $errnum = 0; }
	if ($errnum >= 400) {
		# print STDERR "IMGLIB: Failure $operation - ImageMagick warning '$warning'\n";
		return( {err=>100, errmsg=>"ImageMagick err[$errnum]: $operation" });
	}
	elsif (($errnum == 325) && ($warning =~ m/extraneous bytes before marker/)) {
		## Happens for a lot of images and appears to be completely non-critical
		return(undef);
	}
	## any other sub-400 warning is treated as non-fatal
	return undef;
}
###############################################################################
## minsize
##
## Purpose: Does all the algebra for resizing an image
## Accepts: An original width and height, plus a requested width and a
##          requested height
## Returns: A new width and a new height
##
###############################################################################
sub minsize {
	## Compute the largest (width,height) that fits the requested box while
	## preserving the original image's aspect ratio.  A requested dimension
	## of 0 (or anything above $MEDIA::max_image_size) means "as big as allowed".
	my ($orig_width, $orig_height, $req_w, $req_h) = @_;
	if (($req_w == 0) || ($req_w > $MEDIA::max_image_size)) {
		$req_w = $MEDIA::max_image_size;
	}
	if (($req_h == 0) || ($req_h > $MEDIA::max_image_size)) {
		$req_h = $MEDIA::max_image_size;
	}
	## exact match: hand the request straight back (no math, no rounding)
	if (($req_w == $orig_width) && ($req_h == $orig_height)) {
		return ($req_w, $req_h);
	}
	## how much each axis would need to be scaled by
	my $ratio_w = ($req_w / $orig_width);
	my $ratio_h = ($req_h / $orig_height);
	## already proportional: return verbatim to avoid rounding artifacts
	if ($ratio_w == $ratio_h) {
		return ($req_w, $req_h);
	}
	## otherwise scale both axes by the smaller ratio so the result fits
	## inside the requested box
	my $scale = ($ratio_w < $ratio_h) ? $ratio_w : $ratio_h;
	return (int($scale * $orig_width), int($scale * $orig_height));
}
###############################################################################
## parse_args
##
## Purpose: Takes an argument string for images and returns a normalized hash
##          and string version of the args
## Accepts: The argument string, a username, and an image name
## Returns: A string of properly formatted arguments and a hashref version of the arguments
##
##	W	Width		- numeric (0 for orig width)
##	H	Height		- numeric (0 for orig height)
##	B	Background	- Bg color in RRGGBB format, lack of it means make it
##				  transparent or black if no transparency is available
##	M	Minimal		- will not buffer out to the total size of the image
##	C	Crop		- Disables scaling... made for logos, it will clip an
##				  image vice scaling it
##	P	Pixel Sampled	- Scaling mode... makes some scaled transparent GIFs look
##				  better
##	F	Filter		- Not saved as an actual flag, the file is reconstructed
##				  and not loaded from cache
##	Z	Zoovy		- Defaulted on fail for URL get for image, not saved
##
##	T	Transparency	- this will only work with PNG.
##
###############################################################################
sub parse_args {
	my ($USERNAME, $FILENAME, $args_in) = @_;
	if (not defined $args_in) { $args_in = ''; }
	## canonical output order for the compiled argument string
	my @arglist = qw(W H B M P Z T F V);
	# Remove any non-word or dash characters.
	$args_in =~ s/[^A-Za-z0-9_-]//gis;
	my $argsref = {};
	# Compile a hash of the arguments, regardless of order
	foreach my $arg (split /\-/, $args_in) {
		next unless defined($arg);
		## BUGFIX: test the match itself.  The old code matched, then did
		## "next unless defined $1" -- but a FAILED match leaves $1/$2 holding
		## the captures from the previous successful match, so junk args could
		## silently re-apply the prior letter/value instead of being skipped.
		next unless ($arg =~ m/^([WwHhBbMmPpFfZzTtVv])(\w*)$/);
		my $letter = uc($1);
		my $value = defined($2) ? lc($2) : '';
		$argsref->{$letter} = $value;
	}
	my $args_out = '';
	if (scalar keys %{$argsref}) {
		# Handle the background color specially
		if (not defined $argsref->{'B'}) {
			## no background image
		}
		elsif ($argsref->{'B'} eq 'tttttt') {
			## magic color 'tttttt' means "transparent background"
			$argsref->{'T'}++;
			delete $argsref->{'B'};
		}
		elsif (defined $argsref->{'B'}) {
			## normalize to 6 lowercase hex digits (RRGGBB) or drop it
			$argsref->{'B'} =~ s/[^a-f0-9]//g;
			if (length($argsref->{'B'}) != 6) {
				delete $argsref->{'B'};
			}
		}
		else {
			## never reached!
		}
		# We always have a width and height
		## NOTE: sometimes a height/width value of "X" means that the minimal size routine failed
		## So we couldn't figure out the correct height and width (probably a database failure) and we've
		## decided the correct behavior is to treat the image as (max size)
		if ((not defined $argsref->{'W'}) || ($argsref->{'W'} !~ m/^\d+$/)) {
			$argsref->{'W'} = 0;
		}
		elsif ($argsref->{'W'} > $MEDIA::max_image_size) {
			$argsref->{'W'} = $MEDIA::max_image_size;
		}
		if ((not defined $argsref->{'H'}) || ($argsref->{'H'} !~ m/^\d+$/)) {
			$argsref->{'H'} = 0;
		}
		elsif ($argsref->{'H'} > $MEDIA::max_image_size) {
			$argsref->{'H'} = $MEDIA::max_image_size;
		}
		## Missing or zero W or H forces the image into minimal mode.
		if (
			(($argsref->{'W'} == 0) && ($argsref->{'H'} != 0)) ||
			(($argsref->{'H'} == 0) && ($argsref->{'W'} != 0))) {
			## NOTE: we leave the 0's in there, until we actually request the correct size.
			$argsref->{'M'} = '';	## Force minimal mode
		}
		## Special case of no width, height or other attribs. Serve up the original
		if (($argsref->{'W'} == 0) && ($argsref->{'H'} == 0) && ((scalar keys %{$argsref}) == 2)) {
			## This is tied to the following if statement in such a way that either you're returning
			## a - only for args, or the normalized compiled version of the args.
			$argsref = {};
			$args_out = '-';
		}
		else {
			# Re-output the args, in canonical order
			foreach my $letter (@arglist) {
				next unless defined($argsref->{$letter});
				$args_out .= "$letter$argsref->{$letter}-";
			}
			# Remove a trailing dash if present
			$args_out =~ s/\-$//;
		}
	}
	else {
		$args_out = '-';
	}
	return($argsref, $args_out);
}
##
## takes an iref, and returns the filepath (on disk) to a given image for stat'ing
##
sub iref_to_filepath {
	## STUB: on-disk path resolution for an iref was never written;
	## always dies so callers fail loudly rather than get a bogus path.
	my ($USERNAME, $iref) = @_;
	die('not actually implemented');
}
##
## takes an iref, returns the proper image filename (ex: pwd/image.ext) for an image
##
sub iref_to_imgname {
	## Build "folder/image.ext" for an image record by resolving the iref's
	## FID to its folder name via the IFOLDERS table.
	my ($USERNAME, $iref) = @_;
	## without an iref we can only hand back a placeholder path
	if (not defined $iref) {
		return("**ERR[iref_not_set]**/notfound.gif");
	}
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my $sql = "select FNAME from IFOLDERS where MID=$MID /* $USERNAME */ and FID=".int($iref->{'FID'});
	if ($MEDIA::DEBUG) { print STDERR $sql."\n"; }
	my $query = $dbh->prepare($sql);
	$query->execute();
	## fallback folder name makes a bad/unknown FID obvious in the output
	my $folder = '**ERR[FID:'.int($iref->{'FID'}).']**';
	if ($query->rows()) { ($folder) = $query->fetchrow(); }
	$query->finish();
	&DBINFO::db_user_close();
	my $fqname = sprintf("%s/%s.%s", $folder, $iref->{'ImgName'}, $iref->{'Format'});
	if ($MEDIA::DEBUG) { print STDERR "USERNAME:$USERNAME FQNAME[$fqname]\n"; }
	return($fqname);
}
##
## GetInfo
## parameters:
## FILENAME - this is a fully qualified path (e.g. subdir/image.gif)
## IMGBUF - a buffer, assumed to be an image, that will be read in
## DB - 1 tells the system to check the database, 2=check database but fail to actual file (DB=0)
## DETAIL - 0 = no instance, 1 = instance info
## SKIP_DISK => 0|1 (0 is default) means never go to disk even if an image exists.
##
##
## returns: (info hashref)
## err=>0
## FILENAME
## EXT, H, W, SIZE, TS
## FID
##
##
# $iref = { err=>0, ImgName=>$image, Format=>$ext, FID=>$FID,
# TS=>$fileinfo[10], MERCHANT=>$USERNAME, MID=>$MID, ItExists=>0,
# MasterSize=>length($options{'IMGBUF'}), H=>$height, W=>$width };
#
sub getinfo {
	## Return the metadata record (hashref) for an image, trying sources in
	## order: (disabled) publisher CDB cache, the IMAGES table, the file on
	## disk, and finally an Image::Magick probe of the raw bytes (which also
	## repairs/creates the IMAGES row as a side effect).
	## On success: { err=>0, ImgName, Format, FID, TS, MERCHANT, MID,
	##              ItExists, MasterSize, H, W }
	## On failure: { err=>10|11|12, errmsg=>... } or undef-ish partial data.
	## %options: DB, DETAIL, IMGBUF, SKIP_DISK -- see header comment above.
	my ($USERNAME,$FILENAME,%options) = @_;
	my ($subdir,$image,$ext) = &MEDIA::parse_filename($FILENAME);
	if ($MEDIA::DEBUG) { print STDERR "SUBDIR: $subdir [$FILENAME]\n"; }
	my $result = undef;
	## disabled: publisher-file (images.cdb) fast path
#	if (not defined $options{'CACHE'}) {}
#	elsif ($options{'CACHE'}<0) {
#		my $FID = undef;
#
#		my $data = undef;
#		my $PRT = abs($options{'CACHE'})-1;
#		my $pubfile = &ZOOVY::pubfile($USERNAME, $PRT, 'images.cdb');
#		if ($pubfile ne '') {
#			my $cdb = CDB_File->TIEHASH($pubfile);
#			if ($cdb->EXISTS("*$subdir")) { $FID = $cdb->FETCH("*$subdir"); } else { $FID = 0; }
#			if ($cdb->EXISTS("$FID:$image")) { $data = $cdb->FETCH("$FID:$image"); }
#			$cdb = undef;
#			}
#		if (defined $data) { $result = YAML::Syck::Load($data); }
#		}
#	print STDERR "MEDIA::getinfo did not use publisher file!\n";
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	## DB flag: 1 = database lookup (default), 2 = database with fallback to
	## disk on miss/corruption, 0 = go straight to disk, -1 = already answered
	my $DB = (defined $options{'DB'})?int($options{'DB'}):1;	## assume we can do a database lookup
	my $FID = undef;
	if (not defined $result) {
		$FID = &MEDIA::resolve_fid($USERNAME,$subdir);
	}
	if (defined $options{'IMGBUF'}) { $DB = -1; } # found it (we'll technically we were passed it!)
	## added FID>0
	if (defined $result) {
	}
	elsif (($DB>0) && ($FID>0)) {
		## database lookup -- IMAGES table schema for reference:
		#+------------+-------------------------+------+-----+---------+----------------+
		#| Field      | Type                    | Null | Key | Default | Extra          |
		#+------------+-------------------------+------+-----+---------+----------------+
		#| Id         | int(11)                 | NO   | PRI | NULL    | auto_increment |
		#| ImgName    | varchar(45)             | NO   |     | NULL    |                |
		#| Format     | enum('gif','jpg','png') | YES  |     | NULL    |                |
		#| TS         | int(10) unsigned        | NO   |     | 0       |                |
		#| MERCHANT   | varchar(20)             | NO   |     | NULL    |                |
		#| MID        | int(11)                 | NO   | MUL | 0       |                |
		#| FID        | int(11)                 | NO   |     | 0       |                |
		#| ItExists   | tinyint(4)              | NO   |     | 0       |                |
		#| ThumbSize  | int(10) unsigned        | NO   |     | 0       |                |
		#| MasterSize | int(10) unsigned        | NO   |     | 0       |                |
		#| H          | smallint(6)             | NO   |     | -1      |                |
		#| W          | smallint(6)             | NO   |     | -1      |                |
		#+------------+-------------------------+------+-----+---------+----------------+
		my $dbh = &DBINFO::db_user_connect($USERNAME);
		my $pstmt = "select * from IMAGES where FID=$FID and MID=$MID /* $USERNAME */ and ImgName=".$dbh->quote($image);
		if ($MEDIA::DEBUG) { print STDERR $pstmt."\n"; }
		my $sth = $dbh->prepare($pstmt);
		$sth->execute();
		if ($sth->rows()) {
			$result = $sth->fetchrow_hashref();
			## a row with no dimensions or format is considered corrupt
			if (($result->{'W'}>0) && ($result->{'H'}>0) && ($result->{'Format'} ne '')) {
				$DB = -1;	## found it!
			}
			else {
				$DB = 0; $result = undef;	## corrupt db record ( go to disk )
			}
		}
		else {
			## DB=2 callers fall through to the disk path instead of erroring
			if ($DB==1) { $result = { err=>10, errmsg=>"could not find file in database" }; }
		}
		$sth->finish();
		&DBINFO::db_user_close();
#		use Data::Dumper;
#		print STDERR 'getinfo: '.Dumper($result);
	}
	my @fileinfo = ();
	## DB=0 go directly to disk
	## DB=2 if we get here, iz corrupt! (rebuild db record)
	if ((($DB==0) || ($DB==2)) && (not defined $result)) {
		## don't use the database (we'll just open the file and load it into IMGBUF then fall through)
		my $userdir = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
		foreach my $ext ('jpg','gif','png') {
			next if (defined $options{'IMGBUF'}); # found it already!
			print STDERR "getinfo reading from $userdir/$subdir/$image.$ext\n";
			## NOTE(review): this `my` shadows the outer @fileinfo declared
			## above, so the outer array is never populated here and the TS
			## fallback further down always uses time() -- confirm intent
			## before changing.
			my @fileinfo = stat("$userdir/$subdir/$image.$ext");
			next unless ($fileinfo[7]>0); # check the size
			## the file exists, lets read it in (slurp mode)
			$/ = undef;
			open F, "<$userdir/$subdir/$image.$ext";
			$options{'IMGBUF'} = <F>;
			close F;
			$/ = "\n";
		}
		if (not defined $options{'IMGBUF'}) {
			$result = { err=>11, errmsg=>"Could not find original file on disk" }
		}
	}
	if (defined $result) {
		## already got a result
	}
	elsif ((defined $options{'IMGBUF'}) && (defined $options{'SKIP_DISK'}) && ($options{'SKIP_DISK'}>0)) {
		## return an error, rather than try to load actual image
		$result = { err=>12, errmsg=>"Prohibited from attempting to load original" }
	}
	elsif (defined $options{'IMGBUF'}) {
		## last resort: probe the raw bytes with Image::Magick
		require Image::Magick;
		my $imgblob = Image::Magick->new();
		# perl -e 'use Image::Magick; $image=Image::Magick->new; print $image->get("Version");'
		#if ($imgblob->get("Version") ne 'ImageMagick 6.5.3-3 2009-07-03 Q16') {
		#	warn "I am running a different version of ImageMagick than I should be.";
		#	}
		$result = &MEDIA::magick_result(
			$imgblob->BlobToImage($options{'IMGBUF'}),
			"reading $subdir/$image.$ext"
			);
		if (not defined $result) {
			## if result is still undef - then image was read in successfully
			my $width = $imgblob->Get('width');
			my $height = $imgblob->Get('height');
			if ($imgblob->VERSION() eq '5.56') {
				## old image magick on app3 (we'll just trust the file extension)
			}
			else {
				## trust the decoder's reported mime type over the extension
				my $mime = $imgblob->Get('mime');
				if ($mime eq 'image/jpeg') { $ext = 'jpg'; }
				elsif ($mime eq 'image/gif') { $ext = 'gif'; }
				elsif ($mime eq 'image/png') { $ext = 'png'; }
				elsif (not defined $mime) {}
				else { warn "found unknown mime [$mime]"; }
			}
			## see NOTE(review) above: @fileinfo is effectively always empty
			## here, so TS defaults to "now"
			if (not defined $fileinfo[10]) { $fileinfo[10] = time(); }
			if (&ZTOOLKIT::def($width) && &ZTOOLKIT::def($height)) {
				$result = { err=>0, ImgName=>$image, Format=>$ext, FID=>$FID,
					TS=>$fileinfo[10], MERCHANT=>$USERNAME, MID=>$MID, ItExists=>0,
					MasterSize=>length($options{'IMGBUF'}), H=>$height, W=>$width };
			}
			## okay since it wasn't in the database, we should update the database
			my $dbh = &DBINFO::db_user_connect($USERNAME);
			my $pstmt = sprintf(
				"update IMAGES set Format=%s,H=%d,W=%d,MasterSize=%d where MID=%d /* %s */ and FID=%d and ImgName=%s",
				$dbh->quote($ext),$height,$width,length($options{'IMGBUF'}),$MID,$USERNAME,$FID,$dbh->quote($image));
			my $rows_affected = $dbh->do($pstmt);
			## added 2007-05-23 - patti, do's return # of rows affected or zero (0E0)
			if (int($rows_affected) == 0) {
				## no existing row to update: insert a fresh one
				my $pstmt = sprintf(
					"insert into IMAGES (TS,MERCHANT,Format,H,W,MasterSize,MID,FID,ImgName) values ".
					"(%s,%s,%s,%d,%d,%d,%d,%d,%s)",
					$^T,$dbh->quote($USERNAME),$dbh->quote($ext),$height,$width,length($options{'IMGBUF'}),$MID,$FID,$dbh->quote($image));
				$dbh->do($pstmt);
			}
			&DBINFO::db_user_close();
		}
	}
	## disabled: DETAIL=1 instance enumeration and legacy binfile code
#	if (($options{'DETAIL'}&1)==1) {
#		## adds a *INSTANCES to the result which is an array of images
#		my @files = &MEDIA::related_files($USERNAME,$FILENAME);
#		$result->{'*INSTANCES'} = [];
#		foreach my $file (@files) {
#			if ($file =~ /^$image\-(.*?)\./) {
#				my $args = $1;
#				@fileinfo = stat "$userdir/$subdir/$file";
#				my ($argsref,$argstr) = MEDIA::parse_args($USERNAME,$image,$args);
#
#				push @{$result->{'*INSTANCES'}}, { Img=>$file, Size=>$fileinfo[7], TS=>$fileinfo[10], H=>$argsref->{'H'}, W=>$argsref->{'W'} };
#			}
#		}
#	}
#	## Does the image look good to imagemagick?
#	)
#	{
#	$MEDIA::DEBUG && &msg("new_collection_binfile_hash($USERNAME, $imagename, $nuke): Image looks good to imagemagick");
#	{
#	$collection_hash = {
#		'original' => $original,
#		'created' => time(),
#		'orig_filesize' => $fileinfo[7],
#		'orig_timestamp' => $fileinfo[10],
#		'orig_width' => $width,
#		'orig_height' => $height,
#		'ver' => $IMGLIB::version,
#		'subs' => {},
#		};
#	}
#	else
#	{
#	&msg("new_collection_binfile_hash($USERNAME, $imagename, $nuke): Unable to get width or height from ImageMagick on $userdir/$subdir/$original for $imagename!");
#	}
	return($result);
}
###############################################################################
## find_instances / related_files  (CURRENTLY DISABLED -- see commented-out
## sub related_files below)
##
## Purpose: Locates all of the modified copies of an original image
## Accepts: A user name, an original image name, whether to sort the output,
##          and whether to ignore the .bin file and read the directory the
##          image is in directly.
## Returns: An array of filenames of the instances of the original image
## NOTE: this is a *VERY* expensive call, and should be used with care.
##
###############################################################################
#sub related_files {
# my ($USERNAME, $FILENAME) = @_;
#
# my $userdir = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
# my ($subdir,$image,$ext) = &MEDIA::parse_filename($FILENAME);
#
# my @files = ();
# if (opendir IMAGES, "$userdir/$subdir") {
# ## Looks for all instance and original image names
# while (my $ifile = readdir IMAGES) {
# next unless ($ifile =~ m/^$image[\.\-]/);
# push @files, $ifile;
# }
# closedir IMAGES;
# }
# return @files;
# }
##
## nukes an image and all instances
## %opts
## original=>0 means save the original (nuke_collection) [defaults to 1]
## instances=>0 means save the instances [defaults to 1]
##
sub nuke {
	## Remove an image (and, historically, its cached instances).
	## %opts: original=>0 keeps the original (defaults to 1).
	## NOTE: the instance-file unlink loop was disabled long ago; today this
	## only removes the database record (delimage also unlinks the file).
	my ($USERNAME, $FILENAME, %opts) = @_;
	## honor the merchant-level safety switch before touching anything
	my ($gref) = &ZWEBSITE::fetch_globalref($USERNAME);
	if ((defined $gref->{'%tuning'}) && ($gref->{'%tuning'}->{'inhibit_image_nukes'})) {
		warn "Sorry, can't remove images due to %tuning->inhibit_image_nukes";
		return(undef);
	}
	my $nuke_orig = (defined $opts{'original'}) ? int($opts{'original'}) : 1;
	my $userdir = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
	my ($subdir, $image, $ext) = &MEDIA::parse_filename($FILENAME);
	## removing the original also removes it from the database
	if ($nuke_orig) {
		&MEDIA::delimage($USERNAME, $subdir, $image);
	}
	return(undef);
}
############################################################
##
## parameters:
##
## returns:
## a GetInfo struct
sub store {
	## Validate and write an uploaded image ($IMGBUF) to the merchant's
	## IMAGES tree under $FILENAME, registering it in the database.
	## %params: allow_extension=>1 bypasses the extension whitelist.
	## Returns an iref hashref: err=>0-style info on success, or one of the
	## err=>998/997/99/98/50/3 validation failures below.
	my ($USERNAME,$FILENAME,$IMGBUF,%params) = @_;
	my $iref = undef;
	## validation gauntlet: first failure wins, later checks are skipped
	if ((not defined $iref) && ((not defined $USERNAME) || ($USERNAME eq ''))) {
		$iref = { err=>998, errmsg=>"Username not provided" };
	}
	if ((not defined $iref) && ((not defined $FILENAME) || ($FILENAME eq ''))) {
		$iref = { err=>997, errmsg=>"Filename must be provided." };
	}
#	if ($FILENAME !~ /\.(jpg|gif|png)$/i) {
#		$iref = { err=>996, errmsg=>"sorry only .JPG, .PNG, .GIF file formats are supported." };
#		}
	if ((not defined $iref) && (length($IMGBUF) <= 20)) {
		$iref = { err=>99, errmsg=>"File too short" };
	}
	## a buffer that starts with markup is probably an error page, not an image
	if ((not defined $iref) && ($IMGBUF =~ /^\s*<.*>\s*/)) {
		$iref = { err=>98, errmsg=>"File appears to be html" };
	}
	my $userdir = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
	my ($subdir,$image,$ext) = &MEDIA::parse_filename($FILENAME);
	$image =~ s/[_\s]+$//g;	# strip any underscores at the end of the filename (e.g. image___)
	#print STDERR "DEBUG: [$FILENAME]=[$subdir][$image][$ext]\n";
	#print STDERR "DEBUG: [FILEPATH]=[$userdir/$subdir/$image.$ext]\n";
	if (! -d "$userdir/$subdir") {
		## make sure the path actually exists (and leave an audit trail of
		## auto-created folders in /tmp/folder)
		open F, ">>/tmp/folder";
		print F "$USERNAME,$subdir\n";
		close F;
		&MEDIA::mkfolder($USERNAME,"$subdir");
	}
	## build the allowed-extension alternation lists from package config
	my $extensions = join('|', @MEDIA::ext, 'jpeg');
	my $extensions2 = join('|', @MEDIA::ext2);
	if (defined $iref) {
	}
	elsif ($ext =~ m/^($extensions2)$/i) {
	}
	elsif ($ext =~ m/^($extensions)$/i) {
		## this is good!
	}
	elsif ((defined $params{'allow_extension'}) && ($params{'allow_extension'})) {
		## if this is true, the extension is allowed.
	}
	else {
		$iref = { err=>50, errmsg=>"Invalid image format [$ext]!" };
	}
	if ($ext ne '') {
		if (-f "$userdir/$subdir/$image.$ext") {
			## TODO: file already exists, we should nuke it and all instances
			&MEDIA::nuke($USERNAME,"$FILENAME");
		}
	}
	if (defined $iref) {}
	else {
		if ($ext eq 'jpg') {
			## convert CMYK jpegs (typically from print workflows) to RGB
			## NOTE: eventually we might want to handle TIFF and BMP here!
			require Image::Magick;
			my $p = Image::Magick->new(magick=>'jpg');
			$p->BlobToImage($IMGBUF);
			my ($format) = $p->get("format");
			my ($cs) = $p->get("colorspace");
			if ($cs eq 'CMYK') {
				## warning: customer uploaded a CMYK jpg file!
				$p->set("colorspace"=>"RGB");
				($IMGBUF) = $p->ImageToBlob();
			}
		}
		## format the filename
		$image =~ s/^[\s]+//g;	# strip leading space
		$image =~ s/[\s]+$//g;	# strip trailing space
		my $filename = "$userdir/$subdir/$image.$ext";
		my $got_fh = 0;
		unless (open FILE, ">$filename") {
			$iref = { err=>3, errmsg=>"Could not open file [$filename] for write access" };
		}
		unless (defined $iref) {
			print FILE $IMGBUF;
			close FILE;
			chmod(0666, $filename);
			chown($ZOOVY::EUID,$ZOOVY::EGID,$filename);
		}
		## NOTE(review): on the success path $iref is still undef here, so
		## this condition is true; the assignments below then autovivify
		## $iref as {folder,image} (no err key), which makes the getinfo()
		## fallback at the bottom unreachable after a successful write --
		## confirm that is the intended return shape.
		if (not $iref->{'err'} ) {
			&MEDIA::addimage($USERNAME,$subdir,$image,$ext,time(),length($IMGBUF));
			$iref->{'folder'} = $subdir;
			$iref->{'image'} = "$image.$ext";
		}
	}
	if (not defined $iref) {
		## fall back to probing the buffer for full metadata
		$iref = &MEDIA::getinfo($USERNAME,sprintf("%s/%s.$ext",$subdir,$image,$ext),IMGBUF=>$IMGBUF);
	}
	if ($MEDIA::DEBUG) { use Data::Dumper; print STDERR "RESULT: ".Dumper($iref)."\n"; }
	return($iref);
}
##
## assumes that asdf.gif should actually be A/asdf.gif
##
sub parse_filename {
	## Normalize a raw image path into (subdir, imgname, ext).
	## Legacy flat names are filed under their uppercased first letter,
	## e.g. "asdf.gif" => ("A", "asdf", "gif").  Returns undef when the
	## name reduces to nothing.
	my ($fname) = @_;
#	print STDERR "1FILENAME:[$fname]\n";
	## scrub the raw path: odd characters become underscores, repeated
	## dots/slashes are collapsed, leading/trailing junk is trimmed
	$fname =~ s/[^\w\-\.\/]+/_/g;	# strip everything but dashes, alphanum and periods
	$fname =~ s/[\.]+/\./g;	# collapse runs of periods into a single period
	$fname =~ s/^\.//g;	# remove leading periods
	$fname =~ s/[\s_]+$//;	# strip trailing spaces and underscores
	$fname =~ s/[\/]+/\//g;	# remove duplicate slashes
	$fname = substr($fname,1) if (substr($fname,0,1) eq '/');	# drop a leading /
	return(undef) if ($fname eq '');
	## split off a known image suffix; $suffix keeps its leading period
	my ($name,$path,$suffix) = File::Basename::fileparse($fname,qr{\.[Jj][Pp][Ee][Gg]|\.[Jj][Pp][Gg]|\.[Pp][Nn][Gg]|\.[Gg][Ii][Ff]});
	$name = lc($name);
	$name =~ s/[^a-z0-9\_]+/_/gs;
	## Commented out due to conversation with Brian
#	$name =~ s/[\s_]+$//g;	## NO TRAILING SPACES!
	$path = substr($path,2) if (substr($path,0,2) eq './');
	$fname = "$path$name$suffix";
	## translate legacy flat filenames e.g. asdf.gif => A/asdf.gif
	if (index($fname,'/') == -1) {
		$fname = uc(substr($fname,0,1)).'/'.$fname;
	}
	## separate the (lowercased) extension from the image path, if any
	my $dot = rindex($fname,'.');
	my $ext = ($dot > 0) ? lc(substr($fname,$dot+1)) : undef;
	my $imgname = ($dot > 0) ? substr($fname,0,$dot) : $fname;
	## SANITY: at this point $ext is either set, or it won't be.
	## $imgname has something like A/asdf (so we'll need to split out subdir)
	my $slash = rindex($imgname,'/')+1;
	my $subdir = substr($imgname,0,$slash-1);
	$imgname = lc(substr($imgname,$slash));
	$imgname = substr($imgname,0,$MEDIA::max_name_length);	# enforce the max image name length
	if (length($subdir)==1) { $subdir = uc($subdir); }	# single char dirs e.g. P/palm_m500 are always uppercase
	## a single-letter FIRST path component stays uppercase; every other
	## component is lowercased
	my @parts = ();
	foreach my $piece (split(/\//,$subdir)) {
		push @parts, (((length($piece)==1) && (scalar(@parts)==0)) ? uc($piece) : lc($piece));
	}
	$subdir = join('/',@parts);
	return($subdir,$imgname,$ext);
}
##
## returns a list of images for a given folder .. (used by webapi.pm)
## key: imagename.ext val: timestamp
##
sub folderdetail {
	## Return a hashref of the images in one folder (used by webapi.pm):
	## key "imagename.ext" (bare name when no format is recorded), value TS.
	my ($USERNAME, $PWD) = @_;
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my $FID = &MEDIA::resolve_fid($USERNAME,$PWD);
	my %detail = ();
	my $sql = "select ImgName,Format,TS from IMAGES where MID=$MID and FID=".$FID;
	my $query = $dbh->prepare($sql);
	$query->execute();
	while ( my ($img, $fmt, $ts) = $query->fetchrow() ) {
		my $key = $img;
		$key .= '.'.$fmt if ($fmt ne '');
		$detail{$key} = $ts;
	}
	$query->finish();
	&DBINFO::db_user_close();
	return(\%detail);
}
##
## returns an array of hashrefs
## keys in hashref: ImageCount,TS,ImgName,FID,ParentFID,ParentName
##
sub folderlist {
	## Return an arrayref of folder hashrefs, sorted by FName.
	## Keys per row: ImageCount, TS, FName, FID, ParentFID, ParentName.
	my ($USERNAME) = @_;
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my @folders = ();
	my $sql = "select ImageCount,TS,FName,FID,ParentFID,ParentName from IFOLDERS where MID=$MID order by FName";
	my $query = $dbh->prepare($sql);
	$query->execute();
	while ( my $row = $query->fetchrow_hashref() ) {
		push @folders, $row;
	}
	$query->finish();
	&DBINFO::db_user_close();
	return(\@folders);
}
sub foldertree {
	## STUB: intended to build a nested folder hierarchy from folderlist();
	## the sketch below was never finished, so this currently returns an
	## empty list.
	my ($USERNAME) = @_;
	my %FOLDERS = ();
	#foreach my $r (@{MEDIA::folderlist($USERNAME)}) {
	#	$r->{'FName'}
	#	}
}
sub r_foldertree {
	## STUB: recursive helper for foldertree(); never implemented.
}
##
## pass FID==0 to get all images
##
sub imglist {
	## Return an arrayref of image row hashrefs (ImgName, Format, FID) for
	## one folder; pass FID==0 to get every image for the merchant.
	my ($USERNAME, $FID) = @_;
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my $sql = "select ImgName,Format,FID from IMAGES where MID=$MID /* $USERNAME */";
	$sql .= " and FID=".int($FID) if ($FID > 0);
	my @images = ();
	my $query = $dbh->prepare($sql);
	$query->execute();
	while ( my $row = $query->fetchrow_hashref() ) {
		push @images, $row;
	}
	$query->finish();
	&DBINFO::db_user_close();
	return(\@images);
}
##
## note: pass a depth of -1 to not descend the tree!
##
sub reindex {
	## Rebuild the IFOLDERS/IMAGES database records for one folder by
	## walking the on-disk IMAGES tree, normalizing filenames and formats
	## along the way (CMYK->RGB, .jpeg->.jpg, tif/bmp/tiff->png, trailing
	## whitespace in names).  Recurses into subfolders up to 5 levels deep;
	## pass DEPTH of -1 to not descend the tree.
	my ($USERNAME, $PWD, $DEPTH) = @_;
	require Image::Magick;
	if (not defined $PWD) { $PWD = ''; }
	elsif ($PWD eq '/') { $PWD = ''; }
	if (substr($PWD,0,1) eq '/') { $PWD = substr($PWD,1); } # strip leading / from PWD if necessary
#	print "DOING PATH: $USERNAME $PWD\n";
	if (not defined $DEPTH) { $DEPTH = 0; }
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my $OLDFID = &MEDIA::resolve_fid($USERNAME,$PWD);
	if ($PWD eq '') {
		## top-level call: wipe the merchant's entire image index first
		my $dbh = &DBINFO::db_user_connect($USERNAME);
		my $pstmt = "delete from IFOLDERS where MID=$MID";
#		print STDERR $pstmt."\n";
		$dbh->do($pstmt);
		$pstmt = "delete from IMAGES where MID=$MID";
#		print STDERR $pstmt."\n";
		$dbh->do($pstmt);
		&DBINFO::db_user_close();
	}
	## note: if PWD is set e.g. "A" then it should be "/$PWD" otherwise just ""
	my $PATH = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES'.(($PWD eq '')?'':"/$PWD");
	my @SUBDIRS = ();
	my @FILES = ();
	my $D = undef;
	opendir $D, $PATH;
	my @NUKE = ();
	while ( my $file = readdir($D)) {
#		print "FILE: $file\n";
		next if (substr($file,0,1) eq '.');	# skip dotfiles / . / ..
		if (-d $PATH.'/'.$file) { push @SUBDIRS, $file; }	# subdirs
		elsif (($PWD eq '') || ($PWD eq '/')) {}	# root directory is custom files (don't index)
		## NOTE: the line below is *VERY* bad it removes images with a dash which can be uploaded as part of pm system.
#		elsif ($file =~ /\-/) { push @NUKE, $PATH.'/'.$file; }	# nuke instances
		elsif ($file =~ /\.bin$/) { push @NUKE, "$PATH/$file"; }	# legacy binfiles are removed
		else {
			if (1) {
				## normalize CMYK originals to RGB in place
				my $p = new Image::Magick;
				$p->Read("$PATH/$file");
				my ($format) = $p->get("format");
				my ($cs) = $p->get("colorspace");
				if ($cs eq 'CMYK') {
					print STDERR "$PATH/$file UPGRADING CMYK FORMAT: $format [$cs]\n";
					## converting from CMYK to RGB
					$p->set("colorspace"=>"RGB");
					$p->Write("$PATH/$file");
				}
			}
			#next if (($format =~ /CompuServe/) && ($file =~ /\.gif$/));
			#next if (($format =~ /Joint Photographic/) && ($file =~ /\.jpg$/));
			#next if (($format =~ /Portable Network Graphics/) && ($file =~ /\.png$/));
			if ($file =~ /(.*?)\.jpeg$/) {
				## renames filename.jpeg to filename.jpg
				$file = $1;
				rename("$PATH/$file.jpeg","$PATH/$file.jpg");
				$file .= ".jpg";
			}
			elsif ($file =~ /(.*?)\.(tif|bmp|tiff)$/) {
				## converts files of specific types to .png; the original is
				## kept as a hidden dotfile rather than deleted
				($file,my $ext) = ($1,$2);
				my $p = Image::Magick->new();
				$p->Read("$PATH/$file.$ext");
				$p->Set('magick'=>'png');
				$p->Write("$PATH/$file.png");
				chmod(0666,"$PATH/$file.png");
				chown($ZOOVY::EUID,$ZOOVY::EGID,"$PATH/$file.png");
				rename("$PATH/$file.$ext","$PATH/.$file.$ext");
				$file = "$file.png";
			}
			## strip any trailing spaces in the image name during a reindex
			if ($file =~ /^(.*)[_\s]+\.(jpg|png|gif)$/) {
				my ($file2,$ext2) = ($1,$2);
				print STDERR "RENAMING FILE: $PATH/$file to $PATH/$file2.$ext2\n";
				rename("$PATH/$file","$PATH/$file2.$ext2");
				$file = "$file2.$ext2";
			}
			push @FILES, $file;
		}
		# keep actual images
	}
	closedir($D);
	foreach my $nuke (@NUKE) {
#		die("Should never be reached");
		## NOTE(review): "MEDIA.REDINEX" typo is preserved on purpose --
		## log consumers may already key off it; confirm before fixing.
		&ZOOVY::log($USERNAME,'',"MEDIA.REDINEX","Reindex script removed $nuke","WARN");
		unlink($nuke);
	}
#	use Data::Dumper; print Dumper(\@SUBDIRS);
	## Now, lets recurse through directories (if any) -- DEPTH<0 disables,
	## and a hard cap of 5 levels prevents runaway recursion
	if (($DEPTH>=0) && ($DEPTH<5)) {
		foreach my $d (@SUBDIRS) {
#			print STDERR "INDEXING: $d\n";
			&MEDIA::reindex($USERNAME, $PWD.'/'.$d, $DEPTH+1);
		}
	}
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	if ($OLDFID > 0) {
		## okay now lets blow out this directory in the database!
		my $pstmt = "delete from IFOLDERS where MID=$MID and FID=".$dbh->quote($OLDFID);
#		print STDERR $pstmt."\n";
		$dbh->do($pstmt);
		$pstmt = "delete from IMAGES where MID=$MID and FID=".$dbh->quote($OLDFID);
#		print STDERR $pstmt."\n";
		$dbh->do($pstmt);
	}
	## recreate the folder record and re-parent any children onto its new FID
	&MEDIA::mkfolder($USERNAME,$PWD);
	my $NEWFID = &MEDIA::resolve_fid($USERNAME,$PWD);
	my $pstmt = "update IFOLDERS set ParentFID=$NEWFID where MID=$MID and ParentFID=$OLDFID";
#	print STDERR $pstmt."\n";
	$dbh->do($pstmt);
	if ($PWD ne '') {
		## okay, now lets iterate through each file and add them to the database
		foreach my $f (@FILES) {
			my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,$blksize,$blocks) = stat($PATH.'/'.$f);
#			print "F: $f\n";
			my ($fimg, $ext) = split(/\./,$f);
			&MEDIA::addimage($USERNAME,$PWD,$fimg,$ext,$mtime,$size);
			## this line will update the sizes.
			&MEDIA::getinfo($USERNAME,"$PWD/$f",CACHE=>0);
		}
	}
	&DBINFO::db_user_close();
}
##
## call this when we add an image (maintains media library sync database)
##
sub addimage {
	## Register an image in the IMAGES table (media library sync database),
	## creating the folder record on demand and bumping its image count.
	## NOTE(review): the $TS parameter is accepted but the stored TS is $^T
	## (script start time) -- confirm whether that is intentional.
	my ($USERNAME,$PWD,$IMGNAME,$FORMAT,$TS,$MASTERSIZE,$H,$W) = @_;
	$PWD = substr($PWD,1) if (substr($PWD,0,1) eq '/');	## remove leading /
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = int(&ZOOVY::resolve_mid($USERNAME));
	my $FID = &MEDIA::resolve_fid($USERNAME,$PWD);
	if ($FID==-1) {
		## folder doesn't exist yet: create it, then resolve again
		&MEDIA::mkfolder($USERNAME,$PWD);
		$FID = &MEDIA::resolve_fid($USERNAME,$PWD);
	}
	if (length($IMGNAME)>$MEDIA::max_name_length) {
		warn("Image [$IMGNAME] length is too long!");
		$IMGNAME = substr($IMGNAME,0,$MEDIA::max_name_length);
	}
	if (not defined $FORMAT) { $FORMAT = ''; }
	my %vars = (
		'ImgName' => lc($IMGNAME),
		'Format' => $FORMAT,
		'TS' => $^T,
		'MERCHANT' => $USERNAME,
		'MID' => $MID,
		'FID' => $FID,
		);
	$vars{'MasterSize'} = int($MASTERSIZE) if (defined $MASTERSIZE);
	if ((defined $H) && (defined $W)) { ($vars{'H'},$vars{'W'}) = ($H,$W); }
	## DBINFO::insert builds an upsert keyed on (MID,FID,ImgName)
	my $pstmt = &DBINFO::insert($dbh,'IMAGES',\%vars,debug=>2,key=>['MID','FID','ImgName']);
	if (defined $dbh->do($pstmt)) {
		&MEDIA::bumpfolder($USERNAME,$PWD,+1);
	}
	&DBINFO::db_user_close();
	return();
}
##
## call this when we delete an image (maintains media library sync database)
##
sub delimage {
## Delete an image: removes its IMAGES row, unlinks the file on disk,
## and decrements the folder's cached image count.
## Parameters:
##   $USERNAME - merchant username
##   $PWD      - folder path relative to /IMAGES (leading '/' is stripped)
##   $IMGNAME  - image base name within that folder
## Returns: nothing.
my ($USERNAME,$PWD,$IMGNAME) = @_;
if (substr($PWD,0,1) eq '/') { $PWD = substr($PWD,1); } ## remove leading /
my $dbh = &DBINFO::db_user_connect($USERNAME);
my $MID = int(&ZOOVY::resolve_mid($USERNAME));
my $FID = &MEDIA::resolve_fid($USERNAME,$PWD);
## look up the row id and stored format (used as extension fallback below)
my $pstmt = "select Id,Format from IMAGES where MID=$MID and FID=$FID and ImgName=".$dbh->quote($IMGNAME)." limit 1";
my $sth = $dbh->prepare($pstmt);
$sth->execute();
my ($Id,$Format) = $sth->fetchrow();
$sth->finish();
if (($FID > 0) && ($Id>0)) {
my $pstmt = "delete from IMAGES where MID=$MID /* $USERNAME */ and FID=$FID and Id=$Id and ImgName=".$dbh->quote($IMGNAME)." limit 1";
# print STDERR $pstmt."\n";
if (defined $dbh->do($pstmt)) {
## db row removed: remove the physical file as well
my $userdir = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
my ($subdir,$image,$ext) = &MEDIA::parse_filename("$PWD/$IMGNAME");
if ($ext eq '') { $ext = $Format; } ## fall back to the Format column
$ext = lc($ext);
unlink "$userdir/$subdir/$image.$ext";
&MEDIA::bumpfolder($USERNAME,$PWD,-1);
}
}
&DBINFO::db_user_close();
}
##
## returns the folder id for a given pwd
##
sub resolve_fid {
## Resolve a folder path to its IFOLDERS FID.
## Parameters:
##   $USERNAME - merchant username
##   $PWD      - folder path relative to /IMAGES (leading '/' is stripped)
## Returns: the FID, 0 for the root folder (''), or -1 when not found.
## A one-entry package-level cache ($MEDIA::CACHE_FID / CACHE_FIDSTR)
## avoids repeating the most recent lookup.
my ($USERNAME,$PWD) = @_;
if (substr($PWD,0,1) eq '/') { $PWD = substr($PWD,1); } ## remove leading /
if ($PWD eq '') { return 0; } ## root folder is always FID 0
my ($MID) = &ZOOVY::resolve_mid($USERNAME);
if ($MEDIA::CACHE_FIDSTR eq "$MID!$PWD") {
## cache hit: same merchant+path as the previous call
return($MEDIA::CACHE_FID);
}
my $dbh = &DBINFO::db_user_connect($USERNAME);
my $pstmt = "select FID from IFOLDERS where MID=$MID and FNAME=".$dbh->quote($PWD);
if ($MEDIA::DEBUG) { print STDERR $pstmt."\n"; }
my $sth = $dbh->prepare($pstmt);
$sth->execute();
my ($FID) = $sth->fetchrow();
$sth->finish();
if (not defined $FID) { $FID = -1; } ## no row => unknown folder
&DBINFO::db_user_close();
$FID = int($FID);
## this is a global variable, that will prevent us from doing the same lookup twice
## (only successful lookups are cached; -1 results are always re-queried)
if ($FID>0) { $MEDIA::CACHE_FID = $FID; $MEDIA::CACHE_FIDSTR = "$MID!$PWD"; }
return($FID);
}
##
## returns 1 if a folder exists, 0 if not.
##
##
## creates a new folder
## returns: new PWD
sub mkfolder {
	## Create a media-library folder: makes the physical directory under the
	## user's /IMAGES tree and registers it in the IFOLDERS table, creating
	## missing parent folders recursively as needed.
	## Parameters:
	##   $USERNAME - merchant username
	##   $PWD      - folder path relative to /IMAGES (leading '/' is stripped,
	##               runs of dots are replaced with '_')
	## Returns: the (possibly case-normalized) PWD that was created, or
	## nothing when $PWD is empty.
	## Changes vs. previous revision: removed a leftover debug print to
	## STDERR and a dead no-op reassignment of $PARENT.
	my ($USERNAME, $PWD) = @_;
	## invalidate the resolve_fid() single-entry cache
	$MEDIA::CACHE_FID = undef;
	$MEDIA::CACHE_FIDSTR = undef;
	if ($PWD eq '') { return(); }
	## NOTE: $DSTDIR is intentionally computed from the unmodified $PWD
	my $DSTDIR = &ZOOVY::resolve_userpath($USERNAME)."/IMAGES/".$PWD;
	if (substr($PWD,0,1) eq '/') { $PWD = substr($PWD,1); } ## remove leading /
	$PWD =~ s/[\.]+/_/gs;	## dots are not allowed in folder names
	## determine the parent path ('/' means directly under /IMAGES)
	my $PARENT = $PWD;
	if (rindex($PARENT,'/')>=0) { $PARENT = substr($PARENT,0,rindex($PARENT,'/')); } else { $PARENT = '/'; }
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = int(&ZOOVY::resolve_mid($USERNAME));
	my $ParentFID = -1;
	if ($PARENT eq '') {
		## verify the parent exists. and if it doesn't, perhaps we ought to create it.
	}
	elsif ($PARENT eq '/') {
		## we assume /IMAGES always exists
	}
	elsif (-d $DSTDIR ) {
		## target directory already exists on disk; nothing to create here
	}
	else {
		## parent directory is missing on disk; create it first
		&mkfolder($USERNAME,$PARENT);
	}
	$ParentFID = &MEDIA::resolve_fid($USERNAME,$PARENT);
	if ($ParentFID < 0) {
		## parent exists on disk but has no IFOLDERS row; register it
		&MEDIA::mkfolder($USERNAME,$PARENT);
		$ParentFID = &MEDIA::resolve_fid($USERNAME,$PARENT);
	}
	my $FID = &MEDIA::resolve_fid($USERNAME,$PWD);
	if ($FID > 0) {
		## folder already registered; make sure the directory really exists
		if (! -d $DSTDIR) {
			warn "$DSTDIR doesn't actually exist, but has FID:$FID\n";
			File::Path::mkpath($DSTDIR,0,0777);
		}
	}
	elsif ($FID==-1) {
		## case-normalize: folders are lowercase, except single-character
		## directory prefixes which stay uppercased
		if (length($PWD)>1) {
			$PWD = lc($PWD);
			if ($PWD =~ /^[a-z0-9]\//) { $PWD = ucfirst($PWD); } # but keep uppercased single character directories.
		}
		else {
			$PWD = uc($PWD);
		}
		## physically create the directory
		my $PATH = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES/'.$PWD;
		mkdir($PATH);
		chmod 0777, $PATH;
		## create index in database.
		my $pstmt = &DBINFO::insert($dbh,'IFOLDERS',{
			FName=>$PWD,ImageCount=>0,MERCHANT=>$USERNAME,MID=>$MID,
			ParentFID=>$ParentFID,ParentNAME=>$PARENT,TS=>$^T,ItExists=>1
			},debug=>2);
		$dbh->do($pstmt);
	}
	&DBINFO::db_user_close();
	return($PWD);
}
##
## rmfolder - deletes a folder
## returns: parent PWD
sub rmfolder {
## Delete a media-library folder: removes its IFOLDERS row, deletes every
## image inside it (db row + file, via delimage), and removes the directory.
## Parameters:
##   $USERNAME - merchant username
##   $PWD      - folder path relative to /IMAGES (leading '/' is stripped)
## Returns: the parent PWD ('/' when the deleted folder was top-level).
my ($USERNAME, $PWD) = @_;
## invalidate the resolve_fid() single-entry cache
$MEDIA::CACHE_FID = undef;
$MEDIA::CACHE_FIDSTR = undef;
if (substr($PWD,0,1) eq '/') { $PWD = substr($PWD,1); } ## remove leading /
my $MID = &ZOOVY::resolve_mid($USERNAME);
my $dbh = &DBINFO::db_user_connect($USERNAME);
my $pstmt = "delete from IFOLDERS where MID=$MID and FNAME=".$dbh->quote($PWD);
$dbh->do($pstmt);
&DBINFO::db_user_close();
my $PATH = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES/';
## remove every image that is still on disk in this folder
my $imageref = &MEDIA::listimgs("$PATH/$PWD");
if (defined $imageref) {
foreach my $img (keys %{$imageref}) {
&MEDIA::delimage($USERNAME,$PWD,$img);
}
}
## need to delete bin files too
#opendir (DIR, $PATH.$PWD);
#my @files = grep /\.bin$/, readdir(DIR);
#closedir DIR;
#foreach my $file (@files) {
# unlink ($PATH.$PWD."/".$file);
# }
## rmdir only succeeds if the directory is now empty
rmdir($PATH.$PWD);
## descend down a level
if (rindex($PWD,'/')>=0) { $PWD = substr($PWD,0,rindex($PWD,'/')); } else { $PWD = '/'; }
if ($PWD eq '') { $PWD = ''; } ## NOTE(review): no-op; harmless leftover
return($PWD);
}
##
## call this anytime you add/remove/update an image in a folder! to maintain the count
##
sub bumpfolder {
	## Adjust the cached image count on a folder's IFOLDERS row and refresh
	## its TS column. Call this whenever images are added to / removed from
	## a folder ($IMGCOUNT may be negative).
	my ($USERNAME, $PWD, $IMGCOUNT) = @_;
	my $delta = int($IMGCOUNT);
	$PWD = substr($PWD,1) if (substr($PWD,0,1) eq '/');	## remove leading /
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my $sql = sprintf(
		"update IFOLDERS set TS=%d,ImageCount=ImageCount+%d where MID=%s and FNAME=%s",
		time(), $delta, $MID, $dbh->quote($PWD)
	);
	$dbh->do($sql);
	&DBINFO::db_user_close();
}
##
## converts a filename to a base name. (e.g. asdf-w100-h100-bffff.jpg becomes asdf)
##
sub filespec {
	## Reduce a media filename to its base name: drops everything from the
	## first dot onward (the extension) and then any '-' suffix such as the
	## generated size/color variants, e.g. "asdf-w100-h100-bffff.jpg" -> "asdf".
	## A dash in position 0 is kept (only suffixes after position 0 are cut).
	my ($name) = @_;
	$name =~ s/\..*?$//s;	# strip the extension
	my $dash_at = index($name,'-');
	$name = substr($name,0,$dash_at) if $dash_at > 0;
	return($name);
}
##
## returns: a hashref of folders with the value as their respective file counts
##
sub folders {
	## List the contents of a media-library folder from the sync database.
	## Parameters:
	##   $USERNAME - merchant username
	##   $PWD      - folder path relative to /IMAGES
	## Returns: (\%folders, \%files) where %folders maps child folder name to
	## its cached image count and %files maps image name to a truthy count;
	## returns (undef, undef) when $PWD does not resolve to a known folder.
	## Fix: the bad-directory early return previously skipped
	## DBINFO::db_user_close(), leaking the per-user db handle.
	my ($USERNAME,$PWD) = @_;
	my %folders = ();
	my %files = ();
	my $dbh = &DBINFO::db_user_connect($USERNAME);
	my $MID = &ZOOVY::resolve_mid($USERNAME);
	my $parentFID = &MEDIA::resolve_fid($USERNAME,$PWD);
	if ($parentFID == -1) {
		## bad directory! release the handle before bailing out
		&DBINFO::db_user_close();
		return(undef,undef);
	}
	## child folders and their cached image counts
	my $pstmt = "select FName,ImageCount from IFOLDERS where ParentFID=$parentFID and MID=$MID /* $USERNAME */";
	my $sth = $dbh->prepare($pstmt);
	$sth->execute();
	while ( my ($fname,$imgcount) = $sth->fetchrow() ) {
		$folders{$fname} = $imgcount;
	}
	$sth->finish();
	## images directly inside this folder
	$pstmt = "select ImgName from IMAGES where FID=$parentFID and MID=$MID /* $USERNAME */";
	$sth = $dbh->prepare($pstmt);
	$sth->execute();
	while ( my ($fname) = $sth->fetchrow() ) {
		$files{$fname}++;
	}
	$sth->finish();
	&DBINFO::db_user_close();
	return(\%folders,\%files);
}
##
## returns: a hashref of folders with the value as their respective file counts
##
sub foldersDEPRECATED {
## DEPRECATED: filesystem-scanning predecessor of folders() above, which
## now answers the same question from the IFOLDERS/IMAGES sync tables.
## Scans the folder on disk and returns (\%folders, \%files) where
## %folders maps subdirectory name to its unique image count and %files
## maps image base name (per filespec) to a count; .bin files and
## dotfiles are skipped.
my ($USERNAME,$PWD) = @_;
my %folders = ();
my %files = ();
if (substr($PWD,0,1) eq '/') { $PWD = substr($PWD,1); } # strip leading /
my $path = &ZOOVY::resolve_userpath($USERNAME).'/IMAGES';
if (($PWD eq '') || ($PWD eq '/')) {
## do nothing - root folder scans /IMAGES itself
}
else {
$path = $path.'/'.$PWD;
}
## Reference schema for the db-backed replacement:
#mysql> desc IFOLDERS;
#+------------+------------------+------+-----+---------+----------------+
#| Field | Type | Null | Key | Default | Extra |
#+------------+------------------+------+-----+---------+----------------+
#| FID | int(11) | | PRI | NULL | auto_increment |
#| FName | varchar(35) | | | | |
#| ImageCount | int(11) | | | 0 | |
#| MERCHANT | varchar(20) | | | | |
#| MID | int(11) | | MUL | 0 | |
#| ParentFID | int(10) unsigned | | | 0 | |
#| ParentName | varchar(175) | | | | |
#| TS | int(10) unsigned | | | 0 | |
#| ItExists | tinyint(4) | | | 0 | |
#+------------+------------------+------+-----+---------+----------------+
#9 rows in set (0.03 sec)
# my $dbh = &DBINFO::db_user_connect($USERNAME);
# my $MID = &ZOOVY::resolve_mid($USERNAME);
# my $parentFID = &MEDIA::resolve_fid($USERNAME,$PWD);
#
# if (1) {
# my $pstmt = "select FName,ImageCount from IFOLDERS where ParentFID=$parentFID and MID=$MID /* $USERNAME */";
# my $sth = $dbh->prepare($pstmt);
# $sth->execute();
# while ( my ($fname,$imgcount) = $sth->fetchrow() ) {
# $fname = (($PWD ne '') && ($PWD ne '/'))?"$PWD/$fname":$fname;
# $folders{$fname} = $imgcount;
# }
# $sth->finish();
# }
#mysql> desc IMAGES;
#+------------+-------------------------+------+-----+---------+----------------+
#| Field | Type | Null | Key | Default | Extra |
#+------------+-------------------------+------+-----+---------+----------------+
#| Id | int(11) | | PRI | NULL | auto_increment |
#| ImgName | varchar(45) | | | | |
#| Format | enum('gif','jpg','png') | YES | | NULL | |
#| TS | int(10) unsigned | | | 0 | |
#| MERCHANT | varchar(20) | | | | |
#| MID | int(11) | | MUL | 0 | |
#| FID | int(11) | | | 0 | |
#| ItExists | tinyint(4) | | | 0 | |
#| ThumbSize | int(10) unsigned | | | 0 | |
#| MasterSize | int(10) unsigned | | | 0 | |
#+------------+-------------------------+------+-----+---------+----------------+
#10 rows in set (0.01 sec)
# if (1) {
# my $pstmt = "select ImgName from IMAGES where FID=$parentFID and MID=$MID /* $USERNAME */";
# my $sth = $dbh->prepare($pstmt);
# $sth->execute();
# while ( my ($fname) = $sth->fetchrow() ) {
# $files{$fname}++;
# }
# $sth->finish();
# }
# &DBINFO::db_user_close();
# # print STDERR "PATH: $path\n";
## live filesystem scan
my $D;
opendir($D, $path);
while ( my $file = readdir($D) ) {
next if (substr($file,0,1) eq '.'); ## skip dotfiles and . / ..
if (-d $path.'/'.$file) {
# subdirectory: count its unique images
$folders{$file} = scalar(keys %{listimgs($path.'/'.$file,1)});
}
elsif ($file =~ /\.bin$/) {
## binfile! {don't count it}
}
else {
$files{&MEDIA::filespec($file)}++;
}
}
closedir($D);
return(\%folders,\%files);
}
##
## function: imgcount
## returns - the unique number of files in a specific directory
## parameters: directory
##
sub listimgs {
	## Scan a directory on disk and return a hashref keyed by unique image
	## base name (per MEDIA::filespec), valued with how many files share
	## that base name. Dotfiles and generated .bin files are ignored.
	my ($dir) = @_;
	my %seen = ();
	my $dh;
	opendir($dh, $dir);
	while (my $entry = readdir($dh)) {
		next if (substr($entry,0,1) eq '.');
		next if ($entry =~ /\.bin$/);
		$seen{&MEDIA::filespec($entry)}++;
	}
	closedir($dh);
	return(\%seen);
}
1;
| CommerceRack/backend | lib/MEDIA.pm | Perl | mit | 74,896 |
#!/usr/bin/env perl
## Query NCBI EInfo (via BioPerl's Bio::DB::EUtilities) for metadata about
## the 'taxonomy' database and print its description, record count, last
## update time and the searchable fields it exposes.
use 5.010;
use strict;
use warnings;
use Bio::DB::EUtilities;
## NOTE(review): NCBI requires a real contact email; replace the placeholder
## before heavy use.
my $factory = Bio::DB::EUtilities->new(-eutil => 'einfo',
-email => 'mymail@foo.bar',
-db => 'taxonomy');
# for quick simple output, use:
# $factory->print_all;
# or use snippets of the following for what you need
# get database info
say "Database: ",$factory->get_database;
say " Desc: ",$factory->get_description;
say " Name: ",$factory->get_menu_name;
say " Records: ",$factory->get_record_count;
say " Updated: ",$factory->get_last_update,"\n";
# iterate through FieldInfo and LinkInfo objects to get field and link data
while (my $field = $factory->next_FieldInfo) {
say "\tField code: ",$field->get_field_code;
say "\t name: ",$field->get_field_name;
say "\t desc: ",$field->get_field_description;
say "\t count: ",$field->get_term_count;
say "\tAttributes: ";
#say join ',', grep {$field->$_} qw(is_date
# is_singletoken is_hierarchy is_hidden is_numerical),"\n";
}
## link information (disabled; enable to list cross-database links)
#while (my $link = $factory->next_LinkInfo) {
# say "\tLink name: ",$link->get_link_name;
# say "\t desc: ",$link->get_link_description;
# say "\t dbfrom: ",$link->get_dbfrom; # same as get_database()
# say "\t dbto: ",$link->get_dbto,"\n"; # database linked to
#}
| sestaton/sesbio | phylogenetics/eutils_taxonomy_methods.pl | Perl | mit | 1,393 |
package Yogafire::Command::Instance::sshtmux;
use Mouse;
extends qw(Yogafire::CommandBase Yogafire::Command::Attribute);
has state => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
cmd_aliases => "s",
documentation => "specified instance status (running / stopped)",
);
has tagsname => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
cmd_aliases => "n",
documentation => "specified instance tagsname.",
);
has filter => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
cmd_aliases => "f",
documentation => "api filter. (ex.--filter='tag:keyname=value,instance-state-name=running')",
);
has user => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
cmd_aliases => "u",
documentation => "specified login user",
);
has identity_file => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
cmd_aliases => "i",
documentation => "specified identity file",
);
has port => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
cmd_aliases => "p",
documentation => "specified port number",
);
has proxy => (
traits => [qw(Getopt)],
isa => "Str",
is => "rw",
documentation => "specified proxy server name(ip or dns or instance_id or tagsname).",
);
has sync => (
traits => [qw(Getopt)],
isa => "Bool",
is => "rw",
documentation => "enable synchronize-panes.",
);
has force => (
traits => [qw(Getopt)],
isa => "Bool",
is => "rw",
documentation => "force execute.",
);
has fuzzy => (
traits => [qw(Getopt)],
isa => "Bool",
is => "rw",
cmd_aliases => "z",
documentation => "Fuzzy host filter.",
);
no Mouse;
use Yogafire::CommandClass::InstanceProc;
sub abstract {'EC2 SSH Instance'}   ## one-line summary shown in the command list
sub command_names {'ssh-tmux'}      ## CLI name used to invoke this subcommand
## usage: customize the generated usage object with our leader text.
sub usage {
my ( $self, $opt, $args ) = @_;
$self->{usage}->{leader_text} = 'yoga ssh-tmux [-?] <tagsname>';
$self->{usage};
}
## execute: select matching EC2 instances and open tmux'd ssh sessions to
## them via Yogafire::CommandClass::InstanceProc (action 'sshtmux').
## $args->[0], when given, is a host/tagsname filter; with --fuzzy it is
## wrapped in '*' wildcards.
sub execute {
my ( $self, $opt, $args ) = @_;
## NOTE(review): $opt->{loop} is passed through, but no 'loop' attribute is
## declared on this class -- confirm a parent class provides it.
my $proc = Yogafire::CommandClass::InstanceProc->new(
{
action => 'sshtmux',
opt => $opt,
force => $opt->{force},
interactive => 1,
multi => 1,
loop => $opt->{loop},
}
);
if($opt->{self}) {
$proc->self_process();
} else {
my $host = $args->[0];
# fuzzy finder: widen the match with wildcards
$host = "*${host}*" if $host && $opt->{fuzzy};
$opt->{host} = $host if $host;
$proc->action_process();
}
}
1;
| toritori0318/p5-Yogafire | lib/Yogafire/Command/Instance/sshtmux.pm | Perl | mit | 2,942 |
#------------------------------------------------------------------------------
# File: SonyIDC.pm
#
# Description: Read/write Sony IDC information
#
# Revisions: 2010/01/05 - P. Harvey Created
#------------------------------------------------------------------------------
package Image::ExifTool::SonyIDC;
use strict;
use vars qw($VERSION);
use Image::ExifTool qw(:DataAccess :Utils);
use Image::ExifTool::Exif;
$VERSION = '1.08';
# Sony IDC tags (ref PH)
%Image::ExifTool::SonyIDC::Main = (
WRITE_PROC => \&Image::ExifTool::Exif::WriteExif,
CHECK_PROC => \&Image::ExifTool::Exif::CheckExif,
GROUPS => { 0 => 'MakerNotes', 2 => 'Image' },
NOTES => 'Tags written by the Sony Image Data Converter utility in ARW images.',
SET_GROUP1 => 1,
0x201 => {
Name => 'IDCPreviewStart',
IsOffset => 1,
OffsetPair => 0x202,
DataTag => 'IDCPreview',
Writable => 'int32u',
Protected => 2,
},
0x202 => {
Name => 'IDCPreviewLength',
OffsetPair => 0x201,
DataTag => 'IDCPreview',
Writable => 'int32u',
Protected => 2,
},
0x8000 => {
Name => 'IDCCreativeStyle',
Writable => 'int32u',
PrintConvColumns => 2,
PrintConv => {
1 => 'Camera Setting',
2 => 'Standard',
3 => 'Real',
4 => 'Vivid',
5 => 'Adobe RGB',
6 => 'A100 Standard', # shows up as '-' in IDC menu
7 => 'Neutral',
8 => 'Portrait',
9 => 'Landscape',
10 => 'Clear',
11 => 'Deep',
12 => 'Light',
13 => 'Sunset',
14 => 'Night View',
15 => 'Autumn Leaves',
16 => 'B&W',
17 => 'Sepia',
},
},
0x8001 => {
Name => 'CreativeStyleWasChanged',
Writable => 'int32u',
Notes => 'set if the creative style was ever changed',
# (even if it was changed back again later)
PrintConv => { 0 => 'No', 1 => 'Yes' },
},
0x8002 => {
Name => 'PresetWhiteBalance',
Writable => 'int32u',
PrintConv => {
1 => 'Camera Setting',
2 => 'Color Temperature',
3 => 'Specify Gray Point',
4 => 'Daylight',
5 => 'Cloudy',
6 => 'Shade',
7 => 'Cool White Fluorescent',
8 => 'Day Light Fluorescent',
9 => 'Day White Fluorescent',
10 => 'Warm White Fluorescent',
11 => 'Tungsten',
12 => 'Flash',
13 => 'Auto',
},
},
0x8013 => { Name => 'ColorTemperatureAdj', Writable => 'int16u' },
0x8014 => { Name => 'PresetWhiteBalanceAdj',Writable => 'int32s' },
0x8015 => { Name => 'ColorCorrection', Writable => 'int32s' },
0x8016 => { Name => 'SaturationAdj', Writable => 'int32s' },
0x8017 => { Name => 'ContrastAdj', Writable => 'int32s' },
0x8018 => {
Name => 'BrightnessAdj',
Writable => 'int32s',
PrintConv => 'sprintf("%.2f", $val/300)', #JR
PrintConvInv => '$val * 300',
},
0x8019 => { Name => 'HueAdj', Writable => 'int32s' },
0x801a => { Name => 'SharpnessAdj', Writable => 'int32s' },
0x801b => { Name => 'SharpnessOvershoot', Writable => 'int32s' },
0x801c => { Name => 'SharpnessUndershoot', Writable => 'int32s' },
0x801d => { Name => 'SharpnessThreshold', Writable => 'int32s' },
0x801e => {
Name => 'NoiseReductionMode',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'On',
},
},
0x8021 => {
Name => 'GrayPoint',
Writable => 'int16u',
Count => 4,
},
0x8022 => {
Name => 'D-RangeOptimizerMode',
Writable => 'int16u',
PrintConv => {
0 => 'Off',
1 => 'Auto',
2 => 'Manual',
},
},
0x8023 => { Name => 'D-RangeOptimizerValue', Writable => 'int32s' },
0x8024 => { Name => 'D-RangeOptimizerHighlight',Writable => 'int32s' },
0x8026 => {
Name => 'HighlightColorDistortReduct',
Writable => 'int16u',
PrintConv => {
0 => 'Standard',
1 => 'Advanced',
},
},
0x8027 => {
Name => 'NoiseReductionValue',
Writable => 'int32s',
ValueConv => '($val + 100) / 2',
ValueConvInv => '$val * 2 - 100',
},
0x8028 => {
Name => 'EdgeNoiseReduction',
Writable => 'int32s',
ValueConv => '($val + 100) / 2',
ValueConvInv => '$val * 2 - 100',
},
0x8029 => {
Name => 'ColorNoiseReduction',
Writable => 'int32s',
ValueConv => '($val + 100) / 2',
ValueConvInv => '$val * 2 - 100',
},
0x802d => { Name => 'D-RangeOptimizerShadow', Writable => 'int32s' },
0x8030 => { Name => 'PeripheralIllumCentralRadius', Writable => 'int32s' },
0x8031 => { Name => 'PeripheralIllumCentralValue', Writable => 'int32s' },
0x8032 => { Name => 'PeripheralIllumPeriphValue', Writable => 'int32s' },
0x8040 => { #JR
Name => 'DistortionCompensation',
Writable => 'int32s',
PrintConv => {
-1 => 'n/a', # (fixed by lens)
1 => 'On',
2 => 'Off',
},
},
0x9000 => {
Name => 'ToneCurveBrightnessX',
Writable => 'int16u',
Count => -1,
},
0x9001 => {
Name => 'ToneCurveRedX',
Writable => 'int16u',
Count => -1,
},
0x9002 => {
Name => 'ToneCurveGreenX',
Writable => 'int16u',
Count => -1,
},
0x9003 => {
Name => 'ToneCurveBlueX',
Writable => 'int16u',
Count => -1,
},
0x9004 => {
Name => 'ToneCurveBrightnessY',
Writable => 'int16u',
Count => -1,
},
0x9005 => {
Name => 'ToneCurveRedY',
Writable => 'int16u',
Count => -1,
},
0x9006 => {
Name => 'ToneCurveGreenY',
Writable => 'int16u',
Count => -1,
},
0x9007 => {
Name => 'ToneCurveBlueY',
Writable => 'int16u',
Count => -1,
},
0x900d => { #JR
Name => 'ChromaticAberrationCorrection', # "Magnification Chromatic Aberration"
Writable => 'int32s',
PrintConv => { 1 => 'On', 2 => 'Off' },
},
0x900e => { #JR
Name => 'InclinationCorrection',
Writable => 'int32u',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x900f => { #JR
Name => 'InclinationAngle',
Writable => 'int32s',
PrintConv => 'sprintf("%.1f deg", $val/1000)',
PrintConvInv => 'ToFloat($val) * 1000',
},
0x9010 => { #JR
Name => 'Cropping',
Writable => 'int32u',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x9011 => { #JR
Name => 'CropArea',
Writable => 'int32u',
Count => 4,
},
0x9012 => { #JR
Name => 'PreviewImageSize',
Writable => 'int32u',
Count => 2,
},
0x9013 => { #JR (ARQ images)
Name => 'PxShiftPeriphEdgeNR',
Writable => 'int32s',
PrintConv => { 0 => 'Off', 1 => 'On' },
},
0x9014 => { #JR (ARQ images)
Name => 'PxShiftPeriphEdgeNRValue',
Writable => 'int32s',
PrintConv => 'sprintf("%.1f", $val/10)',
PrintConvInv => '$val * 10',
},
0x9017 => { Name => 'WhitesAdj', Writable => 'int32s' }, #JR
0x9018 => { Name => 'BlacksAdj', Writable => 'int32s' }, #JR
0x9019 => { Name => 'HighlightsAdj', Writable => 'int32s' }, #JR
0x901a => { Name => 'ShadowsAdj', Writable => 'int32s' }, #JR
0xd000 => { Name => 'CurrentVersion', Writable => 'int32u' },
0xd001 => {
Name => 'VersionIFD',
Groups => { 1 => 'Version0' },
Flags => 'SubIFD',
Notes => 'there is one VersionIFD for each entry in the "Version Stack"',
SubDirectory => {
DirName => 'Version0',
TagTable => 'Image::ExifTool::SonyIDC::Main',
Start => '$val',
Base => '$start',
MaxSubdirs => 20, # (IDC v3.0 writes max. 10)
RelativeBase => 1, # needed to write SubIFD with relative offsets
},
},
0xd100 => {
Name => 'VersionCreateDate',
Writable => 'string',
Groups => { 2 => 'Time' },
Notes => 'date/time when this entry was created in the "Version Stack"',
Shift => 'Time',
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val,0)',
},
0xd101 => {
Name => 'VersionModifyDate',
Writable => 'string',
Groups => { 2 => 'Time' },
Shift => 'Time',
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val,0)',
},
);
# extract IDC preview images as composite tags
%Image::ExifTool::SonyIDC::Composite = (
GROUPS => { 2 => 'Image' },
IDCPreviewImage => {
Groups => { 2 => 'Preview' },
Require => {
0 => 'IDCPreviewStart',
1 => 'IDCPreviewLength',
},
# extract all preview images (not just one)
RawConv => q{
@grps = $self->GetGroup($$val{0});
require Image::ExifTool::SonyIDC;
Image::ExifTool::SonyIDC::ExtractPreviews($self);
},
},
);
# add our composite tags
Image::ExifTool::AddCompositeTags('Image::ExifTool::SonyIDC');
# set "Permanent" flag for all tags
{
	## Mark every SonyIDC tag as Permanent so the writer edits existing
	## entries in place instead of deleting and re-creating them.
	foreach my $tag_key (TagTableKeys(\%Image::ExifTool::SonyIDC::Main)) {
		$Image::ExifTool::SonyIDC::Main{$tag_key}{Permanent} = 1;
	}
}
#------------------------------------------------------------------------------
# Extract all IDC preview images
# Inputs: 0) ExifTool object ref
# Returns: data for "IDCPreviewImage" tag (which I have never seen),
# or undef if there was no preview in the SonyIDC IFD
sub ExtractPreviews($)
{
## Extract every IDC preview image found in the SonyIDC version stack.
## Each IDCPreviewStart/IDCPreviewLength pair that carries a VersionN
## group is stored under a dynamically-created IDCPreviewImageN Extra
## tag; a pair without a version number is returned as the value for
## the plain IDCPreviewImage composite tag (undef when none is found).
my $et = shift;
my $i = 1;
my $xtra = ' (1)';
my $preview;
# loop through all available IDC preview images in the order they were found
for (;;) {
my $key = "IDCPreviewStart$xtra";
unless (defined $$et{VALUE}{$key}) {
last unless $xtra;
$xtra = ''; # do the last tag extracted last
next;
}
# run through IDC preview images in the same order they were extracted
my $off = $et->GetValue($key, 'ValueConv') or last;
my $len = $et->GetValue("IDCPreviewLength$xtra", 'ValueConv') or last;
# get stack version from number in group 1 name
my $grp1 = $et->GetGroup($key, 1);
if ($grp1 =~ /(\d+)$/) {
## versioned preview: store under a per-version Extra tag
my $tag = "IDCPreviewImage$1";
unless ($Image::ExifTool::Extra{$tag}) {
AddTagToTable(\%Image::ExifTool::Extra, $tag, {
Name => $tag,
Groups => { 0 => 'Composite', 1 => 'Composite', 2 => 'Preview'},
});
}
my $val = Image::ExifTool::Exif::ExtractImage($et, $off, $len, $tag);
$et->FoundTag($tag, $val, $et->GetGroup($key));
} else {
## unversioned preview: becomes the composite tag's own value
$preview = Image::ExifTool::Exif::ExtractImage($et, $off, $len, 'IDCPreviewImage');
}
# step to next set of tags unless we are done
last unless $xtra;
++$i;
$xtra = " ($i)";
}
return $preview;
}
1; # end
__END__
=head1 NAME
Image::ExifTool::SonyIDC - Read/write Sony IDC information
=head1 SYNOPSIS
This module is used by Image::ExifTool
=head1 DESCRIPTION
This module contains definitions required by Image::ExifTool to read and
write Sony Image Data Converter version 3.0 metadata in ARW images.
=head1 AUTHOR
Copyright 2003-2022, Phil Harvey (philharvey66 at gmail.com)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 SEE ALSO
L<Image::ExifTool::TagNames/SonyIDC Tags>,
L<Image::ExifTool(3pm)|Image::ExifTool>
=cut
| mceachen/exiftool_vendored | bin/lib/Image/ExifTool/SonyIDC.pm | Perl | mit | 12,237 |
package CXGN::Stock::Seedlot::Maintenance;
=head1 NAME
CXGN::Stock::Seedlot::Maintenance - a class to manage Seedlot Maintenance Events
=head1 DESCRIPTION
This class is used to store and retrieve maintenance actions and/or observations performed
for the proper maintenance of a particular Seedlot.
Seedlot maintenance events are stored as JSON stock props, where each maintanence event
has an associated cvterm_id of a cvterm from a 'seedlot maintenance' ontology. This
ontology defines that types of maintenance events that can be associated with a Seedlot.
=head1 USAGE
Seedlot Maintenance Events are associated directly with existing Seedlots and are linked to
cvterms (by cvterm_id) of terms in a loaded seedlot maintenance event ontology. The root of
this ontology must be specified in the sgn_local.conf using the `seedlot_maintenance_event_ontology_root`
term.
The CXGN::Stock::Seedlot Class has helper functions for storing and retrieving Seedlot Maintenance Events.
=head1 AUTHOR
David Waring <djw64@cornell.edu>
=cut
use Moose;
extends 'CXGN::JSONProp';
has 'cvterm_id' => (isa => 'Int', is => 'rw');
has 'cvterm_name' => (isa => 'Str', is => 'rw');
has 'value' => (isa => 'Str|Num', is => 'rw');
has 'notes' => (isa => 'Maybe[Str]', is => 'rw');
has 'operator' => (isa => 'Str', is => 'rw');
has 'timestamp' => (isa => 'Str', is => 'rw');
sub BUILD {
## Moose post-construction hook: configure the underlying CXGN::JSONProp
## machinery so events are stored as 'seedlot_maintenance_json' stockprops
## attached to the seedlot stock row, then load any existing prop.
my $self = shift;
my $args = shift;
$self->prop_table('stockprop');
$self->prop_namespace('Stock::Stockprop');
$self->prop_primary_key('stockprop_id');
$self->prop_type('seedlot_maintenance_json');
$self->cv_name('stock_property');
## fields permitted inside the JSON value of each event
$self->allowed_fields([ qw | cvterm_id cvterm_name value notes operator timestamp | ]);
$self->parent_table('stock');
$self->parent_primary_key('stock_id');
$self->load();
}
=head2 Class method: filter_events()
Usage: my $event_obj = CXGN::Stock::Seedlot::Maintenance->new({ bcs_schema => $schema });
my @events = $event_obj->filter_events($filters);
Desc: get all of the (optionally filtered) seedlot maintenance events associated with any of the matching seedlots
Args: - filters (optional): a hash of different filter types to apply, with the following keys:
- events: an arrayref of event ids
- names: an arrayref of hashes containing name filter options:
- value: a string or array (for IN comp) of seedlot name query params
- comp: the SQL comparison type (IN, LIKE)
- dates: an arrayref of hashes containing date filter options:
- date: date in YYYY-MM-DD format
- comp: comparison type (LIKE, <=, <, >, >=)
- types: an arrayref of hashes containing type/value filter options:
- cvterm_id: cvterm_id of maintenance event type
- values: (optional, default=any value) array of allowed values
- ignore: (optional, default=none) array of not allowed values
- operators: arrayref of operator names
- page (optional): the page number of results to return
- pageSize (optional): the number of results per page to return
Ret: a hashref with the results metadata and the matching seedlot events:
- page: current page number
- maxPage: the number of the last page
- pageSize: (max) number of results per page
- total: total number of results
- results: an arrayref of hashes of the seedlot's stored events, with the following keys:
- stock_id: the unique id of the seedlot
- uniquename: the unique name of the seedlot
- stockprop_id: the unique id of the maintenance event
- cvterm_id: id of seedlot maintenance event ontology term
- cvterm_name: name of seedlot maintenance event ontology term
- value: value of the seedlot maintenance event
- notes: additional notes/comments about the event
- operator: username of the person creating the event
- timestamp: timestamp string of when the event was created ('YYYY-MM-DD HH:MM:SS' format)
=cut
sub filter_events {
## Translate the $filters hash (see POD above) into SQL::Abstract search
## conditions and delegate to CXGN::JSONProp::filter_props(); results are
## ordered newest-first by the JSON 'timestamp' field.
my $class = shift;
my $filters = shift;
my $page = shift;
my $pageSize = shift;
my $schema = $class->bcs_schema();
# Parse filters into search conditions
my @and;
my @or;
## filter by specific event (stockprop) ids
if ( defined $filters && defined $filters->{'events'} && scalar(@{$filters->{'events'}}) > 0 ) {
push(@and, { 'me.stockprop_id' => $filters->{'events'} });
}
## filter by seedlot name; each entry must be a hashref of the form
## { value => ..., comp => 'IN'|'LIKE' } -- plain strings are skipped
if ( defined $filters && defined $filters->{'names'} && scalar(@{$filters->{'names'}}) > 0 ) {
foreach my $f (@{$filters->{'names'}}) {
if ( $f->{value} && $f->{comp} ) {
push(@and, { 'stock.uniquename' => { $f->{'comp'} => $f->{'value'} } });
}
}
}
## filter by timestamp (string comparison; stored as 'YYYY-MM-DD HH:MM:SS')
if ( defined $filters && defined $filters->{'dates'} && scalar(@{$filters->{'dates'}}) > 0 ) {
foreach my $f (@{$filters->{'dates'}}) {
push(@and, { "value::json->>'timestamp'" => { $f->{'comp'} => $f->{'date'} } });
}
}
## filter by event type cvterm, optionally restricted to given 'values'
## or excluding 'ignore' values; the per-type conditions are OR'd together
if ( defined $filters && defined $filters->{'types'} && scalar(@{$filters->{'types'}}) > 0 ) {
foreach my $f (@{$filters->{'types'}}) {
if ( $f->{values} ) {
my @c = (
{ "value::json->>'cvterm_id'" => $f->{cvterm_id} },
{ "value::json->>'value'" => $f->{values} }
);
push(@or, { "-and" => \@c });
}
elsif ( $f->{ignore} ) {
my @c = (
{ "value::json->>'cvterm_id'" => $f->{cvterm_id} },
{ "value::json->>'value'" => { "!=" => $f->{ignore} } }
);
push(@or, { "-and" => \@c });
}
else {
push(@or, { "value::json->>'cvterm_id'" => $f->{cvterm_id} });
}
}
}
## filter by the operator (username) recorded on the event
if ( defined $filters && defined $filters->{'operators'} && scalar(@{$filters->{'operators'}}) > 0 ) {
push(@and, { "value::json->>'operator'" => $filters->{'operators'} });
}
# Build conditions
my %conditions = ();
if ( scalar(@and) > 0 ) {
$conditions{"-and"} = \@and;
}
if ( scalar(@or) > 0 ) {
$conditions{"-or"} = \@or;
}
# Perform the filtering
my $filtered_props = $class->filter_props({
schema => $schema,
conditions => \%conditions,
parent_fields => ["uniquename"],
order_by => { "-desc" => "value::json->>'timestamp'" },
page => $page,
pageSize => $pageSize
});
return $filtered_props;
}
=head2 Class method: overdue_events()
Usage: my $event_obj = CXGN::Stock::Seedlot::Maintenance->new({ bcs_schema => $schema });
my @seedlots = $event_obj->overdue_events($seedlots, $event, $date);
Desc: return the seedlots (from the specified list) that have not had the specified event performed
on or after the selected date
Args: - seedlots: an arrayref of seedlot names to check
- event: cvterm_id of event that should have been performed
- date: find seedlots that have not had the specified event performed after this date (YYYY-MM-DD format)
Ret: an arrayref with the status of each of the requested seedlots
- seedlot: seedlot name
- overdue: 1 if overdue, 0 if not
- timestamp: the timestamp of the last time the event was performed, if not overdue
=cut
sub overdue_events {
    ## Return the overdue status of each requested seedlot: a seedlot is
    ## overdue when it has no non-'Unsuccessful' record of the given event
    ## type on or after the cutoff date.
    ## Args: $seedlots (arrayref of uniquenames), $event (cvterm_id),
    ##       $date ('YYYY-MM-DD').
    ## Returns: arrayref of { seedlot, overdue (0/1), timestamp }, overdue first.
    ## Fixes vs. previous revision:
    ##  - names filter now passes the { value, comp } hashref shape that
    ##    filter_events() expects (plain strings were dereferenced as hashrefs)
    ##  - cutoff now includes the ' ' separator so it string-compares
    ##    correctly against stored 'YYYY-MM-DD HH:MM:SS' timestamps
    ##  - timestamps are compared with 'gt' (string), not numeric '>'
    my $class = shift;
    my $seedlots = shift;
    my $event = shift;
    my $date = shift;
    my $schema = $class->bcs_schema();

    # Find Seedlots that are not overdue
    my %filters = (
        names => [ { value => $seedlots, comp => 'IN' } ],
        types => [ { cvterm_id => $event, ignore => 'Unsuccessful' } ],
        dates => [ { date => $date . " 00:00:00", comp => '>=' } ]
    );
    my $results = $class->filter_events(\%filters);

    # Keep the most recent qualifying timestamp for each not-overdue seedlot
    my %not_overdue_seedlots;
    foreach my $s (@{$results->{'results'}}) {
        my $n = $s->{'uniquename'};
        my $t = $s->{'timestamp'};
        my $e = $not_overdue_seedlots{$n};
        if ( !$e || $t gt $e ) {
            $not_overdue_seedlots{$n} = $t;
        }
    }

    # Get the status of each of the requested seedlots
    my @results = ();
    foreach my $n (@$seedlots) {
        my $t = $not_overdue_seedlots{$n};
        my $o = $t ? 0 : 1;
        push(@results, { seedlot => $n, overdue => $o, timestamp => $t });
    }

    # Sort so overdue seedlots are displayed first
    my @sorted = sort { $b->{overdue} <=> $a->{overdue} } @results;
    return \@sorted;
}
1;
| solgenomics/sgn | lib/CXGN/Stock/Seedlot/Maintenance.pm | Perl | mit | 9,128 |
/*
_________________________________________________________________________
| Copyright (C) 1982 |
| |
| David Warren, |
| SRI International, 333 Ravenswood Ave., Menlo Park, |
| California 94025, USA; |
| |
| Fernando Pereira, |
| Dept. of Architecture, University of Edinburgh, |
| 20 Chambers St., Edinburgh EH1 1JZ, Scotland |
| |
| This program may be used, copied, altered or included in other |
| programs only for academic purposes and provided that the |
| authorship of the initial program is aknowledged. |
| Use for commercial purposes without the previous written |
| agreement of the authors is forbidden. |
|_________________________________________________________________________|
*/
/* Simplifying and executing the logical form of a NL query. */
:-op(500,xfy,--).
:-op(359,xf,ject).
% write_tree(+Tree): pretty-print a logical form. Variables are first
% numbered with numbervars/3; the trailing fail undoes those bindings
% before the catch-all second clause succeeds.
write_tree(T):-
numbervars(T,1,_),
wt(T,0),
fail.
write_tree(_).
% wt(+Term,+Indent): write Term indented to column Indent.
wt((P:-Q),L) :- !, L1 is L+3,
write(P), tab(1), write((:-)), nl,
tab(L1), wt(Q,L1).
% conjunctions: one conjunct per line, continuation lines prefixed by '&'
wt((P,Q),L) :- !, L1 is L-2,
wt(P,L), nl,
tab(L1), put_char('&'), tab(1), wt(Q,L).
% complex goals inside {} are bracketed explicitly
wt({P},L) :- complex(P), !, L1 is L+2,
put_char('{'), tab(1), wt(P,L1), tab(1), put_char('}').
% quantifier/aggregation constructs: header line, then indented body
wt(E,L) :- decomp(E,H,P), !, L1 is L+2,
header(H), nl,
tab(L1), wt(P,L1).
wt(E,_) :- write(E).
% header(+Words): write a list of words separated by single spaces.
header([]).
header([X|H]) :- write(X), tab(1), header(H).
% decomp(+Expr,-HeaderWords,-Body): split a construct into header and body.
decomp(setof(X,P,S),[S,=,setof,X],P).
decomp(\+(P),[\+],P) :- complex(P).
decomp(numberof(X,P,N),[N,=,numberof,X],P).
decomp(X^P,[exists,X|XX],P1) :- othervars(P,XX,P1).
% othervars(+Expr,-Vars,-Body): gather consecutive ^-quantified variables.
othervars(X^P,[X|XX],P1) :- !, othervars(P,XX,P1).
othervars(P,[],P).
% complex(+Goal): goals that need bracketing/decomposition when printed.
complex((_,_)).
complex({_}).
complex(setof(_,_,_)).
complex(numberof(_,_,_)).
complex(_^_).
complex(\+P) :- complex(P).
% Query execution.
% respond(+Answers): print the answer list, or a "nothing" message.
respond([]) :- display('Nothing satisfies your question.'), nl.
respond([A|L]) :- reply(A), replies(L).
% answer(+(answer(Vars):-Goal)): run a planned query and print its results.
answer((answer([]):-E)) :- !, holds(E,B), yesno(B).       % yes/no question
answer((answer([X]):-E)) :- !, seto(X,E,S), respond(S).   % single output variable
answer((answer(X):-E)) :- seto(X,E,S), respond(S).        % tuple of outputs
% seto(+Template,+Goal,-Set): like setof/3 but yields [] instead of failing.
seto(X,E,S) :- setof(X,satisfy(E),S), !.
seto(_X,_E,[]).
% holds(+Goal,-TruthValue): evaluate Goal once to true/false.
holds(E,true) :- satisfy(E), !.
holds(_E,false).
yesno(true) :- display('Yes.').
yesno(false) :- display('No.').
% replies(+Rest): continue the answer list with commas, 'and', final '.'.
replies([]) :- display('.').
replies([A]) :- display(' and '), reply(A), display('.').
replies([A|X]) :- display(', '), reply(A), replies(X).
% reply(+Answer): N--U denotes a number N with measurement unit U.
reply(N--U) :- !, write(N), display(' '), write(U).
reply(X) :- write(X).
% satisfy(+Goal): meta-interpreter for the planned logical form.
satisfy((P,Q)) :- !, satisfy(P), satisfy(Q).
satisfy({P}) :- !, satisfy(P), !.          % {P}: commit to first solution
satisfy(_X^P) :- !, satisfy(P).            % existential quantification
satisfy(\+P) :- satisfy(P), !, fail.       % negation as failure
satisfy(\+_P) :- !.
satisfy(numberof(X,P,N)) :- !, setof(X,satisfy(P),S), length(S,N).
satisfy(setof(X,P,S)) :- !, setof(X,satisfy(P),S).
satisfy(+P) :- exceptionto(P), !, fail.    % +P: P holds unless an exception exists
satisfy(+_P) :- !.
satisfy(X<Y) :- !, X<Y.
satisfy(X=<Y) :- !, X=<Y.
satisfy(X>=Y) :- !, X>=Y.
satisfy(X>Y) :- !, X>Y.
satisfy(P) :- P.                           % base case: call the database directly
% exceptionto(+P): true if some instance of P, with list arguments
% expanded element-wise via pick/2, is recorded as an exception.
exceptionto(P) :-
functor(P,F,N), functor(P1,F,N),
pickargs(N,P,P1),
exception(P1).
% exception(+P1): succeeds when P1 does NOT hold in the database.
exception(P) :- P, !, fail.
exception(_P).
% pickargs(+N,+P,-P1): copy arguments of P into P1, expanding lists.
pickargs(0,_,_) :- !.
pickargs(N,P,P1) :- N1 is N-1,
arg(N,P,S),
pick(S,X),
arg(N,P1,X),
pickargs(N1,P,P1).
% pick(+Arg,-Element): enumerate list elements, or pass non-lists through.
pick([X|_S],X).
pick([_|S],X) :- !, pick(S,X).
pick([],_) :- !, fail.
pick(X,X).
| AppliedLogicSystems/ALSProlog | examples/chat80/talkr.pl | Perl | mit | 3,252 |
package Etsy::StatsD;
use strict;
use warnings;
use IO::Socket;
use Carp;
=head1 NAME
Etsy::StatsD
=head1 DESCRIPTION
=cut
=over
=item new (HOST, PORT, SAMPLE_RATE)
Create a new instance.
=cut
sub new {
	my ($class, $host, $port, $sample_rate) = @_;
	# Sensible defaults for a local statsd daemon.
	$host = 'localhost' unless defined $host;
	$port = 8125 unless defined $port;
	# Direct method-call syntax instead of the discouraged indirect object
	# syntax ("new IO::Socket::INET(...)"); behavior is identical.
	# UDP socket creation succeeds without a listener on the far side.
	my $sock = IO::Socket::INET->new(
		PeerAddr => $host,
		PeerPort => $port,
		Proto    => 'udp',
	) or croak "Failed to initialize socket: $!";
	return bless { socket => $sock, sample_rate => $sample_rate }, $class;
}
=item timing(STAT, TIME, SAMPLE_RATE)
Log timing information
=cut
sub timing {
my ($self, $stat, $time, $sample_rate) = @_;
# $time is milliseconds; statsd timer wire syntax is "<value>|ms".
$self->send({$stat => "$time|ms"}, $sample_rate);
}
=item increment(STATS, SAMPLE_RATE)
Increment one or more stats counters.
=cut
sub increment {
my ($self, $stats, $sample_rate) = @_;
# Counter bump of +1; $stats may be one name or an arrayref of names.
$self->update($stats, 1, $sample_rate);
}
=item decrement(STATS, SAMPLE_RATE)
Decrement one or more stats counters.
=cut
sub decrement {
my ($self, $stats, $sample_rate) = @_;
# Counter bump of -1; $stats may be one name or an arrayref of names.
$self->update($stats, -1, $sample_rate);
}
=item update(STATS, DELTA, SAMPLE_RATE)
Update one or more stats counters by arbitrary amounts.
=cut
sub update {
	my ($self, $stats, $delta, $sample_rate) = @_;
	$delta = 1 unless defined $delta;
	# Build the "<delta>|c" counter payload once, then fan it out over
	# every stat name ($stats is a single name or an array reference).
	my $payload = "$delta|c";
	my @names = ref($stats) eq 'ARRAY' ? @{$stats} : ($stats);
	my %data = map { $_ => $payload } @names;
	$self->send(\%data, $sample_rate);
}
=item send(DATA, SAMPLE_RATE)
Send logging data; implicitly called by most of the other methods.
=back
=cut
sub send {
	my ($self, $data, $sample_rate) = @_;
	# Fall back to the rate configured at construction time.
	$sample_rate = $self->{sample_rate} unless defined $sample_rate;
	my $sampled_data = {};
	if ( defined($sample_rate) and $sample_rate < 1 ) {
		# Down-sample: keep each stat with probability $sample_rate and tag
		# its value with "|@rate" so the server can scale counts back up.
		# BUG FIX: the original iterated over the still-undefined
		# %$sampled_data instead of %$data, so a sampled send never
		# transmitted anything and died under "use strict" (undef used as
		# a hash reference).
		while (my($stat,$value) = each %$data) {
			$sampled_data->{$stat} = "$value|\@$sample_rate" if rand() <= $sample_rate;
		}
	} else {
		$sampled_data = $data;
	}
	# Nothing survived sampling: report zero sends, but stay "true".
	return '0 but true' unless keys %$sampled_data;
	#failures in any of this can be silently ignored
	my $count=0;
	my $socket = $self->{socket};
	while (my($stat,$value) = each %$sampled_data) {
		print $socket "$stat:$value\n";
		++$count;
	}
	return $count;
}
=head1 AUTHOR
Steve Sanbeg L<http://www.buzzfeed.com/stv>
=cut
1; | Kongwtf/statsd | examples/Etsy/StatsD.pm | Perl | mit | 2,292 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::aws::rds::mode::volume;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
my %map_type = (
"cluster" => "DbClusterIdentifier",
);
sub prefix_metric_output {
    my ($self, %options) = @_;
    my $iv = $options{instance_value};
    # e.g. "Cluster 'my-db' average " — the trailing space is part of the prefix.
    return sprintf("%s '%s' %s ", ucfirst($iv->{type}), $iv->{display}, $iv->{stat});
}
# Counter-framework calc callback: copy the raw CloudWatch datapoint
# (keyed "<instance>_<Metric>_<stat>" in new_datas) plus its collection
# timeframe into result_values, and derive a per-second rate.
sub custom_metric_calc {
my ($self, %options) = @_;
$self->{result_values}->{timeframe} = $options{new_datas}->{$self->{instance} . '_timeframe'};
$self->{result_values}->{value} = $options{new_datas}->{$self->{instance} . '_' . $options{extra_options}->{metric} . '_' . $options{extra_options}->{stat}};
# Rate over the whole sampling window, used when --per-sec is set.
$self->{result_values}->{value_per_sec} = $self->{result_values}->{value} / $self->{result_values}->{timeframe};
$self->{result_values}->{stat} = $options{extra_options}->{stat};
$self->{result_values}->{metric} = $options{extra_options}->{metric};
$self->{result_values}->{display} = $options{new_datas}->{$self->{instance} . '_display'};
# 0 = calculation succeeded (framework convention).
return 0;
}
# Threshold callback: check either the raw value or the per-second rate
# (when --per-sec is set) against the dynamically named
# warning-/critical-<metric>-<stat> thresholds.
sub custom_metric_threshold {
my ($self, %options) = @_;
my $exit = $self->{perfdata}->threshold_check(value => defined($self->{instance_mode}->{option_results}->{per_sec}) ? $self->{result_values}->{value_per_sec} : $self->{result_values}->{value},
threshold => [ { label => 'critical-' . lc($self->{result_values}->{metric}) . "-" . lc($self->{result_values}->{stat}), exit_litteral => 'critical' },
{ label => 'warning-' . lc($self->{result_values}->{metric}) . "-" . lc($self->{result_values}->{stat}), exit_litteral => 'warning' } ]);
return $exit;
}
# Perfdata callback for the IOPs metrics: emit "<metric>_<stat>[_<name>]"
# in ops or ops/s depending on --per-sec, with the matching thresholds.
sub custom_ops_perfdata {
my ($self, %options) = @_;
my $extra_label = '';
# Suffix the instance name when several instances are checked at once.
$extra_label = '_' . lc($self->{result_values}->{display}) if (!defined($options{extra_instance}) || $options{extra_instance} != 0);
$self->{output}->perfdata_add(label => lc($self->{result_values}->{metric}) . "_" . lc($self->{result_values}->{stat}) . $extra_label,
unit => defined($self->{instance_mode}->{option_results}->{per_sec}) ? 'ops/s' : 'ops',
value => sprintf("%.2f", defined($self->{instance_mode}->{option_results}->{per_sec}) ? $self->{result_values}->{value_per_sec} : $self->{result_values}->{value}),
warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . lc($self->{result_values}->{metric}) . "-" . lc($self->{result_values}->{stat})),
critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . lc($self->{result_values}->{metric}) . "-" . lc($self->{result_values}->{stat})),
);
}
# Human-readable output for the IOPs metrics: raw count ("ops") or rate
# ("ops/s") depending on the --per-sec option.
sub custom_ops_output {
    my ($self, %options) = @_;
    my $rv = $self->{result_values};
    my $per_sec = defined($self->{instance_mode}->{option_results}->{per_sec});
    return $per_sec
        ? sprintf("%s: %.2f ops/s", $rv->{metric}, $rv->{value_per_sec})
        : sprintf("%s: %.2f ops", $rv->{metric}, $rv->{value});
}
# Declare the counter definitions: for every statistic, a byte-valued
# VolumeBytesUsed counter (template-driven) and two IOPs counters that
# use the custom_* callbacks above for per-second handling.
sub set_counters {
my ($self, %options) = @_;
$self->{maps_counters_type} = [
{ name => 'metric', type => 1, cb_prefix_output => 'prefix_metric_output', message_multiple => "All volume metrics are ok", skipped_code => { -10 => 1 } },
];
foreach my $statistic ('minimum', 'maximum', 'average', 'sum') {
# Byte gauge: rendered with automatic unit scaling (output_change_bytes).
foreach my $metric ('VolumeBytesUsed') {
my $entry = { label => lc($metric) . '-' . lc($statistic), set => {
key_values => [ { name => $metric . '_' . $statistic }, { name => 'display' }, { name => 'type' }, { name => 'stat' }, { name => 'timeframe' } ],
output_template => $metric . ': %.2f %s',
output_change_bytes => 1,
perfdatas => [
{ label => lc($metric) . '_' . lc($statistic), value => $metric . '_' . $statistic . '_absolute',
template => '%.2f', unit => 'B', min => 0, label_extra_instance => 1, instance_use => 'display_absolute' },
],
}
};
push @{$self->{maps_counters}->{metric}}, $entry;
}
# IOPs counters: fully custom calc/output/perfdata/threshold closures
# so the value can be reported per-second when --per-sec is set.
foreach my $metric ('VolumeReadIOPs', 'VolumeWriteIOPs') {
my $entry = { label => lc($metric) . '-' . lc($statistic), set => {
key_values => [ { name => $metric . '_' . $statistic }, { name => 'display' }, { name => 'stat' }, { name => 'timeframe' } ],
closure_custom_calc => $self->can('custom_metric_calc'),
closure_custom_calc_extra_options => { metric => $metric, stat => $statistic },
closure_custom_output => $self->can('custom_ops_output'),
closure_custom_perfdata => $self->can('custom_ops_perfdata'),
closure_custom_threshold_check => $self->can('custom_metric_threshold'),
}
};
push @{$self->{maps_counters}->{metric}}, $entry;
}
}
}
# Constructor: register the mode-specific command-line options on top of
# the generic counter-template options added by the superclass.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$options{options}->add_options(arguments => {
"type:s" => { name => 'type', default => 'cluster' },
"name:s@" => { name => 'name' },
"filter-metric:s" => { name => 'filter_metric' },
"per-sec" => { name => 'per_sec' },
});
return $self;
}
# Validate options and precompute the AWS query parameters
# (instances, timeframe, period, statistics, metric list).
sub check_options {
my ($self, %options) = @_;
$self->SUPER::check_options(%options);
if (!defined($self->{option_results}->{type}) || $self->{option_results}->{type} eq '') {
$self->{output}->add_option_msg(short_msg => "Need to specify --type option.");
$self->{output}->option_exit();
}
# Only Aurora clusters expose the Volume* metrics handled here.
if ($self->{option_results}->{type} ne 'cluster') {
$self->{output}->add_option_msg(short_msg => "Instance type '" . $self->{option_results}->{type} . "' is not handled for this mode");
$self->{output}->option_exit();
}
if (!defined($self->{option_results}->{name}) || $self->{option_results}->{name} eq '') {
$self->{output}->add_option_msg(short_msg => "Need to specify --name option.");
$self->{output}->option_exit();
}
# --name may be given multiple times; keep the non-empty ones.
foreach my $instance (@{$self->{option_results}->{name}}) {
if ($instance ne '') {
push @{$self->{aws_instance}}, $instance;
}
}
# Defaults: 10 minutes of data at 1-minute granularity.
$self->{aws_timeframe} = defined($self->{option_results}->{timeframe}) ? $self->{option_results}->{timeframe} : 600;
$self->{aws_period} = defined($self->{option_results}->{period}) ? $self->{option_results}->{period} : 60;
$self->{aws_statistics} = ['Average'];
if (defined($self->{option_results}->{statistic})) {
$self->{aws_statistics} = [];
foreach my $stat (@{$self->{option_results}->{statistic}}) {
if ($stat ne '') {
# CloudWatch expects capitalized statistic names ("Average", ...).
push @{$self->{aws_statistics}}, ucfirst(lc($stat));
}
}
}
# Apply --filter-metric (regexp) to the supported metric set.
foreach my $metric ('VolumeBytesUsed', 'VolumeReadIOPs', 'VolumeWriteIOPs') {
next if (defined($self->{option_results}->{filter_metric}) && $self->{option_results}->{filter_metric} ne ''
&& $metric !~ /$self->{option_results}->{filter_metric}/);
push @{$self->{aws_metrics}}, $metric;
}
}
# Fetch the CloudWatch datapoints for every selected instance and build
# the $self->{metric} hash consumed by the counter definitions, keyed
# "<instance>_<stat>".
sub manage_selection {
my ($self, %options) = @_;
my %metric_results;
foreach my $instance (@{$self->{aws_instance}}) {
$metric_results{$instance} = $options{custom}->cloudwatch_get_metrics(
region => $self->{option_results}->{region},
namespace => 'AWS/RDS',
dimensions => [ { Name => $map_type{$self->{option_results}->{type}}, Value => $instance } , { Name => 'EngineName', Value => 'aurora' } ],
metrics => $self->{aws_metrics},
statistics => $self->{aws_statistics},
timeframe => $self->{aws_timeframe},
period => $self->{aws_period},
);
foreach my $metric (@{$self->{aws_metrics}}) {
foreach my $statistic (@{$self->{aws_statistics}}) {
# Missing datapoints are skipped unless --zeroed forces them to 0.
next if (!defined($metric_results{$instance}->{$metric}->{lc($statistic)}) && !defined($self->{option_results}->{zeroed}));
$self->{metric}->{$instance . "_" . lc($statistic)}->{display} = $instance;
$self->{metric}->{$instance . "_" . lc($statistic)}->{stat} = lc($statistic);
$self->{metric}->{$instance . "_" . lc($statistic)}->{type} = $self->{option_results}->{type};
$self->{metric}->{$instance . "_" . lc($statistic)}->{timeframe} = $self->{aws_timeframe};
$self->{metric}->{$instance . "_" . lc($statistic)}->{$metric . "_" . lc($statistic)} = defined($metric_results{$instance}->{$metric}->{lc($statistic)}) ? $metric_results{$instance}->{$metric}->{lc($statistic)} : 0;
}
}
}
if (scalar(keys %{$self->{metric}}) <= 0) {
$self->{output}->add_option_msg(short_msg => 'No metrics. Check your options or use --zeroed option to set 0 on undefined values');
$self->{output}->option_exit();
}
}
1;
__END__
=head1 MODE
Check RDS instances volume metrics.
Example:
perl centreon_plugins.pl --plugin=cloud::aws::rds::plugin --custommode=paws --mode=volume --region='eu-west-1'
--type='cluster' --name='centreon-db-ppd-cluster' --filter-metric='' --statistic='average'
--critical-volumebytesused-average='10' --verbose
Works for the following database engines : aurora.
See 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/rds-metricscollected.html' for more information.
Default statistic: 'average' / All statistics are valid.
=over 8
=item B<--type>
Set the instance type (Required) (Can be: 'cluster').
=item B<--name>
Set the instance name (Required) (Can be multiple).
=item B<--filter-metric>
Filter metrics (Can be: 'VolumeBytesUsed', 'VolumeReadIOPs', 'VolumeWriteIOPs')
(Can be a regexp).
=item B<--warning-$metric$-$statistic$>
Thresholds warning ($metric$ can be: 'volumebytesused', 'volumereadiops', 'volumewriteiops',
$statistic$ can be: 'minimum', 'maximum', 'average', 'sum').
=item B<--critical-$metric$-$statistic$>
Thresholds critical ($metric$ can be: 'volumebytesused', 'volumereadiops', 'volumewriteiops',
$statistic$ can be: 'minimum', 'maximum', 'average', 'sum').
=back
=cut
| Sims24/centreon-plugins | cloud/aws/rds/mode/volume.pm | Perl | apache-2.0 | 11,627 |
#!/usr/bin/env perl
use warnings;
use Getopt::Long;
$use = "g2sdf.pl [-mem nnGB] [-FNTag tagName|-FNPrefix prefix] [-template gausianTemplate.g]\n"
." [-fixTorAtomTag tag] sdfFiles\n"
."Writes one file per sdf record named by the value of tagname.\n"
."If -FNPrefix is given then the output .g files are numbered.\n"
."fixTorAtomTag . Specify name for sdf field containing , separated list of fixed torsion atoms (0 based)\n"
."If neither FNTag nor FNPrefix is given then all out is writen to inName.g\n"
." If input is from stdein (no inName) output will be to stdout\n"
."If tagname is TITLE the TILE is used.\n"
."Default template is AM1 optimization.\n"
."Tamplate must contain #XYZ# string\n"
."The default memis 10GB\n";
my($script)=$0;
$script =~ /^\// || ( $script="$ENV{PWD}/$script" );
my($installDir) = $script =~ /(.*)\/[^\/]+$/;
my($guasTemplateDir) = "$installDir/data/gaussian";
my( $tFile, $ofnTag,$fnPrefix, $mem, $fixTorAtomTag ) = ('', '', '', '10GB', '');
GetOptions("FNTag=s" =>\$ofnTag,"FNPrefix=s"=>\$fnPrefix, "template=s" =>\$tFile,
"mem=s" =>\$mem, "fixTorAtomTag=s" => \$fixTorAtomTag)
|| &exitHelp();
#read template
my($template) = '';
$tFile || ($tFile = "$guasTemplateDir/default.g");
if( -e $tFile )
{ $template = `cat $tFile`;
}else
{ if( -e "$guasTemplateDir/$tFile" )
{ $template = `cat "$guasTemplateDir/$tFile"`;
}else
{ die "tempalte $template not found!\n";
}
}
if( $fixTorAtomTag )
{ if( $template !~ /#FIX#/ )
{ die "Template needs to conatin #FIX# placeholder for fix atom specification\n";
}
if( $template !~ /ModRedundant/i )
{ die "Template must use ModRedundant option for gaussian to use fixed atoms\n";
}
}
# loop over input files
my($record,$fixTorAtoms) = ('','');
my( $num, $lastFName ) = ( 1, '');
while(<>)
{ if( $lastFName ne $ARGV )
{ if( $record && $lastFName )
{ # last file did not end in $$$$
$fName = &cleanFName($lastFName);
&outSDF( $fName, $num++, $record );
$record = '';
$fName = '';
$fixTorAtoms = '';
}
$lastFName = $ARGV;
}
$record .= $_;
if( ! $fName && $ofnTag eq 'TITLE' )
{ $fName = $_;
}
if( m/<$ofnTag>/ )
{ $fName = <>;
} elsif( m/<$fixTorAtomTag>/ )
{ $fixTorAtoms = <>;
$fixTorAtoms = &convertToAtList($fixTorAtoms);
$fixTorAtoms = "D $fixTorAtoms F";
} elsif( $_ =~ m/^\$\$\$\$/ )
{ if( !$ofnTag && ! $fnPrefix ) { $fName = &cleanFName($ARGV); };
&outSDF( $fName, $num++, $record );
$record = '';
$fName = '';
}
}
if( $record )
{ if( !$ofnTag && ! $fnPrefix ) { $fName = &cleanFName($ARGV); };
&outSDF( $fName, $num++, $record );
}
# Strip the final ".ext" suffix (e.g. ".sdf") from a file name, if any.
sub cleanFName
{ my ($inName) = @_;
  (my $baseName = $inName) =~ s/\.[^.]+$//;
  return $baseName;
}
# Convert one SDF record to Gaussian input and write it to a .g file
# (or stdout when reading stdin with no naming option). Uses the globals
# $template, $fnPrefix, $ofnTag, $mem, $fixTorAtoms and $ENV{OBABEL}.
sub outSDF
{ my( $fName, $num, $record ) = @_;
my( $gCom, $appendMode, $xyz, @xyz ) = ( '', '>', '' );
# Sanitize the record-derived file name (also repeated below before the
# actual open; this first pass feeds the #FName# template substitution).
if( $fName )
{ $fName =~ s/\s$//g;
$fName =~ s/\s/_/g;
$fName =~ s/\.sdf$//ig;
}
if( $fnPrefix )
{ $fName = "$fnPrefix$num";
$appendMode = '>';
}
# NOTE(review): $appendMode is '>' in every path, so writing all records
# to a single inName.g overwrites instead of appending as the usage text
# suggests — confirm whether '>>' was intended for that case.
# Convert the SDF record to Gaussian cartesian format via openbabel.
$xyz = `$ENV{OBABEL} -isdf -ogau <<sdf2g\n$record\nsdf2g`;
@xyz = split(/\n/, $xyz);
# Drop the 4 header lines obabel puts before the coordinate block.
$xyz = join("\n", @xyz[4 .. $#xyz]);
# Fill in the template placeholders.
$gCom = $template;
$gCom =~ s/#XYZ#/$xyz/g;
$gCom =~ s/#FName#/$fName/g;
$gCom =~ s/#mem#/$mem/g;
$gCom =~ s/#FIX#/$fixTorAtoms/g;
if( $fName eq "-" && !$ofnTag && ! $fnPrefix )
{ # input was from stdin ("-") and no ofnTag or FNPrefix was given
print $gCom;
} else
{ if( $fName )
{ $fName =~ s/\s$//g;
$fName =~ s/\s/_/g;
$fName =~ s/\.sdf$//ig;
}
if( $fnPrefix )
{ $fName = "$fnPrefix$num";
$appendMode = '>';
}
open(OFILE, "$appendMode$fName.g") || die "$appendMode$fName.g: $!";
print(OFILE $gCom);
close(OFILE);
}
}
# Print the usage message plus the list of bundled Gaussian templates
# found in $guasTemplateDir, then exit via die.
sub exitHelp
{ warn("$use\nAvailable default templates (check: $guasTemplateDir):\n");
while( <$guasTemplateDir/*.g>)
{ s/.*\///;
warn("$_\n");
}
die "\n";
}
# Convert a space-separated list of 0-based atom indices to the 1-based
# numbering Gaussian expects.
sub convertToAtList
{ my @zeroBased = split(/ /, $_[0]);
  my @oneBased = map { $_ + 1 } @zeroBased;
  return join(" ", @oneBased);
}
| chemalot/chemalot | bin/sdf2g.pl | Perl | apache-2.0 | 4,274 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::sun::mgmt_cards::lib::telnet;
use strict;
use warnings;
use Net::Telnet;
# Report a Net::Telnet failure through the centreon output object and
# terminate the plugin with UNKNOWN status (does not return).
sub telnet_error {
    my ($output_obj, $message) = @_;
    $output_obj->output_add(severity => 'UNKNOWN', short_msg => $message);
    $output_obj->display();
    $output_obj->exit();
}
# Open a telnet session, optionally run a setup closure, perform the
# login dialogue, and return the connected Net::Telnet handle.
# (Named "connect" deliberately; it shadows nothing here since it is
# always called as a package function.)
sub connect {
my (%options) = @_;
my $telnet_connection = new Net::Telnet(Timeout => $options{timeout});
$telnet_connection->open(Host => $options{hostname},
Port => $options{port},
Errmode => 'return') or telnet_error($options{output}, $telnet_connection->errmsg);
# Hook for card-specific preamble (e.g. escape sequences) before login.
if (defined($options{closure})) {
&{$options{closure}}($telnet_connection);
}
if (defined($options{username}) && $options{username} ne "") {
$telnet_connection->waitfor(Match => '/login: $/i', Errmode => "return") or telnet_error($options{output}, $telnet_connection->errmsg);
$telnet_connection->print($options{username});
}
if (defined($options{password}) && $options{password} ne "") {
$telnet_connection->waitfor(Match => '/password: $/i', Errmode => "return") or telnet_error($options{output}, $telnet_connection->errmsg);
$telnet_connection->print($options{password});
# Check if successful
my ($prematch, $match);
# A re-displayed login/username/password prompt means authentication
# failed; reaching the shell prompt (or special_wait) means success.
if (defined($options{special_wait})) {
($prematch, $match) = $telnet_connection->waitfor(Match => '/login[: ]*$/i',
Match => '/username[: ]*$/i',
Match => '/password[: ]*$/i',
Match => '/' . $options{special_wait} . '/i',
Match => $telnet_connection->prompt,
Errmode => "return") or
telnet_error($options{output}, $telnet_connection->errmsg);
} else {
($prematch, $match) = $telnet_connection->waitfor(Match => '/login[: ]*$/i',
Match => '/username[: ]*$/i',
Match => '/password[: ]*$/i',
Match => $telnet_connection->prompt,
Errmode => "return") or
telnet_error($options{output}, $telnet_connection->errmsg);
}
if ($match =~ /login[: ]*$/i or $match =~ /username[: ]*$/i or $match =~ /password[: ]*$/i) {
$options{output}->output_add(severity => 'UNKNOWN',
short_msg => 'Login failed: bad name or password');
$options{output}->display();
$options{output}->exit();
}
}
# Sometimes need special characters
# Caller wants the raw session without waiting for a shell prompt.
if (defined($options{noprompt})) {
return $telnet_connection;
}
# Passwordless sessions still need to sync on the first prompt.
if (!(defined($options{password}) && $options{password} ne "")) {
$telnet_connection->waitfor(Match => $telnet_connection->prompt,
Errmode => "return") or telnet_error($options{output}, $telnet_connection->errmsg);
}
return $telnet_connection;
}
1;
__END__
| s-duret/centreon-plugins | hardware/server/sun/mgmt_cards/lib/telnet.pm | Perl | apache-2.0 | 4,227 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2021] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
import_vcf_compress.pl - imports genotypes directly into
compressed_genotype_single_bp from a VCF file
=cut
use strict;
use Bio::EnsEMBL::Registry;
use Getopt::Long;
use FileHandle;
use Data::Dumper;
use Time::HiRes qw(gettimeofday tv_interval);
use ImportUtils qw(debug load);
use constant DISTANCE => 100_000;
use constant MAX_SHORT => 2**16 -1;
my %Printable = ( "\\"=>'\\', "\r"=>'r', "\n"=>'n', "\t"=>'t', "\""=>'"' );
# get command-line options
my ($in_file, $species, $registry_file, $help, $host, $user, $password, $source, $population, $flank_size, $TMP_DIR, $TMP_FILE, $skip_multi, $use_gp, $sample_prefix);
my $args = scalar @ARGV;
GetOptions(
'input_file=s' => \$in_file,
'species=s' => \$species,
'registry=s' => \$registry_file,
'db_host=s' => \$host,
'user=s' => \$user,
'password=s' => \$password,
'help' => \$help,
'population=s' => \$population,
'flank=s' => \$flank_size,
'tmpdir=s' => \$TMP_DIR,
'tmpfile=s' => \$TMP_FILE,
'skip_multi' => \$skip_multi,
'gp' => \$use_gp,
'prefix=s' => \$sample_prefix,
);
# set defaults
$species ||= "human";
$flank_size ||= 200;
$sample_prefix ||= "";
# print usage message if requested or no args supplied
if(defined($help) || !$args) {
&usage;
exit(0);
}
die "ERROR: tmpdir not specified" unless defined $TMP_DIR;
$TMP_FILE ||= 'compress.txt';
$ImportUtils::TMP_DIR = $TMP_DIR;
$ImportUtils::TMP_FILE = $TMP_FILE;
# get registry
my $reg = 'Bio::EnsEMBL::Registry';
# load DB options from registry file if given
if(defined($registry_file)) {
$reg->load_all($registry_file);
}
# otherwise manually connect to DB server
else {
die "ERROR: Host or user not defined" unless defined $host and defined $user;
if(defined($password)) {
$reg->load_registry_from_db(-host => $host,-user => $user, -pass => $password);
}
else {
$reg->load_registry_from_db(-host => $host,-user => $user);
}
}
# define the filehandle to read input from
my $in_file_handle = new FileHandle;
if(defined($in_file)) {
# check defined input file exists
die("ERROR: Could not find input file ", $in_file, "\n") unless -e $in_file;
$in_file_handle->open($in_file) or die("ERROR: Could not read from input file ", $in_file, "\n");
}
# no file specified - try to read data off command line
else {
$in_file_handle = 'STDIN';
}
# connect to DB
my $vdba = Bio::EnsEMBL::Registry->get_DBAdaptor($species,'variation')
|| usage( "Cannot find variation db for $species in $registry_file" );
my $dbVar = $vdba->dbc->db_handle;
# get seq_region_id hash
my %seq_region_ids;
my ($seq_region_id, $chr_name);
my $sth = $dbVar->prepare(qq{SELECT seq_region_id, name FROM seq_region});
$sth->execute;
$sth->bind_columns(\$seq_region_id, \$chr_name);
$seq_region_ids{$chr_name} = $seq_region_id while $sth->fetch;
die("ERROR: seq_region not populated\n") unless scalar keys %seq_region_ids;
# now do population
die("ERROR: no population specified\n") unless defined $population;
our $pop_id;
# check existing
$sth = $dbVar->prepare(qq{select sample_id from sample where name = ?});
$sth->execute($population);
$sth->bind_columns(\$pop_id);
$sth->fetch;
if(!defined($pop_id)) {
# insert into sample
$sth = $dbVar->prepare(qq{insert into sample(name) values(?)});
$sth->execute($population);
$pop_id = $dbVar->last_insert_id(undef, undef, qw(sample sample_id));
# insert into population
$sth = $dbVar->prepare(qq{insert ignore into population(sample_id) values(?)});
$sth->execute($pop_id);
}
# create tmp table for indel/multibp genotypes
$sth = $dbVar->do(qq{
CREATE TABLE IF NOT EXISTS `individual_genotype_multiple_bp_named` (
`name` varchar(255) NOT NULL,
`allele_1` varchar(255) DEFAULT NULL,
`allele_2` varchar(255) DEFAULT NULL,
`sample_id` int(10) unsigned DEFAULT NULL,
KEY `variation_idx` (`name`),
KEY `sample_idx` (`sample_id`)
);
});
my (%headers, $first_sample_col, @sample_ids, $region_start, $region_end, $prev_region_end, $prev_seq_region, $genotypes);
my $start_time = time();
my $var_counter = 0;
our %times;
&start(5000);
# read the file
while(<$in_file_handle>) {
chomp;
# header lines
next if /^##/;
my @split = split /\t/;
# column definition line
if(/^#/) {
$headers{$split[$_]} = $_ for(0..$#split);
$first_sample_col = $headers{FORMAT} + 1;
# populate sample-type tables
$sth = $dbVar->prepare(qq{INSERT INTO sample(name) VALUES(?)});
my $sth2 = $dbVar->prepare(qq{INSERT INTO individual_population(population_sample_id, individual_sample_id) VALUES(?,?)});
my $sth3 = $dbVar->prepare(qq{INSERT INTO individual(sample_id, individual_type_id) VALUES(?,?)});
my $sth4 = $dbVar->prepare(qq{select sample_id from sample where name = ?});
for my $i($first_sample_col..$#split) {
my $sample_name = $sample_prefix.$split[$i];
my $sample_id;
$sth4->execute($sample_name);
$sth4->bind_columns(\$sample_id);
$sth4->fetch;
if(!$sample_id) {
$sth->execute($sample_name);
$sample_id = $dbVar->last_insert_id(undef, undef, qw(sample sample_id));
$sth2->execute($pop_id, $sample_id);
$sth3->execute($sample_id, 3);
}
push @sample_ids, $sample_id;
}
}
# data
else {
$var_counter++;
if($var_counter =~ /0000$/) {
warn "COUNTER $var_counter";
&end(10000);
&start(10000);
}
# parse into a hash
my %data = ();
$data{$_} = $split[$headers{$_}] for keys %headers;
# skip non-variant lines
#next if $data{ALT} eq '.';
# positions
my ($start, $end, $chr, $seq_region);
if($use_gp) {
foreach my $pair(split /\;/, $data{INFO}) {
my ($key, $value) = split /\=/, $pair;
if($key eq 'GP') {
($chr,$start) = split /\:/, $value;
$seq_region = $seq_region_ids{$chr};
$end = $start;
}
}
next unless defined($seq_region) and defined($start);
}
else {
($start, $end) = ($data{POS}, $data{POS});
$seq_region = $seq_region_ids{$data{'#CHROM'}};
}
# Work out if this variant is an indel or a multi-bp substitution.
my $is_indel = 0;
my $is_multi = 0;
$is_multi = 1 if length($data{REF}) > 1;
# BUG FIX: scan the comma-separated ALT alleles; the original re-split
# $data{REF} here (REF never contains commas in VCF), so records with a
# 1bp REF but a multi-bp ALT allele were never flagged as multi.
foreach my $alt(split /\,/, $data{ALT}) {
	$is_multi = 1 if length($alt) > 1;
}
if($data{ALT} =~ /D|I/) {
$is_indel = 1;
if($data{ALT} =~ /\,/) {
warn "WARNING: can't deal with multiple different indel types in one variation";
next;
}
# deletion
if($data{ALT} =~ /D/) {
my $num_deleted = $data{ALT};
$num_deleted =~ s/\D+//g;
$end += $num_deleted - 1;
$data{ALT} = "-";
}
# insertion
else {
$data{REF} = '-';
my $insert = $data{ALT};
$insert =~ s/^I//g;
$data{ALT} = $insert;
$start++;
}
}
next if $skip_multi && ($is_indel || $is_multi);
# get alleles
my @alleles = ($data{REF}, split /\,/, $data{ALT});
# get genotypes
my @genotypes;
# now do genotypes
my @rows;
for my $i($first_sample_col..$#split) {
my $sample_id = $sample_ids[$i-$first_sample_col];
my @bits;
my $gt = (split /\:/, $split[$i])[0];
foreach my $bit(split /\||\/|\\/, $gt) {
push @bits, $alleles[$bit] unless $bit eq '.';
}
# for indels, write directly to temp table
if($is_indel || $is_multi) {
push @rows,
"(".
(join ",",
(
'"'.$data{ID}.'"',
'"'.($bits[0] || '.').'"',
'"'.($bits[1] || '.').'"',
$sample_id
)
).
")";
}
# otherwise add to compress hash for writing later
elsif(scalar @bits) {
if (!defined $genotypes->{$sample_id}->{region_start}){
$genotypes->{$sample_id}->{region_start} = $start;
$genotypes->{$sample_id}->{region_end} = $end;
}
# write previous data?
#compare with the beginning of the region if it is within the DISTANCE of compression
if (
(abs($genotypes->{$sample_id}->{region_start} - $start) > DISTANCE()) ||
(abs($start - $genotypes->{$sample_id}->{region_end}) > MAX_SHORT) ||
(defined($prev_seq_region) && $seq_region != $prev_seq_region)
) {
#snp outside the region, print the region for the sample we have already visited and start a new one
print_file("$TMP_DIR/compressed_genotype.txt",$genotypes, $prev_seq_region, $sample_id);
delete $genotypes->{$sample_id}; #and remove the printed entry
$genotypes->{$sample_id}->{region_start} = $start;
}
# not first genotype
if ($start != $genotypes->{$sample_id}->{region_start}){
#compress information
my $blob = pack ("n",$start - $genotypes->{$sample_id}->{region_end} - 1);
$genotypes->{$sample_id}->{genotypes} .= &escape($blob).($bits[0] || '-').($bits[1] || '0');
}
# first genotype
else{
$genotypes->{$sample_id}->{genotypes} = ($bits[0] || '-').($bits[1] || '0');
}
$genotypes->{$sample_id}->{region_end} = $start;
}
}
# write indel/multibp genotypes
if(scalar @rows) {
my $vals = join ",", @rows;
$sth = $dbVar->prepare(qq{INSERT INTO individual_genotype_multiple_bp_named(name, allele_1, allele_2, sample_id) values$vals});
$sth->execute;
}
$prev_seq_region = $seq_region;
}
}
print_file("$TMP_DIR/compressed_genotype.txt",$genotypes, $prev_seq_region);
&import_genotypes($dbVar);
$dbVar->do(qq{DELETE FROM individual_genotype_multiple_bp_named WHERE allele_1 = '.' AND allele_2 = '.';});
&end(10000);
print "Took ", time() - $start_time, " to run\n";
# Print the command-line help text to STDOUT.
sub usage {
my $usage =<<END;
Usage:
perl import_vcf_compress.pl [arguments]
Options
-h | --help Display this message and quit
-i | --input_file Input file - if not specified, reads from STDIN
--species Species to use [default: "human"]
--population Name of population [required]
--skip_multi Do not import multi bp or indel genotypes
--gp Use the GP info tag to extract location information
--prefix String to add as a prefix to sample names
-d | --db_host Manually define database host [default: "ensembldb.ensembl.org"]
-u | --user Database username [default: "anonymous"]
--password Database password [default: not used]
-r | --registry_file Registry file to use defines DB connections [default: not used]
Defining a registry file overrides above connection settings.
END
print $usage;
}
# Start (or restart) the named wall-clock timer in the global %times
# hash; end() with the same id reports the elapsed time.
sub start {
    my ($id) = @_;
    $id = '-' unless $id;
    $times{$id} = [gettimeofday];
}
# Report (via warn) the elapsed time since start() was called with the
# same id; both default to the anonymous timer '-'.
sub end {
my $id = shift;
$id ||= '-';
warn "Time for $id : ", tv_interval($times{$id}), "\n";
}
# Append compressed genotype rows to $file. With no $sample_id, flush
# every accumulated sample region (used at end of run / chromosome
# change); with a $sample_id, flush only that sample's current region.
# NOTE(review): 2-arg open with a bareword handle; acceptable since
# $file is script-controlled, but the 3-arg lexical form is preferred.
sub print_file{
my $file = shift;
my $genotypes = shift;
my $seq_region_id = shift;
my $sample_id = shift;
open( FH, ">>$file") or die "Could not add compressed information: $!\n";
if (!defined $sample_id){
#new chromosome, print all the genotypes and flush the hash
foreach my $sample_id (keys %{$genotypes}){
print FH join("\t",
$sample_id,
$seq_region_id,
$genotypes->{$sample_id}->{region_start},
$genotypes->{$sample_id}->{region_end},
1,
$genotypes->{$sample_id}->{genotypes}) . "\n";
}
}
else{
#only print the region corresponding to sample_id
print FH join("\t",
$sample_id,
$seq_region_id,
$genotypes->{$sample_id}->{region_start},
$genotypes->{$sample_id}->{region_end},
1,
$genotypes->{$sample_id}->{genotypes}) . "\n";
}
close FH;
}
# $special_characters_escaped = printable( $source_string );
# Backslash-escape CR, LF, TAB, backslash and double-quote (via the
# %Printable map) so packed genotype blobs survive the tab-delimited
# bulk-load file format. Undef input is treated as ''.
# NOTE(review): the ($) prototype forces scalar context on the argument;
# prototypes are discouraged in general but kept for compatibility.
sub escape ($) {
local $_ = ( defined $_[0] ? $_[0] : '' );
s/([\r\n\t\\\"])/\\$Printable{$1}/sg;
return $_;
}
# Rename the accumulated dump to the name load() expects and bulk-load
# it into compressed_genotype_single_bp.
sub import_genotypes{
	my $dbVar = shift;
	warn("Importing compressed genotype data");
	# List-form system() avoids shell interpolation problems when
	# $TMP_DIR contains spaces or shell metacharacters (the original
	# built a single "mv ..." shell string and ignored its exit status).
	system('mv', "$TMP_DIR/compressed_genotype.txt", "$TMP_DIR/$TMP_FILE") == 0
		or warn("WARNING: could not move $TMP_DIR/compressed_genotype.txt: $?\n");
	load($dbVar,qw(compressed_genotype_single_bp sample_id seq_region_id seq_region_start seq_region_end seq_region_strand genotypes));
}
| Ensembl/ensembl-variation | scripts/import/import_vcf_compress.pl | Perl | apache-2.0 | 12,740 |
# Moose class for the ECS "Cluster" API shape. Each attribute maps a
# Perl accessor to the lower-camelCase wire name via request_name
# (NameInRequest trait); all attributes are read-only and optional.
package Paws::ECS::Cluster;
use Moose;
has ActiveServicesCount => (is => 'ro', isa => 'Int', request_name => 'activeServicesCount', traits => ['NameInRequest']);
has ClusterArn => (is => 'ro', isa => 'Str', request_name => 'clusterArn', traits => ['NameInRequest']);
has ClusterName => (is => 'ro', isa => 'Str', request_name => 'clusterName', traits => ['NameInRequest']);
has PendingTasksCount => (is => 'ro', isa => 'Int', request_name => 'pendingTasksCount', traits => ['NameInRequest']);
has RegisteredContainerInstancesCount => (is => 'ro', isa => 'Int', request_name => 'registeredContainerInstancesCount', traits => ['NameInRequest']);
has RunningTasksCount => (is => 'ro', isa => 'Int', request_name => 'runningTasksCount', traits => ['NameInRequest']);
has Status => (is => 'ro', isa => 'Str', request_name => 'status', traits => ['NameInRequest']);
1;
### main pod documentation begin ###
=head1 NAME
Paws::ECS::Cluster
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::ECS::Cluster object:
$service_obj->Method(Att1 => { ActiveServicesCount => $value, ..., Status => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::ECS::Cluster object:
$result = $service_obj->Method(...);
$result->Att1->ActiveServicesCount
=head1 DESCRIPTION
A regional grouping of one or more container instances on which you can
run task requests. Each account receives a default cluster the first
time you use the Amazon ECS service, but you may also create other
clusters. Clusters may contain more than one instance type
simultaneously.
=head1 ATTRIBUTES
=head2 ActiveServicesCount => Int
The number of services that are running on the cluster in an C<ACTIVE>
state. You can view these services with ListServices.
=head2 ClusterArn => Str
The Amazon Resource Name (ARN) that identifies the cluster. The ARN
contains the C<arn:aws:ecs> namespace, followed by the region of the
cluster, the AWS account ID of the cluster owner, the C<cluster>
namespace, and then the cluster name. For example,
C<arn:aws:ecs:I<region>:I<012345678910>:cluster/I<test> >.
=head2 ClusterName => Str
A user-generated string that you use to identify your cluster.
=head2 PendingTasksCount => Int
The number of tasks in the cluster that are in the C<PENDING> state.
=head2 RegisteredContainerInstancesCount => Int
The number of container instances registered into the cluster.
=head2 RunningTasksCount => Int
The number of tasks in the cluster that are in the C<RUNNING> state.
=head2 Status => Str
The status of the cluster. The valid values are C<ACTIVE> or
C<INACTIVE>. C<ACTIVE> indicates that you can register container
instances with the cluster and the associated instances can accept
tasks.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::ECS>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/ECS/Cluster.pm | Perl | apache-2.0 | 3,371 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Document::HTML::DataAccess;
## Show appropriate information about data access, based on available services
use strict;
use base qw(EnsEMBL::Web::Document::HTML);
## Build the data-access overview page: a two-column table of cells, each
## describing one retrieval route (export, REST, FTP, BioMart).  Cells for
## REST, FTP and BioMart are only included when the corresponding service is
## configured in the species defs.  Returns the assembled HTML string.
sub render {
    my $self = shift;
    my $sd = $self->hub->species_defs;
    my @cells;
    ## Page export -- always available
    push @cells, qq(<h2>Small quantities of data</h2>
<p><a href="/info/data/export.html"><img src="/img/download_sequence.gif" class="float-right" style="width:200px;height:100px" alt="" title="Find out more" /></a>Many of the pages displaying Ensembl genomic data offer an <a href="export.html">export</a>
option, suitable for small amounts of data, e.g. a single gene sequence.</p>
<p>Click on the 'Export data' button in the lefthand menu of most pages to export:</p>
<ul>
<li>FASTA sequence</li>
<li>GTF or GFF features</li>
</ul>
<p>...and more!</p>
);
    ## REST -- only when present and publicly reachable
    my $rest = $sd->ENSEMBL_REST_URL;
    my $internal_only = $sd->ENSEMBL_REST_INTERNAL_ONLY;
    if ($rest && !$internal_only) {
        push @cells, qq(
<h2>Fast programmatic access</h2>
<p><a href="$rest"><img src="/img/download_api.gif" class="float-right" style="width:200px;height:100px" alt="" title="Visit our REST site" /></a>For fast access in any programming language, we recommend using our <a href="$rest">REST server</a>. Various REST endpoints provide access to vast amounts of Ensembl data.</p>
);
    }
    ## File downloads
    my $ftp = $sd->ENSEMBL_FTP_URL;
    if ($ftp) {
        ## BUGFIX: the image link previously pointed at $rest (undef when the
        ## REST service is disabled); it now targets the FTP site like the
        ## text link beside it.
        push @cells, qq(
<h2>Complete datasets and databases</h2>
<p><a href="$ftp"><img src="/img/download_code.gif" class="float-right" style="width:200px;height:100px" alt="" title="Find out more" /></a>Many datasets, e.g. all genes for a species, are available to download in a variety of formats from our <a href="$ftp">FTP site</a>.</p>
<p>Entire databases are also available via FTP as MySQL dumps.</p>
);
    }
    ## Show Biomart info
    if ($sd->ENSEMBL_MART_ENABLED) {
        push @cells, qq(
<h2>Complex cross-database queries</h2>
<p><a href="/biomart/martview"><img src="/img/download_mart.gif" class="float-right" style="width:200px;height:100px" alt="" title="Try BioMart" /></a>More complex datasets can be retrieved using the <a href="biomart/">BioMart</a> data-mining tool.</p>
);
    }
    ## Lay the cells out two per table row
    my $html = '<table class="blobs">';
    my $count = 0;
    foreach my $cell (@cells) {
        $html .= '<tr>' if ($count % 2 == 0);
        $html .= qq(<td>$cell</td>);
        $html .= '</tr>' if ($count % 2 == 1);
        $count++;
    }
    ## BUGFIX: close the last row when an odd number of cells was emitted,
    ## otherwise the table ended with an unclosed <tr>.
    $html .= '</tr>' if ($count % 2 == 1);
    $html .= '</table>';
    return $html;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Document/HTML/DataAccess.pm | Perl | apache-2.0 | 3,268 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::oracle::zs::restapi::mode::pools;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold catalog_status_calc);
# Human-readable status line for one pool, e.g. "status : online".
sub custom_status_output {
    my ($self, %options) = @_;
    return 'status : ' . $self->{result_values}->{status};
}
# Render the pool's space usage as "total / used (pct) / free (pct)", with
# byte counts humanized through the framework's change_bytes() helper.
sub custom_usage_output {
    my ($self, %options) = @_;
    my $values = $self->{result_values};
    # change_bytes() returns a (value, unit) pair; join them for display.
    my %human;
    foreach my $metric (qw(total_space used_space free_space)) {
        $human{$metric} = join(' ', $self->{perfdata}->change_bytes(value => $values->{$metric}));
    }
    return sprintf(
        'space usage total: %s used: %s (%.2f%%) free: %s (%.2f%%)',
        $human{total_space},
        $human{used_space}, $values->{prct_used_space},
        $human{free_space}, $values->{prct_free_space}
    );
}
# Declare the counter tree for this mode: one 'pool' group (type 1 = one
# instance per pool, keyed by name) holding a status counter and three
# space-usage counters (absolute used, absolute free, percent used).
# Thresholds on 'status' are handled by the catalog closures rather than
# numeric comparison (threshold => 0).
sub set_counters {
    my ($self, %options) = @_;
    $self->{maps_counters_type} = [
        { name => 'pool', type => 1, cb_prefix_output => 'prefix_pool_output', message_multiple => 'All pools are ok', skipped_code => { -10 => 1 } },
    ];
    $self->{maps_counters}->{pool} = [
        # Pool status, evaluated against the --*-status expressions.
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'status' }, { name => 'display' } ],
                closure_custom_calc => \&catalog_status_calc,
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
        # Used bytes (perfdata capped at total_space).
        { label => 'usage', nlabel => 'pool.space.usage.bytes', set => {
                key_values => [ { name => 'used_space' }, { name => 'free_space' }, { name => 'prct_used_space' }, { name => 'prct_free_space' }, { name => 'total_space' }, { name => 'display' }, ],
                closure_custom_output => $self->can('custom_usage_output'),
                perfdatas => [
                    { value => 'used_space', template => '%d', min => 0, max => 'total_space',
                      unit => 'B', cast_int => 1, label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        # Free bytes (hidden from the human output by display_ok => 0).
        { label => 'usage-free', nlabel => 'pool.space.free.bytes', display_ok => 0, set => {
                key_values => [ { name => 'free_space' }, { name => 'used_space' }, { name => 'prct_used_space' }, { name => 'prct_free_space' }, { name => 'total_space' }, { name => 'display' }, ],
                closure_custom_output => $self->can('custom_usage_output'),
                perfdatas => [
                    { value => 'free_space', template => '%d', min => 0, max => 'total_space',
                      unit => 'B', cast_int => 1, label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
        # Percent used (also hidden from the human output).
        { label => 'usage-prct', nlabel => 'pool.space.usage.percentage', display_ok => 0, set => {
                key_values => [ { name => 'prct_used_space' }, { name => 'display' } ],
                output_template => 'used : %.2f %%',
                perfdatas => [
                    { value => 'prct_used_space', template => '%.2f', min => 0, max => 100,
                      unit => '%', label_extra_instance => 1, instance_use => 'display' },
                ],
            }
        },
    ];
}
# Constructor: registers this mode's command-line options on top of the
# generic counter-template plugin options.
# --filter-name filters pools by name (regexp); the *-status options are
# status expressions compiled later by check_options().
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
    bless $self, $class;
    $options{options}->add_options(arguments => {
        'filter-name:s'     => { name => 'filter_name' },
        'unknown-status:s'  => { name => 'unknown_status', default => '' },
        'warning-status:s'  => { name => 'warning_status', default => '' },
        'critical-status:s' => { name => 'critical_status', default => '%{status} !~ /online|exported/i' },
    });
    return $self;
}
# Post-parse hook: expand the %{...} macros inside the status-threshold
# expressions so they can be evaluated per pool instance.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);
    $self->change_macros(macros => ['warning_status', 'critical_status', 'unknown_status']);
}
# Prefix printed before each pool's counters, e.g. "Pool 'tank' ".
sub prefix_pool_output {
    my ($self, %options) = @_;
    return sprintf("Pool '%s' ", $options{instance_value}->{display});
}
# Fetch the pool list from the ZFS appliance REST API and build the counter
# instances.  Pools excluded by --filter-name are skipped (logged in debug);
# exits with an error if nothing is left.
# BUGFIX: percentages are only computed when the reported total is non-zero;
# previously a pool reporting total == 0 caused a fatal
# "Illegal division by zero".
sub manage_selection {
    my ($self, %options) = @_;

    my $results = $options{custom}->request_api(url_path => '/api/storage/v1/pools');
    $self->{pool} = {};
    foreach my $entry (@{$results->{pools}}) {
        if (defined($self->{option_results}->{filter_name}) && $self->{option_results}->{filter_name} ne '' &&
            $entry->{name} !~ /$self->{option_results}->{filter_name}/) {
            $self->{output}->output_add(long_msg => "skipping pool '" . $entry->{name} . "': no matching filter.", debug => 1);
            next;
        }

        my ($total, $used, $free) = ($entry->{usage}->{total}, $entry->{usage}->{used}, $entry->{usage}->{free});
        $self->{pool}->{$entry->{name}} = {
            display     => $entry->{name},
            status      => $entry->{status},
            total_space => $total,
            used_space  => $used,
            free_space  => $free,
            # undef when the appliance reports no (or zero) capacity.
            prct_used_space => (defined($total) && $total > 0) ? ($used * 100 / $total) : undef,
            prct_free_space => (defined($total) && $total > 0) ? ($free * 100 / $total) : undef,
        };
    }

    if (scalar(keys %{$self->{pool}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No pool found");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check pool usages.
=over 8
=item B<--filter-counters>
Only display some counters (regexp can be used).
Example: --filter-counters='^usage$'
=item B<--filter-name>
Filter pool name (can be a regexp).
=item B<--unknown-status>
Set unknown threshold for status (Default: '').
Can use special variables like: %{status}, %{display}
=item B<--warning-status>
Set warning threshold for status (Default: '').
Can use special variables like: %{status}, %{display}
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} !~ /online|exported/i').
Can use special variables like: %{status}, %{display}
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'usage' (B), 'usage-free' (B), 'usage-prct' (%).
=back
=cut
| Tpo76/centreon-plugins | storage/oracle/zs/restapi/mode/pools.pm | Perl | apache-2.0 | 7,365 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::vmware::connector::mode::countvmhost;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold_ng);
# Human-readable connection state for one ESX host, e.g. "status connected".
sub custom_status_output {
    my ($self, %options) = @_;
    return sprintf('status %s', $self->{result_values}->{status});
}
# Copy the per-instance '<instance>_state' datum into result_values->{status}
# for the status threshold closures.  Always returns 0 (success).
sub custom_status_calc {
    my ($self, %options) = @_;
    my $state_key = $self->{instance} . '_state';
    $self->{result_values}->{status} = $options{new_datas}->{$state_key};
    return 0;
}
# Declare the counter tree: a 'global' group (type 0, aggregated across all
# hosts) and a 'host' group (type 1, one instance per ESX host).  Both carry
# poweredon/poweredoff/suspended VM counts; the host group additionally has a
# 'status' counter checked against the --*-status expressions (unknown by
# default when the host is not connected).
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'global', type => 0, skipped_code => { -10 => 1 } },
        { name => 'host', type => 1, cb_prefix_output => 'prefix_host_output', message_multiple => 'All ESX Hosts are ok' }
    ];

    # Aggregated counts over every selected host.
    $self->{maps_counters}->{global} = [
        { label => 'total-on', nlabel => 'host.vm.poweredon.current.count', set => {
                key_values => [ { name => 'poweredon' }, { name => 'total' } ],
                output_template => '%s VM(s) poweredon',
                perfdatas => [
                    { label => 'poweredon', template => '%s', min => 0, max => 'total' }
                ]
            }
        },
        { label => 'total-off', nlabel => 'host.vm.poweredoff.current.count', set => {
                key_values => [ { name => 'poweredoff' }, { name => 'total' } ],
                output_template => '%s VM(s) poweredoff',
                perfdatas => [
                    { label => 'poweredoff', template => '%s', min => 0, max => 'total' }
                ]
            }
        },
        { label => 'total-suspended', nlabel => 'host.vm.suspended.current.count', set => {
                key_values => [ { name => 'suspended' }, { name => 'total' } ],
                output_template => '%s VM(s) suspended',
                perfdatas => [
                    { label => 'suspended', template => '%s', min => 0, max => 'total' }
                ]
            }
        }
    ];

    # Per-host counters; 'status' uses the catalog closures instead of a
    # numeric threshold.
    $self->{maps_counters}->{host} = [
        {
            label => 'status', type => 2, unknown_default => '%{status} !~ /^connected$/i',
            set => {
                key_values => [ { name => 'state' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold_ng
            }
        },
        { label => 'on', nlabel => 'host.vm.poweredon.current.count', set => {
                key_values => [ { name => 'poweredon' }, { name => 'total' } ],
                output_template => '%s VM(s) poweredon',
                perfdatas => [
                    { label => 'poweredon', template => '%s', min => 0, max => 'total', label_extra_instance => 1 }
                ]
            }
        },
        { label => 'off', nlabel => 'host.vm.poweredoff.current.count', set => {
                key_values => [ { name => 'poweredoff' }, { name => 'total' } ],
                output_template => '%s VM(s) poweredoff',
                perfdatas => [
                    { label => 'poweredoff', template => '%s', min => 0, max => 'total', label_extra_instance => 1 }
                ]
            }
        },
        { label => 'suspended', nlabel => 'host.vm.suspended.current.count', set => {
                key_values => [ { name => 'suspended' }, { name => 'total' } ],
                output_template => '%s VM(s) suspended',
                perfdatas => [
                    { label => 'suspended', template => '%s', min => 0, max => 'total', label_extra_instance => 1 }
                ]
            }
        }
    ];
}
# Prefix printed before each host's counters, e.g. "Host 'esx01' : ".
sub prefix_host_output {
    my ($self, %options) = @_;
    return sprintf("Host '%s' : ", $options{instance_value}->{display});
}
# Constructor: registers the host-selection options.
# --esx-hostname picks one host (all hosts when absent); --filter makes that
# name a regexp; --scope-datacenter/--scope-cluster narrow the search scope.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'esx-hostname:s'     => { name => 'esx_hostname' },
        'filter'             => { name => 'filter' },
        'scope-datacenter:s' => { name => 'scope_datacenter' },
        'scope-cluster:s'    => { name => 'scope_cluster' }
    });

    return $self;
}
# Query the VMware connector daemon ('countvmhost' command) and build the
# per-host and global VM-count instances.
# BUGFIX: each per-host 'total' previously summed the raw counters, emitting
# "uninitialized value" warnings and a wrong sum when the connector omitted a
# counter; missing counters are now treated as 0, matching the guard the
# global sums already had.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{global} = { poweredon => 0, poweredoff => 0, suspended => 0, total => 0 };
    $self->{host} = {};
    my $response = $options{custom}->execute(
        params => $self->{option_results},
        command => 'countvmhost'
    );

    foreach my $host_id (keys %{$response->{data}}) {
        my $entry = $response->{data}->{$host_id};
        my $host_name = $entry->{name};
        # Normalize possibly-missing counters to 0 for arithmetic.
        my $poweredon  = defined($entry->{poweredon})  ? $entry->{poweredon}  : 0;
        my $poweredoff = defined($entry->{poweredoff}) ? $entry->{poweredoff} : 0;
        my $suspended  = defined($entry->{suspended})  ? $entry->{suspended}  : 0;

        $self->{host}->{$host_name} = {
            display    => $host_name,
            state      => $entry->{state},
            poweredon  => $entry->{poweredon},
            poweredoff => $entry->{poweredoff},
            suspended  => $entry->{suspended},
            total      => $poweredon + $poweredoff + $suspended,
        };
        $self->{global}->{poweredon}  += $poweredon;
        $self->{global}->{poweredoff} += $poweredoff;
        $self->{global}->{suspended}  += $suspended;
    }

    $self->{global}->{total} = $self->{global}->{poweredon} + $self->{global}->{poweredoff} + $self->{global}->{suspended};
}
1;
__END__
=head1 MODE
Check number of vm running/off on ESX hosts.
=over 8
=item B<--esx-hostname>
ESX hostname to check.
If not set, we check all ESX.
=item B<--filter>
ESX hostname is a regexp.
=item B<--scope-datacenter>
Search in following datacenter(s) (can be a regexp).
=item B<--scope-cluster>
Search in following cluster(s) (can be a regexp).
=item B<--unknown-status>
Set unknown threshold for status (Default: '%{status} !~ /^connected$/i').
Can use special variables like: %{status}
=item B<--warning-status>
Set warning threshold for status (Default: '').
Can used special variables like: %{status}
=item B<--critical-status>
Set critical threshold for status (Default: '').
Can used special variables like: %{status}
=item B<--warning-*>
Threshold warning.
Can be: 'total-on', 'total-off', 'total-suspended',
'on', 'off', 'suspended'.
=item B<--critical-*>
Threshold critical.
Can be: 'total-on', 'total-off', 'total-suspended',
'on', 'off', 'suspended'.
=back
=cut
| Tpo76/centreon-plugins | apps/vmware/connector/mode/countvmhost.pm | Perl | apache-2.0 | 7,626 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::storagetek::sl::snmp::mode::components::cap;
use strict;
use warnings;
use storage::storagetek::sl::snmp::mode::components::resources qw($map_status);
my $mapping = {
slCapStatusEnum => { oid => '.1.3.6.1.4.1.1211.1.15.4.20.1.6', map => $map_status },
};
my $oid_slCapEntry = '.1.3.6.1.4.1.1211.1.15.4.20.1';
# Register the CAP (cartridge access port) table root OID so the shared SNMP
# walk performed by the hardware template fetches it before check() runs.
sub load {
    my $self = shift;

    push @{ $self->{request} }, { oid => $oid_slCapEntry };
}
# Walk the fetched slCapEntry rows and report each cartridge access port's
# status.  Ports excluded via --filter (section 'cap') are skipped; the
# status string is mapped to a plugin severity by get_severity(), and only
# non-OK ports add to the short output.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking cartridge access ports");
    $self->{components}->{cap} = {name => 'cap', total => 0, skip => 0};
    return if ($self->check_filter(section => 'cap'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_slCapEntry}})) {
        # Only the status column of the table is of interest; the suffix
        # after the column OID is the row instance.
        next if ($oid !~ /^$mapping->{slCapStatusEnum}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_slCapEntry}, instance => $instance);

        next if ($self->check_filter(section => 'cap', instance => $instance));
        $self->{components}->{cap}->{total}++;

        $self->{output}->output_add(long_msg => sprintf("cap '%s' status is '%s' [instance: %s].",
                                    $instance, $result->{slCapStatusEnum},
                                    $instance
                                    ));
        # Escalate to the short output only when the mapped severity is not OK.
        my $exit = $self->get_severity(label => 'status', section => 'cap', value => $result->{slCapStatusEnum});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("cap '%s' status is '%s'",
                                                             $instance, $result->{slCapStatusEnum}));
        }
    }
}
1; | centreon/centreon-plugins | storage/storagetek/sl/snmp/mode/components/cap.pm | Perl | apache-2.0 | 2,641 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::sun::mgmt_cards::mode::showstatus;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use centreon::plugins::misc;
# Default severity per component-status string reported by XSCF's
# showhardconf.  Matched case-insensitively in order, first match wins;
# entries can be overridden at runtime with --threshold-overload.
my $thresholds = [
    ['Faulted', 'CRITICAL'],
    ['Degraded', 'WARNING'],
    ['Deconfigured', 'WARNING'],
    ['Maintenance', 'OK'],
    ['Normal', 'OK'],
];
# Constructor: registers the SSH connection options (host/user/password via
# plink), the command timeout, status overrides (--threshold-overload) and
# component exclusions (--exclude).
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                  "hostname:s"            => { name => 'hostname' },
                                  "username:s"            => { name => 'username' },
                                  "password:s"            => { name => 'password' },
                                  "timeout:s"             => { name => 'timeout', default => 30 },
                                  "command-plink:s"       => { name => 'command_plink', default => 'plink' },
                                  "threshold-overload:s@" => { name => 'threshold_overload' },
                                  "exclude:s@"            => { name => 'exclude' },
                                });
    return $self;
}
# Validate the parsed options: hostname/username/password are mandatory, and
# each --threshold-overload value must be of the form 'STATUS,REGEXP' with a
# recognized status literal.  Parsed overrides are stored in
# $self->{overload_th} as { filter, status } hashrefs for get_severity().
# BUGFIX: the two "Wrong treshold-overload ..." error messages misspelled
# "threshold"; the duplicated required-option checks are also folded into one
# loop (messages are unchanged).
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    foreach my $required (qw(hostname username password)) {
        if (!defined($self->{option_results}->{$required})) {
            $self->{output}->add_option_msg(short_msg => "Need to specify a $required.");
            $self->{output}->option_exit();
        }
    }

    $self->{overload_th} = [];
    foreach my $val (@{$self->{option_results}->{threshold_overload}}) {
        if ($val !~ /^(.*?),(.*)$/) {
            $self->{output}->add_option_msg(short_msg => "Wrong threshold-overload option '" . $val . "'.");
            $self->{output}->option_exit();
        }
        my ($status, $filter) = ($1, $2);
        if ($self->{output}->is_litteral_status(status => $status) == 0) {
            $self->{output}->add_option_msg(short_msg => "Wrong threshold-overload status '" . $val . "'.");
            $self->{output}->option_exit();
        }
        push @{$self->{overload_th}}, {filter => $filter, status => $status};
    }
}
# Map a component status string to a plugin severity.
# User-supplied --threshold-overload rules are consulted first, then the
# built-in $thresholds table; both match case-insensitively and the first hit
# wins.  Falls back to 'unknown' when nothing matches.
sub get_severity {
    my ($self, %options) = @_;

    foreach my $rule (@{$self->{overload_th}}) {
        return $rule->{status} if ($options{value} =~ /$rule->{filter}/msi);
    }
    foreach my $entry (@{$thresholds}) {
        return $$entry[1] if ($options{value} =~ /$$entry[0]/msi);
    }
    return 'unknown';
}
# Return 1 (and log a skip message) when $options{value} matches any of the
# user's --exclude regexps, 0 otherwise.
sub check_exclude {
    my ($self, %options) = @_;

    foreach my $pattern (@{$self->{option_results}->{exclude}}) {
        next unless ($options{value} =~ /$pattern/i);
        $self->{output}->output_add(long_msg => sprintf("Skip Component '%s'",
                                                        $options{value}));
        return 1;
    }
    return 0;
}
# Parse the indented component tree produced by 'showhardconf' (captured in
# $self->{stdout}) and report each component's status.
# The regex matches one "<indent><name> Status:<status>;" line per iteration;
# @stack tracks the current ancestry keyed by indentation depth so each
# component gets a full path ("long_instance", used for --exclude matching)
# and a readable status chain ("long_status").
# NOTE(review): long_instance/long_status are derived from the stack top
# *before* the component's own frame is pushed -- i.e. from its parent --
# which matches how --exclude paths like 'MBU_A>MEM#0B' are documented.
sub check_tree {
    my ($self) = @_;

    my $total_components = 0;
    # Sentinel root frame so the first real line always has a parent.
    my @stack = ({ indent => 0, long_instance => '', long_status => ''});
    while ($self->{stdout} =~ /^([* \t]+)(.*)\s+Status:(.+?);/mg) {
        my ($indent, $unit_number, $status) = (length($1), $2, $3);
        my ($long_instance, $long_status);

        # Unwind to this line's parent: anything at the same or deeper
        # indentation is no longer an ancestor.
        while ($indent <= $stack[$#stack]->{indent}) {
            pop @stack;
        }

        $long_instance = $stack[$#stack]->{long_instance} . '>' . $unit_number;
        $long_status = $stack[$#stack]->{long_status} . ' > ' . $unit_number . ' Status:' . $status;
        if ($indent > $stack[$#stack]->{indent}) {
            push @stack, { indent => $indent,
                           long_instance => $stack[$#stack]->{long_instance} . '>' . $unit_number,
                           long_status => $stack[$#stack]->{long_status} . ' > ' . $unit_number . ' Status:' . $status };
        }

        next if ($self->check_exclude(value => $long_instance));

        # Only non-OK components make it into the short output.
        my $exit = $self->get_severity(value => $status);
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("Component '%s' status is '%s'",
                                                             $unit_number, $status));
        }
        $self->{output}->output_add(long_msg => sprintf("Component '%s' status is '%s' [%s] [%s]",
                                                        $unit_number, $status, $long_instance, $long_status));
        $total_components++;
    }

    $self->{output}->output_add(severity => 'OK',
                                short_msg => sprintf("All %s components are ok.",
                                                     $total_components)
                                );
}
# Mode entry point: run 'showhardconf' on the XSCF service processor through
# plink (ssh), bail out UNKNOWN on connection/command failure, otherwise hand
# the captured output to check_tree() and emit the result.
sub run {
    my ($self, %options) = @_;
    my ($lerror, $exit_code);

    ######
    # Command execution
    ######
    ($lerror, $self->{stdout}, $exit_code) = centreon::plugins::misc::backtick(
                                                 command => $self->{option_results}->{command_plink},
                                                 timeout => $self->{option_results}->{timeout},
                                                 arguments => ['-batch', '-l', $self->{option_results}->{username},
                                                               '-pw', $self->{option_results}->{password},
                                                               $self->{option_results}->{hostname}, 'showhardconf'],
                                                 wait_exit => 1,
                                                 redirect_stderr => 1
                                                 );
    # Normalize CRLF line endings from the remote shell.
    $self->{stdout} =~ s/\r//g;
    # backtick() signals internal errors (timeout, exec failure) with values
    # <= -1000; the message is in the captured output.
    if ($lerror <= -1000) {
        $self->{output}->output_add(severity => 'UNKNOWN',
                                    short_msg => $self->{stdout});
        $self->{output}->display();
        $self->{output}->exit();
    }
    if ($exit_code != 0) {
        $self->{stdout} =~ s/\n/ - /g;
        $self->{output}->output_add(severity => 'UNKNOWN',
                                    short_msg => "Command error: $self->{stdout}");
        $self->{output}->display();
        $self->{output}->exit();
    }

    $self->check_tree();

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check Sun Mxxxx (M3000, M4000,...) Hardware (through XSCF).
=over 8
=item B<--hostname>
Hostname to query.
=item B<--username>
ssh username.
=item B<--password>
ssh password.
=item B<--command-plink>
Plink command (default: plink). Use to set a path.
=item B<--timeout>
Timeout in seconds for the command (Default: 30).
=item B<--threshold-overload>
Set to overload default threshold values (syntax: status,regexp)
It is applied before the default thresholds (order is preserved).
Example: --threshold-overload='UNKNOWN,Normal'
=item B<--exclude>
Filter components (multiple) (can be a regexp).
Example: --exclude='MEM#2B' --exclude='MBU_A>MEM#0B'.
=back
=cut
| s-duret/centreon-plugins | hardware/server/sun/mgmt_cards/mode/showstatus.pm | Perl | apache-2.0 | 8,326 |
# Paws::Batch::DescribeJobs -- auto-generated request class for the AWS Batch
# DescribeJobs API call.  'Jobs' is the only (required) parameter; the class
# attributes below wire the call to POST /v1/describejobs and declare the
# response class.
package Paws::Batch::DescribeJobs;
  use Moose;
  has Jobs => (is => 'ro', isa => 'ArrayRef[Str|Undef]', traits => ['NameInRequest'], request_name => 'jobs', required => 1);

  use MooseX::ClassAttribute;

  # Call metadata used by the Paws dispatcher (not instance data).
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'DescribeJobs');
  class_has _api_uri  => (isa => 'Str', is => 'ro', default => '/v1/describejobs');
  class_has _api_method  => (isa => 'Str', is => 'ro', default => 'POST');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Batch::DescribeJobsResponse');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Batch::DescribeJobs - Arguments for method DescribeJobs on Paws::Batch
=head1 DESCRIPTION
This class represents the parameters used for calling the method DescribeJobs on the
AWS Batch service. Use the attributes of this class
as arguments to method DescribeJobs.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DescribeJobs.
As an example:
$service_obj->DescribeJobs(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> Jobs => ArrayRef[Str|Undef]
A space-separated list of up to 100 job IDs.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DescribeJobs in L<Paws::Batch>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Batch/DescribeJobs.pm | Perl | apache-2.0 | 1,800 |
%% detlogentypes -- binding-type ("denotes") tables for a small object
%% language whose function symbols are a/0, b/0, '$VAR'/1, './2' (list cons)
%% and []/0.  Each denotes_F fact relates argument types to a result type,
%% where a type is a sorted list drawn from {dynamic, list, static, var}.
:- module(detlogentypes,[
    denotes_a/1,
    denotes_b/1,
    'denotes_$VAR'/1,
    'denotes_.'/3,
    'denotes_[]'/1,
    denotes_symbol/1]).

%% denotes_symbol(F): F is one of the known function symbols.
denotes_symbol(a).
denotes_symbol(b).
denotes_symbol(.(_,_)).
denotes_symbol([]).

%% Variables are dynamic.
'denotes_$VAR'([dynamic,var]).

%% 'denotes_.'(HeadType, TailType, ResultType): type of a cons cell given the
%% types of its head and tail.
'denotes_.'(_,[dynamic],[dynamic]).
'denotes_.'(_,[dynamic,list],[dynamic,list]).
'denotes_.'([dynamic],[dynamic,list,static],[dynamic,list]).
'denotes_.'([dynamic],[dynamic,static],[dynamic]).
'denotes_.'(_,[dynamic,var],[dynamic]).
'denotes_.'([dynamic,list],[dynamic,list,static],[dynamic,list]).
'denotes_.'([dynamic,list],[dynamic,static],[dynamic]).
'denotes_.'([dynamic,list,static],[dynamic],[dynamic]).
'denotes_.'([dynamic,list,static],[dynamic,list,static],[dynamic,list,static]).
'denotes_.'([dynamic,list,static],[dynamic,static],[dynamic,static]).
'denotes_.'([dynamic,static],[dynamic,list,static],[dynamic,list,static]).
'denotes_.'([dynamic,static],[dynamic,static],[dynamic,static]).
'denotes_.'([dynamic,var],[dynamic,list,static],[dynamic,list]).
'denotes_.'([dynamic,var],[dynamic,static],[dynamic]).

%% The empty list is a static list; the atoms a and b are static.
'denotes_[]'([dynamic,list,static]).
denotes_a([dynamic,static]).
denotes_b([dynamic,static]).
| leuschel/logen | old_logen/filter_prop/Tests/logenbta.pl | Perl | apache-2.0 | 1,169 |
%% Small "school" example database (Ciao Prolog); alternative module
%% declarations for profiling experiments are kept commented out.
%:- module(school,_,[profiler,expander]).
:- module(dummy,_,[catch]).
%,[catch,expander]).
%:- module(school,_,[catch,profile,expander]).
:- use_module(library(aggregates)).
%:- timing student/2.

%% student(Name, Course).
student(aaa, aaa).
student(tom, cs342).
student(tom, cs453).
%% teacher(Name, Course).
teacher(binkley, cs453).
teacher(binkley, cs342).
%% course(Course, Room, Day).
course(cs453, eco103, tue).
course(cs342, eco103, fri).

%% prog(L): L collects every (Student, Teacher, Room) solution of p/3.
prog(L) :- findall( (S, T, R), p(S, T, R), L).

%% p(S, T, R): student S takes two distinct courses taught by teacher T that
%% both meet in room R.
p(S, T, R) :-
    student(S, C1),student(S, C2),
    teacher(T, C1), teacher(T, C2),
    course(C1, R, _D1), course(C2, R, _D2),
    \+(C1 = C2).
| leuschel/ecce | www/CiaoDE/ciao/contrib/profiler/dummy.pl | Perl | apache-2.0 | 550 |
## no critic (RequireUseStrict)
package Tapper::Producer::NewestPackage;
# ABSTRACT: produce preconditions via find latest changed file

use Moose;
use YAML;
use 5.010;

use Tapper::Config;
use File::stat;

=head2 younger

Comparator for files by mtime.

=cut

sub younger { stat($a)->mtime() <=> stat($b)->mtime() }

=head2 produce

Produce resulting precondition: pick the newest (by mtime) file in the
configured source directory and emit a C<package> precondition for it.
Returns C<< { error => ... } >> when the directory is empty; dies when
the chosen file is not under the PRC NFS mount (i.e. not reachable by
the Installer).

=cut

sub produce {
    my ($self, $job, $produce) = @_;

    my $source_dir = $produce->{source_dir};
    # Sorted oldest-first; pop below therefore yields the newest file.
    my @files      = sort younger <$source_dir/*>;

    # BUGFIX: must be double-quoted so the directory name is interpolated;
    # the original single-quoted string emitted the literal '$source_dir'.
    return {
        error => "No files found in $source_dir",
    } if not @files;

    my $use_file = pop @files;
    my $nfs      = Tapper::Config->subconfig->{paths}{prc_nfs_mountdir};
    die "$use_file not available to Installer" unless $use_file =~ /^$nfs/;

    my $retval = [{
        precondition_type => 'package',
        filename          => $use_file,
    },];
    return {
        precondition_yaml => Dump(@$retval),
    };
}

1;
| tapper/Tapper-Producer | lib/Tapper/Producer/NewestPackage.pm | Perl | bsd-2-clause | 1,264 |
#!/usr/bin/perl
# $Id: mklibs.pl 439 2011-04-27 16:17:42Z mkasper $
# arguments: binaries_tree
#
# Walk a tree of installed binaries and print the sorted, de-duplicated
# list of shared-library paths they link against, as reported by ldd.

use File::Find;

# Require exactly one argument: the root of the binaries tree.
# NOTE(review): the callback stats/ldds $File::Find::name while find()
# chdirs into subdirectories, so the tree argument should be an absolute
# path -- TODO confirm callers always pass one.
exit unless $#ARGV == 0;

undef @liblist;

# check_libs(path) -- File::Find callback: collect ldd output for every
# regular executable file that is not a kernel module.
sub check_libs {
    @filestat = stat($File::Find::name);
    # (mode & S_IFMT) == S_IFREG -> regular file; & 0111 -> any exec bit.
    # BUGFIX: the original pattern was /.ko$/ with an unescaped dot, which
    # also skipped any file whose name merely ends in "ko" (e.g. "gecko").
    if ((($filestat[2] & 0170000) == 0100000) &&
        ($filestat[2] & 0111) && (!/\.ko$/)) {
        # -f "%p\n": print one resolved library path per line.
        @curlibs = qx{/usr/bin/ldd -f "%p\n" $File::Find::name 2>/dev/null};
        push(@liblist, @curlibs);
    }
}

# walk the directory tree
find(\&check_libs, $ARGV[0]);

# throw out dupes by using each output line as a hash key
undef %hlib;
@hlib{@liblist} = ();
@liblist = sort keys %hlib;

foreach $lib (@liblist) {
    chomp($lib);
    if ($lib eq "not found") {
        print STDERR "Warning: one or more libraries were not found!\n";
    } else {
        # Drop the first character of each line -- presumably a leading
        # separator produced by the %p format; TODO confirm on target OS.
        print substr($lib, 1) . "\n";
    }
}
| jetshaw/m0n0wall_x | trunk/build/minibsd/mklibs.pl | Perl | bsd-3-clause | 749 |
# SNMP::Info::CiscoPower
# $Id$
#
# Copyright (c) 2008 Bill Fenner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Santa Cruz nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
package SNMP::Info::CiscoPower;
use strict;
use Exporter;
use SNMP::Info;
@SNMP::Info::CiscoPower::ISA = qw/SNMP::Info Exporter/;
@SNMP::Info::CiscoPower::EXPORT_OK = qw//;
use vars qw/$VERSION %MIBS %FUNCS %GLOBALS %MUNGE/;
$VERSION = '3.29';
%MIBS = ( 'CISCO-POWER-ETHERNET-EXT-MIB' => 'cpeExtPsePortEntPhyIndex',
'CISCO-CDP-MIB' => 'cdpCachePowerConsumption' );
%GLOBALS = ();
%FUNCS = (
'cpeth_ent_phy' => 'cpeExtPsePortEntPhyIndex',
'peth_port_power' => 'cpeExtPsePortPwrConsumption',
);
%MUNGE = ();
# Cisco overcame the limitation of the module.port index of the
# pethPsePortTable by adding another mapping table, which maps
# a pethPsePortTable row to an entPhysicalTable index, which can
# then be mapped to ifIndex.
# Map each pethPsePortTable row (module.port index) to its ifIndex by
# composing two tables: PSE-port -> entPhysicalTable index (Cisco ext MIB)
# and entPhysicalTable index -> ifIndex (ENTITY-MIB, via e_port()).
# Rows whose physical index has no (true) ifIndex mapping are dropped.
sub peth_port_ifindex {
    my ( $self, $partial ) = @_;

    my $phy_by_pse  = $self->cpeth_ent_phy($partial);
    my $port_by_phy = $self->e_port();

    my %pse_to_ifindex;
    while ( my ( $pse_idx, $phy_idx ) = each %$phy_by_pse ) {
        my $ifindex = $port_by_phy->{$phy_idx};
        $pse_to_ifindex{$pse_idx} = $ifindex if $ifindex;
    }
    return \%pse_to_ifindex;
}
# peth_port_neg_power uses the same index as the other peth_port_* tables.
# However, cdpCachePowerConsumption uses <ifIndex>.<neighbor>.
# Therefore, we have to invert peth_port_ifindex, to get to
# the index that is expected and the rest of the code can re-invert it.
# Return CDP-negotiated power (milliwatts) keyed like the other
# peth_port_* tables (module.port).  cdpCachePowerConsumption is keyed
# <ifIndex>.<neighbor>, so we invert peth_port_ifindex() to translate
# the ifIndex part back into the PSE-port index.  The $partial argument
# is accepted for interface symmetry but intentionally ignored, since it
# cannot be applied cleanly across the two index spaces.
sub peth_port_neg_power {
    my ( $self, $partial ) = @_;

    my $ifindex_by_pse = $self->peth_port_ifindex();
    my %pse_by_ifindex =
        map { $ifindex_by_pse->{$_} => $_ } keys %$ifindex_by_pse;

    my $cdp_power = $self->cdpCachePowerConsumption();
    my %neg_power;
    for my $cdp_idx ( keys %$cdp_power ) {
        # Keep only the leading ifIndex component of the CDP cache key.
        my ($ifindex) = split /\./, $cdp_idx, 2;
        my $pse_idx = $pse_by_ifindex{$ifindex};
        $neg_power{$pse_idx} = $cdp_power->{$cdp_idx}
            if defined $pse_idx;
    }
    return \%neg_power;
}
1;
__END__
=head1 NAME
SNMP::Info::CiscoPower - SNMP Interface to data stored in
F<CISCO-POWER-ETHERNET-EXT-MIB>.
=head1 AUTHOR
Bill Fenner
=head1 SYNOPSIS
# Let SNMP::Info determine the correct subclass for you.
my $poe = new SNMP::Info(
AutoSpecify => 1,
Debug => 1,
DestHost => 'myswitch',
Community => 'public',
Version => 2
)
or die "Can't connect to DestHost.\n";
my $class = $poe->class();
print "SNMP::Info determined this device to fall under subclass : $class\n";
=head1 DESCRIPTION
The Info::PowerEthernet class needs a per-device helper function to
properly map the C<pethPsePortTable> to C<ifIndex> values. This class
provides that helper, using F<CISCO-POWER-ETHERNET-EXT-MIB>.
It does not define any helpers for the extra values that this MIB
contains.
Create or use a device subclass that inherit this class. Do not use directly.
For debugging purposes you can call this class directly as you would
SNMP::Info
my $poe = new SNMP::Info::CiscoPower (...);
=head2 Inherited Classes
none.
Note that it requires that the device inherits from Info::Entity.
=head2 Required MIBs
=over
=item F<CISCO-POWER-ETHERNET-EXT-MIB>
=back
=head1 GLOBALS
none.
=head1 TABLE METHODS
These are methods that return tables of information in the form of a reference
to a hash.
=head2 Power Port Table
=over
=item $poe->peth_port_ifindex()
Maps the C<pethPsePortTable> to C<ifIndex> by way of the F<ENTITY-MIB>.
=item $poe->peth_port_power()
Power supplied by PoE ports, in milliwatts
(C<cpeExtPsePortPwrConsumption>)
=back
=head2 CDP Port table
=over
=item $poe->peth_port_neg_power()
Power negotiated using CDP, in milliwatts
(C<cdpCachePowerConsumption>)
=back
=cut
| lucwillems/SNMP-INFO | Info/CiscoPower.pm | Perl | bsd-3-clause | 5,587 |
/* Part of XPCE --- The SWI-Prolog GUI toolkit
Author: Jan Wielemaker and Anjo Anjewierden
E-mail: J.Wielemaker@vu.nl
WWW: http://www.swi-prolog.org/packages/xpce/
Copyright (c) 2002-2013, University of Amsterdam
VU University Amsterdam
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(pce_keybinding, []).
:- use_module(pce_boot(pce_principal)).
:- use_module(pce_boot(pce_realise)).
:- multifile
binding/3.
message_level(silent).
%message_level(informational).
/*******************************
* STYLE PREFERENCES *
*******************************/
%! binding(+ModeName, +TableName, +Modifications)
%
% Specify bindings for alternative key-binding-styles.
%
% @param ModeName Name of the key-binding-style
% @param TableName Syntax table to modify
% @param Modifications List of Key-Method
binding(cua, editor,
[ '\\C-v' = paste,
'\\C-s' = save_buffer
]).
binding(cua, 'emacs$fundamental',
[ '\\C-f' = isearch_forward,
'\\C-o' = open,
'\\C-n' = new,
'\\C-p' = print
]).
binding(apple, editor,
[ '\\es' = save_buffer,
'\\ez' = undo
]).
binding(apple, 'emacs$fundamental',
[ '\\ec' = copy_or_capitalize_word,
'\\ex' = cut_or_execute_extended_command
]).
binding(apple, emacs_page,
[ '\\ev' = paste_or_scroll_down
]).
/*******************************
* CHANGE BINDINGS *
*******************************/
%! set_keybinding_style(+Id)
%
% Runtime modification of the current key-binding style.
set_keybinding_style(Mode) :-
    current_style(Mode),                % requested style already active
    !.
set_keybinding_style(emacs) :-
    !,
    send(@key_bindings, for_all,        % undo recorded changes in every table
         message(@arg2, unmodify)),
    set_style(emacs).
set_keybinding_style(Style) :-
    set_keybinding_style(emacs),        % always start from pristine emacs state
    (   binding(Style, Table, Modifications),
        get(@key_bindings, member, Table, KB),
        modify(Modifications, KB),
        fail                            % failure-driven loop over binding/3
    ;   true
    ),
    set_style(Style).
%   modify(+Modifications, +KeyBinding)
%   Apply a list of Key=Command / delete(Key) changes to one table.
modify([], _).
modify([Mod|T], KB) :-
    modify1(Mod, KB),
    modify(T, KB).
%   modify1(+Modification, +KeyBinding)
%   Key = Command binds Key to Command; delete(Key) removes a binding.
%   In both cases the previous binding is recorded via ->save_default
%   first, so ->unmodify can later restore it.
modify1(Key = Command, KB) :-
    get(KB?bindings, value, Key, Command),  % already bound as requested
    !.
modify1(Key = Command, KB) :-
    send(KB, save_default, Key),
    send(KB, function, Key, Command),
    get(KB, name, Table),
    message_level(Level),                   % silent or informational (see top)
    print_message(Level, format('~w (~p): ~w --> ~w',
                   [Table, KB, Key, Command])).
modify1(delete(Key), KB) :-
    \+ get(KB?bindings, value, Key, _),     % no such binding: nothing to do
    !.
modify1(delete(Key), KB) :-
    send(KB, save_default, Key),
    get(KB, bindings, Bindings),
    send(Bindings, delete, Key),
    get(KB, name, Table),
    message_level(Level),
    print_message(Level, format('~w: deleted ~w', [Table, Key])).
/*******************************
* DYNAMIC TABLES *
*******************************/
:- pce_extend_class(key_binding).
class_variable(style, name,
[ 'X'(emacs),
windows(cua),
apple(apple)
],
"Basic binding style (emacs,cua,apple)").
%! current_style(-Style) is det.
%! set_style(+Style) is det.
%
% Manipulate the style. The style is stored in the class-variable
% key_binding.style, so it can be set in the users preferences
% file.
% current_style(-Style): read the active style from the `style'
% class-variable of class key_binding.
current_style(Style) :-
    get(@pce, convert, key_binding, class, Class),
    get(Class, class_variable, style, Var),
    get(Var, value, Style).
% set_style(+Style): write the same class-variable, so the setting also
% applies to key_binding objects created afterwards.
set_style(Style) :-
    get(@pce, convert, key_binding, class, Class),
    get(Class, class_variable, style, Var),
    send(Var, value, Style).
apply_preferences(KB) :->
"Apply CUA-mode preferences"::
send(KB, apply_cua),
send(KB, bind_resources). % bind from ~/.xpce/Defaults
apply_cua(KB) :->
"Apply our local overrides"::
current_style(Mode),
( Mode == emacs
-> true
; get(KB, name, Name),
binding(Mode, Name, Modifications)
-> modify(Modifications, KB)
; true
).
save_default(KB, Key:name) :->
"Save default binding for Key"::
( get(KB, attribute, modified, Undo)
-> true
; send(KB, attribute, modified, new(Undo, sheet))
),
( get(Undo, value, Key, _)
-> true % Already saved this one
; get(KB, bindings, Bindings),
( get(Bindings, value, Key, Command)
-> send(Undo, value, Key, Command)
; send(Undo, value, Key, @nil)
)
).
unmodify(KB) :->
"Replay recorded modifications"::
( get(KB, attribute, modified, Undo)
-> send(Undo, for_all,
message(KB, unbind, @arg1?name, @arg1?value)),
send(KB, delete_attribute, modified)
; true
).
unbind(KB, Key:name, Command:[name|code]*) :->
"Restore saved binding for Key"::
get(KB, name, Table),
message_level(Level),
( Command == @nil
-> get(KB, bindings, Sheet),
send(Sheet, delete, Key),
print_message(Level,
format('~w: deleted ~w', [Table, Key]))
; send(KB, function, Key, Command),
print_message(Level,
format('~w (~p): ~w --> ~w',
[Table, KB, Key, Command]))
).
:- pce_end_class(key_binding).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Runtime switching is connected to @pce as the operation influences an
unknown number of unknown key_binding objects.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
:- pce_extend_class(pce).
:- pce_group(preferences).
key_binding_style(_Pce, Style:name) :->
"Set the key-binding style"::
set_keybinding_style(Style).
key_binding_style(_Pce, Style:name) :<-
"Get the key-binding style"::
current_style(Style).
key_binding_styles(_Pce, Styles:chain) :<-
"List of supported styles"::
findall(Style, binding(Style, _Class, _Mod), StyleList),
sort([emacs|StyleList], Sorted),
new(Styles, chain),
add_styles(Sorted, Styles).
add_styles([], _).
add_styles([H|T], Chain) :-
send(Chain, append, H),
add_styles(T, Chain).
:- pce_end_class(pce).
% Create the type key_binding_style, a dynamic `name-of' type
% holding the defined key-binding styles.
make_key_binding_style_type :-
get(@pce, convert, key_binding_style, type, Type),
send(Type, name_reference, key_binding_style_type),
send(Type, kind, name_of),
get(@pce, key_binding_styles, Styles),
send(Type, slot, context, Styles).
:- initialization make_key_binding_style_type.
/*******************************
* APPLE *
*******************************/
:- pce_extend_class(editor).
copy_or_capitalize_word(E, Arg:[int]) :->
"Command-c copies; ESC c capitalizes word"::
( Arg == @default,
send(@event, has_modifier, m)
-> send(E, copy)
; send(E, capitalize_word, Arg)
).
cut_or_execute_extended_command(E, Arg:[int]) :->
"Command-X cut; ESC-x starts extended command"::
( Arg == @default,
send(@event, has_modifier, m)
-> send(E, cut)
; send(E, noarg_call, execute_extended_command, Arg)
).
paste_or_scroll_down(E, Arg:[int]) :->
"Command-v pasts; ESC v scrolls down"::
( Arg == @default,
send(@event, has_modifier, m)
-> send(E, paste)
; send(E, scroll_down, Arg)
).
:- pce_end_class(editor).
:- pce_extend_class(list_browser).
paste_or_scroll_down(LB, Arg:[int]) :->
"Forward to ->scroll_down (Apple keybinding)"::
send(LB, scroll_down, Arg).
:- pce_end_class(list_browser).
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/lib/swipl/xpce/prolog/boot/pce_keybinding.pl | Perl | mit | 9,232 |
% BEGIN LICENSE BLOCK
% Version: CMPL 1.1
%
% The contents of this file are subject to the Cisco-style Mozilla Public
% License Version 1.1 (the "License"); you may not use this file except
% in compliance with the License. You may obtain a copy of the License
% at www.eclipse-clp.org/license.
%
% Software distributed under the License is distributed on an "AS IS"
% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
% the License for the specific language governing rights and limitations
% under the License.
%
% The Original Code is The ECLiPSe Constraint Logic Programming System.
% The Initial Developer of the Original Code is Cisco Systems, Inc.
% Portions created by the Initial Developer are
% Copyright (C) 1998 - 2006 Cisco Systems, Inc. All Rights Reserved.
%
% Contributor(s): Joachim Schimpf, IC-Parc
%
% END LICENSE BLOCK
% ----------------------------------------------------------------------
% System: ECLiPSe Constraint Logic Programming System
% Version: $Id: changeset.pl,v 1.2 2009/07/16 09:11:25 jschimpf Exp $
%
% Description: Predicates to efficiently compute the set of
% variables modified during a propagation sequence.
%
% Authors: J.Schimpf, IC-Parc
%
% ----------------------------------------------------------------------
:- module(changeset).
:- comment(categories, ["Algorithms","Constraints"]).
:- comment(summary, "Compute sets of modified variables").
:- comment(author, "Joachim Schimpf, IC-Parc").
:- comment(copyright, "Cisco Systems, Inc.").
:- comment(date, "$Date: 2009/07/16 09:11:25 $").
:- comment(monitor_changes_arr/5, [
summary:"Monitor variables for modifications",
amode:monitor_changes_arr(+,+,+,+,-),
args:["VarArr":"A structure containing variables",
"Prio":"Priority for the monitoring demons",
"CondList":"Suspension list spec",
"AttrMod":"Suspension list attribute module",
"ChangeStream":"A lazy list of lists of changes"],
desc:html("
This predicate monitors an array of variables for certain
modifications, and creates a continuous stream of lists of indices of
modified variables, e.g.
<PRE>
monitor_changes_arr(VarArr, 8, [min of fd, max of fd], fd, Stream)
</PRE>
will monitor the variables in VarArr for modifications of their min/max
fd-attributes. The monitor will run with a priority of 8 to 9.
All variable modifications that occur between two wakings of the
monitor will be detected by the monitor. It will then create a list
of indices of the modified variables, and append this list to
ChangeStream.
<PRE>
[eclipse 4]: X1::1..9, X2::1..8,
monitor_changes_arr(v(X1,X2), 8,
[min of fd, max of fd], fd, Changes),
X1 #> X2, X2 #>= 5.
Changes = [[1], [2, 1]|More]
X1 = X1{[6..9]}
X2 = X2{[5..8]}
</PRE>
What happened here is that the first constraint X1 #> X2 caused X1 to
change its lower bound, therefore [1] was appended to the Changes list.
Then X2 #>= 5 raised the lower bound of X2 and (because X1 #> X2)
the lower bound of X1, therefore both variable indices [1,2] were
appended to the Changes list.
<P>
The priority of the monitor should be set up such that is is lower than
the priority of the propagation constraints. In that case, the lists
that get appended to ChangeStream represent exactly the set of variables
(without duplicates) that were modified by one propagation sequence.
<P>
Note that the ChangeStream can be used to trigger actions whenever
new changes get appended, for example:
<PRE>
delay report_changes(X) if var(X).
report_changes([]).
report_changes([X|T]) :-
printf(\"The following variables changed: %Mw%n%b\", [X]),
report_changes(T).
[eclipse 11]: X1::1..9, X2::1..8,
monitor_changes_arr(v(X1,X2), 8,
[min of fd, max of fd], fd, Changes),
report_changes(Changes),
X1 #> X2, X2 #>= 5.
The following variables changed: [1]
The following variables changed: [2, 1]
...
<PRE>
")]).
:- comment(monitor_changes/6, [
summary:"Monitor variables for modifications",
amode:monitor_changes(+,+,+,+,+,-),
args:["Vars":"A list containing variables",
"Templates":"A list of terms corresponding to the variables",
"Prio":"Priority for the monitoring demons",
"CondList":"Suspension list spec",
"AttrMod":"Suspension list attribute module",
"ChangeStream":"A lazy list of lists of changes"],
desc:html("
Like monitor_changes_arr/5, but (instead of array indices) the
ChangeStream contains the elements of the Templates-list that
correspond to the modified variables, thus allowing arbitrary
information to be conveyed to the code that processes the changes.
<PRE>
[eclipse 10]: X1::1..9, X2::1..8,
monitor_changes([X1,X2],[info(1,X1),info(2,X2)], 8,
[min of fd, max of fd], fd, Stream),
X1 #> X2, X2 #>= 5.
Stream = [[info(1, X1{[6..9]})], [info(2, X2{[5..8]}), info(1, X1)]|More]
X1 = X1{[6..9]}
X2 = X2{[5..8]}
</PRE>
")]).
:- export
monitor_changes_arr/5,
monitor_changes/6.
:- import setarg/3 from sepia_kernel.
:- import get_attribute/3 from sepia_kernel.
% monitor_changes_arr(+VarArr, +Prio, +Cond, +AttrMod, -ChangeStream)
% Attach one change_monitor/4 demon at priority Prio to every argument of
% VarArr.  The flusher is suspended at Prio+1, so it only runs once a
% whole propagation sequence at priority Prio has finished.
monitor_changes_arr(VarArr, Prio, Cond, AttrMod, ChangeStream) :-
	Store = set(Empty),	% Empty doubles as the flusher's wake-up trigger
	functor(VarArr, _, N),
	setup_demons_arr(VarArr, Prio, Cond, AttrMod, Store, N),
	Prio1 is Prio+1,
	suspend(flush_change_list(Store, ChangeStream), Prio1, Empty->inst).
% setup_demons_arr: walk the array from arg N down to 1, creating a
% monitoring suspension per variable; the array index N is the template
% that ends up in the change stream.
setup_demons_arr(_VarArr, _Prio, _Cond, _AttrMod, _Store, 0) :- !.
setup_demons_arr(VarArr, Prio, Cond, AttrMod, Store, N) :-
	arg(N, VarArr, X),
	N1 is N-1,
	make_suspension(change_monitor(X, N, Store, Susp), Prio, Susp),
	insert_all(X, Susp, Cond, AttrMod),
	setup_demons_arr(VarArr, Prio, Cond, AttrMod, Store, N1).
% monitor_changes/6: as monitor_changes_arr/5, but a caller-supplied
% template from Templates (instead of an index) identifies each variable.
monitor_changes(Vars, Templates, Prio, Cond, AttrMod, ChangeStream) :-
	Store = set(Empty),
	setup_demons(Vars, Templates, Prio, Cond, AttrMod, Store),
	Prio1 is Prio+1,
	suspend(flush_change_list(Store, ChangeStream), Prio1, Empty->inst).
setup_demons([], _Templates, _Prio, _Cond, _AttrMod, _Store).
setup_demons([X|Xs], [T|Ts], Prio, Cond, AttrMod, Store) :-
	make_suspension(change_monitor(X, T, Store, Susp), Prio, Susp),
	insert_all(X, Susp, Cond, AttrMod),
	setup_demons(Xs, Ts, Prio, Cond, AttrMod, Store).
% insert_all: register one suspension in every requested suspension list
% (Cond is e.g. [min of fd, max of fd]) of variable X.
insert_all(_, _, [], _).
insert_all(X, Susp, [Cond|Conds], AttrMod) :-
	insert_suspension(X, Susp, Cond, AttrMod),
	insert_all(X, Susp, Conds, AttrMod).
% demon: the suspension survives each waking until explicitly killed.
:- demon change_monitor/4.
% change_monitor(X, Template, Store, Susp): on every waking, prepend
% Template to the list held in Store.  The odd-looking
% ( Old = [] -> true ; true ) is the wake-up trick: right after a flush,
% Store still holds the *unbound* trigger variable, so unifying it with
% [] instantiates it and thereby wakes the flush_change_list/2 demon
% suspended on it; on subsequent changes Old is already a list and the
% test is a no-op.  Once X becomes instantiated the monitor kills itself.
change_monitor(X, Template, Store, _Susp) :-
	var(X),
	Store = set(Old),
	( Old = [] -> true ; true ),
	setarg(1, Store, [Template|Old]).
change_monitor(X, Template, Store, Susp) :-
	nonvar(X),
	Store = set(Old),
	( Old = [] -> true ; true ),
	setarg(1, Store, [Template|Old]),
	kill_suspension(Susp).
% flush_change_list: grab the accumulated change list, re-arm itself on a
% fresh trigger variable at its own priority, reset the store, and only
% then extend the stream (last, so consumers waiting on the stream run
% after the monitor has been re-armed).
flush_change_list(Store, ChangeStream) :-
	Store = set(Changes),
	get_priority(Prio1),
	suspend(flush_change_list(Store, More), Prio1, Empty->inst),
	setarg(1, Store, Empty),
	ChangeStream = [Changes|More]. % last!
/*
lib(fd).
List = [X1,X2,X3,X4,X5,X6,X7,X8,X9],
List::0..9,
Terms = [1-X1,2-X2,3-X3,4-X4,5-X5,6-X6,7-X7,8-X8,9-X9],
monitor_changes(List, Terms, 8, [min of fd, max of fd], fd, Stream),
report_changes(Stream),
call_priority((X4#>6, X7#<3, X2=5),5).
List = [X1,X2,X3,X4,X5,X6,X7,X8,X9], List::0..9, VarArr =.. [vars|List],
monitor_changes_arr(VarArr, 8, [min of fd, max of fd], fd, Stream),
report_changes(Stream),
call_priority((X4#>6, X7#<3, X2=5),5).
List = [X1,X2,X3,X4,X5,X6,X7,X8,X9], List::0..9, VarArr =.. [vars|List],
monitor_changes_arr(VarArr, 8, [min of fd, max of fd], fd, Stream),
report_changes(Stream),
X4#>6, X7#<3, X2=5.
delay report_changes(X) if var(X).
report_changes([]).
report_changes([X|T]) :-
printf("changes: %Mw%n%b", [X]),
report_changes(T).
re_solve(_Handle, _VarArr, []).
re_solve(Handle, VarArr, [ChangedVarIndices|More]) :-
transfer_some_bounds(ChangedVars),
lp_re_solve(Handle, ChangedVarIndices, ObjVal),
suspend(lp_re_solve(Handle, More), 9, More->inst).
*/
| kishoredbn/barrelfish | usr/eclipseclp/icparc_solvers/changeset.pl | Perl | mit | 8,007 |
#!/usr/bin/perl
# Api for querying goals
# INPUT parameters:
# num - limits the number of results. Default 1000
# startTime - The beginning of the timerange. Defaults to one day
# endTime - The end of the time range. Defaults to current date.
# [onlyTopGoals] - Optional argument for getting only top goals. Default false.
# datetime format = "2013-09-10T23:00:14+09:00"
#
#my $dateTimeFormat = '%Y.%m.%dT%T%O';
#my $dateTimeFormat = '%Y.%m.%dT%T';#TODO Add timezone handling
my $dateTimeFormat = '%Y-%m-%dT%T%z';
#TODO Add timezone handling
# OUTPUT
# TODO: Output JSONP
# Format: JSON
# title
# goalPath - string representation of the path from top goal to the current goal
# creator
# dateTime
# subGoal - list of subgoal urls
no warnings 'utf8';
use DateTime;
use Date::Parse;
use DateTime::Format::Strptime;
use JSON;
use Try::Tiny;
use CGI qw/:standard/;
use CGI::Cookie;
use Data::Dumper;
require("sparql.pl");
require("debug_log.pl");
# Configuration
my $graph_uri = "http://collab.open-opinion.org";
#$debug = true;# Uncomment this line to run in debug mode.
# End config
print "Access-Control-Allow-Origin: *\n";
print "Content-Type: application/json; charset=UTF-8\n\n";
my $q = CGI->new;
my @params = $q->param();
my $goalURI = uri_unescape( $q->param('goalURI') );
logGeneral("-goal search [$goalURI]");
my @goalUris = [];
if ( $goalURI eq "" ){
$goalURI = undef;
}else{
@goalUris = split( ",", uri_unescape ( $goalURI ) );
}
# Parse parameters
$num = uri_unescape( $q->param('num') );
if ( !defined( $num ) ){
$num = 100;
}
#if( ! (defined $goalURI) ){
if ( defined( $q->param('endTime') ) && !($q->param('endTime') eq "") ){
# Parse the parameter
my $parser = DateTime::Format::Strptime->new(
pattern => $dateTimeFormat,
#on_error => 'undef',
);
$endTime = $parser->parse_datetime( uri_unescape( $q->param('endTime') ) );
}
if ( defined( $q->param('startTime') ) ){
# Parse the parameter
my $parser = DateTime::Format::Strptime->new(
pattern => $dateTimeFormat,
#on_error => 'croak',
);
#$startTime = $parser->parse_datetime( $q->param('startTime') );
$startTime = $parser->parse_datetime( uri_unescape( $q->param('startTime') ) );
}
if( defined($startTime) && !defined($endTime) ){
$endTime = DateTime->new(
year => 2100,
month => 1,
day => 1,
hour => 1,
minute => 1,
second => 1,
time_zone => 'America/Chicago',
);
#"DateTime->now();
# logGeneral("Startdef[$startTime]");
# logGeneral("Startdef[$endTime]");
}
if( !defined($startTime) && defined($endTime) ){
$startTime = DateTime->new(
year => 1000,
month => 1,
day => 1,
hour => 1,
minute => 1,
second => 1,
time_zone => 'America/Chicago',
);
# logGeneral("Enddef[$startTime]");
# logGeneral("Enddef[$endTime]");
#"DateTime->now();
}
# if ( !defined ( $startTime ) ){
# $startTime = $endTime->clone();
# # Set default time range
# $startTime->add( days => -30 );
# }
my $dateType = uri_unescape ( $q->param( 'dateType' ) );
my $onlyTopGoals = uri_unescape ( $q->param( 'onlyTopGoals' ) );
my $created = uri_unescape ( $q->param( 'created' ) );
my $keyword = uri_unescape ( $q->param( 'keyword' ) );
logGeneral("kw" . $keyword);
my @goalStatus = split( ";", uri_unescape ( $q->param( 'goalStatus' ) ) );
my $locationURI = uri_unescape ( $q->param( 'locationURI' ) );
my $creatorURI = uri_unescape ( $q->param( 'creatorURI' ) );
my $wisherURI = uri_unescape ( $q->param( 'wisherURI' ) );
my $participantURI = uri_unescape ( $q->param( 'participantURI' ) );
my @goalTags = split( ";", uri_unescape ( $q->param( 'tags' ) ) );
# Create link between issue and references
my @parts = ();
if ( $locationURI ){
logGeneral($locationURI);
@parts = split(',', $locationURI);
}
#}
# Generate Sparql query
if($debugFlag){
logGeneral("Debug mode on.");
}
try{
my %cookies = CGI::Cookie->fetch;
my $userURI = $cookies{'userURI'}->value;
my $usr = $cookies{'userName'}->value;
}catch{
};
#logGeneral("User [$usr] [$userURI]");
#http://collab.open-opinion.org/resource/people/85dd5be5-0490-6af8-827b-2b71e588a36b
# Prefix
my $sparql = "PREFIX dc: <http://purl.org/dc/terms/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
select distinct ?goal ?title ?desc ?submDate ?requiredTargetDate ?desiredTargetDate ?completedDate ?status ?locationURI ?creator
?debug
where {
?goal rdf:type socia:Goal;
dc:title ?title.
OPTIONAL { ?goal dc:description ?desc. }
OPTIONAL { ?goal dc:dateSubmitted ?submDate }
#OPTIONAL { ?goal socia:subGoalOf ?parentGoal }
OPTIONAL { ?goal socia:requiredTargetDate ?requiredTargetDate }
OPTIONAL { ?goal socia:desiredTargetDate ?desiredTargetDate }
OPTIONAL { ?goal socia:completedDate ?completedDate }
OPTIONAL { ?goal socia:status ?status }
OPTIONAL { ?goal dc:spatial ?locationURI}
OPTIONAL { ?goal dc:creator ?creator}
#OPTIONAL { ?goal socia:subGoalOf ?parentGoal }
OPTIONAL { ?goal socia:wisher ?goalWisherURI }
OPTIONAL { ?goal socia:participant ?participant. }
OPTIONAL { ?goal dc:requires ?tag }
OPTIONAL { ?goal socia:isDebug ?debug }
";
#OPTIONAL {
# GRAPH <http://collab.open-opinion.org>{
# OPTIONAL { ?creator foaf:name ?creatorName.}
# OPTIONAL { ?creator foaf:img ?imageURI. }
# OPTIONAL { ?creator go:url ?fbURI. }
# }
#}
if ( defined($goalURI) && $goalURI){
if ( @goalUris ){
my $aa = 1;
$sparql .= "FILTER ( ";
foreach my $gu (@goalUris) {
if( $aa == 1 ){
$aa = 0;
}
else{
$sparql .= " || ";
}
$sparql .= "?goal = <$gu> && 1=1";
}
$sparql .= " && 1=1 ) \n";
}
#$sparql .= "FILTER ( ?goal = <$goalURI>)";
}else{
# Keyword search
if ( defined($keyword) && !($keyword eq "") ){
logGeneral("Goal");
$sparql .= " FILTER( REGEX(?title, '''$keyword''', \"i\") ) \n";
}
# Creator
if ( $creatorURI ){
$sparql .= "FILTER ( ?creator = <$creatorURI>)";
}
# Wisher
if ( $wisherURI ){
$sparql .= "FILTER ( ?goalWisherURI = <$wisherURI>)";
}
# Participant
if ( $participantURI ){
$sparql .= "FILTER ( ?participant = <$participantURI>)";
}
# Status search
if ( @goalStatus ){
my $f = 1;
$sparql .= "FILTER ( ";
foreach my $val (@goalStatus) {
if( $f == 1 ){
$f = 0;
}
else{
$sparql .= " || ";
}
$sparql .= "?status = \"$val\" && 1=1";
}
$sparql .= " && 1=1 ) \n";
}
# Tag search
if ( @goalTags ){
my $ft = 1;
$sparql .= "FILTER ( ";
foreach my $val (@goalTags) {
if( $ft == 1 ){
$ft = 0;
}
else{
$sparql .= " || ";
}
$sparql .= "?tag = <$val> && 1=1";
}
$sparql .= " && 1=1 ) \n";
}
# Filter multiple locations
if ( scalar @parts > 0 ){
$sparql .= " FILTER ( ?locationURI IN (";
for ( $i = 0; $i < scalar @parts; $i++ ){
logGeneral("Adding <".$parts[$i].">");
# Add new related
if ( $i > 0 ){
$sparql .= ", ";
}
$sparql .= "<".$parts[$i].">";
}
$sparql .= ") )";
}
if( defined($startTime) && defined($endTime) ){
# Time range searches
# Created date = submitted date
if ( ( $dateType eq 'CreatedDate' )){
$sparql .= " FILTER ( ?submDate >= xsd:dateTime(\"" . $startTime->strftime("%Y%m%d") . "T00:00:00+09:00\") && ?submDate <= xsd:dateTime(\"" . $endTime->strftime("%Y%m%d") . "T23:59:00+09:00\") )\n";
}
if ( ( $dateType eq 'DesiredDate' )){
$sparql .= " FILTER ( ?desiredTargetDate >= xsd:date(\"" . $startTime->strftime("%Y%m%d") . "\") && ?desiredTargetDate <= xsd:date(\"" . $endTime->strftime("%Y%m%d") . "\") )\n";
}
if ( ( $dateType eq 'RequiredDate' )){
$sparql .= " FILTER ( ?requiredTargetDate >= xsd:date(\"" . $startTime->strftime("%Y%m%d") . "\") && ?requiredTargetDate <= xsd:date(\"" . $endTime->strftime("%Y%m%d") . "\") )\n";
}
}
if ( !defined($debugFlag) ){
$sparql .= " FILTER NOT EXISTS { ?goal socia:isDebug ?debug } ";
}
}
$sparql .= "}
 ORDER BY DESC(?submDate)
 LIMIT $num";
#
logGeneral("$sparql");
#print Dumper @goalUris;
#print $goalURI;
# Run the SPARQL query and decode the raw Virtuoso JSON response.
my $result_json = execute_sparql( $sparql );
my $test = decode_json $result_json;
# Virtuoso's JSON layout is awkward; rebuild it into a flat, well-formed
# dataset with one record per goal.
# NOTE(review): "%result = {}" assigns a hashref into a hash (odd-element
# warning) and is never used; the code below populates the package variable
# $result instead, which only works without "use strict" — confirm intended.
my %result = {};
$result->{goals} = [];
#$result->{query} = $sparql;
# Loop all goals and do group by
# NOTE(review): $i and $tmp are undeclared package variables — relies on the
# file not enabling strictures.
for ( $i = 0; $i < scalar @{$test->{'results'}->{'bindings'}}; $i++ ){
 # Copy one SPARQL binding row into a flat goal record.
 #print "adding new goal\n";
 $tmp = {};
 #$tmp->{cntSubGoals} = $test->{results}->{bindings}[$i]->{cntSubGoals}{value};
 #$tmp->{wishers} = [];
 $tmp->{url} = $test->{results}->{bindings}[$i]->{goal}{value};
 $tmp->{title} = $test->{results}->{bindings}[$i]->{title}{value};
 $tmp->{requiredTargetDate} = $test->{results}->{bindings}[$i]->{requiredTargetDate}{value};
 $tmp->{desiredTargetDate} = $test->{results}->{bindings}[$i]->{desiredTargetDate}{value};
 $tmp->{completedDate} = $test->{results}->{bindings}[$i]->{completedDate}{value};
 $tmp->{status} = $test->{results}->{bindings}[$i]->{status}{value};
 $tmp->{creator} = $test->{results}->{bindings}[$i]->{creator}{value};
 $tmp->{creatorUrl} = $test->{results}->{bindings}[$i]->{creator}{value};
 $tmp->{creatorImageURI} = $test->{results}->{bindings}[$i]->{imageURI}{value};
 $tmp->{creatorName} = $test->{results}->{bindings}[$i]->{creatorName}{value};
 $tmp->{locationURI} = $test->{results}->{bindings}[$i]->{locationURI}{value};
 #$$tmp->{path} = [];
 $tmp->{dateTime} = $test->{results}->{bindings}[$i]->{submDate}{value};
 $tmp->{createdDate} = $test->{results}->{bindings}[$i]->{submDate}{value};
 #$tmp->{wisherURI} = $test->{results}->{bindings}[$i]->{goalWisherURI}{value};
 #$tmp->{wisherName} = $test->{results}->{bindings}[$i]->{wisherName}{value};
 #$tmp->{wisherImageURI} = $test->{results}->{bindings}[$i]->{wisherImageURI}{value};
 push(@{$result->{goals}}, $tmp);
}
# Build the paths to root node
# TODO IMPORTANT fix to use concurrency. Needs concurrent hash or....
#for ( $i = 0; $i < scalar @{$result->{'goals'}}; $i++ ){
# my $path = BuildPath( $result->{'goals'}[$i]->{url} );
# $result->{goals}[$i]->{goalPath} = $path;
#}
# Emit the collected goals as pretty-printed JSON and stop.
my $js = new JSON;
print $js->pretty->encode( $result);
exit;
# END
# Walk up the socia:subGoalOf chain from the given goal URI to the root goal,
# building a human-readable "root > ... > leaf" breadcrumb string.
# Argument: a goal URI. Returns the breadcrumb string.
sub BuildPath{
	my $workURI = $_[0];
	my @resultArray = ();
	my $index = 0;
	my $resultString = "";
	my $isFirst = 1;
	while ( $workURI ){
		# Fetch the title and (optional) parent of the current goal.
		my $query = "PREFIX dc: <http://purl.org/dc/terms/>
 PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
 select distinct ?goal ?title ?parentGoal
 where {
 ?goal rdf:type socia:Goal;
 dc:title ?title.
 OPTIONAL { ?goal socia:subGoalOf ?parentGoal }
 FILTER ( ?goal = <$workURI>)}";
		try{
			my $temp = execute_sparql( $query );
			my $result_json = decode_json($temp);
			# BUGFIX: use a fresh lexical hashref on every iteration. The
			# original assigned through an undeclared $pathPoint, so every
			# element pushed onto @resultArray aliased the same hash.
			my $pathPoint = {};
			if($isFirst == 1 ){
				$isFirst = 0;
			}else{
				$resultString = " > " . $resultString;
			}
			$resultString = $result_json->{results}{bindings}[0]->{title}{value} . $resultString;
			$pathPoint->{index} = $index;
			$pathPoint->{title} = $result_json->{results}{bindings}[0]->{title}{value};
			$pathPoint->{URI} = $workURI;
			push(@resultArray, $pathPoint );
			#print $workURI . " " .$index."\n";
			$index = $index + 1;
			# undef when the goal has no parent, which ends the loop at the root.
			$workURI = $result_json->{results}{bindings}[0]->{parentGoal}{value};
		} catch {
			# An error occurred: stop building the path.
			# BUGFIX: the original assigned the bareword "False", which (without
			# strict) is the truthy string "False" and spun this loop forever.
			$workURI = undef;
		};
	}
	# BUGFIX: the original was missing semicolons here, making
	# "print $resultString return $resultString;" a syntax error.
	print $resultString;
	return $resultString;
	#return @resultArray;
}
| siramatu/goalshare | api/query_goals.pl | Perl | mit | 11,887 |
#!/usr/bin/perl -w
use strict;

#### DEBUG
my $DEBUG = 0;
#$DEBUG = 1;

=head2
 NAME dumpDb
 PURPOSE
 DUMP ALL THE TABLES IN A DATABASE TO .TSV FILES
 INPUT
 1. DATABASE NAME
 2. LOCATION TO PRINT .TSV FILES
 OUTPUT
 1. ONE .TSV FILE FOR EACH TABLE IN DATABASE
 USAGE
 ./dumpDb.pl <--db String> <--outputdir String> [-h]
 --db : Name of database
 --outputdir : Location of output directory
 --help : print this help message
 < option > denotes REQUIRED argument
 [ option ] denotes OPTIONAL argument
 EXAMPLE
 perl dumpDb.pl --db agua --outputdir /agua/0.6/bin/sql/dump
=cut

#### TIME
my $time = time();

#### USE LIBS
use FindBin qw($Bin);
use lib "$Bin/../../lib";

#### INTERNAL MODULES
use Agua::Configure;
use Agua::DBaseFactory;
use Timer;
use Util;
use Conf::Yaml;

#### EXTERNAL MODULES
use Data::Dumper;
use File::Path;
use File::Copy;
use Getopt::Long;

#### GET OPTIONS
my $db;
my $dumpfile;
my $outputdir;
my $help;
GetOptions (
 'db=s' => \$db,
 'dumpfile=s' => \$dumpfile,
 'outputdir=s' => \$outputdir,
 'help' => \$help) or die "No options specified. Try '--help'\n";
if ( defined $help ) { usage(); }

#### FLUSH BUFFER
$| =1;

#### CHECK INPUTS
die "Database not defined (option --db)\n" if not defined $db;
die "Output directory not defined (option --outputdir)\n" if not defined $outputdir;
die "File with same name as output directory already exists: $outputdir\n" if -f $outputdir;

#### CREATE OUTPUT DIRECTORY
File::Path::mkpath($outputdir) if not -d $outputdir;
die "Can't create output directory: $outputdir\n" if not -d $outputdir;

#### SET LOG
my $logfile = "/tmp/dumpdb.log";
my $log = 2;
my $printlog = 5;

#### GET CONF
my $configfile = "$Bin/../../conf/config.yaml";
my $conf = Conf::Yaml->new({
 inputfile => $configfile,
 logfile => $logfile,
 log => 2,
 printlog => 5
});

#### GET DATABASE INFO
my $dbtype = $conf->getKey("database", 'DBTYPE');
my $database = $conf->getKey("database", 'DATABASE');
my $user = $conf->getKey("database", 'USER');
my $password = $conf->getKey("database", 'PASSWORD');
print "dumpDb.pl dbtype: $dbtype\n" if $DEBUG;
print "dumpDb.pl user: $user\n" if $DEBUG;
print "dumpDb.pl password: $password\n" if $DEBUG;
print "dumpDb.pl database: $database\n" if $DEBUG;

#### NOTE: a second, redundant "CREATE OUTPUT DIRECTORY" stanza was removed
#### here -- the directory is already created and verified above.

#### DUMP THE DATABASE TO A TIMESTAMPED FILE
my $object = Agua::Configure->new({
 conf => $conf,
 database => $db,
 configfile => $configfile,
 logfile => $logfile,
 log => $log,
 printlog => $printlog
});
$object->setDbh();
# ":" is not filename-safe on all platforms, so swap it for "-".
my $timestamp = $object->_getTimestamp($database, $user, $password);
$timestamp =~ s/:/-/g;
$dumpfile = "$outputdir/$db.$timestamp.dump" if not defined $dumpfile;
$object->_dumpDb($database, $user, $password, $dumpfile);
print "dumpfile:\n\n$dumpfile\n\n";

#### PRINT RUN TIME
my $runtime = Timer::runtime( $time, time() );
print "\n";
print "dumpDb.pl Run time: $runtime\n";
print "dumpDb.pl Completed $0\n";
print Util::datetime(), "\n";
print "dumpDb.pl ****************************************\n\n\n";
exit;
# Print this script's embedded POD as the help text, then terminate.
sub usage
{
	print scalar `perldoc $0`;
	exit;
}
| aguadev/aguadev | bin/scripts/dumpDb.pl | Perl | mit | 3,428 |
=head1 LICENSE
Copyright 2015 EMBL - European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package Bio::Metadata::Validate::TextAttributeValidator;

use strict;
use warnings;

use Moose;
use namespace::autoclean;

with 'Bio::Metadata::Validate::AttributeValidatorRole';

# Validate a text attribute against a rule's list of permitted values.
# Arguments: the rule, the attribute under test, and an outcome object $o.
# Sets $o's outcome ('pass'/'error') and, on error, a message; returns $o.
sub validate_attribute {
  my ( $self, $rule, $attribute, $o ) = @_;

  # NOTE(review): a value of "0" or "" is treated as missing here -- confirm
  # that is intended.
  if ( !$attribute->value ) {
    $o->outcome('error');
    $o->message( 'no text provided, text should be one of the following:'
        . $rule->join_valid_values(',') );
    return $o;
  }

  # A rule with no enumerated values accepts any non-empty text.
  if ( $rule->count_valid_values() == 0 ) {
    $o->outcome('pass');
    return $o;
  }

  if ( $rule->find_valid_value( sub { $attribute->value eq $_ } ) ) {
    $o->outcome('pass');
  }
  else {
    $o->outcome('error');
    $o->message( 'value is not in list of valid values:'
        . $rule->join_valid_values(',') );
  }

  return $o;
}

__PACKAGE__->meta->make_immutable;

1;
| FAANG/validate-metadata | lib/Bio/Metadata/Validate/TextAttributeValidator.pm | Perl | apache-2.0 | 1,500 |
# Auto-generated SOAP::WSDL element class for the RequestHeader element of the
# AdWords v201409 OfflineConversionFeedService. Do not edit by hand; changes
# will be lost on regeneration.
package Google::Ads::AdWords::v201409::OfflineConversionFeedService::RequestHeader;
use strict;
use warnings;

{ # BLOCK to scope variables

# XML namespace this element belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' }

# Register element metadata with the SOAP::WSDL class builder. The empty
# calls reset nillable/minOccurs/maxOccurs/ref to their defaults.
__PACKAGE__->__set_name('RequestHeader');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();

use base qw(
 SOAP::WSDL::XSD::Typelib::Element
 Google::Ads::AdWords::v201409::SoapHeader
);

}

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::OfflineConversionFeedService::RequestHeader
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
RequestHeader from the namespace https://adwords.google.com/api/adwords/cm/v201409.
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201409::OfflineConversionFeedService::RequestHeader->new($data);
Constructor. The following data structure may be passed to new():
$a_reference_to, # see Google::Ads::AdWords::v201409::SoapHeader
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/OfflineConversionFeedService/RequestHeader.pm | Perl | apache-2.0 | 1,059 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Don't change the above line.
# Change the PATH in the myManager.ksh script if you want to use another perl.
=head1 NAME
manage_id_mapping_tables.pl - script to delete (and optionally backup) ID
mapping results
=head1 SYNOPSIS
manage_id_mapping_tables.pl [arguments]
Required arguments:
--dbname, db_name=NAME database name NAME
--host, --dbhost, --db_host=HOST database host HOST
--port, --dbport, --db_port=PORT database port PORT
--user, --dbuser, --db_user=USER database username USER
  --pass, --dbpass, --db_pass=PASS    database password PASS
Optional arguments:
--conffile, --conf=FILE read parameters from FILE
(default: conf/Conversion.ini)
--logfile, --log=FILE log to FILE (default: *STDOUT)
--logpath=PATH write logfile to PATH (default: .)
--logappend, --log_append append to logfile (default: truncate)
--loglevel=LEVEL define log level (default: INFO)
-i, --interactive run script interactively (default: true)
-n, --dry_run, --dry don't write results to database
-h, --help, -? print help (this message)
=head1 DESCRIPTION
This script will delete stable ID mapping data from a database. The script is
intended to be run interactively (your configuration will be overridden).
The tables that will be emptied are:
gene_stable_id
transcript_stable_id
translation_stable_id
exon_stable_id
mapping_session
stable_id_event
gene_archive
peptide_archive
Optionally (by interactive selection), the current tables can be backed up.
Backup tables will get suffices of _bak_0, _bak_1, etc. (where the correct
Backkup tables will get suffices of _bak_0, _bak_1, etc. (where the correct
number is determined automatically from existing backup tables). There is also
an option to drop existing backup tables.
Deleting from the current tables can also be skipped, so effectively this
script can do three different things (or any combination of them), depending on
your answers in the interactive process:
- drop existing backup tables
- backup current tables
- delete from current tables
=head1 AUTHOR
Patrick Meidl <meidl@ebi.ac.uk>, Ensembl core API team
=head1 CONTACT
Please post comments/questions to the Ensembl development list
<http://lists.ensembl.org/mailman/listinfo/dev>
=cut
use strict;
use warnings;
no warnings 'uninitialized';

use FindBin qw($Bin);
use Bio::EnsEMBL::Utils::ConfParser;
use Bio::EnsEMBL::Utils::Logger;
use Bio::EnsEMBL::Utils::ScriptUtils qw(user_proceed);
use Bio::EnsEMBL::DBSQL::DBAdaptor;

# ID-mapping tables managed by this script. The "*_stable_id" entries are
# later reduced to their base table name (gene, transcript, ...) and handled
# by NULLing the stable_id column rather than truncating the table.
my @tables = qw(
  gene_stable_id
  transcript_stable_id
  translation_stable_id
  exon_stable_id
  mapping_session
  stable_id_event
  gene_archive
  peptide_archive
);

# Set of numeric suffixes of existing "_bak_<n>" backup tables.
my %suffnum = ();

# parse configuration and commandline arguments
my $conf =
  new Bio::EnsEMBL::Utils::ConfParser(-SERVERROOT => "$Bin/../../../..",
                                      -DEFAULT_CONF => "" );

$conf->parse_options( 'host=s' => 1,
                      'port=n' => 1,
                      'user=s' => 1,
                      'pass=s' => 0,
                      'dbname=s' => 1, );

# get log filehandle and print heading and parameters to logfile
my $logger =
  new Bio::EnsEMBL::Utils::Logger(
                                   -LOGFILE    => $conf->param('logfile'),
                                   -LOGPATH    => $conf->param('logpath'),
                                   -LOGAPPEND  => $conf->param('logappend'),
                                   -VERBOSE    => $conf->param('verbose'),
  );

# always run interactively
$conf->param( 'interactive', 1 );

# initialise log
$logger->init_log( $conf->list_param_values );

# connect to database and get adaptors
my $dba =
  new Bio::EnsEMBL::DBSQL::DBAdaptor( -host   => $conf->param('host'),
                                      -port   => $conf->param('port'),
                                      -user   => $conf->param('user'),
                                      -pass   => $conf->param('pass'),
                                      -dbname => $conf->param('dbname'),
                                      -group  => 'core', );
$dba->dnadb($dba);

my $dbh = $dba->dbc->db_handle;

# first check which tables are populated
&list_table_counts;

# then look for existing backup tables; $sfx is the suffix to use for the
# next backup and may be recalculated by drop_backup_tables().
my $sfx = &list_backup_counts;

# ask user if he wants to drop backup tables
if ( %suffnum
     and
     user_proceed(
         "Drop any backup tables? (you will be able chose which ones)",
         $conf->param('interactive'), 'n' ) )
{
  &drop_backup_tables;
}

# ask user if current tables should be backed up
if (user_proceed( "Backup current tables?", $conf->param('interactive'),
                  'y' ) )
{
  &backup_tables($sfx);
}

# delete from tables
if ( user_proceed( "Delete from current tables?",
                   $conf->param('interactive'),
                   'n' ) )
{
  &delete_from_tables;
}

# finish logfile
$logger->finish_log;
### END main ###
# Report row counts for the live (non-backup) ID-mapping tables.
sub list_table_counts {
  $logger->info("Current table counts:\n\n");
  list_counts( [ @tables ] );
}
# Scan the database for existing "<table>_bak_<n>" backup tables, record the
# numeric suffixes seen in %suffnum, print counts for each backup set, and
# return the suffix string (e.g. "_bak_2") to use for the next backup.
sub list_backup_counts {
  my $new_num = -1;

  foreach my $table (@tables) {
    my $thetable = $table;
    # Strip "_stable_id" down to the base table name (gene, exon, ...).
    if ( $table =~ /^([^_]+)_stable_id/ ) {
      $thetable = $1;
    }
    my $sth = $dbh->prepare(qq(SHOW TABLES LIKE "${thetable}_bak_%"));
    $sth->execute;

    # Collect every backup suffix number and track the highest one seen.
    while ( my ($bak) = $sth->fetchrow_array ) {
      if ($bak =~ /_bak_(\d+)$/) {
        my $num = $1;
        $suffnum{$num} = 1;

        $new_num = $num if ( $num > $new_num );
      }
    }

    $sth->finish;
  }

  $logger->info("Backup tables found:\n\n") if (%suffnum);

  # Print one count block per backup suffix.
  # NOTE(review): "sort keys" is a string sort; with 10+ backup sets the
  # display order would be lexical (1, 10, 2, ...). Counting is unaffected.
  foreach my $num ( sort keys %suffnum ) {
    my @t = ();

    foreach my $table (@tables) {
      my $thetable = $table;
      if ( $table =~ /^([^_]+)_stable_id/ ) {
        $thetable = $1;
      }
      push @t, "${thetable}_bak_$num";
    }
    &list_counts( [@t] );
    $logger->info("\n");
  }

  my $sfx = '_bak_' . ++$new_num;

  return $sfx;
} ## end sub list_backup_counts
# Print a formatted row-count table for the given tables. For "*_stable_id"
# entries only rows with a non-NULL stable_id are counted (on the base table).
# Argument: arrayref of table names.
sub list_counts {
  my $tabs = shift;

  unless ( $tabs and ref($tabs) eq 'ARRAY' ) {
    # BUGFIX: the original called throw(), which is never imported by this
    # script (only user_proceed is), so it crashed with "Undefined
    # subroutine &main::throw". die() carries the same fatal intent.
    die("Need an arrayref.");
  }

  $logger->info( sprintf( "%-30s%-8s\n", qw(TABLE COUNT) ), 1 );
  $logger->info( ( '-' x 38 ) . "\n", 1 );

  my $fmt = "%-30s%8d\n";

  foreach my $table (@$tabs) {
    my $sth;
    my $thetable = $table;
    if ( $table =~ /^([^_]+)_stable_id/ ) {
      $thetable = $1;
      $sth = $dbh->prepare(
           qq(SELECT COUNT(*) FROM $thetable WHERE stable_id IS NOT NULL));
    }
    else {
      $sth = $dbh->prepare(qq(SELECT COUNT(*) FROM $thetable));
    }
    $sth->execute;
    my $count = $sth->fetchrow_arrayref->[0];
    $sth->finish;

    $logger->info( sprintf( $fmt, $thetable, $count ), 1 );
  }

  $logger->info("\n");
} ## end sub list_counts
# Interactively offer to drop each set of backup tables (grouped by numeric
# suffix), then recalculate the suffix to use for the next backup.
sub drop_backup_tables {
  foreach my $num ( sort keys %suffnum ) {
    my $suffix = "_bak_$num";

    if ( user_proceed( qq(Drop backup tables with suffix ${suffix}?),
                       $conf->param('interactive'), 'n' ) )
    {
      foreach my $table (@tables) {
        my $thetable = $table;
        if ( $table =~ /^([^_]+)_stable_id/ ) {
          $thetable = $1;
        }
        my $bak_table = "${thetable}${suffix}";
        $logger->info( "$bak_table\n", 1 );
        unless ( $conf->param('dry_run') ) {
          $dbh->do(qq(DROP TABLE $bak_table));
        }
      }

      # remove the suffix number
      delete $suffnum{$num};
    }
  }

  $logger->info("\n");

  # Recalculate the suffix number to use for the current backup.
  # BUGFIX: the original used "my $max_num = reverse sort keys %suffnum;".
  # In scalar context reverse() string-reverses the concatenated keys (e.g.
  # remaining keys 0 and 1 gave "10", hence suffix "_bak_11"). Take the
  # numeric maximum instead, falling back to -1 when no backups remain so
  # the next suffix is "_bak_0" (matching list_backup_counts()).
  my ($max_num) = sort { $b <=> $a } keys %suffnum;
  $max_num = -1 unless defined $max_num;
  $sfx = '_bak_' . ++$max_num;
} ## end sub drop_backup_tables
# Copy each live table into "<table><suffix>" (CREATE TABLE ... LIKE followed
# by INSERT ... SELECT), printing the number of rows copied per table.
# Argument: the backup suffix string, e.g. "_bak_0".
sub backup_tables {
  my $sfx = shift;

  # BUGFIX: throw() is never imported by this script; die() keeps the same
  # fatal behaviour without the undefined-subroutine crash.
  die("Need a backup table suffix.") unless ( defined($sfx) );

  $logger->info(qq(\nWill use '$sfx' as suffix for backup tables\n));

  $logger->info(qq(\nBacking up tables...\n));

  my $fmt1 = "%-30s";
  my $fmt2 = "%8d\n";

  foreach my $table (@tables) {
    my $thetable = $table;
    if ( $table =~ /^([^_]+)_stable_id/ ) {
      $thetable = $1;
    }
    $logger->info( sprintf( $fmt1, $thetable ), 1 );

    my $c = 0;

    # Only count rows as copied when both statements succeed and this is
    # not a dry run.
    if ( !$conf->param('dry_run') &&
         $dbh->do(qq(CREATE TABLE ${thetable}${sfx} LIKE ${thetable})) )
    {
      $c = $dbh->do(
                qq(INSERT INTO ${thetable}${sfx} SELECT * FROM ${thetable}));
    }

    $logger->info( sprintf( $fmt2, $c ) );
  }

  $logger->info(qq(Done.\n));
} ## end sub backup_tables
# Empty the live tables: "*_stable_id" entries have their stable_id column
# set to NULL on the base table, all other tables are truncated outright.
# Prints the number of affected rows per table.
sub delete_from_tables {
  my $name_fmt  = "%-30s";
  my $count_fmt = "%8d\n";

  $logger->info(qq(\nDeleting from current tables...\n));

  foreach my $table (@tables) {
    my $target = $table;
    $target = $1 if $table =~ /^([^_]+)_stable_id/;

    $logger->info( sprintf( $name_fmt, $target ), 1 );

    my $affected = 0;
    if ( not $conf->param('dry_run') ) {
      $affected =
        ( $table =~ /^([^_]+)_stable_id/ )
        ? $dbh->do(qq(UPDATE $target SET stable_id=NULL))
        : $dbh->do(qq(TRUNCATE $target));
    }

    $logger->info( sprintf( $count_fmt, $affected ) );
  }

  $logger->info(qq(Done.\n));
} ## end sub delete_from_tables
| at7/ensembl | misc-scripts/id_mapping/utils/manage_id_mapping_tables.pl | Perl | apache-2.0 | 9,833 |
#!/usr/bin/perl -w
# Load GFF annotations (read from STDIN/ARGV) into a CoGe database as
# features, locations, names and annotations attached to a dataset.
use strict;
use Data::Dumper;
use CoGeX;
use Getopt::Long;

my $GO = 0;       # nothing is written to the database unless --go is true
my $DEBUG = 1;
my ($dsid, $ds_name, $ds_desc, $ds_link, $ds_version, $source_name, $source_desc, $source_link, $source_id);
my $add_gene =0;          # synthesize a spanning "gene" feature per name
my $add_cds =0;           # duplicate mRNA locations as CDS features
my $add_type_to_name =0;
my @names;                # GFF attribute keys treated as feature names
my @skip_anno_types;      # attribute keys to ignore entirely
my @skip_feat_types;      # feature types (column 3) to skip at load time
my @anno_names;           # attribute keys stored as annotations

# NOTE(review): placeholder credentials -- expected to be substituted at
# deploy time; confirm this script is not run with these literals.
my $connstr = 'dbi:mysql:dbname=coge;host=HOST;port=PORT';
my$coge = CoGeX->connect($connstr, 'USER', 'PASSWORD' );
#$coge->storage->debugobj(new DBIxProfiler());
#$coge->storage->debug(1);

GetOptions (
	    "source_name=s" => \$source_name, # datasource
	    "source_desc=s" => \$source_desc,
	    "source_link=s" => \$source_link,
	    "source_id=s" => \$source_id,
	    "ds_name=s" => \$ds_name,# datasetid
	    "ds_desc=s" => \$ds_desc,
	    "ds_link=s" => \$ds_link,
	    "ds_version=s" => \$ds_version,
	    "dsid=i" => \$dsid,
	    "go=s" => \$GO,
	    "debug=s" => \$DEBUG,
	    "name=s" => \@names,
	    "anno_name=s" => \@anno_names,
	    "add_gene_feature" => \$add_gene,
	    "add_cds_feature" => \$add_cds,
	    "add_type_to_name"=>\$add_type_to_name, #adds type (column 2) to name
	    "skip_anno_type=s"=>\@skip_anno_types,
	    "skip_feat_type=s"=>\@skip_feat_types,
	   );

# Resolve or create the data source record when a name was supplied.
if ($source_name)
  {
    my $source = $coge->resultset("DataSource")->find_or_create({name=>$source_name,description=>$source_desc, link=>$source_link});
    $source_id = $source->id;
  }

# Resolve or create the dataset this load attaches to.
my $ds = generate_ds(ds_name => $ds_name,
		     ds_desc => $ds_desc,
		     ds_link => $ds_link,
		     ds_version => $ds_version,
		     ds_id =>$dsid,
		     source_id=>$source_id,
		    );
#  $coge->resultset('Dataset')->find($dsid);
unless ($ds)
  {
    warn "unable to find or create a valid dataset entry";
    exit;
  }
print "Working on dataset: ", $ds->name. " (".$ds->id.")\n";
#some defaults to check for in names and annotations
push @names, "mRNA" unless @names;
push @anno_names, "Description";
push @anno_names, "biotype";
# NOTE(review): "my VAR = ... if COND" has undefined behaviour in Perl;
# harmless here only because @anno_names is always non-empty after the
# pushes above -- confirm before reusing this pattern.
my %anno_names = map {$_,1} @anno_names if @anno_names;
my %check_names = map {$_,1} @names;
my %skip_anno_types = map {$_,1} @skip_anno_types;
my %skip_feat_types = map {$_,1} @skip_feat_types;
warn "-go flag is not true, nothing will be added to the database.\n" unless $GO;
# %data: source -> feature name -> feature type -> { loc => [...], names => {...} }
my %data;
# %annos: feature name -> set of "key: value" annotation strings
my %annos;
# %master_names: symmetric closure of names seen together on one GFF line
my %master_names;
my %feat_types; #store feature type objects
my ($anno_type) = $coge->resultset('AnnotationType')->search({name=>"note"}); #generic annotation type
my $prev_type;
# Parse GFF lines into %data / %annos / %master_names. Each data line is
# split on tabs; column 9 holds ";"-separated attributes in either GFF3
# ("key=value") or GFF2 ("key value") form.
while (<>)
  {
    next if /^#/;
    chomp;
    next unless $_;
    my @line = split /\t/;
    # Feature types never loaded.
    next if $line[2] eq "clone";
#    next if $line[2] eq "mRNA";
    next if $line[2] eq "intron";
    next if $line[2] eq "chromosome";
    # Normalize the chromosome name: drop "chromosome"/"chr" prefixes,
    # a leading underscore and leading zeros, then keep the first token.
    my $chr;
    $chr = $line[0];
    $chr =~ s/chromosome//i;
    $chr =~ s/chr//i;
    $chr =~ s/^_//i;
    $chr =~ s/^0//g;
    ($chr) = split /\s+/,$chr;
    my %names;
    my $name;
    foreach my $item (split /;/, $line[-1])
      {
	my $tmp;
	$item =~ s/"//g;
	$item =~ s/^\s+//;
	$item =~ s/\s+$//;
	my ($type, $info) = $item =~ /=/ ? split (/=/,$item,2) : (split / /,$item,2);
	# Decode URL-style percent escapes in the attribute value.
	$info =~ s/\%([A-Fa-f0-9]{2})/pack('C', hex($1))/seg;
	next if $skip_anno_types{$type};
	if ($check_names{$type})
	  {
	    # This attribute key is a name source; the first one seen
	    # becomes the primary name. Also register common aliases
	    # (version suffix stripped, "LOC_" prefix stripped).
	    $names{$info} =1;
	    $name = $info unless $name;
	    if ($info =~ /\.\d+$/)
	      {
		my $tmp = $info;
		$tmp =~ s/\.\d$//;
		$names{$tmp}=1;
	      }
	    if ($info =~ /^LOC_/)
	      {
		my $tmp = $info;
		$tmp =~ s/^LOC_//;
		$names{$tmp}=1;
		$tmp =~ s/\.\d$//;
		$names{$tmp}=1;
	      }
	  }
	# Anything mentioning transposons is reclassified.
	$line[2] = "Transposable Element" if $info =~ /transpos/i;
	next unless $name; #No name, don't know what to do!
	$annos{$name}{"$type: $info"}=1 if $anno_names{$type};
      }
    # Record every pair of co-occurring names as aliases of each other.
    foreach my $i (keys %names)
      {
	foreach my $j (keys %names)
	  {
	    $master_names{$i}{$j}=1;
	    $master_names{$j}{$i}=1;
	  }
      }
    next unless $name; #No name, don't know what to do!
    next if $skip_feat_types{$line[2]};
    my $strand = $line[6] =~ /-/ ? -1 :1;
    # Collapse exon/UTR features into mRNA; optionally mirror mRNA as CDS
    # (and CDS as mRNA) so both representations get locations.
    my $type = ($line[2]);
    $type = "mRNA" if $type =~ /^exon$/i;
    $type = "mRNA" if $type =~ /^five_prime_UTR$/i;
    $type = "mRNA" if $type =~ /^three_prime_UTR$/i;
    my @type = ($type);
    push @type, "CDS" if $add_cds && $type eq "mRNA";
    ####add
    push @type, "mRNA" if $type eq "CDS";
    foreach my $type (@type)
      {
	push @{$data{$line[1]}{$name}{$type}{loc}}, {
						     start=>$line[3],
						     stop=>$line[4],
						     strand=>$strand,
						     chr=>$chr,
						    };
	map {$data{$line[1]}{$name}{$type}{names}{$_}=1} keys %names;
      }
#    print Dumper \%data;
  }
# Optionally synthesize one spanning "gene" feature per name, covering the
# min start to max stop of all that name's other features.
if ($add_gene)
  {
    foreach my $source (keys %data)
      {
      name: foreach my $name (keys %{$data{$source}})
	  {
	    my $start;
	    my $stop;
	    my $strand;
	    my $chr;
	    my %names;
	    foreach my $type (keys %{$data{$source}{$name}})
	      {
		map {$names{$_}=1} keys %{$data{$source}{$name}{$type}{names}};
		foreach my $loc (@{$data{$source}{$name}{$type}{loc}})
		  {
		    # A gene feature already exists in the input for this
		    # name: skip synthesis entirely.
		    next name if $type eq "gene";
		    $start = $loc->{start} unless $start;
		    $start = $loc->{start} if $loc->{start} < $start;
		    $stop = $loc->{stop} unless $stop;
		    $stop = $loc->{stop} if $loc->{stop} > $stop;
		    # Last-seen strand/chr win; assumes all locations of a
		    # name share them.
		    $strand = $loc->{strand};
		    $chr = $loc->{chr};
		  }
	      }
	    $data{$source}{$name}{gene}{loc}=[{
					       start=>$start,
					       stop=>$stop,
					       strand=>$strand,
					       chr=>$chr,
					      }];
	    $data{$source}{$name}{gene}{names}= \%names;
	  }
      }
  }
#print Dumper \%data;
#print Dumper \%annos;
#exit;
# Write accumulated features to the database: one feature per
# (source, name, type) with its locations, alias names and annotations.
# Nothing is written unless $GO is true; $DEBUG traces each step.
foreach my $source (keys %data)
  {
    foreach my $name (keys %{$data{$source}})
      {
	foreach my $feat_type (keys %{$data{$source}{$name}})
	  {
	    # Overall extent of the feature across all its locations.
	    my ($start) = sort {$a<=>$b} map {$_->{start}} @{$data{$source}{$name}{$feat_type}{loc}};
	    my ($stop) = sort {$b<=>$a} map {$_->{stop}} @{$data{$source}{$name}{$feat_type}{loc}};
	    my ($strand) = map {$_->{strand}} @{$data{$source}{$name}{$feat_type}{loc}};
	    my ($chr) = map {$_->{chr}} @{$data{$source}{$name}{$feat_type}{loc}};
	    # Cache FeatureType rows so each type is looked up only once.
	    $feat_types{$feat_type} = $coge->resultset('FeatureType')->find_or_create( { name => $feat_type } ) if $GO && !$feat_types{$feat_type};
	    my $feat_type_obj = $feat_types{$feat_type};
	    print "Creating feature of type $feat_type\n" if $DEBUG;
	    my $feat = $ds->add_to_features({
					     feature_type_id => $feat_type_obj->id,
					     start=>$start,
					     stop=>$stop,
					     chromosome=>$chr,
					     strand=>$strand,
					    }) if $GO;
	    my $featid = $feat ? $feat->id : "no_go";
	    foreach my $loc (@{$data{$source}{$name}{$feat_type}{loc}})
	      {
		print "\tAdding location $chr:(".$loc->{start}."-".$loc->{stop}.", $strand)\n" if $DEBUG;
		my $loc_tmp = $feat->add_to_locations(
						      {
						       start      => $loc->{start},
						       stop       => $loc->{stop},
						       strand     => $loc->{strand},
						       chromosome => $loc->{chr}
						      }
						     ) if $GO;
	      }
	    # Expand the name set with aliases recorded in %master_names.
	    my $names = $data{$source}{$name}{$feat_type}{names};
	    my @names = keys %$names;
	    foreach my $tmp (@names)
	      {
		foreach my $item (keys %{$master_names{$tmp}})
		  {
		    $names->{$item}=1;
		  }
	      }
	    foreach my $tmp (keys %{$names})
	      {
		print "\tAdding name $tmp to feature ", $featid ,"\n" if $DEBUG;
		my $feat_name = $feat->add_to_feature_names({
							     name=>$tmp,
							     #	       feature_id=>$featid,
							    }) if $GO ;
		# Attach any annotations keyed by this alias.
		if ($annos{$tmp})
		  {
		    foreach my $anno (keys %{$annos{$tmp}})
		      {
			print "\tAdding annotation $anno\n" if $DEBUG;
			my $annoo = $feat->add_to_annotations({annotation=>$anno, annotation_type_id => $anno_type->id}) if $GO && $anno;
		      }
		  }
	      }
	  }
      }
  }
print "Completed working on dataset: ", $ds->name. " (".$ds->id.")\n";
# Resolve the dataset this load attaches to: an explicit database id wins;
# otherwise the dataset is found or created from name/description/link/
# version plus the data source id. Returns a CoGeX Dataset object, or
# undef (with a warning) when neither a name nor an id was supplied.
sub generate_ds
  {
    my %opts = @_;
    my $ds_name    = $opts{ds_name};
    my $ds_desc    = $opts{ds_desc};
    my $ds_link    = $opts{ds_link};
    my $ds_version = $opts{ds_version};
    my $ds_id      = $opts{ds_id};
    my $source_id  = $opts{source_id};

    if ( not( $ds_name or $ds_id ) )
      {
	warn "no dataset name or database id specified\n";
	return;
      }

    return $coge->resultset('Dataset')->find($ds_id) if $ds_id;

    return $coge->resultset('Dataset')->find_or_create(
						       {
							name           => $ds_name,
							description    => $ds_desc,
							link           => $ds_link,
							data_source_id => $source_id,
							version        => $ds_version,
						       });
  }
| LyonsLab/coge | scripts/old/gff_annotation_loader_msu_rice.pl | Perl | bsd-2-clause | 8,543 |
package App::Netdisco::Util::ExpandParams;

use base qw/CGI::Expand/;

use strict;
use warnings;

# No cap on the number of array elements CGI::Expand may build.
sub max_array {0}

# Characters treated as structure separators in flattened parameter names.
sub separator {'.[]'}

# Split a flattened name such as "a[b][c]" into its segments ("a","b","c").
# The first match grabs everything up to the first '[', ']' or '.'; the /g
# modifier leaves pos() set so the second match can continue from there
# with \G, collecting each bracketed token in turn.
sub split_name {
    my $class = shift;
    my $name = shift;

    $name =~ /^ ([^\[\]\.]+) /xg;
    my @segs = $1;

    push @segs, ( $name =~ / \G (?: \[ ([^\[\]\.]+) \] ) /xg );

    return @segs;
}

# Inverse of split_name: join segments back into "first[second][third]" form.
# A single segment is returned unchanged.
sub join_name {
    my $class = shift;
    my ( $first, @segs ) = @_;

    return $first unless @segs;
    return "$first\[" . join( '][', @segs ) . "]";
}

1;
__END__
=head1 NAME
App::Netdisco::Util::ExpandParams
=head1 DESCRIPTION
CGI::Expand subclass providing Rails-like tokenization of parameters passed
during DataTables server-side processing.
=cut
| gitpan/App-Netdisco | lib/App/Netdisco/Util/ExpandParams.pm | Perl | bsd-3-clause | 696 |
#
# This is not a runnable script, it is a Perl module, a collection of variables, subroutines, etc.
# to be used in other scripts.
#
# To get help about exported variables and subroutines, please execute the following command:
#
# perldoc tools.pm
#
# or see POD (Plain Old Documentation) embedded in the source...
#
#
#//===----------------------------------------------------------------------===//
#//
#// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
#// See https://llvm.org/LICENSE.txt for license information.
#// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#//
#//===----------------------------------------------------------------------===//
#
=head1 NAME
B<tools.pm> -- A collection of subroutines which are widely used in Perl scripts.
=head1 SYNOPSIS
use FindBin;
use lib "$FindBin::Bin/lib";
use tools;
=head1 DESCRIPTION
B<Note:> Because this collection is small and intended for widely using in particular project,
all variables and functions are exported by default.
B<Note:> I have some ideas how to improve this collection, but it is in my long-term plans.
Current shape is not ideal, but good enough to use.
=cut
package tools;

use strict;
use warnings;

use vars qw( @ISA @EXPORT @EXPORT_OK %EXPORT_TAGS );
require Exporter;
@ISA = qw( Exporter );

# Exported symbols, grouped by purpose. Everything below is exported by
# default (see POD note above about this deliberate choice).
my @vars    = qw( $tool );
my @utils   = qw( check_opts validate );
my @opts    = qw( get_options );
my @print   = qw( debug info warning cmdline_error runtime_error question );
my @name    = qw( get_vol get_dir get_file get_name get_ext cat_file cat_dir );
my @file    = qw( which abs_path rel_path real_path make_dir clean_dir copy_dir move_dir del_dir change_dir copy_file move_file del_file );
my @io      = qw( read_file write_file );
my @exec    = qw( execute backticks );
my @string  = qw{ pad };
@EXPORT = ( @utils, @opts, @vars, @print, @name, @file, @io, @exec, @string );

use UNIVERSAL    ();

use FindBin;
use IO::Handle;
use IO::File;
use IO::Dir;
# Not available on some machines: use IO::Zlib;

use Getopt::Long ();
use Pod::Usage   ();
use Carp         ();
use File::Copy   ();
use File::Path   ();
use File::Temp   ();
use File::Spec   ();
use POSIX        qw{ :fcntl_h :errno_h };
use Cwd          ();
use Symbol       ();
use Data::Dumper;

# Package-level state: $tool is the current script's basename (used to
# prefix every printed line); $verbose and $timestamps control output.
use vars qw( $tool $verbose $timestamps );
$tool = $FindBin::Script;

# Dispatch table indexed by severity: 0 = ignore, 1 = warn, 2 = fatal.
# NOTE(review): \&warning and \&runtime_error are defined later in this
# file (outside this excerpt).
my @warning = ( sub {}, \&warning, \&runtime_error );
# Verify that a hash of real options contains only known option names.
# Unknown options are deleted from the hash in place and reported with a
# stack trace (Carp::cluck) attributed to the caller's caller.
# Arguments: hash of options (taken by reference via the prototype), an
# arrayref of known option names, and an optional message prefix.
# Always returns 1.
sub check_opts(\%$;$) {

    my $opts = shift( @_ );    # Hashref with the options actually passed in.
    my $good = shift( @_ );    # Arrayref listing every recognized option name.
    my $msg  = shift( @_ );    # Optional message prefix for the warning.

    $msg = "unknown option(s) passed" if not defined( $msg );

    # Build a lookup set of the known option names.
    my %known = map( ( $_ => 1 ), @$good );

    # Collect the unrecognized options, then remove them from the hash.
    my @unknown = grep( ( not exists( $known{ $_ } ) ), keys( %$opts ) );
    delete( @$opts{ @unknown } );

    if ( @unknown ) {
        # Complain from the perspective of our caller's caller.
        my @caller = caller( 1 );
        local $Carp::CarpLevel = 2;
        Carp::cluck( $caller[ 3 ] . ": " . $msg . ": " . join( ", ", sort( @unknown ) ) );
    }; # if

    return 1;

}; # sub check_opts
# --------------------------------------------------------------------------------------------------
# Purpose:
# Check subroutine arguments.
# Synopsis:
# my %opts = validate( params => \@_, spec => { ... }, caller => n );
# Arguments:
# params -- A reference to subroutine's actual arguments.
# spec -- Specification of expected arguments.
# caller -- ...
# Return value:
# A hash of validated options.
# Description:
# I would like to use Params::Validate module, but it is not a part of default Perl
# distribution, so I cannot rely on it. This subroutine resembles to some extent to
# Params::Validate::validate_with().
# Specification of expected arguments:
# { $opt => { type => $type, default => $default }, ... }
# $opt -- String, option name.
# $type -- String, expected type(s). Allowed values are "SCALAR", "UNDEF", "BOOLEAN",
# "ARRAYREF", "HASHREF", "CODEREF". Multiple types may listed using bar:
# "SCALAR|ARRAYREF". The type string is case-insensitive.
# $default -- Default value for an option. Will be used if option is not specified or
# undefined.
#
# See the block comment above for the full contract. In short: a light-weight
# stand-in for Params::Validate::validate_with() that checks option names,
# value types and allowed values against a spec, applies defaults, and dies
# with all accumulated errors attributed to the caller's location.
sub validate(@) {

    my %opts = @_;    # Temporary use %opts for parameters of `validate' subroutine.
    my $params = $opts{ params };
    my $caller = ( $opts{ caller } or 0 ) + 1;
    my $spec   = $opts{ spec };
    undef( %opts );   # Ok, Clean %opts, now we will collect result of the subroutine.

    # Find out caller package, filename, line, and subroutine name.
    my ( $pkg, $file, $line, $subr ) = caller( $caller );

    my @errors;    # We will collect errors in array not to stop on the first found error.
    my $error =
        sub ($) {
            my $msg = shift( @_ );
            push( @errors, "$msg at $file line $line.\n" );
        }; # sub

    # Check options.
    # @$params is consumed pairwise: option name, then option value.
    while ( @$params ) {
        # Check option name.
        my $opt = shift( @$params );
        if ( not exists( $spec->{ $opt } ) ) {
            $error->( "Invalid option `$opt'" );
            shift( @$params ); # Skip value of unknow option.
            next;
        }; # if
        # Check option value exists.
        if ( not @$params ) {
            $error->( "Option `$opt' does not have a value" );
            next;
        }; # if
        my $val = shift( @$params );
        # Check option value type.
        if ( exists( $spec->{ $opt }->{ type } ) ) {
            # Type specification exists. Check option value type.
            # The actual type is derived from ref(): "<REFTYPE>REF" for any
            # reference, otherwise SCALAR or UNDEF. "boolean" in the spec is
            # treated as "scalar|undef".
            my $actual_type;
            if ( ref( $val ) ne "" ) {
                $actual_type = ref( $val ) . "REF";
            } else {
                $actual_type = ( defined( $val ) ? "SCALAR" : "UNDEF" );
            }; # if
            my @wanted_types = split( m{\|}, lc( $spec->{ $opt }->{ type } ) );
            my $wanted_types = join( "|", map( $_ eq "boolean" ? "scalar|undef" : quotemeta( $_ ), @wanted_types ) );
            if ( $actual_type !~ m{\A(?:$wanted_types)\z}i ) {
                $actual_type = lc( $actual_type );
                $wanted_types = lc( join( " or ", map( "`$_'", @wanted_types ) ) );
                $error->( "Option `$opt' value type is `$actual_type' but expected to be $wanted_types" );
                next;
            }; # if
        }; # if
        # Check the value against the explicit list of allowed values, if any.
        if ( exists( $spec->{ $opt }->{ values } ) ) {
            my $values = $spec->{ $opt }->{ values };
            if ( not grep( $_ eq $val, @$values ) ) {
                $values = join( ", ", map( "`$_'", @$values ) );
                $error->( "Option `$opt' value is `$val' but expected to be one of $values" );
                next;
            }; # if
        }; # if
        $opts{ $opt } = $val;
    }; # while

    # Assign default values.
    foreach my $opt ( keys( %$spec ) ) {
        if ( not defined( $opts{ $opt } ) and exists( $spec->{ $opt }->{ default } ) ) {
            $opts{ $opt } = $spec->{ $opt }->{ default };
        }; # if
    }; # foreach $opt

    # If we found any errors, raise them.
    if ( @errors ) {
        die join( "", @errors );
    }; # if

    return %opts;

}; # sub validate
# =================================================================================================
# Get option helpers.
# =================================================================================================
=head2 Get option helpers.
=cut
# -------------------------------------------------------------------------------------------------
=head3 get_options
B<Synopsis:>
get_options( @arguments )
B<Description:>
It is very simple wrapper arounf Getopt::Long::GetOptions. It passes all arguments to GetOptions,
and add definitions for standard help options: --help, --doc, --verbose, and --quiet.
When GetOptions finishes, this subroutine checks exit code, if it is non-zero, standard error
message is issued and script terminated.
If --verbose or --quiet option is specified, C<tools.pm_verbose> environment variable is set.
It is the way to propagate verbose/quiet mode to callee Perl scripts.
=cut
# Thin wrapper around Getopt::Long::GetOptions that pre-defines the standard
# help/version/verbosity options, then appends the caller's own option specs
# (placed last so they can override the standard ones). On parse failure,
# cmdline_error() is called. --verbose/--quiet adjust the package-level
# $verbose counter and mirror it into the "tools.pm_verbose" environment
# variable so child Perl scripts inherit the setting; --with-timestamps does
# the same for $timestamps via "tools.pm_timestamps".
sub get_options {

    Getopt::Long::Configure( "no_ignore_case" );
    Getopt::Long::GetOptions(
        "h0|usage"        => sub { Pod::Usage::pod2usage( -exitval => 0, -verbose => 0 ); },
        "h1|h|help"       => sub { Pod::Usage::pod2usage( -exitval => 0, -verbose => 1 ); },
        "h2|doc|manual"   => sub { Pod::Usage::pod2usage( -exitval => 0, -verbose => 2 ); },
        "version"         => sub { print( "$tool version $main::VERSION\n" ); exit( 0 ); },
        "v|verbose"       => sub { ++ $verbose; $ENV{ "tools.pm_verbose" } = $verbose; },
        "quiet"           => sub { -- $verbose; $ENV{ "tools.pm_verbose" } = $verbose; },
        "with-timestamps" => sub { $timestamps = 1; $ENV{ "tools.pm_timestamps" } = $timestamps; },
        @_, # Caller arguments are at the end so caller options overrides standard.
    ) or cmdline_error();

}; # sub get_options
# =================================================================================================
# Print utilities.
# =================================================================================================
=pod
=head2 Print utilities.
Each of the print subroutines prepends each line of its output with the name of current script and
the type of information, for example:
info( "Writing file..." );
will print
<script>: (i): Writing file...
while
warning( "File does not exist!" );
will print
<script>: (!): File does not exist!
Here are exported items:
=cut
# -------------------------------------------------------------------------------------------------
# Format a multi-line message: each line gets an optional UTC timestamp,
# the tool name, and a one-character class prefix, e.g. "tool: (x) text".
# Prototype ($\@;$): prefix character, reference to the list of message
# parts, optional no-eol flag.
#
# Bug fix: the original computed $last_line but never used it -- the
# low-precedence "or" in its $eol expression made $no_eol suppress the
# newline on EVERY line instead of only the last one, contradicting the
# declared intent ('Do not append "\n" to the last line'). The newline is
# now suppressed only on the final line, and only when $no_eol is given.
sub _format_message($\@;$) {
    my $prefix  = shift( @_ );
    my $args    = shift( @_ );
    my $no_eol  = shift( @_ );  # Do not append "\n" to the last line.
    my $message = "";
    my $ts      = "";
    if ( $timestamps ) {
        # gmtime months are 0-based and years are offset from 1900.
        my ( $sec, $min, $hour, $day, $month, $year ) = gmtime();
        $month += 1;
        $year  += 1900;
        $ts = sprintf( "%04d-%02d-%02d %02d:%02d:%02d UTC: ", $year, $month, $day, $hour, $min, $sec );
    }; # if
    for my $i ( 1 .. @$args ) {
        # split( "\n", ... ) drops the separators, so lines never end in "\n".
        my @lines = split( "\n", $args->[ $i - 1 ] );
        for my $j ( 1 .. @lines ) {
            my $line      = $lines[ $j - 1 ];
            my $last_line = ( ( $i == @$args ) and ( $j == @lines ) );
            # Append a newline to every line except, when $no_eol is given,
            # the very last one.
            my $eol = ( ( $last_line and defined( $no_eol ) ) ? "" : "\n" );
            $message .= "$ts$tool: ($prefix) " . $line . $eol;
        }; # foreach $j
    }; # foreach $i
    return $message;
}; # sub _format_message
#--------------------------------------------------------------------------------------------------
=pod
=head3 $verbose
B<Synopsis:>
$verbose
B<Description:>
Package variable. It determines verbosity level, which affects C<warning()>, C<info()>, and
C<debug()> subroutines .
The variable gets its initial value from the C<tools.pm_verbose> environment variable if it exists.
If the environment variable does not exist, variable is set to 2.
Initial value may be overridden later directly or by C<get_options> function.
=cut
$verbose = exists( $ENV{ "tools.pm_verbose" } ) ? $ENV{ "tools.pm_verbose" } : 2;
#--------------------------------------------------------------------------------------------------
=pod
=head3 $timestamps
B<Synopsis:>
$timestamps
B<Description:>
Package variable. It determines whether C<debug()>, C<info()>, C<warning()>, C<runtime_error()>
subroutines print timestamps or not.
The variable gets its initial value from the C<tools.pm_timestamps> environment variable if it exists.
If the environment variable does not exist, variable is set to false.
Initial value may be overridden later directly or by C<get_options()> function.
=cut
$timestamps = exists( $ENV{ "tools.pm_timestamps" } ) ? $ENV{ "tools.pm_timestamps" } : 0;
# -------------------------------------------------------------------------------------------------
=pod
=head3 debug
B<Synopsis:>
debug( @messages )
B<Description:>
If verbosity level is 3 or higher, print debug information to the stderr, prepending it with "(#)"
prefix.
=cut
# Print a debug message ("#" prefix) to STDERR when verbosity is 3 or
# higher. STDOUT is flushed first so interleaved output stays ordered.
# Always returns a true value.
sub debug(@) {
    return 1 if $verbose < 3;
    STDOUT->flush();
    STDERR->print( _format_message( "#", @_ ) );
    return 1;
}; # sub debug
#--------------------------------------------------------------------------------------------------
=pod
=head3 info
B<Synopsis:>
info( @messages )
B<Description:>
If verbosity level is 2 or higher, print information to the stderr, prepending it with "(i)" prefix.
=cut
# Print an informational message ("i" prefix) to STDERR when verbosity is
# 2 or higher. STDOUT is flushed first so interleaved output stays ordered.
sub info(@) {
    return if $verbose < 2;
    STDOUT->flush();
    STDERR->print( _format_message( "i", @_ ) );
}; # sub info
#--------------------------------------------------------------------------------------------------
=head3 warning
B<Synopsis:>
warning( @messages )
B<Description:>
If verbosity level is 1 or higher, issue a warning, prepending it with "(!)" prefix.
=cut
# Issue a warning ("!" prefix) via warn() when verbosity is 1 or higher.
sub warning(@) {
    return if $verbose < 1;
    STDOUT->flush();
    warn( _format_message( "!", @_ ) );
}; # sub warning
# -------------------------------------------------------------------------------------------------
=head3 cmdline_error
B<Synopsis:>
cmdline_error( @message )
B<Description:>
Print error message and exit the program with status 2.
This function is intended to complain on command line errors, e. g. unknown
options, invalid arguments, etc.
=cut
# Report a command-line usage error and terminate via die(), always
# appending a hint about the --help option. The optional message receives
# a trailing newline when it lacks one.
sub cmdline_error(;$) {
    my ( $message ) = @_;
    if ( not defined( $message ) ) {
        $message = "";
    } elsif ( $message !~ m{\n\z} ) {
        $message .= "\n";
    }; # if
    STDOUT->flush();
    die $message . "Try --help option for more information.\n";
}; # sub cmdline_error
# -------------------------------------------------------------------------------------------------
=head3 runtime_error
B<Synopsis:>
runtime_error( @message )
B<Description:>
Print error message and exits the program with status 3.
This function is intended to complain on runtime errors, e. g.
directories which are not found, non-writable files, etc.
=cut
# Report a runtime error: flush STDOUT for ordered output, then die()
# with the fully formatted, "x"-prefixed message.
sub runtime_error(@) {
    STDOUT->flush();
    die( _format_message( "x", @_ ) );
}; # sub runtime_error
#--------------------------------------------------------------------------------------------------
=head3 question
B<Synopsis:>
question( $prompt; $answer, $choices )
B<Description:>
Print $prompt to the stderr, prepending it with "question:" prefix. Read a line from stdin, chomp
"\n" from the end; that is the answer.
If $answer is defined, it is treated as first user input.
If $choices is specified, it could be a regexp for validating user input, or a string. In latter
case it interpreted as list of characters, acceptable (case-insensitive) choices. If user enters
non-acceptable answer, question continue asking until answer is acceptable.
If $choices is not specified, any answer is acceptable.
In case of end-of-file (or Ctrl+D pressed by user), $answer is C<undef>.
B<Examples:>
my $answer;
question( "Save file [yn]? ", $answer, "yn" );
# We accepts only "y", "Y", "n", or "N".
question( "Press enter to continue or Ctrl+C to abort..." );
# We are not interested in answer value -- in case of Ctrl+C the script will be terminated,
# otherwise we continue execution.
question( "File name? ", $answer );
# Any answer is acceptable.
=cut
# Ask the user a question on stderr and read the answer from stdin.
# Prototype ($;\$$): prompt string, optional reference to the answer
# scalar, optional choices (string of acceptable characters or a
# precompiled regexp). Loops until the answer matches $choices (when
# given); on EOF the answer becomes undef.
sub question($;\$$) {
    my $prompt  = shift( @_ );
    my $answer  = shift( @_ );  # Reference to the answer variable, or undef.
    my $choices = shift( @_ );  # Acceptable answers: string or regexp, or undef.
    # A predefined $$answer is treated as the first user input.
    my $a = ( defined( $answer ) ? $$answer : undef );
    if ( ref( $choices ) eq "Regexp" ) {
        # It is already a regular expression, do nothing.
    } elsif ( defined( $choices ) ) {
        # Convert the string to a case-insensitive character-class regexp.
        $choices = qr/[@{ [ quotemeta( $choices ) ] }]/i;
    }; # if
    for ( ; ; ) {
        # Print the prompt without a trailing newline so input follows it.
        STDERR->print( _format_message( "?", @{ [ $prompt ] }, "no_eol" ) );
        STDERR->flush();
        if ( defined( $a ) ) {
            # Echo the predefined answer instead of reading from stdin.
            STDOUT->print( $a . "\n" );
        } else {
            $a = <STDIN>;
        }; # if
        if ( not defined( $a ) ) {
            # EOF (e.g. Ctrl+D): give up.
            last;
        }; # if
        chomp( $a );
        if ( not defined( $choices ) or ( $a =~ m/^$choices$/ ) ) {
            last;
        }; # if
        # Unacceptable answer: forget it and ask again.
        $a = undef;
    }; # forever
    if ( defined( $answer ) ) {
        $$answer = $a;
    }; # if
}; # sub question
# -------------------------------------------------------------------------------------------------
# Returns volume part of path.
# Return the volume component of a path (an empty string on POSIX systems).
sub get_vol($) {
    my ( $path ) = @_;
    return ( File::Spec->splitpath( $path ) )[ 0 ];
}; # sub get_vol
# Returns directory part of path.
# Return the directory component of a path, i.e. the canonical path with
# its last component removed.
sub get_dir($) {
    my $path = File::Spec->canonpath( shift( @_ ) );
    my ( $vol, $dir, undef ) = File::Spec->splitpath( $path );
    my @parts = File::Spec->splitdir( $dir );
    pop( @parts );  # Drop the empty entry left by the trailing separator.
    return File::Spec->catpath( $vol, File::Spec->catdir( @parts ), undef );
}; # sub get_dir
# Returns file part of path.
# Return the file-name component of a path.
sub get_file($) {
    my ( $path ) = @_;
    return ( File::Spec->splitpath( $path ) )[ 2 ];
}; # sub get_file
# Returns file part of path without last suffix.
# Return the file-name component of a path with its last suffix stripped.
sub get_name($) {
    my ( $path ) = @_;
    my $name = ( File::Spec->splitpath( $path ) )[ 2 ];
    $name =~ s{\.[^.]*\z}{};
    return $name;
}; # sub get_name
# Returns last suffix of file part of path.
# Return the last suffix (including the leading dot) of the file-name
# component of a path, or an empty string when there is none.
sub get_ext($) {
    my ( $path ) = @_;
    my $name = ( File::Spec->splitpath( $path ) )[ 2 ];
    return ( $name =~ m{(\.[^.]*)\z} ) ? $1 : "";
}; # sub get_ext
# Join a base path, zero or more intermediate directories, and a final
# file name into a single path.
sub cat_file(@) {
    my ( $base, @rest ) = @_;
    my $file = pop( @rest );
    my ( $vol, $dirs ) = File::Spec->splitpath( $base, "no_file" );
    $dirs = File::Spec->catdir( File::Spec->splitdir( $dirs ), @rest );
    return File::Spec->catpath( $vol, $dirs, $file );
}; # sub cat_file
# Join a base path and zero or more subdirectories into a directory path.
sub cat_dir(@) {
    my ( $base, @rest ) = @_;
    my ( $vol, $dirs ) = File::Spec->splitpath( $base, "no_file" );
    $dirs = File::Spec->catdir( File::Spec->splitdir( $dirs ), @rest );
    return File::Spec->catpath( $vol, $dirs, "" );
}; # sub cat_dir
# =================================================================================================
# File and directory manipulation subroutines.
# =================================================================================================
=head2 File and directory manipulation subroutines.
=over
=cut
# -------------------------------------------------------------------------------------------------
=item C<which( $file, @options )>
Searches for specified executable file in the (specified) directories.
Raises a runtime error if no executable file is found. Returns a full path of found executable(s).
Options:
=over
=item C<-all> =E<gt> I<bool>
Do not stop on the first found file. Note, that list of full paths is returned in this case.
=item C<-dirs> =E<gt> I<ref_to_array>
Specify directory list to search through. If option is not passed, PATH environment variable
is used for directory list.
=item C<-exec> =E<gt> I<bool>
Whether check for executable files or not. By default, C<which> searches executable files.
However, on Cygwin executable check never performed.
=back
Examples:
Look for "echo" in the directories specified in PATH:
my $echo = which( "echo" );
Look for all occurrences of "cp" in the PATH:
my @cps = which( "cp", -all => 1 );
Look for the first occurrence of "icc" in the specified directories:
my $icc = which( "icc", -dirs => [ ".", "/usr/local/bin", "/usr/bin", "/bin" ] );
Look for the C<omp_lib.f> file:
my @omp_lib = which( "omp_lib.f", -all => 1, -exec => 0, -dirs => [ @include ] );
=cut
# Search for $file in a list of directories (PATH by default).
# Options: -all (collect every match and return a list), -dirs (arrayref
# of directories to search instead of PATH), -exec (require
# executability; on by default, but the x-bit is never trusted on
# Cygwin). On Windows each PATHEXT suffix is tried in addition to the
# bare name. Returns the first match in default mode, or all matches
# with -all; undef/empty list when nothing is found.
sub which($@) {
    my $file = shift( @_ );
    my %opts = @_;
    check_opts( %opts, [ qw( -all -dirs -exec ) ] );
    if ( $opts{ -all } and not wantarray() ) {
        # -all only makes sense in list context; warn about the misuse.
        local $Carp::CarpLevel = 1;
        Carp::cluck( "`-all' option passed to `which' but list is not expected" );
    }; # if
    if ( not defined( $opts{ -exec } ) ) {
        $opts{ -exec } = 1;  # Check executability by default.
    }; # if
    my $dirs = ( exists( $opts{ -dirs } ) ? $opts{ -dirs } : [ File::Spec->path() ] );
    my @found;
    my @exts = ( "" );       # Always try the bare name first.
    if ( $^O eq "MSWin32" and $opts{ -exec } ) {
        if ( defined( $ENV{ PATHEXT } ) ) {
            push( @exts, split( ";", $ENV{ PATHEXT } ) );
        } else {
            # If PATHEXT does not exist, use default value.
            push( @exts, qw{ .COM .EXE .BAT .CMD } );
        }; # if
    }; # if
    loop:
    foreach my $dir ( @$dirs ) {
        foreach my $ext ( @exts ) {
            my $path = File::Spec->catfile( $dir, $file . $ext );
            if ( -e $path ) {
                # Executable bit is not reliable on Cygwin, do not check it.
                if ( not $opts{ -exec } or -x $path or $^O eq "cygwin" ) {
                    push( @found, $path );
                    if ( not $opts{ -all } ) {
                        last loop;  # First match is enough.
                    }; # if
                }; # if
            }; # if
        }; # foreach $ext
    }; # foreach $dir
    if ( not @found ) {
        # TBD: We need to introduce an option for conditional enabling this error.
        # runtime_error( "Could not find \"$file\" executable file in PATH." );
    }; # if
    if ( @found > 1 ) {
        # TBD: Issue a warning?
    }; # if
    if ( $opts{ -all } ) {
        return @found;
    } else {
        return $found[ 0 ];
    }; # if
}; # sub which
# -------------------------------------------------------------------------------------------------
=item C<abs_path( $path, $base )>
Return absolute path for an argument.
Most of the work is done by C<File::Spec->rel2abs()>. C<abs_path()> additionally collapses
C<dir1/../dir2> to C<dir2>.
It is not so naive and made intentionally. For example on Linux* OS in Bash if F<link/> is a symbolic
link to directory F<some_dir/>
$ cd link
$ cd ..
brings you back to F<link/>'s parent, not to parent of F<some_dir/>,
=cut
# Return an absolute path for $path, resolved against $base (or
# $ENV{PWD}). Unlike Cwd::abs_path, symlinks are NOT resolved; only
# textual "component/.." pairs are collapsed, which matches shell `cd ..`
# behavior for symlinked directories.
sub abs_path($;$) {
    my ( $path, $base ) = @_;
    $base = $ENV{ PWD } if not defined( $base );
    $path = File::Spec->rel2abs( $path, $base );
    my ( $vol, $dir, $file ) = File::Spec->splitpath( $path );
    # Repeatedly collapse one "name/.." pair (never "../..").
    1 while $dir =~ s{/(?!\.\.)[^/]*/\.\.(?:/|\z)}{/};
    return File::Spec->canonpath( File::Spec->catpath( $vol, $dir, $file ) );
}; # sub abs_path
# -------------------------------------------------------------------------------------------------
=item C<rel_path( $path, $base )>
Return relative path for an argument.
=cut
# Return $path expressed relative to $base (absolutizing it first).
sub rel_path($;$) {
    my ( $path, $base ) = @_;
    return File::Spec->abs2rel( abs_path( $path ), $base );
}; # sub rel_path
# -------------------------------------------------------------------------------------------------
=item C<real_path( $dir )>
Return real absolute path for an argument. In the result all relative components (F<.> and F<..>)
and U<symbolic links are resolved>.
In most cases it is not what you want. Consider using C<abs_path> first.
C<abs_path> function from B<Cwd> module works with directories only. This function works with files
as well. But, if file is a symbolic link, function does not resolve it (yet).
The function uses C<runtime_error> to raise an error if something wrong.
=cut
# Return the real absolute path of $orig_path with relative components
# and symlinked directories resolved via Cwd::abs_path. Since
# Cwd::abs_path handles directories only, the file-name part (when
# $orig_path is not a directory) is split off first and re-attached
# afterwards -- so a final symlinked FILE component is not resolved.
# Raises a runtime error when the path does not exist or resolution fails.
sub real_path($) {
    my $orig_path = shift( @_ );
    my $real_path;
    my $message = "";
    if ( not -e $orig_path ) {
        $message = "\"$orig_path\" does not exists";
    } else {
        # Cwd::abs_path does not work with files, so in this case we should handle file separately.
        my $file;
        if ( not -d $orig_path ) {
            # Split off the file part and resolve only the directory.
            ( my $vol, my $dir, $file ) = File::Spec->splitpath( File::Spec->rel2abs( $orig_path ) );
            $orig_path = File::Spec->catpath( $vol, $dir );
        }; # if
        {
            # Capture any warning Cwd::abs_path emits instead of printing it.
            local $SIG{ __WARN__ } = sub { $message = $_[ 0 ]; };
            $real_path = Cwd::abs_path( $orig_path );
        };
        if ( defined( $file ) ) {
            # Re-attach the file part to the resolved directory.
            $real_path = File::Spec->catfile( $real_path, $file );
        }; # if
    }; # if
    if ( not defined( $real_path ) or $message ne "" ) {
        # Strip Perl's "stat(...): ... at FILE line N" decoration from a
        # captured warning before reporting it.
        $message =~ s/^stat\(.*\): (.*)\s+at .*? line \d+\s*\z/$1/;
        runtime_error( "Could not find real path for \"$orig_path\"" . ( $message ne "" ? ": $message" : "" ) );
    }; # if
    return $real_path;
}; # sub real_path
# -------------------------------------------------------------------------------------------------
=item C<make_dir( $dir, @options )>
Make a directory.
This function makes a directory. If necessary, more than one level can be created.
If directory exists, warning issues (the script behavior depends on value of
C<-warning_level> option). If directory creation fails or C<$dir> exists but it is not a
directory, error issues.
Options:
=over
=item C<-mode>
The numeric mode for new directories, 0750 (rwxr-x---) by default.
=back
=cut
# Create directory $dir, including any missing parent directories.
# Named options (processed by validate()): parents (boolean, default 1),
# mode (numeric permissions, default 0777). Raises a runtime error when
# $dir exists but is not a directory, or when creation fails.
sub make_dir($@) {
    my $dir  = shift( @_ );
    my %opts =
        validate(
            params => \@_,
            spec => {
                parents => { type => "boolean", default => 1 },
                mode    => { type => "scalar",  default => 0777 },
            },
        );
    my $prefix = "Could not create directory \"$dir\"";
    if ( -e $dir ) {
        if ( not -d $dir ) {
            runtime_error( "$prefix: it exists, but not a directory." );
        }; # if
        return;    # Already present -- nothing to do.
    }; # if
    eval {
        File::Path::mkpath( $dir, 0, $opts{ mode } );
    }; # eval
    if ( $@ ) {
        # Strip the "at tools.pm line NNN" suffix mkpath appends to errors.
        $@ =~ s{\s+at (?:[a-zA-Z0-9 /_.]*/)?tools\.pm line \d+\s*}{};
        runtime_error( "$prefix: $@" );
    }; # if
    if ( not -d $dir ) {    # Paranoid double-check.
        runtime_error( "$prefix." );
    }; # if
}; # sub make_dir
# -------------------------------------------------------------------------------------------------
=item C<copy_dir( $src_dir, $dst_dir, @options )>
Copy directory recursively.
This function copies a directory recursively.
If source directory does not exist or not a directory, error issues.
Options:
=over
=item C<-overwrite>
Overwrite destination directory, if it exists.
=back
=cut
# Copy directory $src to $dst recursively (via "cp -R").
# Options: -overwrite => delete an existing destination directory first.
# Raises a runtime error when $src is missing or not a directory, or when
# $dst exists and is not a directory / may not be overwritten.
sub copy_dir($$@) {
    my ( $src, $dst, %opts ) = @_;
    my $prefix = "Could not copy directory \"$src\" to \"$dst\"";
    if ( not -e $src ) {
        runtime_error( "$prefix: \"$src\" does not exist." );
    }; # if
    if ( not -d $src ) {
        runtime_error( "$prefix: \"$src\" is not a directory." );
    }; # if
    if ( -e $dst ) {
        if ( not -d $dst ) {
            runtime_error( "$prefix: \"$dst\" is not a directory." );
        }; # if
        if ( not $opts{ -overwrite } ) {
            runtime_error( "$prefix: \"$dst\" already exists." );
        }; # if
        del_dir( $dst );
    }; # if
    execute( [ "cp", "-R", $src, $dst ] );
}; # sub copy_dir
# -------------------------------------------------------------------------------------------------
=item C<move_dir( $src_dir, $dst_dir, @options )>
Move directory.
Options:
=over
=item C<-overwrite>
Overwrite destination directory, if it exists.
=back
=cut
# Move directory $src to $dst (via "mv").
# Options: -overwrite => delete an existing destination directory first.
# Raises a runtime error when $src is missing or not a directory, or when
# $dst exists and is not a directory / may not be overwritten.
sub move_dir($$@) {
    my $src  = shift( @_ );
    my $dst  = shift( @_ );
    my %opts = @_;
    # Bug fix: the prefix previously said "copy directory", copied verbatim
    # from copy_dir(); errors now report the actual operation.
    my $prefix = "Could not move directory \"$src\" to \"$dst\"";
    if ( not -e $src ) {
        runtime_error( "$prefix: \"$src\" does not exist." );
    }; # if
    if ( not -d $src ) {
        runtime_error( "$prefix: \"$src\" is not a directory." );
    }; # if
    if ( -e $dst ) {
        if ( -d $dst ) {
            if ( $opts{ -overwrite } ) {
                del_dir( $dst );
            } else {
                runtime_error( "$prefix: \"$dst\" already exists." );
            }; # if
        } else {
            runtime_error( "$prefix: \"$dst\" is not a directory." );
        }; # if
    }; # if
    execute( [ "mv", $src, $dst ] );
}; # sub move_dir
# -------------------------------------------------------------------------------------------------
=item C<clean_dir( $dir, @options )>
Clean a directory: delete all the entries (recursively), but leave the directory.
Options:
=over
=item C<-force> => bool
If a directory is not writable, try to change permissions first, then clean it.
=item C<-skip> => regexp
Regexp. If a directory entry matches the regexp, it is skipped, not deleted. (As a consequence,
a directory containing skipped entries is not deleted.)
=back
=cut
# Forward declaration -- _clean_dir is recursive.
sub _clean_dir($);
# Recursive worker for clean_dir()/del_dir(): delete every entry of $dir,
# honoring the `skip' regexp and `force' flag supplied through the
# localized package hash %_clean_dir_opts. Returns the number of skipped
# entries; a non-zero result tells the caller the directory itself must
# be kept.
sub _clean_dir($) {
    our %_clean_dir_opts;
    my ( $dir ) = @_;
    my $skip    = $_clean_dir_opts{ skip }; # Regexp.
    my $skipped = 0;                        # Number of skipped files.
    my $prefix  = "Cleaning `$dir' failed:";
    my @stat = stat( $dir );
    my $mode = $stat[ 2 ];
    if ( not @stat ) {
        runtime_error( $prefix, "Cannot stat `$dir': $!" );
    }; # if
    # `_' below reuses the stat buffer filled by the stat() call above.
    if ( not -d _ ) {
        runtime_error( $prefix, "It is not a directory." );
    }; # if
    if ( not -w _ ) { # Directory is not writable.
        # Only attempt a permission fix when we own it AND `force' was given.
        if ( not -o _ or not $_clean_dir_opts{ force } ) {
            runtime_error( $prefix, "Directory is not writable." );
        }; # if
        # Directory is not writable but mine. Try to change permissions.
        chmod( $mode | S_IWUSR, $dir )
            or runtime_error( $prefix, "Cannot make directory writable: $!" );
    }; # if
    my $handle = IO::Dir->new( $dir ) or runtime_error( $prefix, "Cannot read directory: $!" );
    # no_upwards() drops the "." and ".." entries from the listing.
    my @entries = File::Spec->no_upwards( $handle->read() );
    $handle->close() or runtime_error( $prefix, "Cannot read directory: $!" );
    foreach my $entry ( @entries ) {
        my $path = cat_file( $dir, $entry );
        if ( defined( $skip ) and $entry =~ $skip ) {
            ++ $skipped;
        } else {
            if ( -l $path ) {
                # Delete symlinks themselves; never follow them.
                unlink( $path ) or runtime_error( $prefix, "Cannot delete symlink `$path': $!" );
            } else {
                stat( $path ) or runtime_error( $prefix, "Cannot stat `$path': $! " );
                if ( -f _ ) {
                    del_file( $path );
                } elsif ( -d _ ) {
                    # Recurse; remove the subdirectory only when nothing
                    # inside it was skipped.
                    my $rc = _clean_dir( $path );
                    if ( $rc == 0 ) {
                        rmdir( $path ) or runtime_error( $prefix, "Cannot delete directory `$path': $!" );
                    }; # if
                    $skipped += $rc;
                } else {
                    runtime_error( $prefix, "`$path' is neither a file nor a directory." );
                }; # if
            }; # if
        }; # if
    }; # foreach
    return $skipped;
}; # sub _clean_dir
# Delete the contents of $dir recursively, keeping the directory itself.
# Options: skip (regexpref) -- entries to preserve; force (boolean) --
# chmod unwritable owned directories before cleaning. Returns the number
# of skipped entries.
sub clean_dir($@) {
    my $dir = shift( @_ );
    our %_clean_dir_opts;
    # Options travel to the recursive helper through a localized package
    # hash rather than through its argument list.
    local %_clean_dir_opts =
        validate(
            params => \@_,
            spec => {
                skip  => { type => "regexpref" },
                force => { type => "boolean" },
            },
        );
    return _clean_dir( $dir );
}; # sub clean_dir
# -------------------------------------------------------------------------------------------------
=item C<del_dir( $dir, @options )>
Delete a directory recursively.
This function deletes a directory. If directory can not be deleted or it is not a directory, error
message issues (and script exists).
Options:
=over
=back
=cut
# Delete directory $dir recursively. Does nothing when $dir is absent;
# raises a runtime error when it is not a directory or cannot be removed.
# Options: force (boolean), passed down to the cleaning helper.
sub del_dir($@) {
    my $dir    = shift( @_ );
    my %opts   = @_;
    my $prefix = "Deleting directory \"$dir\" failed";
    our %_clean_dir_opts;
    # Options travel to _clean_dir via the localized package hash.
    local %_clean_dir_opts =
        validate(
            params => \@_,
            spec => {
                force => { type => "boolean" },
            },
        );
    return if not -e $dir;    # Nothing to do.
    if ( not -d $dir ) {
        runtime_error( "$prefix: it is not a directory." );
    }; # if
    _clean_dir( $dir );
    rmdir( $dir ) or runtime_error( "$prefix." );
}; # sub del_dir
# -------------------------------------------------------------------------------------------------
=item C<change_dir( $dir )>
Change current directory.
If any error occurred, error issues and script exits.
=cut
# Change the current working directory via Cwd::chdir (which also keeps
# $ENV{PWD} in sync); raise a runtime error on failure.
sub change_dir($) {
    my ( $dir ) = @_;
    if ( not Cwd::chdir( $dir ) ) {
        runtime_error( "Could not chdir to \"$dir\": $!" );
    }; # if
}; # sub change_dir
# -------------------------------------------------------------------------------------------------
=item C<copy_file( $src_file, $dst_file, @options )>
Copy file.
This function copies a file. If source does not exist or is not a file, error issues.
Options:
=over
=item C<-overwrite>
Overwrite destination file, if it exists.
=back
=cut
# Copy file $src to $dst, preserving permissions on Linux.
# Options: -overwrite => replace an existing destination file.
# Raises a runtime error when $src is missing / not a plain file, when
# the destination exists (without -overwrite) or is not a plain file, or
# when the copy itself fails.
sub copy_file($$@) {
    my ( $src, $dst, %opts ) = @_;
    my $prefix = "Could not copy file \"$src\" to \"$dst\"";
    if ( not -e $src ) {
        runtime_error( "$prefix: \"$src\" does not exist." );
    }; # if
    if ( not -f $src ) {
        runtime_error( "$prefix: \"$src\" is not a file." );
    }; # if
    if ( -e $dst ) {
        if ( not -f $dst ) {
            runtime_error( "$prefix: \"$dst\" is not a file." );
        }; # if
        if ( not $opts{ -overwrite } ) {
            runtime_error( "$prefix: \"$dst\" already exists." );
        }; # if
        del_file( $dst );
    }; # if
    File::Copy::copy( $src, $dst ) or runtime_error( "$prefix: $!" );
    # On Windows* OS File::Copy preserves file attributes, but on Linux* OS
    # it doesn't, so replicate the source mode manually there.
    if ( $^O =~ m/^linux\z/ ) {
        my $mode = ( stat( $src ) )[ 2 ]
            or runtime_error( "$prefix: cannot get status info for source file." );
        chmod( $mode, $dst )
            or runtime_error( "$prefix: cannot change mode of destination file." );
    }; # if
}; # sub copy_file
# -------------------------------------------------------------------------------------------------
# Move (rename) file $src to $dst via File::Copy::move.
# Options: -overwrite => allow replacing an existing destination file.
# Raises a runtime error when $src is missing / not a plain file, or when
# $dst exists and is not a plain file / may not be overwritten.
sub move_file($$@) {
    my ( $src, $dst, %opts ) = @_;
    my $prefix = "Could not move file \"$src\" to \"$dst\"";
    check_opts( %opts, [ qw( -overwrite ) ] );
    if ( not -e $src ) {
        runtime_error( "$prefix: \"$src\" does not exist." );
    }; # if
    if ( not -f $src ) {
        runtime_error( "$prefix: \"$src\" is not a file." );
    }; # if
    if ( -e $dst ) {
        if ( not -f $dst ) {
            runtime_error( "$prefix: \"$dst\" is not a file." );
        } elsif ( not $opts{ -overwrite } ) {
            runtime_error( "$prefix: \"$dst\" already exists." );
        }; # if
        # With -overwrite, File::Copy::move replaces the destination itself.
    }; # if
    File::Copy::move( $src, $dst ) or runtime_error( "$prefix: $!" );
}; # sub move_file
# -------------------------------------------------------------------------------------------------
# Delete one file, or each file in an array reference. A missing file
# (ENOENT) is not an error -- the goal is already achieved; any other
# unlink failure raises a runtime error.
sub del_file($) {
    my ( $files ) = @_;
    $files = [ $files ] if ref( $files ) eq "";
    foreach my $path ( @$files ) {
        debug( "Deleting file `$path'..." );
        # $! is examined only when unlink failed ("and" short-circuits).
        if ( not unlink( $path ) and $! != ENOENT ) {
            runtime_error( "Deleting file `$path' failed: $!" );
        }; # if
    }; # foreach $path
}; # sub del_file
# -------------------------------------------------------------------------------------------------
=back
=cut
# =================================================================================================
# File I/O subroutines.
# =================================================================================================
=head2 File I/O subroutines.
=cut
#--------------------------------------------------------------------------------------------------
=head3 read_file
B<Synopsis:>
read_file( $file, @options )
B<Description:>
Read file and return its content. In scalar context function returns a scalar, in list context
function returns list of lines.
Note: If the last line of the file does not end with a newline, the function will append one.
B<Arguments:>
=over
=item B<$file>
A name or handle of file to read from.
=back
B<Options:>
=over
=item B<-binary>
If true, file treats as a binary file: no newline conversion, no truncating trailing space, no
newline removing performed. Entire file returned as a scalar.
=item B<-bulk>
This option is allowed only in binary mode. Option's value should be a reference to a scalar.
If option present, file content placed to pointee scalar and function returns true (1).
=item B<-chomp>
If true, newline characters are removed from file content. By default newline characters remain.
This option is not applicable in binary mode.
=item B<-keep_trailing_space>
If true, trailing spaces remain at the ends of lines. By default all trailing spaces are removed.
This option is not applicable in binary mode.
=back
B<Examples:>
Return file as single line, remove trailing spaces.
my $bulk = read_file( "message.txt" );
Return file as list of lines with removed trailing space and
newline characters.
my @bulk = read_file( "message.txt", -chomp => 1 );
Read a binary file:
my $bulk = read_file( "message.txt", -binary => 1 );
Read a big binary file:
my $bulk;
read_file( "big_binary_file", -binary => 1, -bulk => \$bulk );
Read from standard input:
my @bulk = read_file( \*STDIN );
=cut
# Read $file (a name or an open handle) and return its content: a list of
# lines in list context, one string in scalar context. See the POD above
# for the -binary, -bulk, -chomp, -keep_trailing_space, -layer and -error
# options. Files ending in ".gz" are transparently decompressed in text
# mode.
sub read_file($@) {
    my $file = shift( @_ ); # The name or handle of file to read from.
    my %opts = @_; # Options.
    my $name;
    my $handle;
    my @bulk;
    # Failure handler; may be replaced below according to -error.
    my $error = \&runtime_error;
    my @binopts = qw( -binary -error -bulk ); # Options available in binary mode.
    my @txtopts = qw( -binary -error -keep_trailing_space -chomp -layer ); # Options available in text (non-binary) mode.
    check_opts( %opts, [ @binopts, @txtopts ] );
    # Reject options that do not belong to the selected mode.
    if ( $opts{ -binary } ) {
        check_opts( %opts, [ @binopts ], "these options cannot be used with -binary" );
    } else {
        check_opts( %opts, [ @txtopts ], "these options cannot be used without -binary" );
    }; # if
    if ( not exists( $opts{ -error } ) ) {
        $opts{ -error } = "error";
    }; # if
    # -error selects failure behavior: die (default), warn, ignore, or
    # collect messages into a caller-supplied array.
    if ( $opts{ -error } eq "warning" ) {
        $error = \&warning;
    } elsif( $opts{ -error } eq "ignore" ) {
        $error = sub {};
    } elsif ( ref( $opts{ -error } ) eq "ARRAY" ) {
        $error = sub { push( @{ $opts{ -error } }, $_[ 0 ] ); };
    }; # if
    if ( ( ref( $file ) eq "GLOB" ) or UNIVERSAL::isa( $file, "IO::Handle" ) ) {
        # The caller passed an already open handle; no name is known.
        $name = "unknown";
        $handle = $file;
    } else {
        $name = $file;
        # Transparently decompress .gz files in text mode.
        if ( get_ext( $file ) eq ".gz" and not $opts{ -binary } ) {
            $handle = IO::Zlib->new( $name, "rb" );
        } else {
            $handle = IO::File->new( $name, "r" );
        }; # if
        if ( not defined( $handle ) ) {
            $error->( "File \"$name\" could not be opened for input: $!" );
        }; # if
    }; # if
    if ( defined( $handle ) ) {
        if ( $opts{ -binary } ) {
            binmode( $handle );
            local $/ = undef; # Set input record separator to undef to read entire file as one line.
            if ( exists( $opts{ -bulk } ) ) {
                # Place content directly into the caller's scalar.
                ${ $opts{ -bulk } } = $handle->getline();
            } else {
                $bulk[ 0 ] = $handle->getline();
            }; # if
        } else {
            if ( defined( $opts{ -layer } ) ) {
                binmode( $handle, $opts{ -layer } );
            }; # if
            @bulk = $handle->getlines();
            # Special trick for UTF-8 files: Delete BOM, if any.
            if ( defined( $opts{ -layer } ) and $opts{ -layer } eq ":utf8" ) {
                if ( substr( $bulk[ 0 ], 0, 1 ) eq "\x{FEFF}" ) {
                    substr( $bulk[ 0 ], 0, 1 ) = "";
                }; # if
            }; # if
        }; # if
        $handle->close()
            or $error->( "File \"$name\" could not be closed after input: $!" );
    } else {
        # Open failed but the error handler did not die: yield empty content.
        if ( $opts{ -binary } and exists( $opts{ -bulk } ) ) {
            ${ $opts{ -bulk } } = "";
        }; # if
    }; # if
    if ( $opts{ -binary } ) {
        if ( exists( $opts{ -bulk } ) ) {
            return 1;
        } else {
            return $bulk[ 0 ];
        }; # if
    } else {
        # Guarantee the last line is newline-terminated.
        if ( ( @bulk > 0 ) and ( substr( $bulk[ -1 ], -1, 1 ) ne "\n" ) ) {
            $bulk[ -1 ] .= "\n";
        }; # if
        if ( not $opts{ -keep_trailing_space } ) {
            map( $_ =~ s/\s+\n\z/\n/, @bulk );
        }; # if
        if ( $opts{ -chomp } ) {
            chomp( @bulk );
        }; # if
        if ( wantarray() ) {
            return @bulk;
        } else {
            return join( "", @bulk );
        }; # if
    }; # if
}; # sub read_file
#--------------------------------------------------------------------------------------------------
=head3 write_file
B<Synopsis:>
write_file( $file, $bulk, @options )
B<Description:>
Write file.
B<Arguments:>
=over
=item B<$file>
The name or handle of file to write to.
=item B<$bulk>
Bulk to write to a file. Can be a scalar, or a reference to scalar or an array.
=back
B<Options:>
=over
=item B<-backup>
If true, create a backup copy of file overwritten. Backup copy is placed into the same directory.
The name of backup copy is the same as the name of file with `~' appended. By default backup copy
is not created.
=item B<-append>
If true, the text will be added to existing file.
=back
B<Examples:>
write_file( "message.txt", \$bulk );
# Write file, take content from a scalar.
write_file( "message.txt", \@bulk, -backup => 1 );
# Write file, take content from an array, create a backup copy.
=cut
# Write $bulk (a scalar, a scalar ref, or an array ref) to $file (a name
# or an open handle). Options: -append (open in append mode), -backup
# (save "$file~" first), -binary, -layer. In text mode a final newline is
# appended to any piece that lacks one.
sub write_file($$@) {
    my $file = shift( @_ );  # The name or handle of file to write to.
    my $bulk = shift( @_ );  # Content: scalar, or ref to scalar/array.
    my %opts = @_;           # Options.
    check_opts( %opts, [ qw( -append -backup -binary -layer ) ] );
    my $mode = ( $opts{ -append } ? "a" : "w" );
    my ( $name, $handle );
    if ( ( ref( $file ) eq "GLOB" ) or UNIVERSAL::isa( $file, "IO::Handle" ) ) {
        # Caller supplied an open handle; no file name is known.
        $name   = "unknown";
        $handle = $file;
    } else {
        $name = $file;
        if ( $opts{ -backup } and ( -f $name ) ) {
            copy_file( $name, $name . "~", -overwrite => 1 );
        }; # if
        $handle = IO::File->new( $name, $mode )
            or runtime_error( "File \"$name\" could not be opened for output: $!" );
    }; # if
    if ( $opts{ -binary } ) {
        binmode( $handle );
    } elsif ( $opts{ -layer } ) {
        binmode( $handle, $opts{ -layer } );
    }; # if
    # Print one scalar, appending "\n" in text mode when it is missing.
    my $emit =
        sub {
            my ( $text ) = @_;
            return if not defined( $text );
            $handle->print( $text );
            if ( not $opts{ -binary } and ( substr( $text, -1 ) ne "\n" ) ) {
                $handle->print( "\n" );
            }; # if
        }; # sub
    if ( ref( $bulk ) eq "" ) {
        $emit->( $bulk );
    } elsif ( ref( $bulk ) eq "SCALAR" ) {
        $emit->( $$bulk );
    } elsif ( ref( $bulk ) eq "ARRAY" ) {
        $emit->( $_ ) foreach @$bulk;
    } else {
        Carp::croak( "write_file: \$bulk must be a scalar or reference to (scalar or array)" );
    }; # if
    $handle->close()
        or runtime_error( "File \"$name\" could not be closed after output: $!" );
}; # sub write_file
#--------------------------------------------------------------------------------------------------
=cut
# =================================================================================================
# Execution subroutines.
# =================================================================================================
=head2 Execution subroutines.
=over
=cut
#--------------------------------------------------------------------------------------------------
# Prepare one stream redirection: dup the original handle, re-open it
# onto a file / temp file / caller-supplied handle as described by
# $arg->{ redir }, and stash everything needed to undo the redirection
# later in _post(). Returns 0 immediately when no redirection was asked.
sub _pre {
    my $arg = shift( @_ );
    # If redirection is not required, exit.
    if ( not exists( $arg->{ redir } ) ) {
        return 0;
    }; # if
    # Input parameters.
    my $mode   = $arg->{ mode };   # Mode, "<" (input ) or ">" (output).
    my $handle = $arg->{ handle }; # Handle to manipulate.
    my $redir  = $arg->{ redir };  # Data, a file name if a scalar, or file contents, if a reference.
    # Output parameters.
    my $save_handle;
    my $temp_handle;
    my $temp_name;
    # Save original handle (by duping it).
    $save_handle = Symbol::gensym();
    $handle->flush();
    open( $save_handle, $mode . "&" . $handle->fileno() )
        or die( "Cannot dup filehandle: $!" );
    # Prepare a file to IO.
    if ( UNIVERSAL::isa( $redir, "IO::Handle" ) or ( ref( $redir ) eq "GLOB" ) ) {
        # $redir is reference to an object of IO::Handle class (or its descendant):
        # use it directly.
        $temp_handle = $redir;
    } elsif ( ref( $redir ) ) {
        # $redir is a reference to content to be read/written.
        # Prepare temp file.
        ( $temp_handle, $temp_name ) =
            File::Temp::tempfile(
                "$tool.XXXXXXXX",
                DIR    => File::Spec->tmpdir(),
                SUFFIX => ".tmp",
                UNLINK => 1
            );
        if ( not defined( $temp_handle ) ) {
            runtime_error( "Could not create temp file." );
        }; # if
        if ( $mode eq "<" ) {
            # It is a file to be read by child, prepare file content to be
            # read and rewind to the beginning.
            $temp_handle->print( ref( $redir ) eq "SCALAR" ? ${ $redir } : @{ $redir } );
            $temp_handle->flush();
            seek( $temp_handle, 0, 0 );
            # Unfortunately, I could not use OO interface to seek.
            # ActivePerl 5.6.1 complains on both forms:
            #     $temp_handle->seek( 0 );   # As declared in IO::Seekable.
            #     $temp_handle->setpos( 0 ); # As described in documentation.
        } elsif ( $mode eq ">" ) {
            # It is a file for output. Clear output variable.
            if ( ref( $redir ) eq "SCALAR" ) {
                ${ $redir } = "";
            } else {
                @{ $redir } = ();
            }; # if
        }; # if
    } else {
        # $redir is a name of file to be read/written.
        # Just open file; undef means the null device.
        if ( defined( $redir ) ) {
            $temp_name = $redir;
        } else {
            $temp_name = File::Spec->devnull();
        }; # if
        $temp_handle = IO::File->new( $temp_name, $mode )
            or runtime_error( "file \"$temp_name\" could not be opened for " . ( $mode eq "<" ? "input" : "output" ) . ": $!" );
    }; # if
    # Redirect handle to temp file.
    open( $handle, $mode . "&" . $temp_handle->fileno() )
        or die( "Cannot dup filehandle: $!" );
    # Save output parameters so _post() can restore the original handle.
    $arg->{ save_handle } = $save_handle;
    $arg->{ temp_handle } = $temp_handle;
    $arg->{ temp_name }   = $temp_name;
}; # sub _pre
# _post( $stream_descriptor )
#
# Undo what _pre() did for one standard stream after system() returns.  Closes the
# redirected handle, reads captured output back into the caller's scalar/array (for
# output streams redirected into a variable), restores the original handle from the
# saved dup, and removes the bookkeeping keys from the descriptor.  Returns 0
# immediately if _pre() did not redirect this stream.
sub _post {
    my $arg = shift( @_ );
    # Input parameters.
    my $mode   = $arg->{ mode };      # Mode, "<" or ">".
    my $handle = $arg->{ handle };    # Handle to save and set.
    my $redir  = $arg->{ redir };     # Data, a file name if a scalar, or file contents, if a reference.
    # Parameters saved during preprocessing.
    my $save_handle = $arg->{ save_handle };
    my $temp_handle = $arg->{ temp_handle };
    my $temp_name   = $arg->{ temp_name };
    # If no handle was saved, exit.
    if ( not $save_handle ) {
        return 0;
    }; # if
    # Close handle.
    $handle->close()
        or die( "$!" );
    # Read the content of temp file, if necessary, and close temp file.
    if ( ( $mode ne "<" ) and ref( $redir ) ) {
        $temp_handle->flush();
        seek( $temp_handle, 0, 0 );
        if ( $^O =~ m/MSWin/ ) {
            # Child output on Windows may have CRLF line ends; normalize while reading back.
            binmode( $temp_handle, ":crlf" );
        }; # if
        if ( ref( $redir ) eq "SCALAR" ) {
            # Append (not assign): mode may be ">>" and caller may want accumulated output.
            ${ $redir } .= join( "", $temp_handle->getlines() );
        } elsif ( ref( $redir ) eq "ARRAY" ) {
            push( @{ $redir }, $temp_handle->getlines() );
        }; # if
    }; # if
    if ( not UNIVERSAL::isa( $redir, "IO::Handle" ) ) {
        # The temp handle is ours (not a caller-supplied handle), so close it.
        $temp_handle->close()
            or die( "$!" );
    }; # if
    # Restore handle to original value.
    $save_handle->flush();
    open( $handle, $mode . "&" . $save_handle->fileno() )
        or die( "Cannot dup filehandle: $!" );
    # Close save handle.
    $save_handle->close()
        or die( "$!" );
    # Delete parameters saved during preprocessing.
    delete( $arg->{ save_handle } );
    delete( $arg->{ temp_handle } );
    delete( $arg->{ temp_name } );
}; # sub _post
#--------------------------------------------------------------------------------------------------
=item C<execute( [ @command ], @options )>
Execute specified program or shell command.
Program is specified by reference to an array, that array is passed to C<system()> function which
executes the command. See L<perlfunc> for details how C<system()> interprets various forms of
C<@command>.
By default, in case of any error an error message is issued and the script is terminated (by runtime_error()).
Function returns an exit code of program.
Alternatively, the function may return the exit status of the program (see C<-ignore_status>) or the signal
(see C<-ignore_signal>) so the caller may analyze it and continue execution.
Options:
=over
=item C<-stdin>
Redirect stdin of program. The value of option can be:
=over
=item C<undef>
Stdin of child is attached to null device.
=item a string
Stdin of child is attached to a file with name specified by option.
=item a reference to a scalar
A dereferenced scalar is written to a temp file, and child's stdin is attached to that file.
=item a reference to an array
A dereferenced array is written to a temp file, and child's stdin is attached to that file.
=back
=item C<-stdout>
Redirect stdout. Possible values are the same as for C<-stdin> option. The only difference is
reference specifies a variable receiving program's output.
=item C<-stderr>
It similar to C<-stdout>, but redirects stderr. There is only one additional value:
=over
=item an empty string
means that stderr should be redirected to the same place where stdout is redirected to.
=back
=item C<-append>
Redirected stream will not overwrite previous content of file (or variable).
Note, that option affects both stdout and stderr.
=item C<-ignore_status>
By default, the subroutine raises an error and exits the script if the program returns a non-zero exit status. If
this option is true, no error is raised. Instead, the status is returned as the function result (and $@ is
set to the error message).
=item C<-ignore_signal>
By default, the subroutine raises an error and exits the script if the program dies with a signal. If
this option is true, no error is raised in such a case. Instead, the signal number is returned (as a
negative value), and the error message is placed in the C<$@> variable.
If command is not even started, -256 is returned.
=back
Examples:
execute( [ "cmd.exe", "/c", "dir" ] );
# Execute NT shell with specified options, no redirections are
# made.
my $output;
execute( [ "cvs", "-n", "-q", "update", "." ], -stdout => \$output );
# Execute "cvs -n -q update ." command, output is saved
# in $output variable.
my @output;
execute( [ qw( cvs -n -q update . ) ], -stdout => \@output, -stderr => undef );
# Execute specified command, output is saved in @output
# variable, stderr stream is redirected to null device
# (/dev/null in Linux* OS and nul in Windows* OS).
=cut
# execute( \@command, %options )
#
# Run @command via system() with optional redirection of stdin/stdout/stderr (see the
# POD above for the full option list).  Returns 0 on success; with -ignore_status /
# -ignore_signal it returns the non-zero exit status / negative signal number instead
# of dying, leaving the error message in $@.
sub execute($@) {
    # !!! Add something to complain on unknown options...
    my $command = shift( @_ );
    my %opts = @_;
    # NOTE(review): $prefix is built but never used below -- candidate for removal.
    my $prefix = "Could not execute $command->[ 0 ]";
    check_opts( %opts, [ qw( -stdin -stdout -stderr -append -ignore_status -ignore_signal ) ] );
    if ( ref( $command ) ne "ARRAY" ) {
        Carp::croak( "execute: $command must be a reference to array" );
    }; # if
    # Stream descriptors consumed by _pre()/_post().
    my $stdin  = { handle => \*STDIN,  mode => "<" };
    my $stdout = { handle => \*STDOUT, mode => ">" };
    my $stderr = { handle => \*STDERR, mode => ">" };
    my $streams = {
        stdin  => $stdin,
        stdout => $stdout,
        stderr => $stderr
    }; # $streams
    # Copy caller-supplied redirections into the descriptors; -append turns ">" into ">>".
    for my $stream ( qw( stdin stdout stderr ) ) {
        if ( exists( $opts{ "-$stream" } ) ) {
            if ( ref( $opts{ "-$stream" } ) !~ m/\A(|SCALAR|ARRAY)\z/ ) {
                Carp::croak( "execute: -$stream option: must have value of scalar, or reference to (scalar or array)." );
            }; # if
            $streams->{ $stream }->{ redir } = $opts{ "-$stream" };
        }; # if
        if ( $opts{ -append } and ( $streams->{ $stream }->{ mode } ) eq ">" ) {
            $streams->{ $stream }->{ mode } = ">>";
        }; # if
    }; # foreach $stream
    _pre( $stdin );
    _pre( $stdout );
    # An empty string for -stderr means "merge stderr into stdout": point stderr at
    # whatever stdout was redirected to (or at stdout's own handle otherwise).
    # Must run after _pre( $stdout ) so $stdout->{ temp_handle } is populated.
    if ( defined( $stderr->{ redir } ) and not ref( $stderr->{ redir } ) and ( $stderr->{ redir } eq "" ) ) {
        if ( exists( $stdout->{ redir } ) ) {
            $stderr->{ redir } = $stdout->{ temp_handle };
        } else {
            # NOTE(review): scalar deref of a glob ref looks suspicious here -- presumably
            # the intent is to reuse the (unredirected) stdout handle; verify this branch.
            $stderr->{ redir } = ${ $stdout->{ handle } };
        }; # if
    }; # if
    _pre( $stderr );
    my $rc = system( @$command );
    # Save errno and child status immediately: the _post() calls below may clobber $! / $?.
    my $errno = $!;
    my $child = $?;
    # Restore streams in reverse order of redirection.
    _post( $stderr );
    _post( $stdout );
    _post( $stdin );
    my $exit = 0;
    my $signal_num = $child & 127;      # Low 7 bits of $?: terminating signal, if any.
    my $exit_status = $child >> 8;      # High byte of $?: program exit status.
    $@ = "";
    if ( $rc == -1 ) {
        # system() could not start the command at all.
        $@ = "\"$command->[ 0 ]\" failed: $errno";
        $exit = -256;
        if ( not $opts{ -ignore_signal } ) {
            runtime_error( $@ );
        }; # if
    } elsif ( $signal_num != 0 ) {
        # Command was killed by a signal; report it as a negative exit value.
        $@ = "\"$command->[ 0 ]\" failed due to signal $signal_num.";
        $exit = - $signal_num;
        if ( not $opts{ -ignore_signal } ) {
            runtime_error( $@ );
        }; # if
    } elsif ( $exit_status != 0 ) {
        # Command ran to completion but reported failure.
        $@ = "\"$command->[ 0 ]\" returned non-zero status $exit_status.";
        $exit = $exit_status;
        if ( not $opts{ -ignore_status } ) {
            runtime_error( $@ );
        }; # if
    }; # if
    return $exit;
}; # sub execute
#--------------------------------------------------------------------------------------------------
=item C<backticks( [ @command ], @options )>
Run specified program or shell command and return output.
In scalar context entire output is returned in a single string. In list context list of strings
is returned. Function issues an error and exits script if any error occurs.
=cut
# backticks( \@command, %options )
#
# Run the given command and return its standard output, like qx// but without a
# shell.  In list context the individual output lines are returned; in scalar
# context they are joined into one string.  The -chomp option strips trailing
# newlines from the collected lines.  Any failure is fatal (handled by execute()).
sub backticks($@) {
    my $command = shift( @_ );
    my %opts    = @_;
    check_opts( %opts, [ qw( -chomp ) ] );
    my @lines;
    execute( $command, -stdout => \@lines );
    chomp( @lines ) if $opts{ -chomp };
    return wantarray() ? @lines : join( "", @lines );
}; # sub backticks
#--------------------------------------------------------------------------------------------------
# pad( $string, $width, $filler )
#
# Return $string extended on the right with repetitions of $filler until it is
# exactly $width characters long.  A trailing partial copy of $filler is used when
# the remaining gap is not a multiple of length($filler).  Strings already at or
# beyond $width are returned unchanged (never truncated).
sub pad($$$) {
    my ( $string, $width, $filler ) = @_;
    my $have = length( $string );
    if ( $have < $width ) {
        my $gap   = $width - $have;            # Characters still needed.
        my $flen  = length( $filler );
        my $whole = int( $gap / $flen );       # Full copies of the filler.
        my $part  = $gap - $whole * $flen;     # Length of the final partial copy.
        $string .= ( $filler x $whole ) . substr( $filler, 0, $part );
    }; # if
    return $string;
}; # sub pad
# --------------------------------------------------------------------------------------------------
=back
=cut
#--------------------------------------------------------------------------------------------------
return 1;
#--------------------------------------------------------------------------------------------------
=cut
# End of file.
| endlessm/chromium-browser | third_party/llvm/openmp/runtime/tools/lib/tools.pm | Perl | bsd-3-clause | 57,582 |
package AsposePdfCloud::Object::DocumentProperties;
require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;
use base "AsposePdfCloud::Object::BaseObject";
#
# Model class holding a list of PDF document properties plus related resource links.
#
# NOTE: This class is auto generated by the swagger code generator program.
# Do not edit the class manually.
#
# Swagger type declaration for each attribute.
my $swagger_types = {
    'List'  => 'ARRAY[DocumentProperty]',
    'Links' => 'ARRAY[Link]'
};
# Mapping from Perl attribute name to wire (JSON) field name.
my $attribute_map = {
    'List'  => 'List',
    'Links' => 'Links'
};
# Construct a new object; accepts the named arguments 'List' and 'Links'.
sub new {
    my ( $class, %args ) = @_;
    my $self = {
        'List'  => $args{'List'},
        'Links' => $args{'Links'},
    };
    bless $self, $class;
    return $self;
}
# Return the swagger type of each attribute.
sub get_swagger_types {
    return $swagger_types;
}
# Return the attribute name mapping.
sub get_attribute_map {
    return $attribute_map;
}
1;
| asposepdf/Aspose_Pdf_Cloud | SDKs/Aspose.Pdf-Cloud-SDK-for-Perl/lib/AsposePdfCloud/Object/DocumentProperties.pm | Perl | mit | 949 |
=head1 NAME
XML::LibXML::ErrNo - Structured Errors
This module is based on the F<xmlerror.h> libxml2 C header file. It defines symbolic
constants for all libxml2 error codes. Currently libxml2 uses over 480
different error codes. See also L<XML::LibXML::Error>.
=head1 AUTHORS
Matt Sergeant,
Christian Glahn,
Petr Pajas
=head1 VERSION
2.0014
=head1 COPYRIGHT
2001-2007, AxKit.com Ltd.
2002-2006, Christian Glahn.
2006-2009, Petr Pajas.
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/XML/LibXML/ErrNo.pod | Perl | mit | 442 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/rnClxBLdxJ/australasia. Olson data version 2013a
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Australia::Currie;
{
$DateTime::TimeZone::Australia::Currie::VERSION = '1.57';
}
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Australia::Currie::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY, # utc_start
59789888672, # utc_end 1895-08-31 14:24:32 (Sat)
DateTime::TimeZone::NEG_INFINITY, # local_start
59789923200, # local_end 1895-09-01 00:00:00 (Sun)
34528,
0,
'LMT',
],
[
59789888672, # utc_start 1895-08-31 14:24:32 (Sat)
60455174400, # utc_end 1916-09-30 16:00:00 (Sat)
59789924672, # local_start 1895-09-01 00:24:32 (Sun)
60455210400, # local_end 1916-10-01 02:00:00 (Sun)
36000,
0,
'EST',
],
[
60455174400, # utc_start 1916-09-30 16:00:00 (Sat)
60465790800, # utc_end 1917-01-31 13:00:00 (Wed)
60455214000, # local_start 1916-10-01 03:00:00 (Sun)
60465830400, # local_end 1917-02-01 00:00:00 (Thu)
39600,
1,
'EST',
],
[
60465790800, # utc_start 1917-01-31 13:00:00 (Wed)
60470290800, # utc_end 1917-03-24 15:00:00 (Sat)
60465830400, # local_start 1917-02-01 00:00:00 (Thu)
60470330400, # local_end 1917-03-25 02:00:00 (Sun)
39600,
1,
'EST',
],
[
60470290800, # utc_start 1917-03-24 15:00:00 (Sat)
61252041600, # utc_end 1941-12-31 16:00:00 (Wed)
60470326800, # local_start 1917-03-25 01:00:00 (Sun)
61252077600, # local_end 1942-01-01 02:00:00 (Thu)
36000,
0,
'EST',
],
[
61252041600, # utc_start 1941-12-31 16:00:00 (Wed)
61259554800, # utc_end 1942-03-28 15:00:00 (Sat)
61252081200, # local_start 1942-01-01 03:00:00 (Thu)
61259594400, # local_end 1942-03-29 02:00:00 (Sun)
39600,
1,
'EST',
],
[
61259554800, # utc_start 1942-03-28 15:00:00 (Sat)
61275283200, # utc_end 1942-09-26 16:00:00 (Sat)
61259590800, # local_start 1942-03-29 01:00:00 (Sun)
61275319200, # local_end 1942-09-27 02:00:00 (Sun)
36000,
0,
'EST',
],
[
61275283200, # utc_start 1942-09-26 16:00:00 (Sat)
61291004400, # utc_end 1943-03-27 15:00:00 (Sat)
61275322800, # local_start 1942-09-27 03:00:00 (Sun)
61291044000, # local_end 1943-03-28 02:00:00 (Sun)
39600,
1,
'EST',
],
[
61291004400, # utc_start 1943-03-27 15:00:00 (Sat)
61307337600, # utc_end 1943-10-02 16:00:00 (Sat)
61291040400, # local_start 1943-03-28 01:00:00 (Sun)
61307373600, # local_end 1943-10-03 02:00:00 (Sun)
36000,
0,
'EST',
],
[
61307337600, # utc_start 1943-10-02 16:00:00 (Sat)
61322454000, # utc_end 1944-03-25 15:00:00 (Sat)
61307377200, # local_start 1943-10-03 03:00:00 (Sun)
61322493600, # local_end 1944-03-26 02:00:00 (Sun)
39600,
1,
'EST',
],
[
61322454000, # utc_start 1944-03-25 15:00:00 (Sat)
62182821600, # utc_end 1971-06-30 14:00:00 (Wed)
61322490000, # local_start 1944-03-26 01:00:00 (Sun)
62182857600, # local_end 1971-07-01 00:00:00 (Thu)
36000,
0,
'EST',
],
[
62182821600, # utc_start 1971-06-30 14:00:00 (Wed)
62193369600, # utc_end 1971-10-30 16:00:00 (Sat)
62182857600, # local_start 1971-07-01 00:00:00 (Thu)
62193405600, # local_end 1971-10-31 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62193369600, # utc_start 1971-10-30 16:00:00 (Sat)
62203651200, # utc_end 1972-02-26 16:00:00 (Sat)
62193409200, # local_start 1971-10-31 03:00:00 (Sun)
62203690800, # local_end 1972-02-27 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62203651200, # utc_start 1972-02-26 16:00:00 (Sat)
62224819200, # utc_end 1972-10-28 16:00:00 (Sat)
62203687200, # local_start 1972-02-27 02:00:00 (Sun)
62224855200, # local_end 1972-10-29 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62224819200, # utc_start 1972-10-28 16:00:00 (Sat)
62235705600, # utc_end 1973-03-03 16:00:00 (Sat)
62224858800, # local_start 1972-10-29 03:00:00 (Sun)
62235745200, # local_end 1973-03-04 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62235705600, # utc_start 1973-03-03 16:00:00 (Sat)
62256268800, # utc_end 1973-10-27 16:00:00 (Sat)
62235741600, # local_start 1973-03-04 02:00:00 (Sun)
62256304800, # local_end 1973-10-28 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62256268800, # utc_start 1973-10-27 16:00:00 (Sat)
62267155200, # utc_end 1974-03-02 16:00:00 (Sat)
62256308400, # local_start 1973-10-28 03:00:00 (Sun)
62267194800, # local_end 1974-03-03 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62267155200, # utc_start 1974-03-02 16:00:00 (Sat)
62287718400, # utc_end 1974-10-26 16:00:00 (Sat)
62267191200, # local_start 1974-03-03 02:00:00 (Sun)
62287754400, # local_end 1974-10-27 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62287718400, # utc_start 1974-10-26 16:00:00 (Sat)
62298604800, # utc_end 1975-03-01 16:00:00 (Sat)
62287758000, # local_start 1974-10-27 03:00:00 (Sun)
62298644400, # local_end 1975-03-02 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62298604800, # utc_start 1975-03-01 16:00:00 (Sat)
62319168000, # utc_end 1975-10-25 16:00:00 (Sat)
62298640800, # local_start 1975-03-02 02:00:00 (Sun)
62319204000, # local_end 1975-10-26 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62319168000, # utc_start 1975-10-25 16:00:00 (Sat)
62330659200, # utc_end 1976-03-06 16:00:00 (Sat)
62319207600, # local_start 1975-10-26 03:00:00 (Sun)
62330698800, # local_end 1976-03-07 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62330659200, # utc_start 1976-03-06 16:00:00 (Sat)
62351222400, # utc_end 1976-10-30 16:00:00 (Sat)
62330695200, # local_start 1976-03-07 02:00:00 (Sun)
62351258400, # local_end 1976-10-31 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62351222400, # utc_start 1976-10-30 16:00:00 (Sat)
62362108800, # utc_end 1977-03-05 16:00:00 (Sat)
62351262000, # local_start 1976-10-31 03:00:00 (Sun)
62362148400, # local_end 1977-03-06 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62362108800, # utc_start 1977-03-05 16:00:00 (Sat)
62382672000, # utc_end 1977-10-29 16:00:00 (Sat)
62362144800, # local_start 1977-03-06 02:00:00 (Sun)
62382708000, # local_end 1977-10-30 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62382672000, # utc_start 1977-10-29 16:00:00 (Sat)
62393558400, # utc_end 1978-03-04 16:00:00 (Sat)
62382711600, # local_start 1977-10-30 03:00:00 (Sun)
62393598000, # local_end 1978-03-05 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62393558400, # utc_start 1978-03-04 16:00:00 (Sat)
62414121600, # utc_end 1978-10-28 16:00:00 (Sat)
62393594400, # local_start 1978-03-05 02:00:00 (Sun)
62414157600, # local_end 1978-10-29 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62414121600, # utc_start 1978-10-28 16:00:00 (Sat)
62425008000, # utc_end 1979-03-03 16:00:00 (Sat)
62414161200, # local_start 1978-10-29 03:00:00 (Sun)
62425047600, # local_end 1979-03-04 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62425008000, # utc_start 1979-03-03 16:00:00 (Sat)
62445571200, # utc_end 1979-10-27 16:00:00 (Sat)
62425044000, # local_start 1979-03-04 02:00:00 (Sun)
62445607200, # local_end 1979-10-28 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62445571200, # utc_start 1979-10-27 16:00:00 (Sat)
62456457600, # utc_end 1980-03-01 16:00:00 (Sat)
62445610800, # local_start 1979-10-28 03:00:00 (Sun)
62456497200, # local_end 1980-03-02 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62456457600, # utc_start 1980-03-01 16:00:00 (Sat)
62477020800, # utc_end 1980-10-25 16:00:00 (Sat)
62456493600, # local_start 1980-03-02 02:00:00 (Sun)
62477056800, # local_end 1980-10-26 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62477020800, # utc_start 1980-10-25 16:00:00 (Sat)
62487907200, # utc_end 1981-02-28 16:00:00 (Sat)
62477060400, # local_start 1980-10-26 03:00:00 (Sun)
62487946800, # local_end 1981-03-01 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62487907200, # utc_start 1981-02-28 16:00:00 (Sat)
62508470400, # utc_end 1981-10-24 16:00:00 (Sat)
62487943200, # local_start 1981-03-01 02:00:00 (Sun)
62508506400, # local_end 1981-10-25 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62508470400, # utc_start 1981-10-24 16:00:00 (Sat)
62521776000, # utc_end 1982-03-27 16:00:00 (Sat)
62508510000, # local_start 1981-10-25 03:00:00 (Sun)
62521815600, # local_end 1982-03-28 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62521776000, # utc_start 1982-03-27 16:00:00 (Sat)
62540524800, # utc_end 1982-10-30 16:00:00 (Sat)
62521812000, # local_start 1982-03-28 02:00:00 (Sun)
62540560800, # local_end 1982-10-31 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62540524800, # utc_start 1982-10-30 16:00:00 (Sat)
62553225600, # utc_end 1983-03-26 16:00:00 (Sat)
62540564400, # local_start 1982-10-31 03:00:00 (Sun)
62553265200, # local_end 1983-03-27 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62553225600, # utc_start 1983-03-26 16:00:00 (Sat)
62571974400, # utc_end 1983-10-29 16:00:00 (Sat)
62553261600, # local_start 1983-03-27 02:00:00 (Sun)
62572010400, # local_end 1983-10-30 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62571974400, # utc_start 1983-10-29 16:00:00 (Sat)
62582860800, # utc_end 1984-03-03 16:00:00 (Sat)
62572014000, # local_start 1983-10-30 03:00:00 (Sun)
62582900400, # local_end 1984-03-04 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62582860800, # utc_start 1984-03-03 16:00:00 (Sat)
62603424000, # utc_end 1984-10-27 16:00:00 (Sat)
62582896800, # local_start 1984-03-04 02:00:00 (Sun)
62603460000, # local_end 1984-10-28 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62603424000, # utc_start 1984-10-27 16:00:00 (Sat)
62614310400, # utc_end 1985-03-02 16:00:00 (Sat)
62603463600, # local_start 1984-10-28 03:00:00 (Sun)
62614350000, # local_end 1985-03-03 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62614310400, # utc_start 1985-03-02 16:00:00 (Sat)
62634873600, # utc_end 1985-10-26 16:00:00 (Sat)
62614346400, # local_start 1985-03-03 02:00:00 (Sun)
62634909600, # local_end 1985-10-27 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62634873600, # utc_start 1985-10-26 16:00:00 (Sat)
62645760000, # utc_end 1986-03-01 16:00:00 (Sat)
62634913200, # local_start 1985-10-27 03:00:00 (Sun)
62645799600, # local_end 1986-03-02 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62645760000, # utc_start 1986-03-01 16:00:00 (Sat)
62665718400, # utc_end 1986-10-18 16:00:00 (Sat)
62645796000, # local_start 1986-03-02 02:00:00 (Sun)
62665754400, # local_end 1986-10-19 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62665718400, # utc_start 1986-10-18 16:00:00 (Sat)
62678419200, # utc_end 1987-03-14 16:00:00 (Sat)
62665758000, # local_start 1986-10-19 03:00:00 (Sun)
62678458800, # local_end 1987-03-15 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62678419200, # utc_start 1987-03-14 16:00:00 (Sat)
62697772800, # utc_end 1987-10-24 16:00:00 (Sat)
62678455200, # local_start 1987-03-15 02:00:00 (Sun)
62697808800, # local_end 1987-10-25 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62697772800, # utc_start 1987-10-24 16:00:00 (Sat)
62710473600, # utc_end 1988-03-19 16:00:00 (Sat)
62697812400, # local_start 1987-10-25 03:00:00 (Sun)
62710513200, # local_end 1988-03-20 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62710473600, # utc_start 1988-03-19 16:00:00 (Sat)
62729827200, # utc_end 1988-10-29 16:00:00 (Sat)
62710509600, # local_start 1988-03-20 02:00:00 (Sun)
62729863200, # local_end 1988-10-30 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62729827200, # utc_start 1988-10-29 16:00:00 (Sat)
62741923200, # utc_end 1989-03-18 16:00:00 (Sat)
62729866800, # local_start 1988-10-30 03:00:00 (Sun)
62741962800, # local_end 1989-03-19 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62741923200, # utc_start 1989-03-18 16:00:00 (Sat)
62761276800, # utc_end 1989-10-28 16:00:00 (Sat)
62741959200, # local_start 1989-03-19 02:00:00 (Sun)
62761312800, # local_end 1989-10-29 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62761276800, # utc_start 1989-10-28 16:00:00 (Sat)
62773372800, # utc_end 1990-03-17 16:00:00 (Sat)
62761316400, # local_start 1989-10-29 03:00:00 (Sun)
62773412400, # local_end 1990-03-18 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62773372800, # utc_start 1990-03-17 16:00:00 (Sat)
62792726400, # utc_end 1990-10-27 16:00:00 (Sat)
62773408800, # local_start 1990-03-18 02:00:00 (Sun)
62792762400, # local_end 1990-10-28 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62792726400, # utc_start 1990-10-27 16:00:00 (Sat)
62806032000, # utc_end 1991-03-30 16:00:00 (Sat)
62792766000, # local_start 1990-10-28 03:00:00 (Sun)
62806071600, # local_end 1991-03-31 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62806032000, # utc_start 1991-03-30 16:00:00 (Sat)
62822361600, # utc_end 1991-10-05 16:00:00 (Sat)
62806068000, # local_start 1991-03-31 02:00:00 (Sun)
62822397600, # local_end 1991-10-06 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62822361600, # utc_start 1991-10-05 16:00:00 (Sat)
62837481600, # utc_end 1992-03-28 16:00:00 (Sat)
62822401200, # local_start 1991-10-06 03:00:00 (Sun)
62837521200, # local_end 1992-03-29 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62837481600, # utc_start 1992-03-28 16:00:00 (Sat)
62853811200, # utc_end 1992-10-03 16:00:00 (Sat)
62837517600, # local_start 1992-03-29 02:00:00 (Sun)
62853847200, # local_end 1992-10-04 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62853811200, # utc_start 1992-10-03 16:00:00 (Sat)
62868931200, # utc_end 1993-03-27 16:00:00 (Sat)
62853850800, # local_start 1992-10-04 03:00:00 (Sun)
62868970800, # local_end 1993-03-28 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62868931200, # utc_start 1993-03-27 16:00:00 (Sat)
62885260800, # utc_end 1993-10-02 16:00:00 (Sat)
62868967200, # local_start 1993-03-28 02:00:00 (Sun)
62885296800, # local_end 1993-10-03 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62885260800, # utc_start 1993-10-02 16:00:00 (Sat)
62900380800, # utc_end 1994-03-26 16:00:00 (Sat)
62885300400, # local_start 1993-10-03 03:00:00 (Sun)
62900420400, # local_end 1994-03-27 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62900380800, # utc_start 1994-03-26 16:00:00 (Sat)
62916710400, # utc_end 1994-10-01 16:00:00 (Sat)
62900416800, # local_start 1994-03-27 02:00:00 (Sun)
62916746400, # local_end 1994-10-02 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62916710400, # utc_start 1994-10-01 16:00:00 (Sat)
62931830400, # utc_end 1995-03-25 16:00:00 (Sat)
62916750000, # local_start 1994-10-02 03:00:00 (Sun)
62931870000, # local_end 1995-03-26 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62931830400, # utc_start 1995-03-25 16:00:00 (Sat)
62948160000, # utc_end 1995-09-30 16:00:00 (Sat)
62931866400, # local_start 1995-03-26 02:00:00 (Sun)
62948196000, # local_end 1995-10-01 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62948160000, # utc_start 1995-09-30 16:00:00 (Sat)
62963884800, # utc_end 1996-03-30 16:00:00 (Sat)
62948199600, # local_start 1995-10-01 03:00:00 (Sun)
62963924400, # local_end 1996-03-31 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62963884800, # utc_start 1996-03-30 16:00:00 (Sat)
62980214400, # utc_end 1996-10-05 16:00:00 (Sat)
62963920800, # local_start 1996-03-31 02:00:00 (Sun)
62980250400, # local_end 1996-10-06 02:00:00 (Sun)
36000,
0,
'EST',
],
[
62980214400, # utc_start 1996-10-05 16:00:00 (Sat)
62995334400, # utc_end 1997-03-29 16:00:00 (Sat)
62980254000, # local_start 1996-10-06 03:00:00 (Sun)
62995374000, # local_end 1997-03-30 03:00:00 (Sun)
39600,
1,
'EST',
],
[
62995334400, # utc_start 1997-03-29 16:00:00 (Sat)
63011664000, # utc_end 1997-10-04 16:00:00 (Sat)
62995370400, # local_start 1997-03-30 02:00:00 (Sun)
63011700000, # local_end 1997-10-05 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63011664000, # utc_start 1997-10-04 16:00:00 (Sat)
63026784000, # utc_end 1998-03-28 16:00:00 (Sat)
63011703600, # local_start 1997-10-05 03:00:00 (Sun)
63026823600, # local_end 1998-03-29 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63026784000, # utc_start 1998-03-28 16:00:00 (Sat)
63043113600, # utc_end 1998-10-03 16:00:00 (Sat)
63026820000, # local_start 1998-03-29 02:00:00 (Sun)
63043149600, # local_end 1998-10-04 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63043113600, # utc_start 1998-10-03 16:00:00 (Sat)
63058233600, # utc_end 1999-03-27 16:00:00 (Sat)
63043153200, # local_start 1998-10-04 03:00:00 (Sun)
63058273200, # local_end 1999-03-28 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63058233600, # utc_start 1999-03-27 16:00:00 (Sat)
63074563200, # utc_end 1999-10-02 16:00:00 (Sat)
63058269600, # local_start 1999-03-28 02:00:00 (Sun)
63074599200, # local_end 1999-10-03 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63074563200, # utc_start 1999-10-02 16:00:00 (Sat)
63089683200, # utc_end 2000-03-25 16:00:00 (Sat)
63074602800, # local_start 1999-10-03 03:00:00 (Sun)
63089722800, # local_end 2000-03-26 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63089683200, # utc_start 2000-03-25 16:00:00 (Sat)
63102988800, # utc_end 2000-08-26 16:00:00 (Sat)
63089719200, # local_start 2000-03-26 02:00:00 (Sun)
63103024800, # local_end 2000-08-27 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63102988800, # utc_start 2000-08-26 16:00:00 (Sat)
63121132800, # utc_end 2001-03-24 16:00:00 (Sat)
63103028400, # local_start 2000-08-27 03:00:00 (Sun)
63121172400, # local_end 2001-03-25 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63121132800, # utc_start 2001-03-24 16:00:00 (Sat)
63138067200, # utc_end 2001-10-06 16:00:00 (Sat)
63121168800, # local_start 2001-03-25 02:00:00 (Sun)
63138103200, # local_end 2001-10-07 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63138067200, # utc_start 2001-10-06 16:00:00 (Sat)
63153187200, # utc_end 2002-03-30 16:00:00 (Sat)
63138106800, # local_start 2001-10-07 03:00:00 (Sun)
63153226800, # local_end 2002-03-31 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63153187200, # utc_start 2002-03-30 16:00:00 (Sat)
63169516800, # utc_end 2002-10-05 16:00:00 (Sat)
63153223200, # local_start 2002-03-31 02:00:00 (Sun)
63169552800, # local_end 2002-10-06 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63169516800, # utc_start 2002-10-05 16:00:00 (Sat)
63184636800, # utc_end 2003-03-29 16:00:00 (Sat)
63169556400, # local_start 2002-10-06 03:00:00 (Sun)
63184676400, # local_end 2003-03-30 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63184636800, # utc_start 2003-03-29 16:00:00 (Sat)
63200966400, # utc_end 2003-10-04 16:00:00 (Sat)
63184672800, # local_start 2003-03-30 02:00:00 (Sun)
63201002400, # local_end 2003-10-05 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63200966400, # utc_start 2003-10-04 16:00:00 (Sat)
63216086400, # utc_end 2004-03-27 16:00:00 (Sat)
63201006000, # local_start 2003-10-05 03:00:00 (Sun)
63216126000, # local_end 2004-03-28 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63216086400, # utc_start 2004-03-27 16:00:00 (Sat)
63232416000, # utc_end 2004-10-02 16:00:00 (Sat)
63216122400, # local_start 2004-03-28 02:00:00 (Sun)
63232452000, # local_end 2004-10-03 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63232416000, # utc_start 2004-10-02 16:00:00 (Sat)
63247536000, # utc_end 2005-03-26 16:00:00 (Sat)
63232455600, # local_start 2004-10-03 03:00:00 (Sun)
63247575600, # local_end 2005-03-27 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63247536000, # utc_start 2005-03-26 16:00:00 (Sat)
63263865600, # utc_end 2005-10-01 16:00:00 (Sat)
63247572000, # local_start 2005-03-27 02:00:00 (Sun)
63263901600, # local_end 2005-10-02 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63263865600, # utc_start 2005-10-01 16:00:00 (Sat)
63279590400, # utc_end 2006-04-01 16:00:00 (Sat)
63263905200, # local_start 2005-10-02 03:00:00 (Sun)
63279630000, # local_end 2006-04-02 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63279590400, # utc_start 2006-04-01 16:00:00 (Sat)
63295315200, # utc_end 2006-09-30 16:00:00 (Sat)
63279626400, # local_start 2006-04-02 02:00:00 (Sun)
63295351200, # local_end 2006-10-01 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63295315200, # utc_start 2006-09-30 16:00:00 (Sat)
63310435200, # utc_end 2007-03-24 16:00:00 (Sat)
63295354800, # local_start 2006-10-01 03:00:00 (Sun)
63310474800, # local_end 2007-03-25 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63310435200, # utc_start 2007-03-24 16:00:00 (Sat)
63327369600, # utc_end 2007-10-06 16:00:00 (Sat)
63310471200, # local_start 2007-03-25 02:00:00 (Sun)
63327405600, # local_end 2007-10-07 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63327369600, # utc_start 2007-10-06 16:00:00 (Sat)
63343094400, # utc_end 2008-04-05 16:00:00 (Sat)
63327409200, # local_start 2007-10-07 03:00:00 (Sun)
63343134000, # local_end 2008-04-06 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63343094400, # utc_start 2008-04-05 16:00:00 (Sat)
63358819200, # utc_end 2008-10-04 16:00:00 (Sat)
63343130400, # local_start 2008-04-06 02:00:00 (Sun)
63358855200, # local_end 2008-10-05 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63358819200, # utc_start 2008-10-04 16:00:00 (Sat)
63374544000, # utc_end 2009-04-04 16:00:00 (Sat)
63358858800, # local_start 2008-10-05 03:00:00 (Sun)
63374583600, # local_end 2009-04-05 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63374544000, # utc_start 2009-04-04 16:00:00 (Sat)
63390268800, # utc_end 2009-10-03 16:00:00 (Sat)
63374580000, # local_start 2009-04-05 02:00:00 (Sun)
63390304800, # local_end 2009-10-04 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63390268800, # utc_start 2009-10-03 16:00:00 (Sat)
63405993600, # utc_end 2010-04-03 16:00:00 (Sat)
63390308400, # local_start 2009-10-04 03:00:00 (Sun)
63406033200, # local_end 2010-04-04 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63405993600, # utc_start 2010-04-03 16:00:00 (Sat)
63421718400, # utc_end 2010-10-02 16:00:00 (Sat)
63406029600, # local_start 2010-04-04 02:00:00 (Sun)
63421754400, # local_end 2010-10-03 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63421718400, # utc_start 2010-10-02 16:00:00 (Sat)
63437443200, # utc_end 2011-04-02 16:00:00 (Sat)
63421758000, # local_start 2010-10-03 03:00:00 (Sun)
63437482800, # local_end 2011-04-03 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63437443200, # utc_start 2011-04-02 16:00:00 (Sat)
63453168000, # utc_end 2011-10-01 16:00:00 (Sat)
63437479200, # local_start 2011-04-03 02:00:00 (Sun)
63453204000, # local_end 2011-10-02 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63453168000, # utc_start 2011-10-01 16:00:00 (Sat)
63468892800, # utc_end 2012-03-31 16:00:00 (Sat)
63453207600, # local_start 2011-10-02 03:00:00 (Sun)
63468932400, # local_end 2012-04-01 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63468892800, # utc_start 2012-03-31 16:00:00 (Sat)
63485222400, # utc_end 2012-10-06 16:00:00 (Sat)
63468928800, # local_start 2012-04-01 02:00:00 (Sun)
63485258400, # local_end 2012-10-07 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63485222400, # utc_start 2012-10-06 16:00:00 (Sat)
63500947200, # utc_end 2013-04-06 16:00:00 (Sat)
63485262000, # local_start 2012-10-07 03:00:00 (Sun)
63500986800, # local_end 2013-04-07 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63500947200, # utc_start 2013-04-06 16:00:00 (Sat)
63516672000, # utc_end 2013-10-05 16:00:00 (Sat)
63500983200, # local_start 2013-04-07 02:00:00 (Sun)
63516708000, # local_end 2013-10-06 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63516672000, # utc_start 2013-10-05 16:00:00 (Sat)
63532396800, # utc_end 2014-04-05 16:00:00 (Sat)
63516711600, # local_start 2013-10-06 03:00:00 (Sun)
63532436400, # local_end 2014-04-06 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63532396800, # utc_start 2014-04-05 16:00:00 (Sat)
63548121600, # utc_end 2014-10-04 16:00:00 (Sat)
63532432800, # local_start 2014-04-06 02:00:00 (Sun)
63548157600, # local_end 2014-10-05 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63548121600, # utc_start 2014-10-04 16:00:00 (Sat)
63563846400, # utc_end 2015-04-04 16:00:00 (Sat)
63548161200, # local_start 2014-10-05 03:00:00 (Sun)
63563886000, # local_end 2015-04-05 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63563846400, # utc_start 2015-04-04 16:00:00 (Sat)
63579571200, # utc_end 2015-10-03 16:00:00 (Sat)
63563882400, # local_start 2015-04-05 02:00:00 (Sun)
63579607200, # local_end 2015-10-04 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63579571200, # utc_start 2015-10-03 16:00:00 (Sat)
63595296000, # utc_end 2016-04-02 16:00:00 (Sat)
63579610800, # local_start 2015-10-04 03:00:00 (Sun)
63595335600, # local_end 2016-04-03 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63595296000, # utc_start 2016-04-02 16:00:00 (Sat)
63611020800, # utc_end 2016-10-01 16:00:00 (Sat)
63595332000, # local_start 2016-04-03 02:00:00 (Sun)
63611056800, # local_end 2016-10-02 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63611020800, # utc_start 2016-10-01 16:00:00 (Sat)
63626745600, # utc_end 2017-04-01 16:00:00 (Sat)
63611060400, # local_start 2016-10-02 03:00:00 (Sun)
63626785200, # local_end 2017-04-02 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63626745600, # utc_start 2017-04-01 16:00:00 (Sat)
63642470400, # utc_end 2017-09-30 16:00:00 (Sat)
63626781600, # local_start 2017-04-02 02:00:00 (Sun)
63642506400, # local_end 2017-10-01 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63642470400, # utc_start 2017-09-30 16:00:00 (Sat)
63658195200, # utc_end 2018-03-31 16:00:00 (Sat)
63642510000, # local_start 2017-10-01 03:00:00 (Sun)
63658234800, # local_end 2018-04-01 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63658195200, # utc_start 2018-03-31 16:00:00 (Sat)
63674524800, # utc_end 2018-10-06 16:00:00 (Sat)
63658231200, # local_start 2018-04-01 02:00:00 (Sun)
63674560800, # local_end 2018-10-07 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63674524800, # utc_start 2018-10-06 16:00:00 (Sat)
63690249600, # utc_end 2019-04-06 16:00:00 (Sat)
63674564400, # local_start 2018-10-07 03:00:00 (Sun)
63690289200, # local_end 2019-04-07 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63690249600, # utc_start 2019-04-06 16:00:00 (Sat)
63705974400, # utc_end 2019-10-05 16:00:00 (Sat)
63690285600, # local_start 2019-04-07 02:00:00 (Sun)
63706010400, # local_end 2019-10-06 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63705974400, # utc_start 2019-10-05 16:00:00 (Sat)
63721699200, # utc_end 2020-04-04 16:00:00 (Sat)
63706014000, # local_start 2019-10-06 03:00:00 (Sun)
63721738800, # local_end 2020-04-05 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63721699200, # utc_start 2020-04-04 16:00:00 (Sat)
63737424000, # utc_end 2020-10-03 16:00:00 (Sat)
63721735200, # local_start 2020-04-05 02:00:00 (Sun)
63737460000, # local_end 2020-10-04 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63737424000, # utc_start 2020-10-03 16:00:00 (Sat)
63753148800, # utc_end 2021-04-03 16:00:00 (Sat)
63737463600, # local_start 2020-10-04 03:00:00 (Sun)
63753188400, # local_end 2021-04-04 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63753148800, # utc_start 2021-04-03 16:00:00 (Sat)
63768873600, # utc_end 2021-10-02 16:00:00 (Sat)
63753184800, # local_start 2021-04-04 02:00:00 (Sun)
63768909600, # local_end 2021-10-03 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63768873600, # utc_start 2021-10-02 16:00:00 (Sat)
63784598400, # utc_end 2022-04-02 16:00:00 (Sat)
63768913200, # local_start 2021-10-03 03:00:00 (Sun)
63784638000, # local_end 2022-04-03 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63784598400, # utc_start 2022-04-02 16:00:00 (Sat)
63800323200, # utc_end 2022-10-01 16:00:00 (Sat)
63784634400, # local_start 2022-04-03 02:00:00 (Sun)
63800359200, # local_end 2022-10-02 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63800323200, # utc_start 2022-10-01 16:00:00 (Sat)
63816048000, # utc_end 2023-04-01 16:00:00 (Sat)
63800362800, # local_start 2022-10-02 03:00:00 (Sun)
63816087600, # local_end 2023-04-02 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63816048000, # utc_start 2023-04-01 16:00:00 (Sat)
63831772800, # utc_end 2023-09-30 16:00:00 (Sat)
63816084000, # local_start 2023-04-02 02:00:00 (Sun)
63831808800, # local_end 2023-10-01 02:00:00 (Sun)
36000,
0,
'EST',
],
[
63831772800, # utc_start 2023-09-30 16:00:00 (Sat)
63848102400, # utc_end 2024-04-06 16:00:00 (Sat)
63831812400, # local_start 2023-10-01 03:00:00 (Sun)
63848142000, # local_end 2024-04-07 03:00:00 (Sun)
39600,
1,
'EST',
],
[
63848102400, # utc_start 2024-04-06 16:00:00 (Sat)
63863827200, # utc_end 2024-10-05 16:00:00 (Sat)
63848138400, # local_start 2024-04-07 02:00:00 (Sun)
63863863200, # local_end 2024-10-06 02:00:00 (Sun)
36000,
0,
'EST',
],
];
# Version of the IANA (Olson) tz database this generated data came from.
sub olson_version { return '2013a' }
# Number of DST transitions contained in the pre-generated span table.
sub has_dst_changes { return 59 }
# Last year covered by the pre-generated spans; later dates use the rules.
sub _max_year { return 2023 }
# Construct a zone instance, seeding it with the pre-generated span table.
sub _new_instance
{
    my $class = shift;
    return $class->_init( @_, spans => $spans );
}
# Standard-time UTC offset (seconds) at the end of the span table: +10:00.
sub _last_offset { return 36000 }
# The observance in effect after the pre-generated spans above run out:
# AEST (abbreviation 'EST' per the 2013a tz data), UTC+10:00, standard time
# (offset_from_std == 0), open-ended ('until' is empty).  The embedded
# DateTime objects record when this observance took effect.  This is
# generated data -- do not edit by hand.
my $last_observance = bless( {
  'format' => 'EST',
  'gmtoff' => '10:00',
  'local_start_datetime' => bless( {
    'formatter' => undef,
    'local_rd_days' => 719709,
    'local_rd_secs' => 0,
    'offset_modifier' => 0,
    'rd_nanosecs' => 0,
    'tz' => bless( {
      'name' => 'floating',
      'offset' => 0
    }, 'DateTime::TimeZone::Floating' ),
    'utc_rd_days' => 719709,
    'utc_rd_secs' => 0,
    'utc_year' => 1972
  }, 'DateTime' ),
  'offset_from_std' => 0,
  'offset_from_utc' => 36000,
  'until' => [],
  'utc_start_datetime' => bless( {
    'formatter' => undef,
    'local_rd_days' => 719708,
    'local_rd_secs' => 50400,
    'offset_modifier' => 0,
    'rd_nanosecs' => 0,
    'tz' => bless( {
      'name' => 'floating',
      'offset' => 0
    }, 'DateTime::TimeZone::Floating' ),
    'utc_rd_days' => 719708,
    'utc_rd_secs' => 50400,
    'utc_year' => 1972
  }, 'DateTime' )
}, 'DateTime::TimeZone::OlsonDB::Observance' )
;
# Accessor used by DateTime::TimeZone for dates beyond the span table.
sub _last_observance { $last_observance }
# Ongoing DST rules (Tasmania's "AT" rule set) applied to dates beyond the
# pre-generated spans: DST ends on the first Sunday of April (back to
# standard time, save '0') and begins on the first Sunday of October
# (save '1:00'), both at 02:00 standard time.  Generated data.
my $rules = [
  bless( {
    'at' => '2:00s',
    'from' => '2008',
    'in' => 'Apr',
    'letter' => '',
    'name' => 'AT',
    'offset_from_std' => 0,
    'on' => 'Sun>=1',
    'save' => '0',
    'to' => 'max',
    'type' => undef
  }, 'DateTime::TimeZone::OlsonDB::Rule' ),
  bless( {
    'at' => '2:00s',
    'from' => '2001',
    'in' => 'Oct',
    'letter' => '',
    'name' => 'AT',
    'offset_from_std' => 3600,
    'on' => 'Sun>=1',
    'save' => '1:00',
    'to' => 'max',
    'type' => undef
  }, 'DateTime::TimeZone::OlsonDB::Rule' )
]
;
# Accessor for the ongoing rule set above.
sub _rules { $rules }
1;
| liuyangning/WX_web | xampp/perl/vendor/lib/DateTime/TimeZone/Australia/Currie.pm | Perl | mit | 31,405 |
#!/usr/bin/env perl
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use Mojo::Base -strict;
use Schema;
use Test::IntegrationTestHelper;
use strict;
use warnings;
use Data::Dumper;
# Dump the TrafficOps route table: one line per route showing the HTTP
# method, the URL pattern (plus ".format" when a format constraint exists)
# and the fully qualified Package::Controller->action that services it.
my $t = Test::Mojo->new('TrafficOps');

#print "t: " . Dumper( $t->ua->server->app->routes->children->[0]->{pattern} );
# NOTE(review): this reaches into Mojolicious route internals ({via},
# {pattern}{pattern}, {pattern}{defaults}, ...) rather than the public
# accessor API, so it is tied to the installed Mojolicious version --
# confirm after any Mojolicious upgrade.
foreach my $i ( $t->ua->server->app->routes->children ) {
	foreach my $j (@$i) {
		my $method = $j->{via}->[0]; #GET/POST
		my $path = $j->{pattern}{pattern}; #/url
		my $package = $j->{pattern}{defaults}{namespace}; # UI/API
		my $format = $j->{pattern}{constraints}{format}[0];
		my $controller = $j->{pattern}{defaults}{controller};
		my $action = $j->{pattern}{defaults}{action};
		# only fully-specified routes are printed; partial/placeholder
		# entries (missing controller or action) are skipped
		if ( defined($package) && defined($method) && defined($action) && defined($path) && defined($controller) ) {

			#print "$method\t$path \t\t\t\t{:action =>$action, :package =>$package, :controller=>$controller} \n";
			# pad "METHOD /path[.format]" to a fixed column before the handler
			my $max_length = 80;
			my $method_and_path = sprintf( "%-6s %s", $method, $path );
			if ( defined($format) ) {
				$method_and_path = $method_and_path . "." . $format;
			}
			my $method_and_path_length = length($method_and_path);
			my $spacing = ' ' x ( $max_length - $method_and_path_length );
			my $fully_qualified_package = $package . "::" . $controller . "->" . $action;
			my $line = sprintf( "%s %s %s\n", $method_and_path, $spacing, $fully_qualified_package );
			print($line);

			#printf( "%s\n", '-' x length($line) );
			#printf( "%-5s %-40s {:action => %s, :package=> %s, :controller=> %s}\n", $method, $path, $action, $package, $controller );
		}

		#print "j: " . Dumper( $j->{pattern}{pattern} );
	}
}
| mdb/incubator-trafficcontrol | traffic_ops/app/bin/routes.pl | Perl | apache-2.0 | 2,316 |
#!/usr/bin/perl
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 2011 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# Style limits enforced by the checks below.
my $max_column = 79;
my $indent = 2;

# Global counters and options.  NOTE(review): the scalar $warnings (count)
# and the hash %warnings (descriptions) are distinct variables that share a
# name -- easy to confuse when reading the code below.
my $warnings;
my $errors;
my $supressed; # whitelisted problems  (sic: historical spelling, kept)
my $file;
my $dir=".";
my $wlist;
my $windows_os = $^O eq 'MSWin32' || $^O eq 'msys' || $^O eq 'cygwin';
my $verbose;
my %whitelist;

# Human-readable description for every warning/error name, used by the
# --help output and kept as the canonical list of supported checks.
my %warnings = (
    'LONGLINE'         => "Line longer than $max_column",
    'TABS'             => 'TAB characters not allowed',
    'TRAILINGSPACE'    => 'Trailing white space on the line',
    'CPPCOMMENTS'      => '// comment detected',
    'SPACEBEFOREPAREN' => 'space before an open parenthesis',
    'SPACEAFTERPAREN'  => 'space after open parenthesis',
    'SPACEBEFORECLOSE' => 'space before a close parenthesis',
    'SPACEBEFORECOMMA' => 'space before a comma',
    'RETURNNOSPACE'    => 'return without space',
    'COMMANOSPACE'     => 'comma without following space',
    'BRACEELSE'        => '} else on the same line',
    'PARENBRACE'       => '){ without sufficient space',
    'SPACESEMILCOLON'  => 'space before semicolon',
    'BANNEDFUNC'       => 'a banned function was used',
    'FOPENMODE'        => 'fopen needs a macro for the mode string',
    'BRACEPOS'         => 'wrong position for an open brace',
    'INDENTATION'      => 'wrong start column for code',
    'COPYRIGHT'        => 'file missing a copyright statement',
    'BADCOMMAND'       => 'bad !checksrc! instruction',
    'UNUSEDIGNORE'     => 'a warning ignore was not used',
    'OPENCOMMENT'      => 'file ended with a /* comment still "open"'
    );
# Load the per-project whitelist file (one exact source line per entry)
# from "$dir/checksrc.whitelist".  Lines listed there never trigger a
# warning or error.  The file is optional: when it cannot be opened we
# simply leave %whitelist empty, matching the historical behavior of the
# previously unchecked open.
sub readwhitelist {
    # three-arg open with a lexical handle instead of the old unchecked
    # two-arg open on the bareword global handle W
    open(my $wh, '<', "$dir/checksrc.whitelist") or return;
    my @all=<$wh>;
    for(@all) {
        $windows_os ? $_ =~ s/\r?\n$// : chomp;
        $whitelist{$_}=1;
    }
    close($wh);
}
# Register one detected style problem and print a gcc-style report line,
# the offending source line, and a caret marker under the column.
#
#  $name  - symbolic problem name (key into %warnings)
#  $num   - line number in the scanned file
#  $col   - 0-based column of the problem (reported 1-based)
#  $file  - name of the scanned file
#  $line  - the source line the problem was found on
#  $msg   - human readable description
#  $error - true to count as an error, false/omitted for a warning
#
# Problems whitelisted in checksrc.whitelist or suppressed with a
# "!checksrc! disable" comment are only added to the suppressed tallies.
sub checkwarn {
    my ($name, $num, $col, $file, $line, $msg, $error) = @_;

    my $w=$error?"error":"warning";
    my $nowarn=0;

    #if(!$warnings{$name}) {
    #    print STDERR "Dev! there's no description for $name!\n";
    #}

    # checksrc.whitelist
    if($whitelist{$line}) {
        $nowarn = 1;
    }
    # !checksrc! controlled
    elsif($ignore{$name}) {
        $ignore{$name}--;
        $ignore_used{$name}++;
        $nowarn = 1;
        if(!$ignore{$name}) {
            # reached zero, enable again
            enable_warn($name, $line, $file, $l);
        }
    }

    if($nowarn) {
        $supressed++;
        # BUGFIX: this previously tested $w, which holds the string
        # "error" or "warning" and is therefore always true, so the
        # suppressed-error counter could never advance
        if($error) {
            $serrors++;
        }
        else {
            $swarnings++;
        }
        return;
    }

    # BUGFIX: likewise $errors was never incremented because the old code
    # tested the always-true string $w instead of the $error flag
    if($error) {
        $errors++;
    }
    else {
        $warnings++;
    }

    $col++;
    print "$file:$num:$col: $w: $msg ($name)\n";
    print " $line\n";

    if($col < 80) {
        my $pref = (' ' x $col);
        print "${pref}^\n";
    }
}
# Parse leading command line options (-D dir prefix, -W whitelist a file,
# -h/--help); everything after the options is a list of files to scan.
$file = shift @ARGV;

while(1) {

    if($file =~ /-D(.*)/) {
        $dir = $1;
        $file = shift @ARGV;
        next;
    }
    elsif($file =~ /-W(.*)/) {
        $wlist .= " $1 ";
        $file = shift @ARGV;
        next;
    }
    elsif($file =~ /^(-h|--help)/) {
        undef $file;
        last;
    }

    last;
}

# No file argument (or -h): print usage plus the full list of checks.
if(!$file) {
    print "checksrc.pl [option] <file1> [file2] ...\n";
    print " Options:\n";
    print "  -D[DIR]   Directory to prepend file names\n";
    print "  -h        Show help output\n";
    print "  -W[file]  Whitelist the given file - ignore all its flaws\n";
    print "\nDetects and warns for these problems:\n";
    for(sort keys %warnings) {
        printf (" %-18s: %s\n", $_, $warnings{$_});
    }
    exit;
}

readwhitelist();

# Scan each remaining file unless it was -W whitelisted wholesale;
# relative names get the -D directory prepended.
do {
    if("$wlist" !~ / $file /) {
        my $fullname = $file;
        $fullname = "$dir/$file" if ($fullname !~ '^\.?\.?/');
        scanfile($fullname);
    }
    $file = shift @ARGV;
} while($file);
# Reset all per-file "!checksrc!" ignore bookkeeping before scanning a
# new file.
sub checksrc_clear {
    undef %ignore;
    undef %ignore_set;
    undef @ignore_line;
}
# At end of file, report every "!checksrc! disable" that never actually
# suppressed a warning (UNUSEDIGNORE).
sub checksrc_endoffile {
    my ($file) = @_;
    foreach my $name (keys %ignore_set) {
        my $setline = $ignore_set{$name};
        next unless $setline && !$ignore_used{$name};
        checkwarn("UNUSEDIGNORE", $setline,
                  length($name)+11, $file,
                  $ignore_line[$setline],
                  "Unused ignore: $name");
    }
}
# Re-enable a warning that was switched off with "!checksrc! disable",
# complaining (UNUSEDIGNORE) if the disable never suppressed anything.
sub enable_warn {
    my ($what, $line, $file, $l) = @_;

    unless($ignore_used{$what}) {
        checkwarn("UNUSEDIGNORE",
                  $line, length($what) + 11, $file, $l,
                  "No warning was inhibited!");
    }
    # reset the tracking state for this warning name
    $ignore_set{$what}  = 0;
    $ignore_used{$what} = 0;
    $ignore{$what}      = 0;
}
# Handle an in-source "!checksrc!" control comment.
#   disable <NAME> [scope]  - suppress NAME for the next <scope> hits
#                             (scope defaults to 1; "all" means the rest
#                             of the file)
#   enable <NAME>           - re-enable NAME immediately
# Anything else is reported as BADCOMMAND.
sub checksrc {
    my ($cmd, $line, $file, $l) = @_;
    if($cmd =~ / *([^ ]*) *(.*)/) {
        my ($enable, $what) = ($1, $2);
        $what =~ s: *\*/$::; # cut off end of C comment
        # print "ENABLE $enable WHAT $what\n";
        if($enable eq "disable") {
            # CLEANUP: these used to be initialised from $1/$2 of the
            # *outer* match above -- a misleading dead store, since both
            # branches below always assign them
            my ($warn, $scope);
            if($what =~ /([^ ]*) +(.*)/) {
                ($warn, $scope)=($1, $2);
            }
            else {
                $warn = $what;
                $scope = 1;
            }
            # print "IGNORE $warn for SCOPE $scope\n";
            if($scope eq "all") {
                $scope=999999;
            }
            if($ignore_set{$warn}) {
                checkwarn("BADCOMMAND",
                          $line, 0, $file, $l,
                          "$warn already disabled from line $ignore_set{$warn}");
            }
            else {
                $ignore{$warn}=$scope;
                $ignore_set{$warn}=$line;
                $ignore_line[$line]=$l;
            }
        }
        elsif($enable eq "enable") {
            enable_warn($what, $line, $file, $l);
        }
        else {
            checkwarn("BADCOMMAND",
                      $line, 0, $file, $l,
                      "Illegal !checksrc! command");
        }
    }
}
# Scan one source file and run every style check on it, line by line.
# All reporting goes through checkwarn(); per-file "!checksrc!" state is
# reset first and leftover (unused) ignores are reported at the end.
# The first group of checks runs on the raw line; C89 comments are then
# stripped and the remaining checks run on the comment-free text.
sub scanfile {
    my ($file) = @_;

    my $line = 1;
    my $prevl;
    my $l;
    # NOTE(review): 2-arg open on the bareword global handle R; a 3-arg
    # open with a lexical handle would be safer against odd file names
    open(R, "<$file") || die "failed to open $file";

    my $incomment=0;
    my $copyright=0;
    checksrc_clear(); # for file based ignores

    while(<R>) {
        $windows_os ? $_ =~ s/\r?\n$// : chomp;
        # NOTE(review): this loop-local $l shadows the outer "my $l" above
        my $l = $_;
        my $ol = $l; # keep the unmodified line for error reporting
        my $column = 0;

        # check for !checksrc! commands
        if($l =~ /\!checksrc\! (.*)/) {
            my $cmd = $1;
            checksrc($cmd, $line, $file, $l)
        }

        # check for a copyright statement
        if(!$copyright && ($l =~ /copyright .* \d\d\d\d/i)) {
            $copyright=1;
        }

        # detect long lines
        if(length($l) > $max_column) {
            checkwarn("LONGLINE", $line, length($l), $file, $l,
                      "Longer than $max_column columns");
        }

        # detect TAB characters
        if($l =~ /^(.*)\t/) {
            checkwarn("TABS",
                      $line, length($1), $file, $l, "Contains TAB character", 1);
        }

        # detect trailing white space
        if($l =~ /^(.*)[ \t]+\z/) {
            checkwarn("TRAILINGSPACE",
                      $line, length($1), $file, $l, "Trailing whitespace");
        }

        # ------------------------------------------------------------
        # Above this marker, the checks were done on lines *including*
        # comments
        # ------------------------------------------------------------

        # strip off C89 comments

  comment:
        if(!$incomment) {
            if($l =~ s/\/\*.*\*\// /g) {
                # full /* comments */ were removed!
            }
            if($l =~ s/\/\*.*//) {
                # start of /* comment was removed
                $incomment = 1;
            }
        }
        else {
            if($l =~ s/.*\*\///) {
                # end of comment */ was removed
                $incomment = 0;
                goto comment;
            }
            else {
                # still within a comment
                $l="";
            }
        }

        # ------------------------------------------------------------
        # Below this marker, the checks were done on lines *without*
        # comments
        # ------------------------------------------------------------

        # crude attempt to detect // comments without too many false
        # positives
        if($l =~ /^([^"\*]*)[^:"]\/\//) {
            checkwarn("CPPCOMMENTS",
                      $line, length($1), $file, $l, "\/\/ comment");
        }

        # check spaces after for/if/while
        if($l =~ /^(.*)(for|if|while) \(/) {
            if($1 =~ / *\#/) {
                # this is a #if, treat it differently
            }
            else {
                checkwarn("SPACEBEFOREPAREN", $line, length($1)+length($2), $file, $l,
                          "$2 with space");
            }
        }

        # check spaces after open parentheses
        if($l =~ /^(.*[a-z])\( /i) {
            checkwarn("SPACEAFTERPAREN",
                      $line, length($1)+1, $file, $l,
                      "space after open parenthesis");
        }

        # check spaces before close parentheses, unless it was a space or a
        # close parenthesis!
        if($l =~ /(.*[^\) ]) \)/) {
            checkwarn("SPACEBEFORECLOSE",
                      $line, length($1)+1, $file, $l,
                      "space before close parenthesis");
        }

        # check spaces before comma!
        if($l =~ /(.*[^ ]) ,/) {
            checkwarn("SPACEBEFORECOMMA",
                      $line, length($1)+1, $file, $l,
                      "space before comma");
        }

        # check for "return(" without space
        if($l =~ /^(.*)return\(/) {
            if($1 =~ / *\#/) {
                # this is a #if, treat it differently
            }
            else {
                checkwarn("RETURNNOSPACE", $line, length($1)+6, $file, $l,
                          "return without space before paren");
            }
        }

        # check for comma without space
        if($l =~ /^(.*),[^ \n]/) {
            my $pref=$1;
            my $ign=0;
            if($pref =~ / *\#/) {
                # this is a #if, treat it differently
                $ign=1;
            }
            elsif($pref =~ /\/\*/) {
                # this is a comment
                $ign=1;
            }
            elsif($pref =~ /[\"\']/) {
                $ign = 1;
                # There is a quote here, figure out whether the comma is
                # within a string or '' or not.
                if($pref =~ /\"/) {
                    # withing a string
                }
                elsif($pref =~ /\'$/) {
                    # a single letter
                }
                else {
                    $ign = 0;
                }
            }
            if(!$ign) {
                checkwarn("COMMANOSPACE", $line, length($pref)+1, $file, $l,
                          "comma without following space");
            }
        }

        # check for "} else"
        if($l =~ /^(.*)\} *else/) {
            checkwarn("BRACEELSE",
                      $line, length($1), $file, $l, "else after closing brace on same line");
        }
        # check for "){"
        if($l =~ /^(.*)\)\{/) {
            checkwarn("PARENBRACE",
                      $line, length($1)+1, $file, $l, "missing space after close paren");
        }

        # check for space before the semicolon last in a line
        if($l =~ /^(.*[^ ].*) ;$/) {
            checkwarn("SPACESEMILCOLON",
                      $line, length($1), $file, $ol, "space before last semicolon");
        }

        # scan for use of banned functions
        if($l =~ /^(.*\W)(sprintf|vsprintf|strcat|strncat|_mbscat|_mbsncat|_tcscat|_tcsncat|wcscat|wcsncat|gets)\s*\(/) {
            checkwarn("BANNEDFUNC",
                      $line, length($1), $file, $ol,
                      "use of $2 is banned");
        }

        # scan for use of non-binary fopen without the macro
        if($l =~ /^(.*\W)fopen\s*\([^,]*, *\"([^"]*)/) {
            my $mode = $2;
            if($mode !~ /b/) {
                checkwarn("FOPENMODE",
                          $line, length($1), $file, $ol,
                          "use of non-binary fopen without FOPEN_* macro: $mode");
            }
        }

        # check for open brace first on line but not first column
        # only alert if previous line ended with a close paren and wasn't a cpp
        # line
        if((($prevl =~ /\)\z/) && ($prevl !~ /^ *#/)) && ($l =~ /^( +)\{/)) {
            checkwarn("BRACEPOS",
                      $line, length($1), $file, $ol, "badly placed open brace");
        }

        # if the previous line starts with if/while/for AND ends with an open
        # brace, check that this line is indented $indent more steps, if not
        # a cpp line
        if($prevl =~ /^( *)(if|while|for)\(.*\{\z/) {
            my $first = length($1);

            # this line has some character besides spaces
            if(($l !~ /^ *#/) && ($l =~ /^( *)[^ ]/)) {
                my $second = length($1);
                my $expect = $first+$indent;
                if($expect != $second) {
                    my $diff = $second - $first;
                    checkwarn("INDENTATION", $line, length($1), $file, $ol,
                              "not indented $indent steps, uses $diff)");
                }
            }
        }

        $line++;
        $prevl = $ol;
    }

    if(!$copyright) {
        checkwarn("COPYRIGHT", 1, 0, $file, "", "Missing copyright statement", 1);
    }
    if($incomment) {
        checkwarn("OPENCOMMENT", 1, 0, $file, "", "Missing closing comment", 1);
    }

    checksrc_endoffile($file);

    close(R);
}
# Final summary: print totals (plus suppressed tallies when anything was
# whitelisted/ignored) and exit non-zero so CI treats findings as failure.
if($errors || $warnings || $verbose) {
    printf "checksrc: %d errors and %d warnings\n", $errors, $warnings;
    if($supressed) {
        printf "checksrc: %d errors and %d warnings suppressed\n",
            $serrors,
            $swarnings;
    }
    exit 5; # return failure
}
###############################################################################
## Copyright 2005-2016 OCSInventory-NG/OCSInventory-Server contributors.
## See the Contributors file for more details about them.
##
## This file is part of OCSInventory-NG/OCSInventory-ocsreports.
##
## OCSInventory-NG/OCSInventory-Server is free software: you can redistribute
## it and/or modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation, either version 2 of the License,
## or (at your option) any later version.
##
## OCSInventory-NG/OCSInventory-Server is distributed in the hope that it
## will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
## of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with OCSInventory-NG/OCSInventory-ocsreports. if not, write to the
## Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
## MA 02110-1301, USA.
################################################################################
package Apache::Ocsinventory::Plugins::Modules;
use SOAP::Lite;
use strict;
use LWP::Simple;
use Archive::Zip;
use File::Copy;
use File::Path;
use DBI;
# SOAP entry point: install a plugin on the communication server.
# Downloads "<plugin>.zip" from the ocsreports web UI, unpacks the .conf
# into the plugins conf dir and Map.pm into the per-plugin perl dir.
# Returns a SOAP string Result: Install_OK on success, or
#   Err_01 conf already present, Err_05 perl dir already present,
#   Err_02 download failed, Err_03/Err_04 conf/perl dir not writable.
# NOTE(review): `our` makes $result/$perm/$pluginNameUc package globals;
# under a persistent interpreter (mod_perl) their values can leak between
# calls -- confirm this is intentional.
sub InstallPlugins {
    my $pluginName = $_[1];

    # Download the created archive from ocsreports which contains the
    # communication server code (.conf and Map.pm).
    # NOTE(review): OCS_DB_HOST is used as the *web* host here; this only
    # works when ocsreports runs on the same host as the database.
    my $url = "http://$ENV{OCS_DB_HOST}/ocsreports/upload/$pluginName.zip";
    my $file = "$ENV{OCS_PLUGINS_CONF_DIR}/$pluginName.zip";
    our $result;
    our $perm = 1;

    #Up case plugin directory in OCS server for match with actual template
    our $pluginNameUc = ucfirst($pluginName);

    if (-e "$ENV{OCS_PLUGINS_CONF_DIR}/$pluginName.conf") {
      $result = "Err_01";
    }
    elsif(-e "$ENV{OCS_PLUGINS_PERL_DIR}/Apache/Ocsinventory/Plugins/$pluginNameUc"){
      $result = "Err_05";
    }
    else
    {
        my $status = getstore($url, $file);
        # If download succeeds: unzip, create dir, move files.
        if (is_success($status))
        {
            # Check for write perm in plugins dir
            if(!(-w "$ENV{OCS_PLUGINS_CONF_DIR}"))
            {
                $result = "Err_03";
                $perm = 0;
            }
            # Check for write perm in perl dir
            if(!(-w "$ENV{OCS_PLUGINS_PERL_DIR}/Apache/Ocsinventory/Plugins"))
            {
                $result = "Err_04";
                $perm = 0;
            }

            if($perm){
                my $pluginsdir = "$ENV{OCS_PLUGINS_CONF_DIR}";
                my $zipname = $file;
                my $destinationDirectory = $pluginsdir;
                my $zip = Archive::Zip->new($zipname);
                my $member;

                # flatten the archive: every file is extracted directly
                # into the conf dir, directory entries are skipped
                foreach my $member ($zip->members)
                {
                    next if $member->isDirectory;
                    (my $extractName = $member->fileName) =~ s{.*/}{};
                    $member->extractToFileNamed("$destinationDirectory/$extractName");
                }

                my $dirtocreate = "$ENV{OCS_PLUGINS_PERL_DIR}/Apache/Ocsinventory/Plugins/$pluginNameUc";
                mkdir $dirtocreate;
                unlink $file;
                move("$pluginsdir/Map.pm","$ENV{OCS_PLUGINS_PERL_DIR}/Apache/Ocsinventory/Plugins/$pluginNameUc/Map.pm");
                $result = "Install_OK";
            }
        }else{
            $result = "Err_02";
        }
    }
    return( SOAP::Data->name( 'Result' => $result )->type( 'string' ) );
}
# Seek for deleted plugins // Delete map.pm and conf entry.
# SOAP entry point: remove an installed plugin.  Deletes the plugin's
# .conf file (when present) and its Apache/Ocsinventory/Plugins/<Name>
# perl directory, then reports success as a SOAP string ("Delete_OK").
sub DeletePlugins {
    my $pluginName = $_[1];

    # Plugin directories on the server are capitalised to match the template
    my $ucName = ucfirst($pluginName);

    my $conf = "$ENV{OCS_PLUGINS_CONF_DIR}/$pluginName.conf";
    unlink $conf if -e $conf;

    rmtree "$ENV{OCS_PLUGINS_PERL_DIR}/Apache/Ocsinventory/Plugins/$ucName";

    return( SOAP::Data->name( 'Result' => 'Delete_OK' )->type( 'string' ) );
}
1; | himynameismax/codeigniter | ocsinventory-server/perl/Apache/Ocsinventory/Plugins/Modules.pm | Perl | mit | 4,256 |
%%% Tests
%% Unit tests for the predicates defined in shacl.pl.
%% Run with: ?- run_tests.
:- include(shacl) .

%% addSet/3 adds an element to a set represented as a list,
%% leaving the set unchanged when the element is already a member.
:- begin_tests(set) .

test(addSet_empty) :- findall(X,addSet(3,[],X),Xs),assertion(Xs == [[3]]) .

%% a value not yet present is appended
test(addSet_nonExisting) :-
 findall(X,addSet(3,[2,4],X),Xs),
 assertion(Xs == [[2,4,3]]) .

%% an existing value (first position) leaves the set unchanged
test(addSet_existing) :-
 findall(X,addSet(2,[2,4],X),Xs),
 assertion(Xs == [[2,4]]) .

%% an existing value (last position) leaves the set unchanged
test(addSet_existing2) :-
 findall(X,addSet(4,[2,4],X),Xs),
 assertion(Xs == [[2,4]]) .

:- end_tests(set) .
%% addType/4 records that a node has a given shape in a typing context;
%% a typing context is a list of type(Node, Shapes) entries.
%% FIX: the last two tests both carried the duplicate label
%% addType_several_same; they are renamed so each test is uniquely
%% identifiable in plunit reports.
:- begin_tests(typings) .

%% empty context: a fresh type(Node,[Shape]) entry is created
test(addType_empty) :-
  findall(X,addType(x,shape,[],X),Xs),
  assertion(Xs == [[type(x,[shape])]]) .

%% same node, same shape: the context is unchanged
test(addType_single_same) :-
  findall(X,addType(a,t1,[type(a,[t1])],X),Xs),
  assertion(Xs == [[type(a,[t1])]]) .

%% same node, new shape: the shape is appended to that node's shape list
test(addType_single_different) :-
  findall(X,addType(x,shape,[type(x,[shape1])],X),Xs),
  assertion(Xs == [[type(x,[shape1,shape])]]) .

%% one other node in the context: existing entry kept, new entry appended
test(addType_one_other_node) :-
  findall(X,addType(x,shape,[type(y,[shape1])],X),Xs),
  assertion(Xs == [[type(y,[shape1]),
                    type(x,[shape])
                   ]]) .

%% two other nodes in the context: existing entries kept, new one appended
test(addType_two_other_nodes) :-
  findall(X,addType(x,shape,[type(y,[shape1]),type(z,[shape2])],X),Xs),
  assertion(Xs == [[type(y,[shape1]),
                    type(z,[shape2]),
                    type(x,[shape])
                   ]]) .

:- end_tests(typings) .
%% matchShape/4 matches a shape expression against a list of triples,
%% producing result(Pending, Matched, Remaining) terms.
:- begin_tests(matchShape).

%% the empty shape consumes nothing; all triples remain
test(matchEmpty) :-
 emptyContext(Ctx),
 findall(Result,
	 matchShape(Ctx,
		    empty,
		    [triple(x,p,y)],
		    Result),
	 Results) ,
 assertion(Results == [result([],[],[triple(x,p,y)])]) .

%% a direct arc p with value set {a} and cardinality 1..unbounded
%% matches (and consumes) the single matching triple
test(matchArc) :-
 emptyContext(Ctx),
 findall(Result,
	 matchShape(Ctx,
		    arc(direct(p,valueSet(a)),1,unbounded),
		    [triple(x,p,a)],
		    Result),
	 Results),
 assertion(Results == [result([],
			      [(x,p,a)],
			      [])
		      ]
	  ).

:- end_tests(matchShape).
| labra/proshacl | testShacl.pl | Perl | mit | 2,042 |
package TEMPLATE::KISSTLC;
use URI::URL;
use Data::Dumper;
use CSS::Tiny;
use HTML::TreeBuilder;
use Text::WikiCreole;
use File::Slurp;
use HTML::TreeBuilder;
use strict;
use lib "/backend/lib";
require ZOOVY;
require LISTING::MSGS;
require TLC;
##
##
## getFields($el,$EXISTSREF,$FIELDSREF)
##
## Depth-first walk of a parsed template tree collecting the editable
## PRODUCT input fields (elements that carry data-object="PRODUCT" plus a
## non-empty data-attrib and data-label).  $EXISTSREF (hashref keyed by
## attrib id) de-duplicates repeated fields; $FIELDSREF (arrayref)
## accumulates the field descriptors in document order and is returned.
sub getFields {
	my ($el,$EXISTSREF,$FIELDSREF) = @_;

	if (not defined $FIELDSREF) { $FIELDSREF = []; }
	if (not defined $EXISTSREF) { $EXISTSREF = {}; }	## tolerate callers which omit the dedupe map

	## attr() returns undef for missing attributes; default to '' so the
	## string comparisons below don't emit "uninitialized value" warnings.
	my $data_object = $el->attr('data-object'); $data_object = '' if (not defined $data_object);
	my $data_attrib = $el->attr('data-attrib'); $data_attrib = '' if (not defined $data_attrib);
	my $data_label = $el->attr('data-label'); $data_label = '' if (not defined $data_label);

	if ( ($data_object eq 'PRODUCT') &&
		  ($data_attrib ne '') &&
		  ($data_label ne '') ) {
		my %flex = ();
		$flex{'title'} = $data_label;
		$flex{'ns'} = 'product';
		$flex{'id'} = $data_attrib;
		$flex{'type'} = $el->attr('data-input-type');
		## bare <img> fields default to an image input, everything else to a textbox
		if ((not defined $flex{'type'}) && ($el->tag() eq 'img')) { $flex{'type'} = 'image'; }
		if (not defined $flex{'type'}) { $flex{'type'} = 'textbox'; }
		if (not defined $EXISTSREF->{$flex{'id'}}) {
			push @{$FIELDSREF}, \%flex;
			$EXISTSREF->{ $flex{'id'} } = \%flex;
			}
		}

	foreach my $elx (@{$el->content_array_ref()}) {
		if (ref($elx) eq '') {
			## text node - nothing to collect
			}
		else {
			&getFields($elx,$EXISTSREF,$FIELDSREF);
			}
		}
	return($FIELDSREF);
}
##
## returns the product input fields for an html template
##
##
## getFlexedit($USERNAME,$PROFILE)
##
## Load an ebay template (IMAGES/_ebay/$PROFILE/index.html) for $USERNAME
## and return the arrayref of editable PRODUCT input fields found in it.
##
sub getFlexedit {
	my ($USERNAME,$PROFILE) = @_;

	my @FIELDS = ();
	my $userpath = &ZOOVY::resolve_userpath($USERNAME);
	my $file = "$userpath/IMAGES/_ebay/$PROFILE/index.html";
	my $html = '';
	if (-f $file) { $html = File::Slurp::read_file($file); }

	my $tree = HTML::TreeBuilder->new(no_space_compacting=>1,ignore_unknown=>0,store_comments=>1); # empty tree
	$tree->parse_content($html);
	my %EXISTS = ();
	my %META = ();
	my $el = $tree->elementify();
	&loadMeta($el,\%META);
	## BUGFIX: pass a *reference* to %EXISTS -- previously the hash was
	## flattened into the argument list, so getFields() never received the
	## dedupe map it expects as its second parameter.
	my $FIELDSREF = &getFields($el,\%EXISTS);
	return($FIELDSREF);
}
## Serialize a tag name, an attribute hashref and optional inner html back
## into an HTML fragment.  Attribute values are HTML-encoded via
## ZOOVY::incode and emitted in sorted-key order; <img> is self-closing,
## every other tag gets an explicit close tag followed by a newline.
sub attribsToTag {
	my ($tag, $attribs, $innerhtml) = @_;

	my @parts = ("<$tag");
	foreach my $name (sort keys %{$attribs}) {
		push @parts, sprintf(' %s="%s"', $name, &ZOOVY::incode($attribs->{$name}));
	}
	my $htmltag = join('', @parts);

	if ($tag eq 'img') {
		return($htmltag.' />');
	}

	$innerhtml = "\n$innerhtml" if ($innerhtml ne '');
	return($htmltag.">$innerhtml</$tag>\n");
}
## Recursively harvest <meta name=... content=...> pairs from an
## HTML::Element tree into the supplied hashref; bare text nodes are
## skipped.  Returns the element it was given.
sub loadMeta {
	my ($el, $meta) = @_;

	if ($el->tag() eq 'meta') {
		$meta->{ $el->attr('name') } = $el->attr('content');
	}

	foreach my $child (@{$el->content_array_ref()}) {
		next if (ref($child) eq '');	## plain text content
		&loadMeta($child, $meta);
	}
	return($el);
}
##
##
##
##
## render_kiss1(\%META,$el)
##
## Recursive interpolation pass for kiss/1.0 templates.  For each element
## it resolves:
##   * data-object="PRODUCT" + data-attrib  -- fetch a value from the
##     product in focus ($meta->{'*PRODUCT'})
##   * data-if / data-then / data-else      -- conditional handling
##     (REMOVE, EMPTY, IGNORE, INNER, PROCEED)
##   * data-format (or the tag name)        -- how the value is injected
##     (img src, a href, wiki/html/text content, currency, ...)
## Mutates $el in place; errors are appended to $meta->{'@MSGS'}.
sub render_kiss1 {
	my ($meta,$el) = @_;

	my $MSGS = $meta->{'@MSGS'};
	if (not defined $MSGS) { $meta->{'@MSGS'} = $MSGS = []; }

	my $ATTR = $el->attr('data-attrib');
	# print STDERR "ATTR:$ATTR\n";

	my $data_object = uc($el->attr('data-object'));
	my $VALUE = undef;
	if ($data_object eq 'PRODUCT') {
		my ($P) = $meta->{'*PRODUCT'};
		if (not defined $P) {
			push @{$MSGS}, sprintf("ERROR|+template $meta->{'$CONTAINERTYPE'}/$meta->{'$CONTAINER'} called data-object=\"PRODUCT\" with no product in focus.",$meta->{'$TEMPLATE'});
			}
		else {
			# print STDERR Dumper($meta);
			$VALUE = $P->fetch( $el->attr('data-attrib') );
			}
		}

	if (my $if = $el->attr('data-if')) {
		# data-if=BLANK|NULL|NOTBLANK|NOTNULL|MATCH:|NOTMATCH:
		# data-then=REMOVE|SET:xyz|FORMAT:xyz
		## NOTE(review): NULL is true when the value IS defined and NOTNULL
		## when it is NOT defined -- this looks inverted, but existing
		## templates may depend on it, so it is left untouched.  Confirm.
		my $is_true = undef;
		if ($if eq 'BLANK') { $is_true = ($VALUE eq '')?1:0; }
		elsif ($if eq 'NOTBLANK') { $is_true = ($VALUE ne '')?1:0; }
		elsif ($if eq 'NULL') { $is_true = (defined $VALUE)?1:0; }
		elsif ($if eq 'NOTNULL') { $is_true = (not defined $VALUE)?1:0; }
		elsif ($if eq 'TRUE') { $is_true = (&ZOOVY::is_true($VALUE))?1:0; }
		elsif ($if eq 'FALSE') { $is_true = (not &ZOOVY::is_true($VALUE))?1:0; }
		## BUGFIX: 'NE' had a handler below but was missing from this regex
		## (so it could never match) and its handler compared with '=='.
		## NE/nnn/ now works and tests numeric inequality.
		elsif ($if =~ /^(GT|LT|EQ|NE)\/([\d\.]+)\/$/) {
			my ($OP,$OPVAL) = ($1,$2);
			## scale to integer thousandths so fractional values compare safely
			$OPVAL = int($OPVAL*1000);
			$VALUE=int($VALUE*1000);
			$is_true = undef;
			if ($OP eq 'GT') { $is_true = ($VALUE > $OPVAL)?1:0; }
			elsif ($OP eq 'LT') { $is_true = ($VALUE < $OPVAL)?1:0; }
			elsif ($OP eq 'EQ') { $is_true = ($VALUE == $OPVAL)?1:0; }
			elsif ($OP eq 'NE') { $is_true = ($VALUE != $OPVAL)?1:0; }
			}
		elsif ($if =~ /^REGEX\/(.*?)\/$/) { $is_true = ($VALUE =~ /$1/)?1:0; }
		elsif ($if =~ /^NOTREGEX\/(.*?)\/$/) { $is_true = ($VALUE !~ /$1/)?1:0; }

		## translate the boolean into the data-then / data-else action
		if (not defined $is_true) {}
		elsif ($is_true) {
			$el->attr('data-else',undef);
			$is_true = $el->attr('data-then'); }
		else {
			$el->attr('data-then',undef);
			$is_true = $el->attr('data-else');
			## if (not defined $is_true) { $is_true = 'IGNORE'; } ## this line is evil, because $IGNORE sets $VALUE to undef
			if (not defined $is_true) {
				$is_true = 'PROCEED';
				}
			}

		if (not defined $is_true) {}	## else behavior will auto-populate data.
		# elsif ($is_true eq 'REMOVE') { $el->delete(); $el = undef; }	## remove the tag and all children
		elsif (($is_true eq 'REMOVE') || ($is_true eq 'EMPTY')) {
			## remove the tag and all children
			$el->delete_content();
			if ($is_true eq 'REMOVE') { $el->replace_with(""); }
			## NOTE DO NOT USE $el->delete() it doesnt work.
			$el = undef;
			$VALUE = undef;
			}
		elsif ($is_true eq 'IGNORE') { $VALUE = undef; }	## not sure "ignore" is the best name for this.
		elsif ($is_true eq 'INNER') { $VALUE = undef; }	## process the inner html
		elsif ($is_true eq 'PROCEED') { }	## continue with interpolation as if nothing has happened.
		# elsif ($is_true eq 'FORMAT') { $el->delete(); $el = undef; }
		}

	# if (defined $ATTR) { print "$ATTR VALUE:$VALUE\n"; }
	# data-attrib="zoovy:prod_image4" data-input-width="0" data-object="product" data-type="imagelink" data-input-bgcolor="ffffff" data-input-border="0" data-input-title="Image 4" href="#"
	if (not defined $el) {
		}
	elsif (not defined $VALUE) {
		}
	else {
		## we have both an element and a value: inject per data-format
		## (falling back to the tag name when no explicit format is given)
		my $format = $el->tag();
		if ($el->attr('data-format') ne '') { $format = $el->attr('data-format'); }
		$format = lc($format);
		if ($format eq 'img') {
			## <a id="link_IMAGE1" data-input-height="0" id="IMAGE1" data-attrib="zoovy:prod_image1" data-input-width="0"
			##	data-object="product" data-type="imagelink" data-input-bgcolor="ffffff" data-input-border="0"
			##	data-input-title="Image 1" href="#">
			my %options = ();
			if ($el->attr('data-img-height')) { $options{'H'} = $el->attr('data-img-height'); }
			if (($format eq 'img') && ($el->attr('height')>0)) { $options{'H'} = $el->attr('height'); }
			if ($el->attr('data-img-width')) { $options{'W'} = $el->attr('data-img-width'); }
			if (($format eq 'img') && ($el->attr('width')>0)) { $options{'W'} = $el->attr('width'); }
			if ($el->attr('data-img-bgcolor')) { $options{'B'} = $el->attr('data-img-bgcolor'); }
			if ($el->attr('data-img-minimal')) { $options{'M'} = $el->attr('data-img-minimal'); }
			$VALUE = sprintf("//%s%s",&ZOOVY::resolve_media_host($meta->{'$USERNAME'}),&ZOOVY::image_path($meta->{'$USERNAME'},$VALUE,%options));
			if ($el->tag() eq 'a') {
				$el->attr('href',$VALUE);
				}
			elsif ($el->tag() eq 'img') {
				$el->attr('src',$VALUE);
				}
			}
		elsif ($format eq 'a') {
			$el->attr('href',$VALUE);
			}
		elsif ($format =~ /^(wiki|html|dwiw|td|div|span|p|q|h1|h2|h3|h4|h5|h6|figcaption|section|article|aside|li)$/) {
			# $el->replace_with_content($VALUE);
			if ($format eq 'dwiw') {
				## dwiw = "detect what i want": html if it contains any tag, else wiki
				$format = ($VALUE =~ /<.*?>/)?'html':'wiki';
				}
			if ($format eq 'wiki') {
				$VALUE = &Text::WikiCreole::creole_parse($VALUE);
				# print "VALUE: $VALUE\n";
				# $VALUE = "\n<!-- WIKI -->\n-$VALUE-\n<!-- WIKI -->\n";
				$format = 'html';
				}
			if ($format eq 'html') {
				## we're inserting html so we build a new tree, gut it, then push that.
				my ($fragment) = HTML::TreeBuilder->new(no_space_compacting=>1,ignore_unknown=>0,store_comments=>1)->parse($VALUE);
				$el->replace_with($fragment->guts())->delete;
				# $el->replace_with($fragment->guts());
				}
			else {
				## just text, so we can embed that.
				$el->delete_content();
				$el->push_content($VALUE);
				}
			}
		elsif ($format eq 'currency') {
			$el->delete_content();
			$el->push_content(sprintf("\$%0.2f",$VALUE));
			}
		elsif ($format eq $el->tag()) {
			## this is fine!
			}
		else {
			$el->delete_content();
			$el->push_content(sprintf("[unhandled data-format %s]",$format));
			}
		}

	## recurse into surviving children (skip bare text nodes)
	if (defined $el) {
		foreach my $elx (@{$el->content_array_ref()}) {
			if (ref($elx) eq '') {
				## just content!
				}
			else {
				&render_kiss1($meta,$elx);
				}
			}
		}
	return();
}
##
##
##
## perl -e 'use Data::Dumper; use lib "/backend/lib"; use TEMPLATE::KISS; $USERNAME=""; $PID=""; $PROFILE="";
## print Dumper(TEMPLATE::KISS::render($USERNAME,"EBAY","$PROFILE","*PRODUCT"=>PRODUCT->new($USERNAME,"$PID")));'
##
##
## render($USERNAME,$TYPE,$CONTAINER,%options)
##
## Load a container template (TYPE 'EBAY' -> IMAGES/_ebay, 'CPG' ->
## IMAGES/_campaigns), detect its engine via the meta 'version' tag and
## render it:
##    tlc/1.0  -> TLC->render_html with a flat %META (JSON-ified objects)
##    kiss/1.0 -> in-place tree interpolation via render_kiss1()
## Options: '@MSGS'     arrayref collecting ERROR|+ messages
##          '*PRODUCT'  pre-loaded PRODUCT object (else loaded from SKU meta)
##          '*CUSTOMER' pre-loaded CUSTOMER object (else loaded from CID/PRT)
## Returns the rendered html ('' when the template is missing/empty).
sub render {
	my ($USERNAME,$TYPE,$CONTAINER,%options) = @_;
	my $MSGS = $options{'@MSGS'};

	my $userpath = &ZOOVY::resolve_userpath($USERNAME);
	my $filepath = undef;
	my $filename = 'index.html';	## long term we might load a different one based on type of device.
	if ($TYPE eq 'EBAY') { $filepath = "$userpath/IMAGES/_ebay/$CONTAINER/$filename"; }
	if ($TYPE eq 'CPG') { $filepath = "$userpath/IMAGES/_campaigns/$CONTAINER/$filename"; }

	my $html = '';
	## /remote/bespin/users/brian/IMAGES/_ebay/ASDF/index.html
	if (-f $filepath) {
		$html = File::Slurp::read_file($filepath);
		if ($html eq '') {
			if (defined $MSGS) { push @{$MSGS}, "ERROR|+$filename in container $CONTAINER found but empty."; }
			}
		}
	else {
		if (defined $MSGS) { push @{$MSGS}, "ERROR|+$filename file not found in container $TYPE/$CONTAINER"; }
		}

	my $tree = HTML::TreeBuilder->new(no_space_compacting=>1,ignore_unknown=>0,store_comments=>1); # empty tree
	$tree->parse_content($html);
	my %META = ();
	$META{'@MSGS'} = $MSGS;
	my $el = $tree->elementify();
	&loadMeta($el,\%META);
	foreach my $k (keys %options) { $META{$k} = $options{$k}; }	## necessary for ebay refresh *PRODUCT ref

	if ($META{'version'} eq '') {
		if (defined $MSGS) { push @{$MSGS}, "ERROR|+template $TYPE/$CONTAINER cannot render index.html missing meta 'version' tag (try kiss/1.0)"; }
		}
	elsif ($META{'version'} eq 'tlc/1.0') {
		my ($tlc) = TLC->new('username'=>$USERNAME);
		$META{'USERNAME'} = $USERNAME;
		$META{'CONTAINERTYPE'} = $TYPE;
		$META{'CONTAINER'} = $CONTAINER;
		if ($META{'SKU'}) {
			## BUGFIX: resolve the product object *before* calling TO_JSON();
			## previously TO_JSON() was invoked on $options{'*PRODUCT'} even
			## when it was undef (a crash), and the fallback came too late.
			my $P = $options{'*PRODUCT'};
			if (not defined $P) { $P = PRODUCT->new($USERNAME,$META{'SKU'}); }
			$META{'%PRODUCT'} = $P->TO_JSON();
			}
		if ($META{'CID'}) {
			## same ordering fix for the customer object.
			my $C = $options{'*CUSTOMER'};
			if (not defined $C) { $C = CUSTOMER->new($USERNAME,'PRT'=>$META{'PRT'},'CID'=>$META{'CID'}); }
			$META{'%CUSTOMER'} = $C->TO_JSON();
			}
		($html) = $tlc->render_html($html, \%META);
		}
	elsif ($META{'version'} eq 'kiss/1.0') {
		## kiss templates receive scalar meta under '$'-prefixed keys and
		## live objects under '*'-prefixed keys.
		$META{'$USERNAME'} = $USERNAME;
		$META{'$CONTAINERTYPE'} = $TYPE;
		$META{'$CONTAINER'} = $CONTAINER;
		if ($META{'SKU'}) {
			$META{'$SKU'} = $META{'SKU'};
			$META{'*PRODUCT'} = $options{'*PRODUCT'};
			if (not defined $options{'*PRODUCT'}) {
				$META{'*PRODUCT'} = PRODUCT->new($USERNAME,$META{'SKU'});
				}
			}
		if ($META{'CID'}) {
			$META{'$CID'} = $META{'CID'};
			$META{'$PRT'} = $META{'PRT'};
			$META{'*CUSTOMER'} = $options{'*CUSTOMER'};
			if (not defined $options{'*CUSTOMER'}) {
				$META{'*CUSTOMER'} = CUSTOMER->new($USERNAME,'PRT'=>$META{'PRT'},'CID'=>$META{'CID'});
				}
			}
		&render_kiss1(\%META,$el);
		$html = $el->as_HTML();
		}
	else {
		# warn("Unhandled api version \"$META{'version'}\"\n");
		if (defined $MSGS) { push @{$MSGS}, "ERROR|+$TYPE/$CONTAINER/index.html contains invalid meta 'version' try kiss/1.0"; }
		}
	return($html);
}
####################################################################################################
##
## ebay doesn't allow base urls, or meta tags so this rewrites the document.
##
## my $html = File::Slurp::read_file('index.html');
## print ebayify_html($html);
##
## Rewrite an html document so eBay will accept it: absolutize every URL
## against the document's <base href> (via ebay_parseElement), then
## comment out the <meta> and <base> tags themselves, which eBay forbids.
sub ebayify_html {
	my ($HTML) = @_;

	my $tree = HTML::TreeBuilder->new(no_space_compacting=>1,ignore_unknown=>0,store_comments=>1); # empty tree
	$tree->parse_content("$HTML");
	my %META = ();
	my $root = $tree->elementify();
	&ebay_parseElement($root,\%META);

	my $out = $root->as_HTML();
	$out =~ s/\<([\/]?[Mm][Ee][Tt][Aa].*?)\>/<!-- $1 -->/gs;	## ebay doesn't allow metas
	$out =~ s/\<([\/]?[Bb][Aa][Ss][Ee].*?)\>/<!-- $1 -->/gs;	## ebay doesn't allow base urls
	return($out);
}
##
## ebay_parseElement($el,\%METAREF)
##
## Recursive worker for ebayify_html(): remembers the document's
## <base href> and, once one is known, rewrites every relative URL --
## <a href>, <img src>, url(...) inside <style> sheets and inline
## style="" attributes -- into an absolute URL (eBay strips <base> tags).
sub ebay_parseElement {
	my ($el, $METAREF) = @_;

	## remember the base url for all subsequent (document-order) elements
	if ($el->tag() eq 'base') {
		$METAREF->{'base'} = $el->attr('href');
		}

	## until a base url is seen there is nothing to absolutize
	if (not $METAREF->{'base'}) {
		}
	elsif ($el->tag() eq 'a') {
		## <a href="">
		$el->attr('href',URI::URL->new($el->attr('href'),$METAREF->{'base'})->abs());
		}
	elsif ($el->tag() eq 'img') {
		## <img src="">
		$el->attr('src',URI::URL->new($el->attr('src'),$METAREF->{'base'})->abs());
		}
	elsif ($el->tag() eq 'style') {
		## embedded stylesheet: strip the <style> wrapper (and any html
		## comment guard), absolutize every url(...) value, then swap in a
		## freshly built <style> node.
		my $sheet = $el->as_HTML();
		$sheet =~ s/\<[Ss][Tt][Yy][Ll][Ee].*?\>(.*)\<\/[Ss][Tt][Yy][Ll][Ee]\>/$1/s;
		$sheet =~ s/\<\!\-\-(.*)\-\-\>/$1/s;
		my $CSS = CSS::Tiny->new()->read_string($sheet);
		foreach my $property (keys %{$CSS}) {
			foreach my $k (keys %{$CSS->{$property}}) {
				if ($CSS->{$property}->{$k} =~ /^[Uu][Rr][Ll]\((.*?)\)/) {
					my $url = $1;
					my $absurl = URI::URL->new($url,$METAREF->{'base'})->abs();
					$CSS->{$property}->{$k} =~ s/^[Uu][Rr][Ll]\(.*?\)/url($absurl)/;
					}
				}
			}
		## NOTE(review): this ->html() result is never used -- the node below
		## is built from write_string().  Looks like dead code; confirm.
		$sheet = $CSS->html();
		my $sheetnode = HTML::Element->new('style','type'=>'text/css');
		$sheetnode->push_content("<!-- \n".$CSS->write_string()."\n -->");
		$el->replace_with($sheetnode);
		}

	if (not $METAREF->{'base'}) {
		}
	elsif ($el->attr('style') ne '') {
		## parse the style tag
		# print $el->attr('style')."\n";
		## inline style="" attribute: wrap it in a dummy selector so
		## CSS::Tiny can parse it, absolutize url(...), then unwrap again.
		my $sheet = sprintf("style { %s }",$el->attr('style'));
		my $CSS = CSS::Tiny->new()->read_string($sheet);
		foreach my $k (keys %{$CSS->{'style'}}) {
			if ($CSS->{'style'}->{$k} =~ /^[Uu][Rr][Ll]\((.*?)\)/) {
				my $url = $1;
				my $absurl = URI::URL->new($url,$METAREF->{'base'})->abs();
				$CSS->{'style'}->{$k} =~ s/^[Uu][Rr][Ll]\(.*?\)/url($absurl)/;
				}
			}
		$sheet = $CSS->write_string();
		## collapse the serialized sheet back to a bare "prop: val; ..." string
		$sheet =~ s/\n/ /gs;
		$sheet =~ s/\t/ /gs;
		$sheet =~ s/[\s]+/ /gs;
		$sheet =~ s/^.*?\{(.*)\}/$1/gs;
		$sheet =~ s/^[\s]+//gs;
		$sheet =~ s/[\s]+$//gs;
		$el->attr('style',$sheet);
		}

	## recurse into child elements (skip bare text nodes)
	foreach my $elx (@{$el->content_array_ref()}) {
		if (ref($elx) eq '') {
			}
		else {
			&ebay_parseElement($elx,$METAREF);
			}
		}
}
1;
__DATA__
perl -e 'use lib "/backend/lib";
use TEMPLATE::KISS;
use Data::Dumper; print Dumper(TEMPLATE::KISS::render("brian","ASDF","TEST"));
';
| CommerceRack/backend | lib/TEMPLATE/KISSTLC.pm | Perl | mit | 14,893 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from debian/tzdata/southamerica. Olson data version 2008c
#
# Do not edit this file directly.
#
package DateTime::TimeZone::America::Lima;
use strict;
use Class::Singleton;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::America::Lima::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY,
59611180092,
DateTime::TimeZone::NEG_INFINITY,
59611161600,
-18492,
0,
'LMT'
],
[
59611180092,
60197144916,
59611161576,
60197126400,
-18516,
0,
'LMT'
],
[
60197144916,
61125858000,
60197126916,
61125840000,
-18000,
0,
'PET'
],
[
61125858000,
61133630400,
61125843600,
61133616000,
-14400,
1,
'PEST'
],
[
61133630400,
61148926800,
61133612400,
61148908800,
-18000,
0,
'PET'
],
[
61148926800,
61164648000,
61148912400,
61164633600,
-14400,
1,
'PEST'
],
[
61164648000,
61180376400,
61164630000,
61180358400,
-18000,
0,
'PET'
],
[
61180376400,
61196097600,
61180362000,
61196083200,
-14400,
1,
'PEST'
],
[
61196097600,
62640622800,
61196079600,
62640604800,
-18000,
0,
'PET'
],
[
62640622800,
62648395200,
62640608400,
62648380800,
-14400,
1,
'PEST'
],
[
62648395200,
62672158800,
62648377200,
62672140800,
-18000,
0,
'PET'
],
[
62672158800,
62679931200,
62672144400,
62679916800,
-14400,
1,
'PEST'
],
[
62679931200,
62766853200,
62679913200,
62766835200,
-18000,
0,
'PET'
],
[
62766853200,
62774625600,
62766838800,
62774611200,
-14400,
1,
'PEST'
],
[
62774625600,
62893083600,
62774607600,
62893065600,
-18000,
0,
'PET'
],
[
62893083600,
62900856000,
62893069200,
62900841600,
-14400,
1,
'PEST'
],
[
62900856000,
DateTime::TimeZone::INFINITY,
62900838000,
DateTime::TimeZone::INFINITY,
-18000,
0,
'PET'
],
];
# tz (Olson) database release these spans were generated from.
sub olson_version { '2008c' }

# Number of DST transitions present in the generated span table.
sub has_dst_changes { 7 }

# Last year covered by the pre-generated spans.
sub _max_year { 2018 }

# Class::Singleton hook: build the singleton zone from the generated spans.
sub _new_instance
{
    return shift->_init( @_, spans => $spans );
}
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/TimeZone/America/Lima.pm | Perl | mit | 2,183 |
package Bio::KBase::KmerEval::Service;
use Data::Dumper;
use Moose;
extends 'RPC::Any::Server::JSONRPC::PSGI';
has 'instance_dispatch' => (is => 'ro', isa => 'HashRef');
has 'user_auth' => (is => 'ro', isa => 'UserAuth');
has 'valid_methods' => (is => 'ro', isa => 'HashRef', lazy => 1,
builder => '_build_valid_methods');
our $CallContext;
our %return_counts = (
'call_dna_with_kmers' => 1,
'call_prot_with_kmers' => 1,
'check_contig_set' => 1,
'version' => 1,
);
## Lazy builder for the valid_methods attribute: a hash-set of every RPC
## method name this service will dispatch.
sub _build_valid_methods
{
    my($self) = @_;
    my %methods = map { $_ => 1 } qw(
        call_dna_with_kmers
        call_prot_with_kmers
        check_contig_set
        version
    );
    return \%methods;
}
## Dispatch a decoded JSON-RPC request to the resolved module/method.
## Builds a per-call context carrying only the client IP (this service is
## unauthenticated), temporarily rewires the module's @ISA via the
## dispatch package list, invokes the method, and normalizes the result
## into the arrayref shape the JSONRPC layer expects.
sub call_method {
    my ($self, $data, $method_info) = @_;
    my ($module, $method) = @$method_info{qw(module method)};

    my $ctx = Bio::KBase::KmerEval::ServiceContext->new(client_ip => $self->_plack_req->address);

    my $args = $data->{arguments};

    # Service KmerEval does not require authentication.

    my $new_isa = $self->get_package_isa($module);
    no strict 'refs';
    local @{"${module}::ISA"} = @$new_isa;
    # Expose the call context to the implementation for the duration of the call.
    local $CallContext = $ctx;
    my @result;
    {
	my $err;
	eval {
	    @result = $module->$method(@{ $data->{arguments} });
	};
	if ($@)
	{
	    #
	    # Reraise the string version of the exception because
	    # the RPC lib can't handle exception objects (yet).
	    #
	    # NOTE(review): this inner "my $err" shadows the (unused) outer
	    # declaration above -- harmless, but one of the two is redundant.
	    my $err = $@;
	    my $str = "$err";
	    $str =~ s/Bio::KBase::CDMI::Service::call_method.*//s;
	    $str =~ s/^/>\t/mg;
	    die "The JSONRPC server invocation of the method \"$method\" failed with the following error:\n" . $str;
	}
    }
    my $result;
    if ($return_counts{$method} == 1)
    {
        # Single-return methods are wrapped for the RPC layer.
        # NOTE(review): [[$result[0]]] nests twice; generated KBase services
        # usually emit a single wrap here -- confirm against the generator.
        $result = [[$result[0]]];
    }
    else
    {
        $result = \@result;
    }
    return $result;
}
## Resolve a fully-qualified RPC method name ("Package.method") into a
## { module => ..., method => ... } pair.  Validates the name shape, checks
## the method whitelist (valid_methods), prefers a pre-registered instance
## from instance_dispatch, otherwise lazily loads the implementation class.
## Raises a NoSuchMethod exception on any failure.
sub get_method
{
    my ($self, $data) = @_;

    my $full_name = $data->{method};

    # Split "Package.Name.method" at the *last* dot.
    $full_name =~ /^(\S+)\.([^\.]+)$/;
    my ($package, $method) = ($1, $2);

    if (!$package || !$method) {
	$self->exception('NoSuchMethod',
			 "'$full_name' is not a valid method. It must"
			 . " contain a package name, followed by a period,"
			 . " followed by a method name.");
    }

    if (!$self->valid_methods->{$method})
    {
	$self->exception('NoSuchMethod',
			 "'$method' is not a valid method in service KmerEval.");
    }

    my $inst = $self->instance_dispatch->{$package};
    my $module;
    if ($inst)
    {
	# A pre-built instance registered for this package takes precedence.
	$module = $inst;
    }
    else
    {
	$module = $self->get_module($package);
	if (!$module) {
	    $self->exception('NoSuchMethod',
			     "There is no method package named '$package'.");
	}

	Class::MOP::load_class($module);
    }

    if (!$module->can($method)) {
	$self->exception('NoSuchMethod',
			 "There is no method named '$method' in the"
			 . " '$package' package.");
    }

    return { module => $module, method => $method };
}
package Bio::KBase::KmerEval::ServiceContext;
use strict;
=head1 NAME
Bio::KBase::KmerEval::ServiceContext
head1 DESCRIPTION
A KB RPC context contains information about the invoker of this
service. If it is an authenticated service the authenticated user
record is available via $context->user. The client IP address
is available via $context->client_ip.
=cut
use base 'Class::Accessor';
__PACKAGE__->mk_accessors(qw(user_id client_ip authenticated token));
## Construct a service-call context; any supplied key/value pairs become
## instance attributes (user_id, client_ip, authenticated, token, ...).
sub new
{
    my ($class, %attrs) = @_;
    return bless { %attrs }, $class;
}
1;
| kbase/kb_seed | lib/Bio/KBase/KmerEval/Service.pm | Perl | mit | 3,542 |
## Auto-generated SOAP::WSDL element class for the
## getUserInterestCriterionResponse message (AdWords API v201809,
## ConstantDataService).  The single 'rval' property carries the returned
## CriterionUserInterest payload.
package Google::Ads::AdWords::v201809::ConstantDataService::getUserInterestCriterionResponse;
use strict;
use warnings;

{ # BLOCK to scope variables

# XML namespace and element metadata used by SOAP::WSDL serialization.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' }

__PACKAGE__->__set_name('getUserInterestCriterionResponse');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
    SOAP::WSDL::XSD::Typelib::Element
    Google::Ads::SOAP::Typelib::ComplexType
);

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

# This element carries no XML attributes.
sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out attribute storage; _factory wires the accessor, the backing
# hash, the property's class type and its XML name for 'rval'.
my %rval_of :ATTR(:get<rval>);

__PACKAGE__->_factory(
    [ qw(        rval

    ) ],
    {
        'rval' => \%rval_of,
    },
    {
        'rval' => 'Google::Ads::AdWords::v201809::CriterionUserInterest',
    },
    {

        'rval' => 'rval',
    }
);

} # end BLOCK




} # end of BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::ConstantDataService::getUserInterestCriterionResponse
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
getUserInterestCriterionResponse from the namespace https://adwords.google.com/api/adwords/cm/v201809.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * rval
$element->set_rval($data);
$element->get_rval();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201809::ConstantDataService::getUserInterestCriterionResponse->new($data);
Constructor. The following data structure may be passed to new():
{
rval => $a_reference_to, # see Google::Ads::AdWords::v201809::CriterionUserInterest
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/ConstantDataService/getUserInterestCriterionResponse.pm | Perl | apache-2.0 | 1,893 |
=head1 LICENSE
See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 NAME
Bio::EnsEMBL::Compara::PipeConfig::DumpMultiAlign_conf
=head1 SYNOPSIS
init_pipeline.pl Bio::EnsEMBL::Compara::PipeConfig::DumpMultiAlign_conf -host mysql-ens-compara-prod-X -port XXXX \
-division $COMPARA_DIV
=head1 DESCRIPTION
Pipeline to dump all the multiple sequence alignments from the given
compara database. To dump only certain method link (ml) types, set them
in --method_link_types with the following regex: ml(:ml)*.
E.g.: --method_link_types EPO:PECAN
The dumps are located in the pipeline's directory. This can be changed by
setting --export_dir.
The pipeline generates both EMF and MAF files ("emf+maf"). This can be
changed by setting --format to "emf", "maf", or anything BioPerl can
provide.
=cut
package Bio::EnsEMBL::Compara::PipeConfig::DumpMultiAlign_conf;
use strict;
use warnings;
use Bio::EnsEMBL::Compara::PipeConfig::Parts::DumpMultiAlign;
use base ('Bio::EnsEMBL::Compara::PipeConfig::ComparaGeneric_conf');
## Pipeline-level defaults; every key here can be overridden on the
## init_pipeline.pl command line as -key value.
sub default_options {
    my ($self) = @_;
    return {
        %{$self->SUPER::default_options},   # inherit the generic ones

        # Compara reference to dump. Can be the "species" name (if loading the Registry via registry)
        # or the url of the database itself
        'compara_db' => 'compara_curr',

        'export_dir'    => $self->o('pipeline_dir'),

        # List of species used to split EPO alignments. Required if split_by_chromosome is set
        'epo_reference_species' => [],

        # Maximum number of blocks per file
        'split_size' => 200,

        # See DumpMultiAlign.pl
        #  0 for unmasked sequence (default)
        #  1 for soft-masked sequence
        #  2 for hard-masked sequence
        'masked_seq' => 1,

        # Usually "maf", "emf", or "emf+maf". BioPerl alignment formats are
        # accepted in principle, but a healthcheck would have to be implemented
        'format' => 'emf+maf',

        # If set to 1, will make a compressed tar archive of a directory of
        # uncompressed files. Otherwise, there will be a directory of
        # compressed files
        'make_tar_archive'      => 0,

        # If set to 1, the files are split by chromosome name and
        # coordinate system. Otherwise, createOtherJobs randomly bins the
        # alignment blocks into chunks
        'split_by_chromosome'   => 1,

        # Method link types of mlss_id to retrieve
        'method_link_types' => 'BLASTZ_NET:TRANSLATED_BLAT:TRANSLATED_BLAT_NET:LASTZ_NET:PECAN:EPO:EPO_EXTENDED',

        # Specific mlss_id to dump. Leave undef as the pipeline can detect
        # it automatically
        'mlss_id'   => undef,

        'dump_aln_capacity' => 100,
    };
}
sub no_compara_schema {} # Tell the base class not to create the Compara tables in the database
# Ensures species output parameter gets propagated implicitly
sub hive_meta_table {
    my ($self) = @_;
    # Extend the inherited meta table, turning on the param stack so that
    # output parameters (e.g. 'species') flow to downstream jobs implicitly.
    my %meta = %{ $self->SUPER::hive_meta_table };
    $meta{'hive_use_param_stack'} = 1;
    return \%meta;
}
## Parameters visible to every analysis in the pipeline, including the
## templated output path/filename patterns used by the dump jobs.
sub pipeline_wide_parameters {
    my ($self) = @_;
    return {
        %{$self->SUPER::pipeline_wide_parameters},

        'dump_aln_program'      => $self->o('dump_aln_program'),
        'emf2maf_program'       => $self->o('emf2maf_program'),

        'make_tar_archive'      => $self->o('make_tar_archive'),
        'split_by_chromosome'   => $self->o('split_by_chromosome'),
        'format'        => $self->o('format'),
        'split_size'    => $self->o('split_size'),
        'registry'      => $self->o('reg_conf'),
        'compara_db'    => $self->o('compara_db'),
        'export_dir'    => $self->o('export_dir'),
        'masked_seq'    => $self->o('masked_seq'),
        'genome_dumps_dir'  => $self->o('genome_dumps_dir'),

        # #expressions# below are resolved per-job by eHive at runtime.
        output_dir      => '#export_dir#/#format#/ensembl-compara/#aln_type#/#base_filename#',
        output_file_gen => '#output_dir#/#base_filename#.#region_name#.#format#',
        output_file     => '#output_dir#/#base_filename#.#region_name##filename_suffix#.#format#',
    };
}
## Extra DB setup at pipeline creation: two pipeline-local tables --
## 'other_gab' tracks alignment blocks binned by createOtherJobs, and
## 'healthcheck' records expected vs dumped block counts per output file.
sub pipeline_create_commands {
    my $self = shift;
    return [
        @{ $self->SUPER::pipeline_create_commands },
        $self->db_cmd( 'CREATE TABLE other_gab (genomic_align_block_id bigint NOT NULL, PRIMARY KEY (genomic_align_block_id) )' ),
        $self->db_cmd( 'CREATE TABLE healthcheck (filename VARCHAR(400) NOT NULL, expected INT NOT NULL, dumped INT NOT NULL)' ),
    ];
}
sub pipeline_analyses {
    my ($self) = @_;

    # Reuse the shared DumpMultiAlign analysis definitions, then seed the
    # entry-point analysis with the target database and (optional) mlss_id.
    my $analyses = Bio::EnsEMBL::Compara::PipeConfig::Parts::DumpMultiAlign::pipeline_analyses_dump_multi_align($self);
    $analyses->[0]->{'-input_ids'} = [ {
        'compara_db' => $self->o('compara_db'),
        'mlss_id'    => $self->o('mlss_id'),
    } ];

    return $analyses;
}
1;
| Ensembl/ensembl-compara | modules/Bio/EnsEMBL/Compara/PipeConfig/DumpMultiAlign_conf.pm | Perl | apache-2.0 | 5,478 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::dell::idrac::snmp::mode::components::vdisk;
use strict;
use warnings;
use hardware::server::dell::idrac::snmp::mode::components::resources qw(%map_vdisk_state);
my $mapping = {
virtualDiskState => { oid => '.1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.4', map => \%map_vdisk_state },
virtualDiskFQDD => { oid => '.1.3.6.1.4.1.674.10892.5.5.1.20.140.1.1.35' }
};
## Queue both vdisk column OIDs (state + FQDD) onto the SNMP request list.
sub load {
    my ($self) = @_;

    push @{$self->{request}},
        { oid => $mapping->{virtualDiskState}->{oid} },
        { oid => $mapping->{virtualDiskFQDD}->{oid} };
}
## Walk the virtualDiskState/virtualDiskFQDD SNMP tables, log each virtual
## disk's state, and raise an alert whenever get_severity() maps the state
## to a non-OK status.  Honors the 'vdisk' section/instance filters.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking virtual disks");
    $self->{components}->{vdisk} = { name => 'virtual disks', total => 0, skip => 0 };
    return if ($self->check_filter(section => 'vdisk'));

    # Merge the two column tables into one result hash keyed by full OID.
    my $snmp_result = { %{$self->{results}->{ $mapping->{virtualDiskState}->{oid} }}, %{$self->{results}->{ $mapping->{virtualDiskFQDD}->{oid} }} };
    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %$snmp_result)) {
        # Iterate only the state entries; map_instance() pulls the matching
        # FQDD value for the same instance suffix.
        next if ($oid !~ /^$mapping->{virtualDiskState}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);

        next if ($self->check_filter(section => 'vdisk', instance => $instance));
        $self->{components}->{vdisk}->{total}++;

        $self->{output}->output_add(
            long_msg => sprintf(
                "virtual disk '%s' state is '%s' [instance = %s]",
                $result->{virtualDiskFQDD}, $result->{virtualDiskState}, $instance, 
            )
        );
        # Map the textual state to a plugin severity; only emit a short
        # (summary) message when the state is not OK.
        my $exit = $self->get_severity(section => 'vdisk.state', value => $result->{virtualDiskState});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(
                severity => $exit,
                short_msg => sprintf(
                    "Virtual disk '%s' state is '%s'", $result->{virtualDiskFQDD}, $result->{virtualDiskState}
                )
            );
        }
    }
}
1;
| Tpo76/centreon-plugins | hardware/server/dell/idrac/snmp/mode/components/vdisk.pm | Perl | apache-2.0 | 2,862 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.