code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#!/usr/local/ActivePerl-5.8/bin/perl
# Gather Host Info
#
# Collects static host facts (CPU, memory, OS/kernel version, base RPM
# packages) from the Linux /proc filesystem and rpm, then appends one
# space-delimited "info" record to $perfhome/tmp/perf.out for the perf
# server to consume.
# NOTE(review): runs without "use strict"/"use warnings"; the scalars
# declared below are file-scoped lexicals shared by the subs.
use CGI::Carp qw(carpout);   # carpout(): redirect warn/die into our logfile
use POSIX qw(uname);         # uname() supplies the OS name ($os below)
use Fcntl qw(:flock);        # LOCK_EX for the spool-file append
# Slurp in path to Perfhome
#my $perfhome=&PATH;
#$perfhome =~ s/\/bin//;
# NOTE(review): $perfhome is hard-coded here, overriding the dynamic
# &PATH discovery commented out above -- presumably a dev setting.
$perfhome="/perfstat/dev/1.52/server";
# Slurp in Configuration
my %conf = ();
&GetConfiguration(\%conf);
# Set Environment Variables from %conf
# (e.g. RPM_CMD, which PatchINFO reads from %ENV later)
foreach $key (keys %conf) {
$ENV{$key}="$conf{$key}";
}
# Log all alerts and warnings to the below logfile
my $logfile = "$perfhome/var/logs/perfctl.log";
open(LOGFILE, ">> $logfile")
or die "ERROR: Unable to append to $logfile: $!\n";
carpout(*LOGFILE);
# Setup Variables
my $service="info";
my $os = (uname)[0];
# Location of OS info (Linux-specific /proc paths)
my $cpuInfo="/proc/cpuinfo";
my $memInfo="/proc/meminfo";
my $osInfo="/proc/version";
# Define variables
# NOTE(review): "my $x = ()" simply assigns undef to each scalar; the
# collector subs called below fill these in.
my $cpuNum=();
my $cpuModel=();
my $cpuSpeed=();
my $cpuHash=();
my $memTotal=();
my $swapTotal=();
my $osVersion=();
my $kernelVersion=();
my $patches=();
&CPUINFO;
&MEMINFO;
&OSINFO;
&PatchINFO;
# Gather cpu info
#-----------------------------------------------------------------------
# Parse $cpuInfo (/proc/cpuinfo by default) and populate the file-level
# globals:
#   $cpuNum   - number of "processor" stanzas (logical CPU count)
#   $cpuModel - first two tokens of the model name, whitespace removed
#               (e.g. "IntelXeon")
#   $cpuSpeed - last token of the model line multiplied by 1000
#               (presumably GHz -> MHz -- TODO confirm with server side)
# Dies if the cpuinfo file cannot be opened.
#-----------------------------------------------------------------------
sub CPUINFO {
    # Three-arg open with a lexical handle; the old two-arg/bareword
    # form is unsafe if $cpuInfo ever carries mode characters.
    open(my $cpu_fh, '<', $cpuInfo)
        or die "ERROR: couldn't open $cpuInfo: $!\n";
    my @data = <$cpu_fh>;
    close($cpu_fh);
    $cpuNum = 0;
    foreach my $line (@data) {
        # One "processor" line per logical CPU.
        if ($line =~ m/^processor/) {
            $cpuNum++;
        }
        if ($line =~ m/^model\s+name/) {
            # e.g. "model name : Intel(R) Xeon(TM) CPU 2.40GHz"
            # Guard the match: previously an unexpected format reused
            # stale $1..$3 captures from an earlier match.
            if ($line =~ m/\S+\s+\S+\s+:\s+(\S+)\s+(\S+).*\s+(\S+)/) {
                $cpuModel = "$1$2";
                $cpuSpeed = "$3";
                $cpuSpeed = $cpuSpeed * 1000;
                # Defensive: strip any embedded whitespace from the model.
                $cpuModel =~ s/\s+//g if ($cpuModel =~ m/\s+/);
            }
        }
    }
}
#-----------------------------------------------------------------------
# Parse $memInfo (/proc/meminfo by default) and populate the file-level
# globals:
#   $memTotal  - MemTotal converted from kB to MB (integer division)
#   $swapTotal - SwapTotal left in kB
#                (NOTE: asymmetric with $memTotal; preserved as-is
#                because the server-side record format expects it)
# Dies if the meminfo file cannot be opened.
#-----------------------------------------------------------------------
sub MEMINFO {
    # Three-arg open with a lexical handle (was bareword + two-arg).
    open(my $mem_fh, '<', $memInfo)
        or die "ERROR: couldn't open $memInfo: $!\n";
    my @data = <$mem_fh>;
    close($mem_fh);
    foreach my $line (@data) {
        if ($line =~ m/^MemTotal/) {
            # "MemTotal:  2048 kB" -> capture the kB figure.  Guarded so
            # a malformed line cannot reuse a stale $1 capture.
            if ($line =~ m/\S+\s+(\d+)/) {
                $memTotal = $1;
                $memTotal = int($memTotal / 1024);    # kB -> MB
            }
        }
        if ($line =~ m/^SwapTotal/) {
            if ($line =~ m/\S+\s+(\d+)/) {
                $swapTotal = $1;
            }
        }
    }
}
# Gather OS Info
#-----------------------------------------------------------------------
# Parse the single line of $osInfo (/proc/version by default) and set:
#   $kernelVersion - third whitespace token, e.g. "2.6.18-8.el5"
#   $osVersion     - contents of the innermost trailing "(...))" group
#                    with spaces replaced by underscores (the record
#                    format is space-delimited)
# Dies if the version file cannot be opened.
#-----------------------------------------------------------------------
sub OSINFO {
    # Three-arg open with a lexical handle (was bareword + two-arg).
    open(my $os_fh, '<', $osInfo)
        or die "ERROR: couldn't open $osInfo: $!\n";
    my $data = <$os_fh>;
    close($os_fh);
    # Guard the match: previously a non-matching line silently reused
    # stale $1/$2 captures.
    if (defined($data) && $data =~ m/\S+\s+\S+\s+(\S+).*\s+\((.*)\)\)/) {
        $kernelVersion = "$1";
        $osVersion     = "$2";
        $osVersion =~ s/ /_/g;
    }
}
# Gather Patch Info
#-----------------------------------------------------------------------
# Run "$ENV{RPM_CMD} -qg 'System Environment/Base'" and join its output
# lines into the comma-separated file-level global $patches.
# Uses the list-form pipe open, which execs $RPM_CMD directly with its
# arguments (no shell), so a space- or metacharacter-containing RPM_CMD
# value cannot inject extra shell commands.
# NOTE(review): assumes RPM_CMD is a bare executable path with no
# embedded arguments -- confirm against perf-conf contents.
#-----------------------------------------------------------------------
sub PatchINFO {
    open(my $patch_fh, '-|', $ENV{'RPM_CMD'}, '-qg', 'System Environment/Base')
        or die "ERROR: Couldn't open file handle for $ENV{'RPM_CMD'} -qg \"System Environment/Base\": $!\n";
    my @patchesArray = <$patch_fh>;
    chomp @patchesArray;
    close($patch_fh);
    $patches = join(",", @patchesArray);
}
# Get configuration dynamically from perf-conf
#-----------------------------------------------------------------------
# Read "$perfhome/etc/perf-conf" (reads the file-level $perfhome) and
# fill the caller-supplied hashref with KEY=value pairs.
#   - lines starting with '#' are comments
#   - lines starting with whitespace (including blank "\n" lines) are
#     skipped, preserving historical behavior
#   - values run to end of line, excluding the newline
# Dies if the config file cannot be opened.
#-----------------------------------------------------------------------
sub GetConfiguration {
    my $hashref    = shift;
    my $configfile = "$perfhome/etc/perf-conf";
    # Three-arg open with a lexical handle (was bareword + two-arg).
    open(my $conf_fh, '<', $configfile)
        or die "ERROR: Couldn't open FileHandle for $configfile: $!\n";
    while (my $line = <$conf_fh>) {
        # Skip line if commented out or indented/blank.
        next if ($line =~ m/^#/);
        next if ($line =~ m/^\s+/);
        # Only store when the line actually parses as KEY=value;
        # previously a non-matching line silently reused stale $1/$2
        # captures from the prior line.
        if ($line =~ m/(\w+)=(.+)/) {
            $hashref->{$1} = $2;
        }
    }
    close($conf_fh);
}
# Get path to perfctl executable
#-----------------------------------------------------------------------
# Return the directory that contains the running PerlApp-packaged
# executable, by stripping the trailing "/<basename>" component from
# the path PerlApp::exe() reports.
#-----------------------------------------------------------------------
sub PATH {
    my $exe_location = PerlApp::exe();
    $exe_location =~ s{/\w*$}{};
    return $exe_location;
}
# Send Data to Perf Server
# Assemble the single space-delimited "info" record and append it to the
# spool file read by the perf server, holding an exclusive flock so
# concurrent collectors cannot interleave partial lines.
$perf_out="$os info $cpuNum $cpuModel $cpuSpeed $memTotal $osVersion $kernelVersion $patches";
# Three-arg open with a lexical handle (was bareword + two-arg).
open(my $perf_fh, '>>', "$perfhome/tmp/perf.out")
    or die "WARNING: Couldn't open file handle for $perfhome/tmp/perf.out: $!\n";
flock($perf_fh, LOCK_EX)
    or die "WARNING: Couldn't obtain exclusive lock on $perfhome/tmp/perf.out: $!\n";
print $perf_fh "$perf_out\n";
# Check close on a write handle: buffered write errors surface here.
close($perf_fh)
    or die "WARNING: Couldn't close $perfhome/tmp/perf.out: $!\n";
| ktenzer/perfstat | scripts/experimental/info.pl | Perl | apache-2.0 | 4,402 |
=pod
=head1 NAME
errstr - lookup error codes
=head1 SYNOPSIS
B<openssl errstr error_code>
=head1 DESCRIPTION
Sometimes an application will not load error messages and only
numerical forms will be available. The B<errstr> utility can be used to
display the meaning of the hex code. The hex code is the hex digits after the
second colon.
=head1 EXAMPLE
The error code:
27594:error:2006D080:lib(32):func(109):reason(128):bss_file.c:107:
can be displayed with:
openssl errstr 2006D080
to produce the error message:
error:2006D080:BIO routines:BIO_new_file:no such file
=head1 SEE ALSO
L<err(3)|err(3)>,
L<ERR_load_crypto_strings(3)|ERR_load_crypto_strings(3)>,
L<SSL_load_error_strings(3)|SSL_load_error_strings(3)>
=cut
| GaloisInc/hacrypto | src/C/libssl/HEAD/src/doc/apps/errstr.pod | Perl | bsd-3-clause | 736 |
#!/usr/bin/env perl
# Audit the STAR-Fusion filtering cascade: given a STAR Chimeric
# junction file and a star-fusion output directory, report how many
# reads and fusion candidates each filtering stage removed.
use strict;
use warnings;
use Carp;
use Data::Dumper;
my $usage = "\n\tusage: $0 chimJ_file star-fusion_outdir\n\n";
# "my $x = $ARGV[0] or die ..." relies on low-precedence "or": the die
# fires when the assigned value is false (i.e. the argument is missing).
my $chimJ_file = $ARGV[0] or die $usage;
my $star_fusion_outdir = $ARGV[1] or die $usage;
# Shared helper: count the distinct fusion names (column 0) in a
# tab-separated file that has one header line.  Held in a coderef so
# main() can reuse it against several intermediate files.
my $count_sub = sub {
    my ($file) = @_;
    # BUGFIX(diagnostics): the old message claimed "count_FFPM_filtered()"
    # no matter which stage invoked this helper; report the generic name.
    print STDERR "count_fusions() - parsing $file\n";
    # Three-arg open (was two-arg with an interpolated path).
    open(my $fh, '<', $file) or confess "Error, cannot open file: $file";
    my $header = <$fh>;    # discard header line
    my %fusions;
    while (<$fh>) {
        my @x = split(/\t/);
        $fusions{ $x[0] }++;
    }
    close $fh;
    return scalar(keys %fusions);
};
# Drive the audit: tally read counts, per-stage filter losses, and the
# final fusion count, then print a human-readable report to stdout.
# All intermediate filenames below are fixed outputs of the STAR-Fusion
# pipeline under $star_fusion_outdir/star-fusion.preliminary/.
main: {
my %audit;
# Read totals come from the trailing "# Nreads ..." comment of chimJ.
&get_total_reads($chimJ_file, \%audit);
&audit_chimeric_alignments($chimJ_file, \%audit, "total_chim_reads");
&audit_failed_read_alignments("$star_fusion_outdir/star-fusion.preliminary/star-fusion.junction_breakpts_to_genes.txt.fail", \%audit);
&audit_chimeric_alignments("$star_fusion_outdir/star-fusion.preliminary/star-fusion.junction_breakpts_to_genes.txt.pass", \%audit, "chim_reads_passed");
&count_prelim_fusions("$star_fusion_outdir/star-fusion.preliminary/star-fusion.fusion_candidates.preliminary", \%audit);
## applied basic filtering criteria (ie. min support)
&count_pre_blast_filt("$star_fusion_outdir/star-fusion.preliminary/star-fusion.filter.intermediates_dir/star-fusion.pre_blast_filter.filt_info", \%audit);
my $total_remaining_after_basic_filtering = &$count_sub("$star_fusion_outdir/star-fusion.preliminary/star-fusion.filter.intermediates_dir/star-fusion.pre_blast_filter");
## remove those that are lesser scored paralogs of fusion partners
&count_blast_filt("$star_fusion_outdir/star-fusion.preliminary/star-fusion.filter.intermediates_dir/star-fusion.pre_blast_filter.post_blast_filter.info", \%audit);
$audit{'remaining_post_blast_filt'} = &$count_sub("$star_fusion_outdir/star-fusion.preliminary/star-fusion.filter.intermediates_dir/star-fusion.pre_blast_filter.post_blast_filter");
&count_promiscuous_filt("$star_fusion_outdir/star-fusion.preliminary/star-fusion.filter.intermediates_dir/star-fusion.pre_blast_filter.post_blast_filter.post_promisc_filter.info", \%audit);
$audit{'remaining_post_promisc_filt'} = &$count_sub("$star_fusion_outdir/star-fusion.preliminary/star-fusion.filter.intermediates_dir/star-fusion.pre_blast_filter.post_blast_filter.post_promisc_filter");
&count_red_herrings("$star_fusion_outdir/star-fusion.preliminary/star-fusion.fusion_candidates.preliminary.wSpliceInfo.wAnnot.annot_filt", \%audit);
$audit{'remaining_post_red_herrings'} = &$count_sub("$star_fusion_outdir/star-fusion.preliminary/star-fusion.fusion_candidates.preliminary.wSpliceInfo.wAnnot.pass");
# FFPM filter loss = before-file count minus after-file count.
&count_FFPM_filtered("$star_fusion_outdir/star-fusion.preliminary/star-fusion.fusion_candidates.preliminary.wSpliceInfo.wAnnot.pass",
"$star_fusion_outdir/star-fusion.preliminary/star-fusion.fusion_candidates.preliminary.wSpliceInfo.wAnnot.pass.minFFPM.0.1.pass",
\%audit);
my $total_final_fusions = &$count_sub("$star_fusion_outdir/star-fusion.fusion_predictions.abridged.tsv");
##########
## Reporting
#############
# The report is assembled as one string and printed once at the end;
# the commented "key => value" samples below illustrate typical %audit
# contents for each section.
my $report = "";
## starting read counts:
# 'Nreads' => '21430514',
# 'NreadsUnique' => '17249407',
# 'NreadsMulti' => '1094325',
my $num_mapped_reads = $audit{'NreadsUnique'} + $audit{'NreadsMulti'};
$report .= "# Read Counts\n"
. "Nreads:\t" . $audit{'Nreads'} . "\n"
. "NreadsUnique:\t" . $audit{'NreadsUnique'} . "\n"
. "NreadsMulti:\t" . $audit{'NreadsMulti'} . "\n"
. "\n";
$report .= "pct reads mapped:\t" . sprintf("%.2f", $num_mapped_reads / $audit{'Nreads'} * 100) . "\n\n";
$report .= "chimeric read count:\t" . $audit{'total_chim_reads'} . "\t" . sprintf("%.2f", $audit{'total_chim_reads'} / $num_mapped_reads * 100) . " \n\n";
## Read filtering
# 'read_fail__no_gene_anchors' => 1087355,
# 'read_fail__selfie_or_homology' => 145793,
# 'read_fail__discarded_multimap_deficient_anchors' => 53221,
# 'read_fail__multimap_homology_congruence_fail' => 6523,
$report .= "# read filtering\n"
. "no anchors:\t" . $audit{'read_fail__no_gene_anchors'} . "\n"
. "selfie or homolog:\t" . $audit{'read_fail__selfie_or_homology'} . "\n"
. "multimap deficient anchors:\t" . $audit{'read_fail__discarded_multimap_deficient_anchors'} . "\n"
. "multimap homology congruence:\t" . $audit{'read_fail__multimap_homology_congruence_fail'} . "\n"
. "\n";
$report .= "chimeric reads passed:\t" . $audit{'chim_reads_passed'} . "\t = " . sprintf("%.2f", $audit{'chim_reads_passed'} / $audit{'total_chim_reads'} * 100) . "\n";
## initial fusion candidates:
# 'prelim_fusion_count' => 131313,
$report .= "# initial fusion candidates\n"
. "prelim fusion count:\t" . $audit{'prelim_fusion_count'} . "\n"
. "\n";
## Basic filtering applied.
# 'pre_blast::insuf_sum_support' => 125407,
# 'pre_blast::insuf_novel_junc_support' => 5789,
# 'pre_blast::no_junction_support' => 4377,
# 'pre_blast::no_span_no_LDAS' => 40,
# 'pre_blast::low_pct_isoform' => 1
$report .= "# basic filtering criteria applied.\n"
. "insufficient sum support:\t" . $audit{'pre_blast::insuf_sum_support'} . sprintf("\t%.2f", $audit{'pre_blast::insuf_sum_support'}/$audit{'prelim_fusion_count'} * 100) . "\n"
. "insufficient novel junction support:\t" . $audit{'pre_blast::insuf_novel_junc_support'} . sprintf("\t%.2f", $audit{'pre_blast::insuf_novel_junc_support'}/$audit{'prelim_fusion_count'} * 100) . "\n"
. "no junction support:\t" . $audit{'pre_blast::no_junction_support'} . sprintf("\t%.2f", $audit{'pre_blast::no_junction_support'}/$audit{'prelim_fusion_count'} * 100) . "\n"
. "no span, no LDAS:\t" . $audit{'pre_blast::no_span_no_LDAS'} . sprintf("\t%.2f", $audit{'pre_blast::no_span_no_LDAS'} / $audit{'prelim_fusion_count'} * 100) . "\n"
. "low pct isoform:\t" . $audit{'pre_blast::low_pct_isoform'} . sprintf("\t%.2f", $audit{'pre_blast::low_pct_isoform'} / $audit{'prelim_fusion_count'} * 100) . "\n";
my $total_basic_filtered = $audit{'prelim_fusion_count'} - $total_remaining_after_basic_filtering;
$report .= "-------------------------------------------\n"
. "total basic filtered:\t" . $total_basic_filtered . sprintf("\t%.2f", $total_basic_filtered / $audit{'prelim_fusion_count'} * 100) . "\n"
. "\n";
$report .= "\tnum fusion candidates remaining:\t$total_remaining_after_basic_filtering\n\n";
$report .= "# Final feature filters.\n"
## blast filter A--B exists, removing C--B where A,C are paralogs and score(C--B) < score(A--B)
# 'blast_filt' => 27,
. "blast paralog filter:\t" . $audit{'blast_filt'} . ", remaining post blast filt: " . $audit{'remaining_post_blast_filt'} . "\n"
## promiscuity filter
# 'promisc_filt' => 4,
. "promiscuity filter:\t" . $audit{'promisc_filt'} . ", remaining post promisc filt: " . $audit{'remaining_post_promisc_filt'} . "\n"
## red herring annotation filter:
## 'red_herrings_filt' => 27,
. "red herrings filter:\t" . $audit{'red_herrings_filt'} . ", remaining post red herring filt: " . $audit{'remaining_post_red_herrings'} . "\n"
## final expression filter
# 'FFPM_filt' => 5,
. "FFPM filter:\t" . $audit{'FFPM_filt'} . "\n"
. "\n";
$report .= "Final fusions called:\t" . $total_final_fusions . "\n\n";
print $report;
}
####
# Record how many fusion candidates the FFPM (expression) filter
# removed: the difference between the distinct-fusion counts of the
# before-filter and after-filter files.
sub count_FFPM_filtered {
    my ($pre_file, $post_file, $audit_href) = @_;
    my @tallies = map { $count_sub->($_) } ($pre_file, $post_file);
    $audit_href->{FFPM_filt} = $tallies[0] - $tallies[1];
    return;
}
####
# Count the distinct fusions flagged by the "red herring" annotation
# filter.  $file is tab-separated with one header line; column 0 is the
# fusion name.  Result stored as $audit_href->{red_herrings_filt}.
sub count_red_herrings {
    my ($file, $audit_href) = @_;
    print STDERR "count_red_herrings() - parsing $file\n";
    my %fusions;
    # Three-arg open; the old two-arg form misbehaves on filenames with
    # leading/trailing mode characters or whitespace.
    open(my $fh, '<', $file) or confess "Error, cannot open file: $file";
    my $header = <$fh>;    # discard header line
    while (<$fh>) {
        my @x = split(/\t/);
        $fusions{ $x[0] }++;
    }
    close $fh;
    $audit_href->{"red_herrings_filt"} = scalar(keys %fusions);
    return;
}
####
# Count distinct fusions removed by the promiscuity filter.  The filter
# info file names each removed fusion on a comment line of the form
# "#GENEA--GENEB ..."; non-comment lines are survivors and are ignored.
sub count_promiscuous_filt {
    my ($file, $audit_href) = @_;
    print STDERR "count_promiscuous_filt() - parsing $file\n";
    my %fusions;
    # Three-arg open (was two-arg with an interpolated path).
    open(my $fh, '<', $file) or confess "Error, cannot open file: $file";
    my $header = <$fh>;    # discard header line
    while (<$fh>) {
        chomp;
        # Only comment lines name a filtered fusion.
        if (/^\#(\S+)/) {
            $fusions{$1}++;
        }
    }
    close $fh;
    $audit_href->{"promisc_filt"} = scalar(keys %fusions);
    return;
}
####
# Count distinct fusions removed by the blast paralog filter (same
# comment-line format as count_promiscuous_filt: "#GENEA--GENEB ...").
sub count_blast_filt {
    my ($file, $audit_href) = @_;
    print STDERR "count_blast_filt() - parsing $file\n";
    my %fusions;
    # Three-arg open (was two-arg with an interpolated path).
    open(my $fh, '<', $file) or confess "Error, cannot open file: $file";
    my $header = <$fh>;    # discard header line
    while (<$fh>) {
        chomp;
        # Only comment lines name a filtered fusion.
        if (/^\#(\S+)/) {
            $fusions{$1}++;
        }
    }
    close $fh;
    $audit_href->{"blast_filt"} = scalar(keys %fusions);
    return;
}
####
# Count the distinct preliminary fusion candidates (column 0 of a
# tab-separated file with one header line) and store the tally as
# $audit_href->{prelim_fusion_count}.
sub count_prelim_fusions {
    my ($prelim_fusions_file, $audit_href) = @_;
    print STDERR "count_prelim_fusions() -parsing $prelim_fusions_file\n";
    my %fusions;
    # Three-arg open (was two-arg with an interpolated path).
    open(my $fh, '<', $prelim_fusions_file)
        or confess "Error, cannot open file: $prelim_fusions_file ";
    my $header = <$fh>;    # discard header line
    while (<$fh>) {
        my @x = split(/\t/);
        $fusions{ $x[0] }++;
    }
    close $fh;
    $audit_href->{prelim_fusion_count} = scalar(keys %fusions);
    return;
}
####
# Extract total/unique/multimapped read counts from the final comment
# line of the Chimeric junction file, which looks like:
#   "# Nreads 1000   NreadsUnique 900   NreadsMulti 50"
# Populates Nreads / NreadsUnique / NreadsMulti in %$audit_href, or
# confesses if the last line does not match.
sub get_total_reads {
    my ($chimJ_file, $audit_href) = @_;
    print STDERR "get_total_reads() - parsing $chimJ_file\n";
    # List-form pipe open execs tail directly (no shell), so filenames
    # containing spaces or shell metacharacters are handled safely;
    # keeping tail avoids slurping what can be a very large file.
    open(my $tail_fh, '-|', 'tail', '-n1', $chimJ_file)
        or confess "Error, cannot run tail on $chimJ_file: $!";
    my $reads_line = <$tail_fh>;
    close $tail_fh;
    if (defined($reads_line)
        && $reads_line =~ /^\# Nreads (\d+)\s+NreadsUnique (\d+)\s+NreadsMulti (\d+)/) {
        $audit_href->{Nreads}       = $1;
        $audit_href->{NreadsUnique} = $2;
        $audit_href->{NreadsMulti}  = $3;
    }
    else {
        $reads_line = '' unless defined $reads_line;
        confess "Error, didnt extract read count from $chimJ_file: $reads_line ";
    }
    return;
}
####
# Count chimeric *reads* (not alignment lines) in a Chimeric junction
# file: consecutive lines sharing the read name in column 10 (index 9)
# count as one multimapped read.  Assumes alignments for a read are
# adjacent in the file -- TODO confirm STAR guarantees that grouping.
# Result is stored under the caller-chosen $token key in %$audit_href.
sub audit_chimeric_alignments {
    my ($chimJ_file, $audit_href, $token) = @_;
    print STDERR "audit_chimeric_alignments() - parsing $chimJ_file\n";
    my $count = 0;
    # Three-arg open, and include the filename in the error message
    # (previously a bare "die $!").
    open(my $fh, '<', $chimJ_file)
        or die "Error, cannot open file: $chimJ_file: $!";
    my $prev_read = "";
    while (<$fh>) {
        next unless /\w/;    # skip blank lines
        next if /^\#/;       # skip comment/summary lines
        my @x = split(/\t/);
        my $read_name = $x[9];
        $count++ if ($read_name ne $prev_read);
        $prev_read = $read_name;
    }
    close $fh;
    $audit_href->{$token} = $count;
    return;
}
####
# Tally the reasons chimeric read alignments were rejected, from the
# comment lines of the ".fail" breakpoint file.  Branch order matters:
# the exact "(0.00%)" no-anchors case must be tested before the generic
# "only Pct ..." multimap-deficient case it would otherwise match.
# Confesses on a comment line it does not recognize.
sub audit_failed_read_alignments {
    my ($file, $audit_href) = @_;
    print STDERR "audit_failed_read_alignments() - parsing $file\n";
    # Three-arg open (was two-arg with an interpolated path).
    open(my $fh, '<', $file)
        or die "Error, cannot open file: $file ";
    while (<$fh>) {
        next unless /^\#/;    # only comment lines carry audit reasons
        if (/Contains selfie or homology match/) {
            $audit_href->{read_fail__selfie_or_homology}++;
        }
        elsif (/only Pct \(0.00%\) of alignments had paired gene anchors/) {
            $audit_href->{read_fail__no_gene_anchors}++;
        }
        elsif (/only Pct .* of alignments had paired gene anchors/) {
            $audit_href->{read_fail__discarded_multimap_deficient_anchors}++;
        }
        elsif (/Fails mulitmapper homology congruence/) {
            # "mulitmapper" spelling matches the upstream tool's output.
            $audit_href->{read_fail__multimap_homology_congruence_fail}++;
        }
        else {
            confess "not accounted for: $_";
        }
    }
    close $fh;
    return;
}
####
# Tally per-fusion reasons from the basic ("pre-blast") filtering info
# file.  Only comment lines (the filtered entries) are examined; each
# fusion (column 0) is counted once, under the reason in column 12
# (index 11).  Counts land in %$audit_href under "pre_blast::<token>".
# Confesses on an unrecognized reason.
sub count_pre_blast_filt {
    my ($file, $audit_href) = @_;
    print STDERR "count_pre_blast_filt() - parsing $file\n";
    my %seen;
    # Three-arg open (was two-arg with an interpolated path).
    open(my $fh, '<', $file) or die "Error, cannot open file: $file";
    my $header = <$fh>;    # discard header line
    while (<$fh>) {
        next unless /^\#/;    # only examining filtered ones.
        chomp;
        my @x = split(/\t/);
        my $fusion = $x[0];
        next if $seen{$fusion};
        my $reason = $x[11];
        next if $reason eq 'Merged';
        # BUGFIX: three of the branches below previously matched against
        # the whole line ($_) rather than $reason; match the reason
        # column consistently, like the first two branches always did.
        my $token;
        if ($reason =~ /FILTERED DUE TO .*novel.* junction support/) {
            $token = "insuf_novel_junc_support";
        }
        elsif ($reason =~ /FILTERED DUE TO junction read support/) {
            $token = "no_junction_support";
        }
        elsif ($reason =~ /no spanning reads and no long double anchor support at breakpoint/) {
            $token = "no_span_no_LDAS";
        }
        elsif ($reason =~ /FILTERED DUE TO sum_support/) {
            $token = "insuf_sum_support";
        }
        elsif ($reason =~ /FILTERED DUE TO ONLY .* % of dominant isoform support/) {
            $token = "low_pct_isoform";
        }
        else {
            confess " error, not recognizing reasoning here: $reason ";
        }
        $audit_href->{"pre_blast::$token"}++;
        $seen{$fusion} = 1;
    }
    close $fh;
    return;
}
| STAR-Fusion/STAR-Fusion | util/misc/audit_read_and_fusion_filtering.pl | Perl | bsd-3-clause | 13,823 |
#!/usr/bin/perl
# iirGen.pl -- Spiral IIR filter Verilog generator driver.
# Builds an IIR filter from constant-multiplier blocks produced by
# multBlockGen.pl and/or FIR halves produced by firGen.pl, and can emit
# a self-checking testbench.
#use warnings 'all';
#use strict;
#use FindBin;
#use Math::Round;
# NOTE(review): verilog::gen presumably supplies the unqualified
# helpers used below (adderArea, flopArea, toHex, getnum) -- confirm.
use verilog::gen;
# Provenance banner emitted at the top of generated Verilog files.
my $scriptInfo =
"/*------------------------------------------------------------------------------\n" .
" * This code was generated by Spiral IIR Filter Generator, www.spiral.net\n" .
" * Copyright (c) 2006, Carnegie Mellon University\n" .
" * All rights reserved.\n" .
" * The code is distributed under a BSD style license\n" .
" * (see http://www.opensource.org/licenses/bsd-license.php)\n" .
" *------------------------------------------------------------------------------" .
" */\n";
#-----------------------------------------------------------------------
# @brief Calls multBlockGen.pl on the commandline and generates the
# multiply block
# @param fh file handle to print to, pass with \*FH
# @param option "vanilla" - base or "addChain" - optimized
# @param moduleName multiply block module name
# @param ports hash of port names: \{i_data => "in", o_data => "out"}
# @param bitwidth input bitwidth to multiplier block
# @param fixedPoint how many bits of data are below the decimal point,
# @param constants array of constants
# @return undef
#-----------------------------------------------------------------------
# NOTE(review): the header above is stale -- the signature has no
# "ports" argument, and the sub actually returns
# ($multBlockSize, \%outputBitwidths).  Behavior: shells out to
# ./multBlockGen.pl, streams its Verilog output to $fh, and (for the
# addChain option only) scrapes per-output //BitwidthUsed annotations
# and the area estimate from that output.
sub genMultiply {
my ($fh, $option, $moduleName, $bitwidth, $fixedPoint, $constants, $debug) = @_;
# Build the generator command line: one constant per argument.
my $mult_cmd = "./multBlockGen.pl";
my $constantCount = scalar(@$constants);
for(my $i = 0; $i < $constantCount; $i++){
$mult_cmd .= " " . $constants->[$i];
}
if($option eq "addChain"){
$mult_cmd .= " -addChain";
}
elsif($option eq "base"){
$mult_cmd .= " -base";
}
else{
print STDERR "./iirGen.pl: genMultiply doesn't know how to handle option: $option\n";
exit(-2);
}
$mult_cmd .= " -suppressInfo" .
" -moduleName $moduleName" .
" -bitWidth $bitwidth" .
" -fractionalBits $fixedPoint" .
" -inData X " .
" -outData Y " .
" -debug ";
# Pipe-open the generator and copy its stdout into the output file.
if(!open(MULT_OUT, "$mult_cmd |")){
print STDERR "Error executing multBlockGen.pl\n";
exit(-2);
}
my $multBlockSize;
# $outputBitwidths{index} = [valid_flag, low_bit, high_bit] scraped
# from the //BitwidthUsed annotations of the addChain output.
my %outputBitwidths = ();
if($option eq "addChain"){
while(my $line = <MULT_OUT>){
chomp $line;
if($constantCount > 1){
# Multi-output form: "assign Y[<n>] = ...; //BitwidthUsed(lo, hi)"
#assign Y[0] = w109[30:15]; //BitwidthUsed (0, 7)
if($line =~ /assign \w+\[(\d+)\].+\/\/BitwidthUsed\((.*)\)/){
my $index = $1;
my $values = $2;
if($values eq "none"){
my @info = (0,0,0);
$outputBitwidths{$index} = \@info;
}else{
if($values=~ /^(\d+), (\d+)$/){
my @info = (1,$1,$2);
$outputBitwidths{$index} = \@info;
}else{
print $fh "doesn't look right: $line\n";
exit(-2);
}
}
}
elsif($line =~ /MultiplyBlock[a-zA-Z0-9_]+\s+area estimate\s*=\s*([\d\.]+);/){
$multBlockSize = getnum($1);
}
}else{
# Single-output form: same annotation but no [index] subscript.
#assign Y[0] = w109[30:15]; //BitwidthUsed (0, 7)
if($line =~ /assign .+\/\/BitwidthUsed\(([\d, ]+)\)/){
my $index = 0;
my $values = $1;
if($values eq "none"){
my @info = (0,0,0);
$outputBitwidths{$index} = \@info;
}else{
if($values=~ /^(\d+), (\d+)$/){
my @info = (1,$1,$2);
$outputBitwidths{$index} = \@info;
}else{
print $fh "doesn't look right: $line\n";
exit(-2);
}
}
}
elsif($line =~ /MultiplyBlock[a-zA-Z0-9_]+\s+area estimate\s*=\s*([\d\.]+);/){
$multBlockSize = getnum($1);
}
}
# Unless debugging, strip trailing "// ..." annotations after ';'.
if(!$debug){
$line =~ s/;.*\/\/.*$/;/;
}
print $fh $line . "\n";
}
}else{
# base option: pass the generator output through untouched.
while(my $line = <MULT_OUT>){
print $fh $line;
}
}
# close() on a pipe reflects the child's exit status.
if(!close(MULT_OUT)){ #check exit code
print STDERR "Error executing multBlockGen.pl: nonzero exit code\n";
exit(-2);
}
print $fh "\n\n";
return ($multBlockSize, \%outputBitwidths);
}
#-----------------------------------------------------------------------
# @brief Calls firGen.pl on the commandline and generates the
# FIR block
# @param fh file handle to print to, pass with \*FH
# @param option "vanilla" - base or "addChain" - optimized
# @param moduleName multiply block module name
# @param bitwidth input bitwidth to multiplier block
# @param fixedPoint how many bits of data are below the decimal point,
# @param constants array of constants
# @return undef
#-----------------------------------------------------------------------
# NOTE(review): the header above is stale -- the signature also takes
# ($reset, $reset_edge, $debug), and the sub returns the scraped
# $firBlockSize area estimate (0 when not found), not undef.
sub genFIR {
my ($fh, $option, $moduleName, $bitwidth, $fixedPoint, $constants, $reset, $reset_edge, $debug) = @_;
# Build the generator command line: one constant per argument.
my $firgen_cmd = "./firGen.pl";
my $constantCount = scalar(@$constants);
for(my $i = 0; $i < $constantCount; $i++){
$firgen_cmd .= " " . $constants->[$i];
}
if($option eq "addChain"){
$firgen_cmd .= " -addChain";
}
elsif($option eq "base"){
$firgen_cmd .= " -base";
}
else{
print STDERR "./iirGen.pl: genFIR doesn't know how to handle option: $option\n";
exit(-2);
}
$firgen_cmd .= " -suppressInfo" .
" -moduleName $moduleName" .
" -bitWidth $bitwidth" .
" -fractionalBits $fixedPoint" .
" -inData X " .
" -outData Y " .
" -clk clk " .
" -outReg " .
" -reset $reset " .
" -reset_edge $reset_edge ";
$firgen_cmd .= " -debug " if($debug);
# Pipe-open the generator and copy its stdout into the output file.
if(!open(FIR_OUT, "$firgen_cmd |")){
print STDERR "Error executing: $firgen_cmd\n";
exit(-2);
}
my $firBlockSize = 0;
if($option eq "addChain"){
while(my $line = <FIR_OUT>){
chomp $line;
# Scrape "//<moduleName> area estimate = <num>;" for this module.
if($line =~ /\/\/(.*) area estimate\s*=\s*([\d\.]+);/){
if($1 eq $moduleName){
$firBlockSize = getnum($2);
}
}
print $fh $line . "\n";
}
}else{
# base option: pass the generator output through untouched.
while(my $line = <FIR_OUT>){
print $fh $line;
}
}
# close() on a pipe reflects the child's exit status.
if(!close(FIR_OUT)){ #check exit code
print STDERR "Error executing (nonzero exit code): $firgen_cmd\n";
exit(-2);
}
print $fh "\n\n";
return $firBlockSize;
}
#-----------------------------------------------------------------------
# @brief Generates code for IIR filter
# @param fh file handle to print to, pass with \*FH
# @param ports hash of port names: {i_data => "in", o_data => "out", clk => "clk", reset => "rst_n"}
# @param bitWidth input bitwidth to multiplier block
# @param firLeftName input side of the fir
# @param firRightName output side of the fir
# @return undef
#-----------------------------------------------------------------------
# NOTE(review): actually returns the adder area estimate, not undef.
# Emits Verilog that instantiates the two FIR halves -- the left fed by
# i_data, the right fed back from o_data -- and sums their outputs into
# o_data.  The exact emitted text (indentation included) is relied on
# by downstream tooling, so the prints are preserved verbatim.
sub genIIR{
my ($fh, $ports, $bitwidth, $firLeftName, $firRightName, $debug) = @_;
my $leftOut = "leftOut";
my $rightOut = "rightOut";
print $fh " wire [" . ($bitwidth - 1) . ":0] $leftOut, $rightOut;\n\n";
# Feed-forward FIR half, driven by the filter input.
print $fh " $firLeftName my_$firLeftName(\n" .
" .X(" . $ports->{i_data} . "),\n" .
" .Y(" . $leftOut . "),\n" .
" .clk(" . $ports->{clk} . "),\n" .
" ." . $ports->{reset} . "(" . $ports->{reset} . ")\n" .
");\n\n";
# Feedback FIR half, driven by the filter output (the IIR recursion).
print $fh " $firRightName my_$firRightName(\n" .
" .X(" . $ports->{o_data} . "),\n" .
" .Y(" . $rightOut . "),\n" .
" .clk(" . $ports->{clk} . "),\n" .
" ." . $ports->{reset} . "(" . $ports->{reset} . ")\n" .
");\n\n";
print $fh " assign " . $ports->{o_data} . " = $leftOut + $rightOut;";
print $fh " // adder($bitwidth)" if($debug);
print $fh "\n\n";
# Area of the single output adder; FIR-half areas are reported by genFIR.
return adderArea($bitwidth);
}
#$iirSize += genIIRform2($fh, \%regPorts, $bitWidth, $multName . "_left", $multName . "_right", \@leftConstants, $outputHashLeft, \@rightConstants, $outputHashRight, $debug);
#-----------------------------------------------------------------------
# @brief Generates code for FIR filter
# @param fh file handle to print to, pass with \*FH
# @param ports hash of port names: \{i_data => "in", o_data => "out", clk => "clk"}
# @param bitWidth input bitwidth to multiplier block
# @param multiplyName multiply block module name
# @param fixedPoint how many bits of data are below the decimal point
# @param constantCount array of constants
# @return undef
#-----------------------------------------------------------------------
# NOTE(review): header is stale -- the signature is the longer list
# unpacked below (including o_data_next in %$ports and an optional
# $reset_edge), and the sub returns the accumulated area estimate.
# Emits a register-chain IIR structure (appears to be a transposed
# direct-form realization -- confirm against the Spiral paper): the two
# multiplier blocks produce left/right product arrays, which are summed
# through the clocked "iirStep" register chain into o_data.  The exact
# emitted Verilog text is preserved verbatim.
sub genIIRform2{
my ($fh, $ports, $bitwidth, $multiplyName_left, $multiplyName_right, $leftConst, $left_outputBitwidths, $rightConst, $right_outputBitwidths, $reset_edge, $debug) = @_;
my $multOut = "multProducts";
my $left_constantCount = scalar(@$leftConst);
my $right_constantCount = scalar(@$rightConst);
#multOut wires
# Declare product-wire arrays (unpacked only when more than one tap).
print $fh " wire [" . ($bitwidth - 1) . ":0] ${multOut}_left";
print $fh " [0:" . ($left_constantCount - 1) . "]" if($left_constantCount > 1);
print $fh ";\n\n";
print $fh " wire [" . ($bitwidth - 1) . ":0] ${multOut}_right";
print $fh " [0:" . ($right_constantCount - 1) . "]" if($right_constantCount > 1);
print $fh ";\n\n";
# Instantiate the feed-forward multiplier block, fed by i_data.
print $fh " $multiplyName_left my_$multiplyName_left(\n"
. " .X(" . $ports->{i_data} . ")";
if($left_constantCount > 1){
for(my $i = 0; $i < $left_constantCount; $i++){
print $fh ",\n .Y" . ($i + 1) .
"(${multOut}_left\[$i\])";
}
}
else{
print $fh ",\n .Y" .
"(${multOut}_left)";
}
print $fh "\n );\n\n";
# Instantiate the feedback multiplier block, fed by o_data_next.
print $fh " $multiplyName_right my_$multiplyName_right(\n"
. " .X(" . $ports->{o_data_next} . ")";
if($right_constantCount > 1){
for(my $i = 0; $i < $right_constantCount; $i++){
print $fh ",\n .Y" . ($i + 1) .
"(${multOut}_right\[$i\])";
}
}
else{
print $fh ",\n .Y" .
"(${multOut}_right)";
}
print $fh "\n );\n\n";
# Determine which side has more taps; the longer side ($max) drives the
# first $diff chain stages alone before the shorter side ($min) joins.
my $maxConst = 0;
my $diff = 0;
my $max = "";
my $min = "";
if($left_constantCount > $right_constantCount){
$maxConst = $left_constantCount;
$diff = $left_constantCount - $right_constantCount;
$max = "_left";
$min = "_right";
}else{
$maxConst = $right_constantCount;
$diff = $right_constantCount - $left_constantCount;
$max = "_right";
$min = "_left";
}
my $diff2 = $diff;
#always block
# "iirStep" is the pipeline register chain between product sums.
my $arrayName = "iirStep";
if($maxConst > 1){
print $fh " reg [" . ($bitwidth-1) . ":0] $arrayName";
if ($maxConst > 2){
print $fh "[0:" . ($maxConst-2) . "]";
}
}
print $fh ";\n\n";
# With a reset edge, add it to the sensitivity list and indent the
# body one level further (inside the if/else reset mux).
my $resetSense = "";
my $addedPrefix = "";
if(defined($reset_edge)){
$resetSense = " or $reset_edge " . $ports->{"reset"};
$addedPrefix = " ";
}
my $areaSum = 0;
if($maxConst > 1){
print $fh " always@(posedge ". $ports->{"clk"} . $resetSense . ") begin\n";;
if(defined($reset_edge)){
# Active-low asynchronous reset clears every chain register.
print $fh " if(~" . $ports->{"reset"} . ") begin\n";
print $fh " ${arrayName}";
print $fh "\[0\]" if($maxConst > 2);
print $fh " <= " . toHex(0, $bitwidth) . ";\n";
for(my $i = 1; $i < $maxConst - 1; $i++){
my $index = $i - 1;
print $fh " ${arrayName}\[$i\] <= ". toHex(0, $bitwidth) .";\n";
}
print $fh " end\n";
print $fh " else begin\n";
}
# First chain stage: product only while the longer side leads
# ($diff2 counts the remaining unpaired stages), else sum of both.
print $fh $addedPrefix . " ${arrayName}";
print $fh "\[0\]" if($maxConst > 2);
print $fh " <= ";
if($diff2){
print $fh "${multOut}${max}[0];";
my $flopSize = flopArea($bitwidth, defined($reset_edge));
print $fh " // $flopSize = flop(0, $bitwidth - 1)" if($debug);
print $fh "\n";
$diff2--;
$areaSum += $flopSize;
}
else{
print $fh "${multOut}_left[0] + ${multOut}_right[0];";
my $areaLocal = adderArea($bitwidth) + flopArea($bitwidth, defined($reset_edge));
print $fh " // $areaLocal = adder($bitwidth) + flop(0, $bitwidth - 1)" if($debug);
print $fh "\n";
$areaSum += $areaLocal;
}
# Middle chain stages: previous stage plus the next product(s).
for(my $i = 1; $i < $maxConst - 1; $i++){
my $index = $i - 1;
my $index2 = $i;
my $flopSize = flopArea($bitwidth, defined($reset_edge));
if($diff2){
print $fh $addedPrefix . " ${arrayName}\[$index2\] <= ${arrayName}\[$index\] + ${multOut}${max}\[$index2\];";
my $addsize = adderArea($bitwidth) + $flopSize;
print $fh " // $addsize = adder($bitwidth) + flop(0, $bitwidth - 1)" if($debug);
print $fh "\n";
$diff2--;
$areaSum += $addsize;
}else{
my $index3 = $i - $diff;
print $fh $addedPrefix . " ${arrayName}\[$index2\] <= ${arrayName}\[$index\] + ${multOut}${max}\[$index2\] + ${multOut}${min}\[$index3\];";
my $addsize = 2*adderArea($bitwidth) + $flopSize;
print $fh " // $addsize = 2*adder($bitwidth) + flop(0, $bitwidth - 1)" if($debug);
print $fh "\n";
$areaSum += $addsize;
}
}
print $fh " end\n" if(defined($reset_edge));
print $fh " end\n\n";
}
# Combinational output stage: last chain register plus final products.
if($maxConst > 1){
print $fh " assign " . $ports->{o_data} . " = ${arrayName}";
print $fh "[". ($maxConst - 2) . "\]" if ($maxConst > 2);
print $fh "+ ${multOut}${max}\[". ($maxConst - 1) ."\]";
my $index3 = $maxConst - $diff-1;
print $fh "+ ${multOut}${min}";
# NOTE(review): when ($maxConst - $diff) <= 1 no "[...];" is printed
# here, so the assign statement is emitted without a terminating
# semicolon -- looks like a latent bug for that tap configuration;
# confirm against generated output before changing.
print $fh "\[$index3\];" if($maxConst - $diff > 1);
my $addsize = 2*adderArea($bitwidth);
print $fh " // $addsize = 2*adder($bitwidth)" if($debug);
print $fh "\n";
$areaSum += $addsize;
}else{ #$constantCount == 1
print $fh " assign " . $ports->{o_data} . " = ${multOut}${max} + ${multOut}${min};\n";
}
print $fh "\n";
return $areaSum;
}
#-----------------------------------------------------------------------
# @brief generates a testBench, which provides the filter with an impulse
# and, if all goes correct, should return a time reversed reading of the
# filter
#-----------------------------------------------------------------------
# NOTE(review): this sub is disabled -- it prints "not supported" and
# exits immediately, so EVERYTHING after the exit(-2) below is dead
# code, retained only as a starting point for reviving the feature.
sub genTestBench{
my ($fh, $ports, $moduleList, $bitwidth, $filterSteps, $registerDelays, $fixedPoint, $reset_edge, $constants) = @_;
print STDERR "Test Bench Option not supported.\n";
exit(-2);
# ---- unreachable from here on ----
print $fh " integer testEndedGracefully;\n\n";
print $fh " reg [" . ($bitwidth - 1) . ":0] inData;\n".
" reg clk;\n";
my $verify;
if(!scalar(@$moduleList)){
print STDERR "Error: Cannot write a testBench for no modules.\n";
exit(-2);
}
# Declare an output wire and signed-value integer per module under test.
foreach my $module(@$moduleList){
print $fh
" wire [" . ($bitwidth - 1) . ":0] ${module}_out;\n" .
" integer ${module}_val;\n";
}
print $fh "\n";
print $fh
" initial begin\n";
print $fh " \$display(\" Time, ". join("_val, ", @$moduleList) . "_val\");\n";
print $fh
" clk = 1;\n".
" forever \#1 clk =~clk;\n".
" end\n\n";
# Instantiate each module under test on the shared clk/inData.
foreach my $module(@$moduleList){
print $fh
" ${module} my_${module}(\n".
" ." . $ports->{"clk"} ."(clk),\n".
" ." . $ports->{"i_data"} ."(inData),\n".
" ." . $ports->{"o_data"} ."(${module}_out)\n".
" );\n\n";
}
# Build the $display format string: one %d column per module.
my $pr_str = "\$display(\"Output: \%d";
foreach my $module(@$moduleList){
$pr_str .= ", %d";
}
$pr_str .= "\", \$time";
foreach my $module(@$moduleList){
$pr_str .= ", ${module}_val";
}
$pr_str .= ");\n";
print $fh "\n //Convert unsigned values to signed integers for comparison\n";
foreach my $module(@$moduleList){
print $fh
" always@(${module}_out) begin\n" .
" if(${module}_out >= ".(1 << ($bitwidth - 1)).")\n" .
" ${module}_val = ${module}_out - ".(1 << $bitwidth).";\n" .
" else\n" .
" ${module}_val = ${module}_out;\n" .
" end\n\n";
}
# Impulse response check: each constant should appear at the output,
# highest-index tap first.
print $fh
" initial begin\n" .
" //ignore initial x's and initial\n" .
" //delays through registers\n" .
" #". (2*($filterSteps + 3*$registerDelays)) .";\n";
for(my $i = scalar(@$constants) - 1; $i >= 0; $i--){
my $const = $constants->[$i];
print $fh
"\n @(posedge clk);\n" .
" #0;\n" .
" $pr_str";
foreach my $module(@$moduleList){
# NOTE(review): this emitted condition tests val == $const+1 while
# the message says the value should be $const -- the tolerance
# bounds look wrong (perhaps meant $const-1 <= val <= $const+1).
# Dead code today, but fix before re-enabling the testbench.
print $fh
" if(($const+1 <= ${module}_val) && (${module}_val <= $const+1))\n" .
" \$display(\"ERROR: ${module}_val should be $const\");\n";
}
}
print $fh
"\n //All outputs have been seen\n" .
" testEndedGracefully <= 1;\n\n";
# After the impulse response drains, outputs must stay at zero.
print $fh
"\n forever @(posedge clk) begin\n" .
" #0;\n";
foreach my $module(@$moduleList){
print $fh
" if(${module}_val !== 0 )\n" .
" \$display(\"ERROR: ${module}_val should be 0\");\n";
}
print $fh
" end //always@(posedge clk)\n\n".
" end //initial begin\n\n";
# Stimulus: a single scaled impulse (1 << $fixedPoint) on inData.
print $fh
" initial begin\n".
" testEndedGracefully <= 0;\n" .
" inData <= 0;\n".
" #". (2*($filterSteps + $registerDelays)) .";\n".
" //feed an impulse\n" .
" inData <= 1 << $fixedPoint;\n".
" #2;\n".
" inData <= 0;\n".
" #". (2*$filterSteps + 10) .";\n".
" if(testEndedGracefully !== 1)\n" .
" \$display(\"ERROR: Test ended too soon. Not all outputs seen.\");\n" .
" \$stop;\n".
" \$finish;\n".
" end\n\n";
}
#-----------------------------------------------------------------------
# @brief puts it all together, reading input, etc
#-----------------------------------------------------------------------
sub main {
# Entry point: parse @ARGV, validate the A/B coefficient lists, then
# emit the requested Verilog module(s) to the selected file handle.
# Returns 1 on success; invalid input exits via printUsage().
#cmdline args = defaults:
my @leftConstants = ();
my @rightConstants = ();
my $bitWidth = 32;
my $fixedPoint = 0;
my $moduleName = "iirFilter";
my $inReg = 0;
my $outReg = 0;
my $nonOptimal = 0;
my $optimal = 0;
my $testBench = 0;
my $testBenchName = "top";
my $outFileName = undef;
my $reset_edge = undef;
my $fh;
my $suppressInfo;
my $debug = 0;
my $filterForm = 1;
# Default Verilog port names; overridden by -inData/-outData/-clk/-reset.
my %ports =
("i_data" => "i_data",
"o_data" => "o_data",
"clk" => "clk");
# Full command line is echoed into the generated file (unless -suppressInfo).
my $cmdLine = "./iirGen.pl " . join(" ", @ARGV);
#my args
my $parseConstants = undef;
#parse args
# $parseConstants points at the list (-B or -A) currently being filled;
# any bare numeric argument is appended to it.  Every other flag resets
# it, so constants must directly follow their -B / -A flag.
for(my $i = 0; $i < scalar(@ARGV); $i++){
if($ARGV[$i] eq ""){
next;
}
elsif($ARGV[$i] eq "-B"){
$parseConstants = \@leftConstants;
}
elsif($ARGV[$i] eq "-A"){
$parseConstants = \@rightConstants;
}
elsif($ARGV[$i] eq "-debug"){
$debug = 1;
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-filterForm"){
$i++;
$filterForm = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-moduleName"){
$i++;
$moduleName = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-bitWidth"){
$i++;
$bitWidth = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-fractionalBits"){
$i++;
$fixedPoint = $ARGV[$i];
$parseConstants = undef;
if($fixedPoint < 0){
print STDERR "Fractional bits argument must be non-negative: $ARGV[$i]\n";
printUsage();
}
}
elsif($ARGV[$i] eq "-reset_edge"){
$i++;
$reset_edge = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-reset"){
$i++;
$ports{"reset"} = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-inData"){
$i++;
$ports{"i_data"} = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-inReg"){
$inReg = 1;
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-outData"){
$i++;
$ports{"o_data"} = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-outReg"){
$outReg = 1;
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-clk"){
$i++;
$ports{"clk"} = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-h" || $ARGV[$i] eq "--help"){
printUsage();
}
elsif($ARGV[$i] eq "-base"){
$nonOptimal = 1;
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-addChain"){
$optimal = 1;
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-testBench"){
$testBench = 1;
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-testBenchName"){
$i++;
$testBench = 1;
$testBenchName = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-outFile"){
$i++;
$outFileName = $ARGV[$i];
$parseConstants = undef;
}
elsif($ARGV[$i] eq "-suppressInfo"){
$suppressInfo = 1;
$parseConstants = undef;
}
elsif(defined($parseConstants)){
#verify we were given a number:
# getnum() is presumably defined elsewhere in this file and returns
# undef for non-numeric input -- TODO confirm.
my $temp = getnum($ARGV[$i]);
if(defined(scalar($temp))){
push(@$parseConstants, $ARGV[$i]);
}
else{
print STDERR "UNDEFINED ARG: " . $ARGV[$i] . "\n";
printUsage();
}
}
else{
print STDERR "UNDEFINED ARG: " . $ARGV[$i] . "\n";
printUsage();
}
}
if(!defined($reset_edge)){
$reset_edge = "negedge";
}
my $warnOpt = 0;
if(scalar(@rightConstants)){
# A(0) is consumed here; the remaining A taps form the feedback side.
# NOTE(review): `my $a' shadows sort's package variable $a -- legal
# here, but a different name would be safer.
my $a = shift(@rightConstants);
if($a != 1 << $fixedPoint){
$warnOpt = 1; #TODO: scale
print STDERR "The first constant in A is required to be 1, please check the input and try again.\n";
printUsage();
}
}
else{
print STDERR "A(0) constants is required and must be 1.\n";
printUsage();
}
my $rightFir = 0;
my $leftFir = 0;
$rightFir = 1 if(scalar(@rightConstants));
$leftFir = 1 if(scalar(@leftConstants));
if(!($rightFir && $leftFir)){
print STDERR "Both A constants and B constants are required, please check in the input and try again.\n";
printUsage();
}
#right filter (A) must be negatized to use an adder
for(my $i = 0; $i < scalar(@rightConstants); $i++){
$rightConstants[$i] = -1 * $rightConstants[$i];
}
# Pick the default reset wire name based on the reset polarity, unless
# the user supplied one explicitly with -reset.
if(defined($reset_edge)){
if($reset_edge eq "negedge"){
if(!defined($ports{"reset"})){
$ports{"reset"} = "rst_n";
}
}
elsif($reset_edge eq "posedge"){
if(!defined($ports{"reset"})){
$ports{"reset"} = "rst";
}
}
else{
print STDERR "Invalid reset_edge: $reset_edge\n";
printUsage();
}
}
# NOTE(review): 2-arg open with an interpolated filename; a 3-arg open
# (open my $fh, '>', $outFileName) would be more robust.
if(defined($outFileName)){
if(!open(OUTFILE, "> $outFileName")){
print STDERR "Unable to open outfile for writing: $outFileName\n";
exit(-2);
}
$fh = \*OUTFILE;
}
else{
$fh = \*STDOUT;
}
#ports
my @portList = (["input ", $ports{"i_data"}, $bitWidth],
["input ", $ports{"clk"}],
["output ", $ports{"o_data"}, $bitWidth],
["input ", $ports{"reset"}]);
# When both implementations are requested, suffix the module names so
# they can coexist in one output file.
my $optSuffix = "";
my $vanillaSuffix = "";
if($optimal && $nonOptimal){
$optSuffix = "_addChain";
$vanillaSuffix = "_base";
}
elsif(!($optimal || $nonOptimal)){
$optimal = 1; #default
}
unless($suppressInfo){
print $fh $scriptInfo;
print $fh "/* $cmdLine */\n";
}
if($warnOpt){
print $fh "/* A(0) not equal to 1 << fractionalBits, scaling all taps to match. */\n";
}
#registered ports:
my %regPorts = %ports;
$regPorts{"i_data"} = $ports{"i_data"} . "_in" if($inReg);
$regPorts{"o_data"} = $ports{"o_data"} . "_in" if($outReg);
if($outReg){
$regPorts{"o_data_next"} = $ports{"o_data"};
}else{
$regPorts{"o_data_next"} = $ports{"o_data"} . "_next";
}
my $prefix = "w";
#inRegList:($inWire, $inReg, $bitWidth)
my @inRegList = ([$ports{"i_data"}, $regPorts{"i_data"}, $bitWidth]);
#outRegList:($outReg, $outWire, $bitWidth)
my @outRegList = ([$regPorts{"o_data_next"}, $regPorts{"o_data"}, $bitWidth]);
# Direct form I: two FIR sub-blocks (B on the input, negated A on the
# feedback) stitched together by genIIR.
if($filterForm == 1){
if($optimal){
my $firName = $moduleName . $optSuffix . "_firBlock";
#fir blocks are separate modules
my $iirSize = genFIR($fh, "addChain", $firName . "_left", $bitWidth, $fixedPoint, \@leftConstants, $ports{"reset"}, $reset_edge, $debug);
$iirSize += genFIR($fh, "addChain", $firName . "_right", $bitWidth, $fixedPoint, \@rightConstants, $ports{"reset"}, $reset_edge, $debug);
genHeader($fh, $moduleName . $optSuffix, \@portList);
$iirSize += registerIn($fh, \@inRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($inReg);
$iirSize += registerOut($fh, \@outRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($outReg);
$iirSize += genIIR($fh, \%regPorts, $bitWidth, $firName . "_left", $firName . "_right", $debug);
genTail($fh, $moduleName . $optSuffix, $iirSize);
}
if($nonOptimal){
my $firName = $moduleName . $vanillaSuffix . "_firBlock";
genFIR($fh, "base", $firName . "_left", $bitWidth, $fixedPoint, \@leftConstants, $ports{"reset"}, $reset_edge);
genFIR($fh, "base", $firName . "_right", $bitWidth, $fixedPoint, \@rightConstants, $ports{"reset"}, $reset_edge);
genHeader($fh, $moduleName . $vanillaSuffix, \@portList);
registerIn($fh, \@inRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($inReg);
registerOut($fh, \@outRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($outReg);
genIIR($fh, \%regPorts, $bitWidth, $firName . "_left", $firName . "_right");
genTail($fh, $moduleName . $vanillaSuffix);
}
}else{
# Direct form II: shared delay line with two multiplier blocks.
if($optimal){
@leftConstants = reverse @leftConstants;
@rightConstants = reverse @rightConstants;
my $multName = $moduleName . $optSuffix . "_MultiplyBlock";
my ($multSizeLeft, $outputHashLeft) = genMultiply($fh, "addChain", $multName . "_left", $bitWidth, $fixedPoint, \@leftConstants, $debug);
my ($multSizeRight, $outputHashRight) = genMultiply($fh, "addChain", $multName . "_right", $bitWidth, $fixedPoint, \@rightConstants, $debug);
my $iirSize = $multSizeLeft + $multSizeRight;
genHeader($fh, $moduleName . $optSuffix, \@portList);
$iirSize += registerIn($fh, \@inRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($inReg);
#we need this register regardless
$iirSize += registerOut($fh, \@outRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge);
$iirSize += genIIRform2($fh, \%regPorts, $bitWidth, $multName . "_left", $multName . "_right", \@leftConstants, $outputHashLeft, \@rightConstants, $outputHashRight, $reset_edge, $debug);
genTail($fh, $moduleName . $optSuffix, $iirSize);
}
if($nonOptimal){
print STDERR "form II not available to baseline application\n";
exit(-1);
# NOTE(review): everything below this exit(-1) is unreachable dead
# code kept from the form-I path.
my $firName = $moduleName . $vanillaSuffix . "_firBlock";
genFIR($fh, "base", $firName . "_left", $bitWidth, $fixedPoint, \@leftConstants, $ports{"reset"}, $reset_edge);
genFIR($fh, "base", $firName . "_right", $bitWidth, $fixedPoint, \@rightConstants, $ports{"reset"}, $reset_edge);
genHeader($fh, $moduleName . $vanillaSuffix, \@portList);
registerIn($fh, \@inRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($inReg);
registerOut($fh, \@outRegList, $ports{"clk"}, $ports{"reset"}, $reset_edge) if($outReg);
genIIR($fh, \%regPorts, $bitWidth, $firName . "_left", $firName . "_right");
genTail($fh, $moduleName . $vanillaSuffix);
}
}
if($testBench){
# Test bench generation is currently disabled; the commented code
# below is the old FIR-style bench and references @constants, which
# is not declared anywhere in this sub.
print STDERR "TestBench not written yet. Sorry.\n";
exit(-2);
#my @dummy = ();
#my @moduleList =();
#push(@moduleList, $moduleName . $optSuffix) if($optimal);
#push(@moduleList, $moduleName . $vanillaSuffix) if($nonOptimal);
#genHeader($fh, $testBenchName, \@dummy);
#genTestBench($fh, \%ports, \@moduleList, $bitWidth, scalar(@constants), (($inReg?1:0) + ($outReg?1:0)), $fixedPoint, $reset_edge, \@constants);
#genTail($fh, $testBenchName);
}
close($fh) if(defined($outFileName));
return 1;
}
#-----------------------------------------------------------------------
# @brief prints the usage and exits -1
#-----------------------------------------------------------------------
# Print the usage text to STDERR and exit with status -1.
# NOTE: the option names below now match the flags actually parsed in
# main(): the coefficient lists are introduced with -B (input side)
# and -A (output/feedback side).  The previously advertised
# -leftConstants / -rightConstants spellings were never accepted by
# the parser.
sub printUsage(){
print STDERR <<EOF;
$scriptInfo
./iirGen.pl -B 10 20 30 40 30 20 10
-A 1 20 30 40 30 20 10 [-moduleName iirFilter] [-bitWidth 32]
[-fractionalBits value] [-filterForm 1|2] [-inData i_data] [-inReg] [-outData o_data] [-outReg]
[-clk clk] [-base] [-addChain] [-testBench | -testBenchName top] [-outFile fileName]
[-suppressInfo] [-reset wireName] [-reset_edge negedge|posedge] [-debug]
-B: B constants (input side of the filter); the values follow the flag
-A: A constants (output/feedback side of the filter); the values follow the flag
the first value is A(0) and must equal 1 (i.e. 1 << fractionalBits)
-filterForm: 1 for direct form I (default), 2 for direct form II
-moduleName: verilog module name
-bitWidth: How many bits of data the multiplier multiplies by
-fractionalBits: how many bits of data are below the decimal point, default: 0
when this is used, the constants still need to be whole numbers, merely
2^x is now construed to be 1, where x is the fractionalBits value
-inData: verilog input port name
-inReg: cleanly register input
-outData: verilog output port name
-outReg: cleanly register output
-clk: verilog clk name
-reset: wireName for reset wire defaults to rst for posedge and rst_n for negedge
-reset_edge: use asynchronous reset at negedge or posedge
if neither reset or reset_edge is provided on the commandline, filter generated
defaults to negedge
-base: generate the vanilla, non-optimized version, for comparison
may be used in conjunction with -addChain, will generate 2 modules
-addChain: generate the optimized version, default
may be used in conjunction with -base, will generate 2 modules
-testBench: generate testBench
if used alone or in conjunction with one of the above two options,
will generate a test bench to print the inputs and outputs
based on a pseudo random input stream
if used in conjunction with both -base and -addChain,
will generate code to compare the output of the two when run
-testBenchName: generate testBench with the given name
-outFile print output to said fileName, defaults to stdout
-suppressInfo don't tag script info at the beginning of the verilog file
-debug: enable debug mode (passed through to the generator routines)
EOF
exit(-1);
}
# Run the generator; a false return from main() is reported distinctly
# so wrapper scripts can detect failure via the exit status.
if (main()) {
    exit(0);
}
print STDERR "Script Failed.\n";
exit(-1);
1; | UGent-HES/ConnectionRouter | vtr_flow/benchmarks/arithmetic/multless_consts/verilog/firgen/iirGen.pl | Perl | mit | 29,378 |
package Google::Ads::AdWords::v201406::Location;
# Generated SOAP::WSDL complex-type class for the AdWords v201406
# `Location' criterion (see the POD below).  Edit with care: files
# like this are normally regenerated from the WSDL.
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace this complex type belongs to.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201406' };
# No XML attribute class is defined for this type.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use base qw(Google::Ads::AdWords::v201406::Criterion);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Inside-out attribute storage: the :ATTR trait generates the get_*
# accessors, and _factory() below wires up the element names, their
# backing hashes, their SOAP/XSD types, and their XML tag names.
my %id_of :ATTR(:get<id>);
my %type_of :ATTR(:get<type>);
my %Criterion__Type_of :ATTR(:get<Criterion__Type>);
my %locationName_of :ATTR(:get<locationName>);
my %displayType_of :ATTR(:get<displayType>);
my %targetingStatus_of :ATTR(:get<targetingStatus>);
my %parentLocations_of :ATTR(:get<parentLocations>);
__PACKAGE__->_factory(
[ qw( id
type
Criterion__Type
locationName
displayType
targetingStatus
parentLocations
) ],
{
'id' => \%id_of,
'type' => \%type_of,
'Criterion__Type' => \%Criterion__Type_of,
'locationName' => \%locationName_of,
'displayType' => \%displayType_of,
'targetingStatus' => \%targetingStatus_of,
'parentLocations' => \%parentLocations_of,
},
{
'id' => 'SOAP::WSDL::XSD::Typelib::Builtin::long',
'type' => 'Google::Ads::AdWords::v201406::Criterion::Type',
'Criterion__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'locationName' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'displayType' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'targetingStatus' => 'Google::Ads::AdWords::v201406::LocationTargetingStatus',
'parentLocations' => 'Google::Ads::AdWords::v201406::Location',
},
{
'id' => 'id',
'type' => 'type',
'Criterion__Type' => 'Criterion.Type',
'locationName' => 'locationName',
'displayType' => 'displayType',
'targetingStatus' => 'targetingStatus',
'parentLocations' => 'parentLocations',
}
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201406::Location
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
Location from the namespace https://adwords.google.com/api/adwords/cm/v201406.
Represents Location criterion. <p>A criterion of this type can only be created using an ID. <span class="constraint AdxEnabled">This is enabled for AdX.</span>
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * locationName
=item * displayType
=item * targetingStatus
=item * parentLocations
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201406/Location.pm | Perl | apache-2.0 | 2,900 |
package Bio::Phylo::TreeBASE::Result::Taxonlink;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 NAME
Bio::Phylo::TreeBASE::Result::Taxonlink
=cut
__PACKAGE__->table("taxonlink");
=head1 ACCESSORS
=head2 linktype
data_type: 'char'
is_nullable: 0
size: 1
=head2 taxonlink_id
data_type: 'bigint'
is_auto_increment: 1
is_nullable: 0
sequence: 'taxonlink_id_sequence'
=head2 version
data_type: 'integer'
is_nullable: 1
=head2 foreigntaxonid
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 taxonauthority_id
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 0
=head2 taxon_id
data_type: 'bigint'
is_foreign_key: 1
is_nullable: 0
=cut
__PACKAGE__->add_columns(
"linktype",
{ data_type => "char", is_nullable => 0, size => 1 },
"taxonlink_id",
{
data_type => "bigint",
is_auto_increment => 1,
is_nullable => 0,
sequence => "taxonlink_id_sequence",
},
"version",
{ data_type => "integer", is_nullable => 1 },
"foreigntaxonid",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"taxonauthority_id",
{ data_type => "bigint", is_foreign_key => 1, is_nullable => 0 },
"taxon_id",
{ data_type => "bigint", is_foreign_key => 1, is_nullable => 0 },
);
__PACKAGE__->set_primary_key("taxonlink_id");
=head1 RELATIONS
=head2 taxon
Type: belongs_to
Related object: L<Bio::Phylo::TreeBASE::Result::Taxon>
=cut
__PACKAGE__->belongs_to(
"taxon",
"Bio::Phylo::TreeBASE::Result::Taxon",
{ taxon_id => "taxon_id" },
{ is_deferrable => 1, on_delete => "CASCADE", on_update => "CASCADE" },
);
=head2 taxonauthority
Type: belongs_to
Related object: L<Bio::Phylo::TreeBASE::Result::Taxonauthority>
=cut
__PACKAGE__->belongs_to(
"taxonauthority",
"Bio::Phylo::TreeBASE::Result::Taxonauthority",
{ taxonauthority_id => "taxonauthority_id" },
{ is_deferrable => 1, on_delete => "CASCADE", on_update => "CASCADE" },
);
# Created by DBIx::Class::Schema::Loader v0.07002 @ 2010-11-13 19:19:22
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:R2jBcSGtvGDHCR6C24KU5Q
# You can replace this text with custom content, and it will be preserved on regeneration
#
# NOTE: everything above the md5sum line is managed by
# DBIx::Class::Schema::Loader and is checksum-verified on
# regeneration, so custom code belongs below this point.
#
# Row class for the `taxonlink' table: associates a taxon row with an
# external taxonomic authority record via `foreigntaxonid'.
1;
| TreeBASE/treebasetest | treebase-core/src/main/perl/lib/Bio/Phylo/TreeBASE/Result/Taxonlink.pm | Perl | bsd-3-clause | 2,291 |
# Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
package Automake::VarDef;
use strict;
use Carp;
use Automake::ChannelDefs;
use Automake::ItemDef;
require Exporter;
use vars '@ISA', '@EXPORT';
@ISA = qw/Automake::ItemDef Exporter/;
@EXPORT = qw (&VAR_AUTOMAKE &VAR_CONFIGURE &VAR_MAKEFILE
&VAR_ASIS &VAR_PRETTY &VAR_SILENT &VAR_SORTED);
=head1 NAME
Automake::VarDef - a class for variable definitions
=head1 SYNOPSIS
use Automake::VarDef;
use Automake::Location;
# Create a VarDef for a definition such as
# | # any comment
# | foo = bar # more comment
# in Makefile.am
my $loc = new Automake::Location 'Makefile.am:2';
my $def = new Automake::VarDef ('foo', 'bar # more comment',
'# any comment',
$loc, '', VAR_MAKEFILE, VAR_ASIS);
# Appending to a definition.
$def->append ('value to append', 'comment to append');
# Accessors.
my $value = $def->value; # with trailing `#' comments and
# continuation ("\\\n") omitted.
my $value = $def->raw_value; # the real value, as passed to new().
my $comment = $def->comment;
my $location = $def->location;
my $type = $def->type;
my $owner = $def->owner;
my $pretty = $def->pretty;
# Changing owner.
$def->set_owner (VAR_CONFIGURE,
new Automake::Location 'configure.ac:15');
# Marking examined definitions.
$def->set_seen;
my $seen_p = $def->seen;
# Printing a variable for debugging.
print STDERR $def->dump;
=head1 DESCRIPTION
This class gathers data related to one Makefile-variable definition.
=head2 Constants
=over 4
=item C<VAR_AUTOMAKE>, C<VAR_CONFIGURE>, C<VAR_MAKEFILE>
Possible owners for variables. A variable can be defined
by Automake, in F<configure.ac> (using C<AC_SUBST>), or in
the user's F<Makefile.am>.
=cut
# Defined so that the owner of a variable can only be increased (e.g
# Automake should not override a configure or Makefile variable).
use constant VAR_AUTOMAKE => 0; # Variable defined by Automake.
use constant VAR_CONFIGURE => 1;# Variable defined in configure.ac.
use constant VAR_MAKEFILE => 2; # Variable defined in Makefile.am.
=item C<VAR_ASIS>, C<VAR_PRETTY>, C<VAR_SILENT>, C<VAR_SORTED>
Possible print styles. C<VAR_ASIS> variables should be output as-is.
C<VAR_PRETTY> variables are wrapped on multiple lines if they cannot
fit on one. C<VAR_SILENT> variables are not output at all. Finally,
C<VAR_SORTED> variables should be sorted and then handled as
C<VAR_PRETTY> variables.
C<VAR_SILENT> variables can also be overridden silently (unlike the
other kinds of variables whose overriding may sometimes produce
warnings).
=cut
# Possible values for pretty.
use constant VAR_ASIS => 0; # Output as-is.
use constant VAR_PRETTY => 1; # Pretty printed on output.
use constant VAR_SILENT => 2; # Not output. (Can also be
# overridden silently.)
use constant VAR_SORTED => 3; # Sorted and pretty-printed.
=back
=head2 Methods
C<VarDef> defines the following methods in addition to those inherited
from L<Automake::ItemDef>.
=over 4
=item C<my $def = new Automake::VarDef ($varname, $value, $comment, $location, $type, $owner, $pretty)>
Create a new Makefile-variable definition. C<$varname> is the name of
the variable being defined and C<$value> its value.
C<$comment> is any comment preceding the definition. (Because
Automake reorders variable definitions in the output, it also tries to
carry comments around.)
C<$location> is the place where the definition occurred, it should be
an instance of L<Automake::Location>.
C<$type> should be C<''> for definitions made with C<=>, and C<':'>
for those made with C<:=>.
C<$owner> specifies who owns the variables, it can be one of
C<VAR_AUTOMAKE>, C<VAR_CONFIGURE>, or C<VAR_MAKEFILE> (see these
definitions).
Finally, C<$pretty> tells how the variable should be output, and can
be one of C<VAR_ASIS>, C<VAR_PRETTY>, or C<VAR_SILENT>, or
C<VAR_SORTED> (see these definitions).
=cut
sub new ($$$$$$$$)
{
  my ($class, $var, $value, $comment, $location, $type, $owner, $pretty) = @_;

  # Appending (`+=') is only meaningful once the variable already
  # exists, so reject `+=' as the first definition of any variable not
  # owned by Automake itself.
  error $location, "$var must be set with `=' before using `+='"
    if $owner != VAR_AUTOMAKE && $type eq '+';

  # Let the base class record the comment, location, and owner, then
  # add the fields specific to variable definitions.
  my $self = Automake::ItemDef::new ($class, $comment, $location, $owner);
  $self->{'value'}  = $value;
  $self->{'type'}   = $type;
  $self->{'pretty'} = $pretty;
  $self->{'seen'}   = 0;
  return $self;
}
=item C<$def-E<gt>append ($value, $comment)>
Append C<$value> and <$comment> to the existing value and comment of
C<$def>. This is normally called on C<+=> definitions.
=cut
sub append ($$$)
{
  my ($self, $value, $comment) = @_;

  $self->{'comment'} .= $comment;

  # Comments are dropped from the value being augmented, so that
  #   VAR = foo # com
  #   VAR += bar
  # yields `VAR = foo bar' rather than `VAR = foo # com bar'.  A stray
  # `#' would also be unportable once the variable is wrapped onto
  # several lines.
  my $old = $self->{'value'};
  $old =~ s/ ?#.*//;

  if (chomp $old)
    {
      # The value ended in a newline: escape it so the definition
      # remains one logical Make line.
      $old .= "\\\n";
    }
  elsif ($old)
    {
      # Non-empty value: separate old and new parts with a space.
      $old .= ' ';
    }
  $self->{'value'} = $old . $value;

  # Some `make' implementations cannot read very long lines, so a
  # variable that grows through `+=' loses its as-is status and is
  # pretty-printed (wrapped) instead.
  $self->{'pretty'} = VAR_PRETTY
    if $self->{'pretty'} == VAR_ASIS;
}
=item C<$def-E<gt>value>
=item C<$def-E<gt>type>
=item C<$def-E<gt>pretty>
Accessors to the various constituents of a C<VarDef>. See the
documentation of C<new>'s arguments for a description of these.
=cut
sub value ($)
{
  my ($self) = @_;

  # Start from the raw text, then normalize it: strip everything from
  # the first `#' onward (`#' cannot be escaped in a Makefile, so no
  # cleverness is needed), and turn each escaped newline into a plain
  # space.
  my $text = $self->raw_value;
  $text =~ s/#.*$//s;
  $text =~ s/\\$/ /mg;
  return $text;
}
sub raw_value ($)
{
  my $self = shift;
  # Hand back the value exactly as written, trailing `#' comments and
  # backslash-newlines included (contrast with value()).
  return $self->{'value'};
}
sub type ($)
{
  my $self = shift;
  # `:' for `:=' definitions, the empty string for plain `='.
  return $self->{'type'};
}
sub pretty ($)
{
  my $self = shift;
  # One of VAR_ASIS, VAR_PRETTY, VAR_SILENT, or VAR_SORTED.
  return $self->{'pretty'};
}
=item C<$def-E<gt>set_owner ($owner, $location)>
Change the owner of a definition. This usually happens because
the user used C<+=> on an Automake variable, so (s)he now owns
the content. C<$location> should be an instance of L<Automake::Location>
indicating where the change took place.
=cut
sub set_owner ($$$)
{
  my ($self, $owner, $location) = @_;

  # Whoever takes over a definition also takes over its location (even
  # for `+=' statements); otherwise we could warn about a VAR_MAKEFILE
  # variable while pointing the user at configure.ac.
  @{$self}{qw(owner location)} = ($owner, $location);
}
=item C<$def-E<gt>set_seen>
=item C<$bool = $def-E<gt>seen>
These functions allow Automake to mark (C<set_seen>) variables that
it has examined in some way, and later check (using C<seen>) for
unused variables. Unused variables usually indicate typos.
=cut
sub set_seen ($)
{
  my $self = shift;
  # Record that Automake has examined this definition in some way.
  $self->{'seen'} = 1;
}
sub seen ($)
{
  my $self = shift;
  # True once set_seen() has been called on this definition.
  return $self->{'seen'};
}
=item C<$str = $def-E<gt>dump>
Format the contents of C<$def> as a human-readable string,
for debugging.
=cut
sub dump ($)
{
my ($self) = @_;
# Translate the numeric owner constant into a readable name; any
# other value is a programming error in Automake itself.
my $owner = $self->owner;
if ($owner == VAR_AUTOMAKE)
{
$owner = 'Automake';
}
elsif ($owner == VAR_CONFIGURE)
{
$owner = 'Configure';
}
elsif ($owner == VAR_MAKEFILE)
{
$owner = 'Makefile';
}
else
{
prog_error ("unexpected owner");
}
# Collect the remaining pieces via their accessors; raw_value keeps
# trailing comments and continuations, which is what we want when
# debugging.
my $where = $self->location->dump;
my $comment = $self->comment;
my $value = $self->raw_value;
my $type = $self->type;
# The interpolated multi-line string below is returned verbatim.
return "{
type: $type=
where: $where comment: $comment
value: $value
owner: $owner
}\n";
}
=back
=head1 SEE ALSO
L<Automake::Variable>, L<Automake::ItemDef>.
=cut
1;
### Setup "GNU" style for perl-mode and cperl-mode.
## Local Variables:
## perl-indent-level: 2
## perl-continued-statement-offset: 2
## perl-continued-brace-offset: 0
## perl-brace-offset: 0
## perl-brace-imaginary-offset: 0
## perl-label-offset: -2
## cperl-indent-level: 2
## cperl-brace-offset: 0
## cperl-continued-brace-offset: 0
## cperl-label-offset: -2
## cperl-extra-newline-before-brace: t
## cperl-merge-trailing-else: nil
## cperl-continued-statement-offset: 2
## End:
| racker/omnibus | source/automake-1.10.3/lib/Automake/VarDef.pm | Perl | apache-2.0 | 9,203 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::dell::powerconnect3000::mode::globalstatus;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Maps the SNMP productStatusGlobalStatus value to a pair of
# [ human-readable label, plugin severity ].  Only the values 3..5
# appear here; run() looks statuses up in this hash.
my %states = (
3 => ['ok', 'OK'],
4 => ['non critical', 'WARNING'],
5 => ['critical', 'CRITICAL'],
);
sub new {
    my ($class, %options) = @_;

    # Chain up to the generic plugin-mode constructor and claim the
    # resulting object for this class.
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    # This mode declares no mode-specific command line arguments.
    $options{options}->add_options(arguments => {});

    return $self;
}
# Validate/normalize the command line options; all of the actual
# handling is delegated to the base mode class.
sub check_options {
my ($self, %options) = @_;
$self->SUPER::init(%options);
}
# Poll the switch over SNMP, format the overall health status, and
# emit the plugin output.  Reads $options{snmp} (SNMP session object)
# and writes through $self->{output}; this method does not return
# normally ($self->{output}->exit() terminates the plugin).
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    # Dell PowerConnect 3000 product MIB OIDs.
    my $oid_productStatusGlobalStatus = '.1.3.6.1.4.1.674.10895.3000.1.2.110.1';
    my $oid_productIdentificationDisplayName = '.1.3.6.1.4.1.674.10895.3000.1.2.100.1';
    my $oid_productIdentificationBuildNumber = '.1.3.6.1.4.1.674.10895.3000.1.2.100.5';
    my $oid_productIdentificationServiceTag = '.1.3.6.1.4.1.674.10895.3000.1.2.100.8.1.4';
    my $result = $self->{snmp}->get_multiple_table(oids => [
            { oid => $oid_productStatusGlobalStatus, start => $oid_productStatusGlobalStatus },
            { oid => $oid_productIdentificationDisplayName, start => $oid_productIdentificationDisplayName },
            { oid => $oid_productIdentificationBuildNumber, start => $oid_productIdentificationBuildNumber },
            { oid => $oid_productIdentificationServiceTag, start => $oid_productIdentificationServiceTag },
        ],
        nothing_quit => 1 );

    my $globalStatus = $result->{$oid_productStatusGlobalStatus}->{$oid_productStatusGlobalStatus . '.0'};
    my $displayName = $result->{$oid_productIdentificationDisplayName}->{$oid_productIdentificationDisplayName . '.0'};
    my $buildNumber = $result->{$oid_productIdentificationBuildNumber}->{$oid_productIdentificationBuildNumber . '.0'};

    # The service-tag table can hold one entry per stacked unit; join
    # them into a comma-separated list in OID order.
    my $serviceTag;
    foreach my $key ($self->{snmp}->oid_lex_sort(keys %{$result->{$oid_productIdentificationServiceTag}})) {
        next if ($key !~ /^$oid_productIdentificationServiceTag\.(\d+)$/);
        if (!defined($serviceTag)) {
            $serviceTag = $result->{$oid_productIdentificationServiceTag}->{$oid_productIdentificationServiceTag . '.' . $1};
        } else {
            $serviceTag .= ',' . $result->{$oid_productIdentificationServiceTag}->{$oid_productIdentificationServiceTag . '.' . $1};
        }
    }

    # Guard against status values outside the mapped 3..5 range:
    # previously an unexpected value dereferenced undef in %states and
    # crashed the plugin instead of reporting anything.
    if (!defined($globalStatus) || !defined($states{$globalStatus})) {
        $self->{output}->output_add(severity => 'UNKNOWN',
            short_msg => sprintf("Overall global status is unknown (value: %s) [Product: %s] [Version: %s] [Service Tag: %s]",
                defined($globalStatus) ? $globalStatus : 'n/a', $displayName, $buildNumber, $serviceTag));
    } else {
        $self->{output}->output_add(severity => ${$states{$globalStatus}}[1],
            short_msg => sprintf("Overall global status is '%s' [Product: %s] [Version: %s] [Service Tag: %s]",
                ${$states{$globalStatus}}[0], $displayName, $buildNumber, $serviceTag));
    }

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check the overall status of Dell Powerconnect 3000.
=over 8
=back
=cut
| nichols-356/centreon-plugins | centreon/common/dell/powerconnect3000/mode/globalstatus.pm | Perl | apache-2.0 | 3,990 |
package DDG::Spice::Maps::Maps;
# ABSTRACT: Map of current location
use strict;
use Text::Trim;
use DDG::Spice;
# NOTE(review): Data::Dumper appears to be an unused debugging
# leftover -- nothing in this file references it.
use Data::Dumper;
# Forward the matched location string ($1) to the Mapbox geocoding API.
spice to => 'http://api.mapbox.com/v4/geocode/mapbox.places/$1.json?access_token={{ENV{DDG_SPICE_MAPBOX_KEY}}}';
spice is_cached => 0;
spice proxy_cache_valid => "418 1d";
spice wrap_jsonp_callback => 1;
# Trigger phrases that may appear at the start or the end of a query.
my @startend_triggers = ("map of", "map", "maps", "current location");
my $startend_joined = join "|", @startend_triggers;
my $start_qr = qr/^($startend_joined)/;
my $end_qr = qr/($startend_joined)$/;
# Queries mentioning any of these terms are about something else
# (other map providers, games, tools, ...) and are never handled.
my $skip_words_qr = qr/google|yahoo|bing|mapquest|fallout|time zone|editor|world|star|search/i;
my @all_triggers = @startend_triggers;
push @all_triggers, "directions";
# allows us to handle e.g.
# - "directions to florida" (matches "florida")
# - "driving directions to 10 canal street new york" (matches "10 canal street new york")
# - "directions from leeds to skipton uk" (matches "skipton uk")
my $directions_qr = qr/^(\w+\s)?directions.*\bto\b/;
triggers any => @all_triggers;
# Extract the location portion of the query; returning a non-empty
# string hands it to the spice `to' URL above as $1.
handle query_lc => sub {
my $query_lc = $_;
return if $query_lc =~ $skip_words_qr;
# handle maps/locations queries
if ($query_lc =~ $start_qr or $query_lc =~ $end_qr) {
# replace trigger words
$query_lc =~ s/$start_qr//g;
$query_lc =~ s/$end_qr//g;
$query_lc = trim ($query_lc);
return $query_lc if $query_lc;
# if there's no remainder, show the user's location
# ($loc is presumably the request geolocation object exported by
# DDG::Spice -- confirm against the framework)
my $location = $loc->loc_str;
return $location if $location;
}
# directions queries
if ($query_lc =~ $directions_qr) {
$query_lc =~ s/$directions_qr//g;
$query_lc = trim ($query_lc);
# there's a lot of queries like "directions from one place to another"
return if $query_lc eq "another";
return $query_lc if $query_lc;
}
return;
};
1;
| imwally/zeroclickinfo-spice | lib/DDG/Spice/Maps/Maps.pm | Perl | apache-2.0 | 1,872 |
package Perun::AuthzResolverAgent;
# Thin RPC proxy for the Perun `authzResolver' manager: every method
# below simply forwards its arguments to the remote manager method of
# the same name via Perun::Common::callManagerMethod.
use strict;
use warnings;
use Perun::Common;
my $manager = 'authzResolver';
use fields qw(_agent _manager);
# Constructor.  Takes the RPC agent through which calls are forwarded.
sub new
{
my $self = fields::new(shift);
$self->{_agent} = shift;
$self->{_manager} = $manager;
return $self;
}
# Forwards to the remote getPrincipalRoleNames method.
sub getPrincipalRoleNames
{
return Perun::Common::callManagerMethod('getPrincipalRoleNames', '', @_);
}
# Forwards to the remote isVoAdmin method.
sub isVoAdmin
{
return Perun::Common::callManagerMethod('isVoAdmin', '', @_);
}
# Forwards to the remote isGroupAdmin method.
sub isGroupAdmin
{
return Perun::Common::callManagerMethod('isGroupAdmin', '', @_);
}
# Forwards to the remote isFacilityAdmin method.
sub isFacilityAdmin
{
return Perun::Common::callManagerMethod('isFacilityAdmin', '', @_);
}
# Forwards to the remote isPerunAdmin method.
sub isPerunAdmin
{
return Perun::Common::callManagerMethod('isPerunAdmin', '', @_);
}
# Forwards to the remote setRole method.
sub setRole
{
return Perun::Common::callManagerMethod('setRole', '', @_);
}
# Forwards to the remote unsetRole method.
sub unsetRole
{
return Perun::Common::callManagerMethod('unsetRole', '', @_);
}
1;
| Holdo/perun | perun-rpc/src/main/perl/Perun/AuthzResolverAgent.pm | Perl | bsd-2-clause | 888 |
package API::Job;
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# JvD Note: you always want to put Utils as the first use. Sh*t don't work if it's after the Mojo lines.
use UI::Utils;
use Mojo::Base 'Mojolicious::Controller';
use UI::Utils;
use Digest::SHA1 qw(sha1_hex);
use Mojolicious::Validator;
use Mojolicious::Validator::Validation;
use Mojo::JSON;
use Time::Local;
use LWP;
use Email::Valid;
use MojoPlugins::Response;
use MojoPlugins::Job;
use Utils::Helper::ResponseHelper;
use Validate::Tiny ':all';
use UI::ConfigFiles;
use UI::Tools;
use Data::Dumper;
# List the current user's invalidation (purge) jobs, optionally scoped
# to a single delivery service via the 'dsId' query parameter.  The
# 'keyword' parameter defaults to 'PURGE'.  Responds with a (possibly
# empty) camelCased list.
sub index {
	my $self     = shift;
	my $username = $self->current_user()->{username};
	my $ds_id    = $self->param('dsId');
	my $keyword  = $self->param('keyword') || 'PURGE';

	my $response = [];

	# Jobs are always restricted to the requesting user.
	my $criteria  = { keyword => $keyword, 'job_user.username' => $username };
	my $ds_scoped = defined($ds_id);

	my $rs;
	if ($ds_scoped) {
		$criteria->{'job_deliveryservice.id'} = $ds_id;
		$rs = $self->db->resultset('Job')->search(
			$criteria,
			{ prefetch => [ { 'job_deliveryservice' => undef } ], join => 'job_user' }
		);
	}
	else {
		$rs = $self->db->resultset('Job')->search(
			$criteria,
			{ prefetch => [ { 'job_user' => undef } ], join => 'job_user' }
		);
	}

	if ( defined($rs) && $rs->count() > 0 ) {
		# The delivery-service-scoped variant carries extra DS columns.
		my @rows   = $ds_scoped ? $self->job_ds_data($rs) : $self->job_data($rs);
		my $helper = Utils::Helper::ResponseHelper->new();
		$response = $helper->camelcase_response_keys(@rows);
	}

	return $self->success($response);
}
# Creates a purge job based upon the Deliveryservice (ds_id) instead
# of the ds_xml_id like the UI does.
# Create an invalidate-content (purge) job for a delivery service,
# identified by numeric id in the JSON body ({ dsId, regex, startTime,
# ttl, urgent, ... }).  Admin/operations users may purge any delivery
# service; other users only those assigned to them.
sub create {
	my $self = shift;

	my $ds_id      = $self->req->json->{dsId};
	my $agent      = $self->req->json->{agent};
	my $keyword    = $self->req->json->{keyword};
	my $regex      = $self->req->json->{regex};
	my $ttl        = $self->req->json->{ttl};
	my $start_time = $self->req->json->{startTime};
	my $asset_type = $self->req->json->{assetType};

	if ( !&is_admin($self) && !&is_oper($self) ) {

		# not admin or operations -- only an assigned user can purge
		my $tm_user    = $self->db->resultset('TmUser')->search( { username => $self->current_user()->{username} } )->single();
		my $tm_user_id = $tm_user->id;

		if ( defined($ds_id) ) {

			# select deliveryservice from deliveryservice_tmuser where deliveryservice=$ds_id
			my $dbh = $self->db->resultset('DeliveryserviceTmuser')->search( { deliveryservice => $ds_id, tm_user_id => $tm_user_id }, { id => 1 } );
			my $count = $dbh->count();
			if ( $count == 0 ) {
				$self->forbidden("Forbidden. Delivery service not assigned to user.");
				return;
			}
		}
	}

	# Just pass "true" in the urgent key to make it urgent.
	my $urgent = $self->req->json->{urgent};

	my ( $is_valid, $result ) = $self->is_valid( { dsId => $ds_id, regex => $regex, startTime => $start_time, ttl => $ttl } );
	if ( !$is_valid ) {
		return $self->alert($result);
	}

	my $new_id = $self->create_new_job( $ds_id, $regex, $start_time, $ttl, 'PURGE', $urgent );
	if ( !$new_id ) {

		# BUGFIX: this previously passed a one-element hash reference
		# ({ "..." . $ds_id }) to alert(), i.e. an odd-element hash with an
		# undef value; alert() expects a message string here.
		return $self->alert( "Error creating invalidate content request: " . $ds_id );
	}

	my $saved_job = $self->db->resultset("Job")->find( { id => $new_id } );
	my $asset_url = $saved_job->asset_url;
	&log( $self, "Invalidate content request submitted for " . $asset_url, "APICHANGE" );
	return $self->success_message( "Invalidate content request submitted for: " . $asset_url . " (" . $saved_job->parameters . ")" );
}
# Validate the fields of a purge request with Validate::Tiny.
# Returns (1, $validated_data) on success or (0, $error_hash) on failure.
sub is_valid {
	my $self = shift;
	my $job  = shift;

	my $rules = {
		fields => [qw/dsId regex startTime ttl/],

		# Checks to perform on all fields.  'checks' is an ordered list, so
		# duplicate keys (the two startTime entries below) both run, in order.
		checks => [

			# All of these are required
			[qw/regex startTime ttl dsId/] => is_required("is required"),

			# TTL must lie inside the configured revalidation window.
			ttl => sub {
				my $value  = shift;
				my $params = shift;
				if ( defined( $params->{'ttl'} ) ) {
					return $self->is_ttl_in_range($value);
				}
			},

			# NOTE(review): both startTime checks guard on
			# defined($params->{'ttl'}) rather than on startTime itself;
			# this looks like a copy/paste of the ttl guard -- confirm intent.
			startTime => sub {
				my $value  = shift;
				my $params = shift;
				if ( defined( $params->{'ttl'} ) ) {
					return $self->is_valid_date_format($value);
				}
			},
			startTime => sub {
				my $value  = shift;
				my $params = shift;
				if ( defined( $params->{'ttl'} ) ) {
					return $self->is_more_than_two_days($value);
				}
			},
		]
	};

	# Validate the input against the rules
	my $result = validate( $job, $rules );
	if ( $result->{success} ) {

		#print "success: " . dump( $result->{data} );
		return ( 1, $result->{data} );
	}
	else {

		#print "failed " . Dumper( $result->{error} );
		return ( 0, $result->{error} );
	}
}
# Validate that $value is a 'YYYY-MM-DD HH:MM:SS' timestamp (the regex
# also accepts '/' or '.' as date separators, and encodes month lengths
# plus the leap-year rule for February 29th).
# Returns an error string on bad format; undef when the value is valid
# or absent (presence is enforced separately by the is_required rule).
sub is_valid_date_format {
	my $self  = shift;
	my $value = shift;

	if ( !defined $value or $value eq '' ) {
		return undef;
	}

	# NOTE(review): the classes '[\/\ .-]' and '(1[ 6-9]' below contain
	# literal spaces that look like line-wrap damage from the original
	# source; confirm against the upstream pattern before changing them.
	if (
		( $value ne '' )
		&& ( $value !~
			qr/^((((19|[2-9]\d)\d{2})[\/\.-](0[13578]|1[02])[\/\.-](0[1-9]|[12]\d|3[01])\s(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]))|(((19|[2-9]\d)\d{2})[\/\.-](0[13456789]|1[012])[\/\ .-](0[1-9]|[12]\d|30)\s(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]))|(((19|[2-9]\d)\d{2})[\/\.-](02)[\/\.-](0[1-9]|1\d|2[0-8])\s(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]))|(((1[ 6-9]|[2-9]\d)(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))[\/\.-](02)[\/\.-](29)\s(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])))$/
		)
		)
	{
		# BUGFIX: message previously read "has an invalidate date format".
		return "has an invalid date format, should be in the form of YYYY-MM-DD HH:MM:SS";
	}
	return undef;
}
# Validate that the requested TTL (in hours) lies between 1 hour and the
# 'maxRevalDurationDays' parameter (from regex_revalidate.config)
# expressed in hours.  Returns an error string on violation; undef when
# acceptable or when no value was supplied.
sub is_ttl_in_range {
	my $self  = shift;
	my $value = shift;

	my $min_hours = 1;

	# BUGFIX: 'config_file' was previously passed in the second
	# (attributes) hash of search(), where DBIx::Class does not treat it
	# as a WHERE condition; it belongs in the conditions hash with 'name'.
	my $max_days =
		$self->db->resultset('Parameter')
		->search( { name => "maxRevalDurationDays", config_file => "regex_revalidate.config" } )
		->get_column('value')->first;
	my $max_hours = $max_days * 24;

	if ( !defined $value or $value eq '' ) {
		return undef;
	}
	if ( ( $value ne '' ) && ( $value < $min_hours || $value > $max_hours ) ) {
		return "should be between " . $min_hours . " and " . $max_hours;
	}
	return undef;
}
# Reject start times further than two days (172800 s) from now, in
# either direction.  Returns an error string on violation; undef when
# acceptable or when no value was supplied (presence is enforced by the
# is_required rule).
sub is_more_than_two_days {
	my ( $self, $value ) = @_;

	return undef if !defined($value) || $value eq '';

	my $date_helper      = Utils::Helper::DateHelper->new();
	my $requested_epoch  = $date_helper->date_to_epoch($value);
	my $seconds_from_now = abs( $requested_epoch - time() );

	if ( $seconds_from_now > 172800 ) {
		return "needs to be within two days from now.";
	}
	return undef;
}
1;
| dneuman64/traffic_control | traffic_ops/app/lib/API/Job.pm | Perl | apache-2.0 | 7,129 |
# -----------------------------------------------------------------
# catcoregen.pl
# Usage:
# $ cat <header files> | catcoregen.pl > catcoretable.c
#
# This script parses the special CATALOG, DECLARE_INDEX etc. lines,
# like genbki.sh does, and generates CatCore C definitions from them.
# The structure looks like:
#
# - array of relation
# - array of attribute
# - array of index
# - array of type
#
# See also catcore.h for each structure definition.
# -----------------------------------------------------------------
use strict;
my $relations;
my $indexes;
my $rel;
my $processing_rel_struct = 0;
my $relstruct;
# Read input
# Parse the concatenated catalog headers from stdin.  This is a small
# line-driven state machine: a CATALOG(...) line opens a struct, lines
# are accumulated into $relstruct until the "} Form..." closer, and
# DECLARE_INDEX lines are collected per relation as they appear.
while (my $line = <>) {
	if ($line =~ /^CATALOG.*/)
	{
		# It's the beginning of a FormData struct definition.
		# Extract the table name and OID from the CATALOG line.
		$line =~ /CATALOG\((.*),(\d+)\)(.*)$/
			or die("malformed CATALOG line: $line\n");
		# Anything after the closing paren may carry BKI options.
		my $has_oids = (index($3, 'BKI_WITHOUT_OIDS') != -1) ? 0 : 1;
		$rel = { relname => $1,
				 reloid => $2,
				 has_oids => $has_oids };
		# We will append the lines as is to $relstruct, until we reach the
		# ending brace.
		$processing_rel_struct = 1;
		$relstruct = '';
		next;
	}
	if ($processing_rel_struct)
	{
		$relstruct = $relstruct . $line;
	}
	if ($line =~ /\}\s+Form/)
	{
		# end of struct
		$processing_rel_struct = 0;
		# Strip comments. Per the perl FAQ, this doesn't handle all C comments
		# that are legal in the language, but it's enough for our catalog
		# header files.
		$relstruct =~ s/\/\*.*?\*\///sg;
		# Strip extra whitespace, and turn all whitespace into spaces.
		$relstruct =~ s/\t/ /sg;
		$relstruct =~ s/\s\s+/ /sg;
		$relstruct =~ s/\s/ /sg;
		# Strip { } at beginning and end
		$relstruct =~ /{(.*)}/;
		$relstruct = $1;
		# split at ; to columns
		my @cols = split (/;/, $relstruct);
		# there's an empty element at the end, because of the last ';'
		pop @cols;
		# Record the finished relation, keyed by name for later lookup.
		$rel->{attributes} = \@cols;
		$relations->{$rel->{relname}} = $rel;
		next;
	}
	if ($line =~ /^DECLARE_(UNIQUE_)?INDEX/)
	{
		# Captures: $2 = index name, $3 = index OID, $4 = indexed relation,
		# $5 = access method, $6 = comma-separated key column list.
		if ($line =~ /^DECLARE_(UNIQUE_)?INDEX\((\w+),\s*(\d+),\s*on\s+(\w+)\s+using\s*(\w+)\s*\((.*)\)\);/)
		{
			my $indexdef = {
				indexname => $2,
				indexoid => $3,
				amname => $5
			};
			my $relname = $4;
			@{$indexdef->{cols}} = split (/,/, $6);
			push(@{$indexes->{$relname}}, $indexdef);
		}
		else {
			die("malformed DECLARE_UNIQUE_INDEX or DECLARE_INDEX line: $line\n");
		}
	}
}
# Ok, we've read all the data. Start printing!
#
# Constant header first
print <<"END_MESSAGE";
/*-------------------------------------------------------------------------
*
* catcoretable.c
* Auto-generated C file from the catalog headers. Don't edit manually.
*
* WARNING: DO NOT MODIFY THIS FILE:
* Generated by ./catcoregen.pl
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "catalog/catcore.h"
#include "catalog/catalog.h"
#include "catalog/gp_configuration.h"
#include "catalog/gp_fastsequence.h"
#include "catalog/gp_id.h"
#include "catalog/gp_policy.h"
#include "catalog/gp_san_config.h"
#include "catalog/gp_segment_config.h"
#include "catalog/gp_version.h"
#include "catalog/indexing.h"
#include "catalog/pg_aggregate.h"
#include "catalog/pg_amop.h"
#include "catalog/pg_amproc.h"
#include "catalog/pg_appendonly.h"
#include "catalog/pg_attrdef.h"
#include "catalog/pg_auth_members.h"
#include "catalog/pg_authid.h"
#include "catalog/pg_autovacuum.h"
#include "catalog/pg_cast.h"
#include "catalog/pg_class.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_conversion.h"
#include "catalog/pg_database.h"
#include "catalog/pg_depend.h"
#include "catalog/pg_description.h"
#include "catalog/pg_extprotocol.h"
#include "catalog/pg_exttable.h"
#include "catalog/pg_filespace.h"
#include "catalog/pg_filespace_entry.h"
#include "catalog/pg_inherits.h"
#include "catalog/pg_language.h"
#include "catalog/pg_largeobject.h"
#include "catalog/pg_listener.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_opclass.h"
#include "catalog/pg_opfamily.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_partition.h"
#include "catalog/pg_partition_rule.h"
#include "catalog/pg_pltemplate.h"
#include "catalog/pg_proc.h"
#include "catalog/pg_resqueue.h"
#include "catalog/pg_rewrite.h"
#include "catalog/pg_shdepend.h"
#include "catalog/pg_shdescription.h"
#include "catalog/pg_statistic.h"
#include "catalog/pg_tablespace.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_window.h"
#include "catalog/pg_tidycat.h"
#include "utils/fmgroids.h"
#include "utils/syscache.h"
/*
* First, information about every datatype that is used in catalogs.
* (This is maintained manually in catcoregen.pl, there are so many of these
* and they change seldom.)
*/
static const CatCoreType CatCoreType_bool =
{BOOLOID, F_BOOLEQ, F_BOOLLT, F_BOOLLE, F_BOOLGE, F_BOOLGT};
static const CatCoreType CatCoreType_bytea =
{BYTEAOID, F_BYTEAEQ, F_BYTEALT, F_BYTEALE, F_BYTEAGE, F_BYTEAGT};
static const CatCoreType CatCoreType_char =
{CHAROID, F_CHAREQ, F_CHARLT, F_CHARLE, F_CHARGE, F_CHARGT};
static const CatCoreType CatCoreType_NameData =
{NAMEOID, F_NAMEEQ, F_NAMELT, F_NAMELE, F_NAMEGE, F_NAMEGT};
static const CatCoreType CatCoreType_int8 =
{INT8OID, F_INT8EQ, F_INT8LT, F_INT8LE, F_INT8GE, F_INT8GT};
static const CatCoreType CatCoreType_int2 =
{INT2OID, F_INT2EQ, F_INT2LT, F_INT2LE, F_INT2GE, F_INT2GT};
static const CatCoreType CatCoreType_int2vector =
{INT2VECTOROID, InvalidOid, InvalidOid, InvalidOid, InvalidOid, InvalidOid};
static const CatCoreType CatCoreType_int4 =
{INT4OID, F_INT4EQ, F_INT4LT, F_INT4LE, F_INT4GE, F_INT4GT};
static const CatCoreType CatCoreType_regproc =
{REGPROCOID, F_OIDEQ, F_OIDLT, F_OIDLE, F_OIDGE, F_OIDGT};
static const CatCoreType CatCoreType_text =
{TEXTOID, F_TEXTEQ, F_TEXT_LT, F_TEXT_LE, F_TEXT_GE, F_TEXT_GT};
static const CatCoreType CatCoreType_Oid =
{OIDOID, F_OIDEQ, F_OIDLT, F_OIDLE, F_OIDGE, F_OIDGT};
static const CatCoreType CatCoreType_tid =
{TIDOID, F_TIDEQ, F_TIDLT, F_TIDLE, F_TIDGE, F_TIDGT};
static const CatCoreType CatCoreType_TransactionId =
{XIDOID, InvalidOid, InvalidOid, InvalidOid, InvalidOid, InvalidOid};
static const CatCoreType CatCoreType_oidvector =
{OIDVECTOROID, F_OIDVECTOREQ, F_OIDVECTORLT, F_OIDVECTORLE, F_OIDVECTORGE, F_OIDVECTORGT};
static const CatCoreType CatCoreType_float4 =
{FLOAT4OID, F_FLOAT4EQ, F_FLOAT4LT, F_FLOAT4LE, F_FLOAT4GE, F_FLOAT4GT};
static const CatCoreType CatCoreType_char_array =
{1002, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_int2_array =
{INT2ARRAYOID, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_int4_array =
{INT4ARRAYOID, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_text_array =
{TEXTARRAYOID, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_float4_array =
{FLOAT4ARRAYOID, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_Oid_array =
{1028, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_aclitem_array =
{1034, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
static const CatCoreType CatCoreType_time =
{TIMEOID, F_TIME_EQ, F_TIME_LT, F_TIME_LE, F_TIME_GE, F_TIME_GT};
static const CatCoreType CatCoreType_timestamptz =
{TIMESTAMPTZOID, F_TIMESTAMP_EQ, F_TIMESTAMP_LT, F_TIMESTAMP_LE, F_TIMESTAMP_GE, F_TIMESTAMP_GT};
static const CatCoreType CatCoreType_gpxlogloc =
{XLOGLOCOID, F_GPXLOGLOCEQ, F_GPXLOGLOCLT, F_GPXLOGLOCLE, F_GPXLOGLOCGE, F_GPXLOGLOCGT};
static const CatCoreType CatCoreType_anyarray =
{2277, F_ARRAY_EQ, F_ARRAY_LT, F_ARRAY_LE, F_ARRAY_GE, F_ARRAY_GT};
/* A pseudo-attr entry, for the "oid" system column */
const CatCoreAttr TableOidAttr = {
"oid", ObjectIdAttributeNumber, &CatCoreType_Oid
};
END_MESSAGE
# Helper function for forming the attribute arrays. The arguments are the
# relation and attribute name, and this generates a reference to the correct
# Anum_* macro, for that column.
# Map a (relation, attribute) pair to the C macro naming its attribute
# number.  The system column "oid" always maps to ObjectIdAttributeNumber;
# everything else becomes Anum_<relname>_<attname>.
sub attname_to_anum
{
	my ($relname, $attname) = @_;

	# A few Greenplum-specific catalogs use Anum_* prefixes that differ
	# from their relation names; translate those first.
	my %anum_prefix_for = (
		gp_distribution_policy   => 'gp_policy',
		pg_stat_last_operation   => 'pg_statlastop',
		pg_stat_last_shoperation => 'pg_statlastshop',
	);
	$relname = $anum_prefix_for{$relname} if exists $anum_prefix_for{$relname};

	return "ObjectIdAttributeNumber" if $attname eq "oid";

	return "Anum_" . $relname . "_" . $attname;
}
# Helper function for generating a reference to the CatCoreType_* struct,
# for a given C type.
# Map a column's C type name (plus an array marker: '[' or '') to the
# name of the corresponding CatCoreType_* struct defined in the header
# emitted above.
sub ctype_to_coretype
{
	my ($ctype, $isarray) = @_;

	my $suffix = ($isarray ne '') ? '_array' : '';
	return 'CatCoreType_' . $ctype . $suffix;
}
# Sort the list of relations, by relation name. This makes it possible to
# binary search the array.
my @relations = sort {$a->{relname} cmp $b->{relname}} (values %{$relations});

# First generate the attribute array for each relation
print "/* Attributes for each relation (auto-generated from *.h files) */\n";
foreach $rel (@relations) {
	print "static const CatCoreAttr $rel->{relname}Attributes[] = {\n";
	my @cols = @{$rel->{attributes}};
	my $attnum = 1;
	foreach my $col (@cols) {
		# Each column declaration is "<ctype> <name>[optional []]";
		# the third capture ('[' or '') flags array columns.
		my ($ctype, $attname, $isarray) = ($col =~ /(\w+)\s+(\w+)(\[?)/);
		print "\t{\"$attname\", $attnum, &" . ctype_to_coretype($ctype, $isarray) . "}";
		# No comma after the last array element.
		if ($attnum != scalar @cols)
		{
			print ",";
		}
		print "\n";
		$attnum++;
	}
	print "};\n";
}
print "\n";
# Then index array for each relation.  Each CatCoreIndex carries a fixed
# four-slot key column array; unused slots get InvalidAttrNumber.
print "/* Indexes for each relation (auto-generated from *.h files) */\n";
foreach $rel (@relations) {
	print "static const CatCoreIndex $rel->{relname}Indexes[] = ";

	# Relations with no DECLARE_INDEX lines get an empty array.
	my $indref = $indexes->{$rel->{relname}};
	if (!$indref)
	{
		print "{ };\n";
		next;
	}
	my @indexes = @{$indexes->{$rel->{relname}}};
	my $indexnum = 1;
	print "{\n";
	foreach my $indexdef (@indexes) {
		print "\t/* $indexdef->{indexname} */\n";
		print "\t{" . $indexdef->{indexoid} . ", {\n";
		my @cols = @{$indexdef->{cols}};
		for (my $i=0; $i <= 3; $i++) {
			my $col = $cols[$i];
			if ($col)
			{
				# trim whitespace
				$col =~ s/^\s+|\s+$//g;
				# "colname opclass" -- the opclass is only echoed in a comment.
				my ($colname, $ops) = split(/ /, $col);
				print "\t\t" . attname_to_anum($rel->{relname}, $colname) . " /* $rel->{relname} $ops */";
			}
			else
			{
				print "\t\tInvalidAttrNumber";
			}
			if ($i < 3)
			{
				print ",";
			}
			print "\n";
		}
		print "\t}, " . scalar(@cols) . "}";
		# BUGFIX: removed a leftover statement that pattern-matched the
		# $indexdef hash reference itself (its stringified address) into
		# unused variables; it had no effect on the output.
		if ($indexnum != scalar @indexes)
		{
			print ",";
		}
		print "\n";
		$indexnum++;
	}
	print "};\n";
}
print "\n";
# Finally, the relations array itself: ties each relation to the
# attribute and index arrays emitted above.
my $nrelations = scalar @relations;
my $lastreloid = $relations[-1]->{reloid};
print "/* List of catalog relations (auto-generated from *.h files) */\n";
print "const int CatCoreRelationSize = $nrelations;\n";
print "const CatCoreRelation CatCoreRelations[$nrelations] = {\n";
foreach $rel (@relations) {
	print "\t{";
	print "\"$rel->{relname}\", ";
	print "$rel->{reloid}, ";
	print "$rel->{relname}Attributes, lengthof($rel->{relname}Attributes), ";
	print "$rel->{relname}Indexes, lengthof($rel->{relname}Indexes), ";
	print $rel->{has_oids} ? "true" : "false";
	print "}";
	# Suppress the trailing comma after the last entry; this relies on
	# relation OIDs being unique, which the catalogs guarantee.
	if ($rel->{reloid} != $lastreloid)
	{
		print ",";
	}
	print "\n";
}
print "};\n";
| rubikloud/gpdb | src/backend/catalog/core/catcoregen.pl | Perl | apache-2.0 | 11,789 |
package API::Division;
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
use UI::Utils;
use UI::Division;
use Mojo::Base 'Mojolicious::Controller';
use Data::Dumper;
use JSON;
use MojoPlugins::Response;
# List all divisions as { id, name, lastUpdated }, sorted by the
# 'orderby' query parameter (defaults to 'name').
sub index {
	my $self = shift;
	my @data;
	my $orderby = $self->param('orderby') || "name";

	# NOTE(review): 'orderby' is interpolated into the order_by clause;
	# confirm the router/caller restricts its values.
	my $rs_data = $self->db->resultset("Division")->search( undef, { order_by => 'me.' . $orderby } );
	while ( my $row = $rs_data->next ) {
		push(
			@data, {
				"id"          => $row->id,
				"name"        => $row->name,
				"lastUpdated" => $row->last_updated
			}
		);
	}
	$self->success( \@data );
}
# Look up divisions by the 'name' route parameter.  The response is a
# (possibly empty) list of { id, name, lastUpdated } records, matching
# the shape of the other listing handlers.
sub index_by_name {
	my $self = shift;
	my $name = $self->param('name');

	my $rs = $self->db->resultset("Division")->search( { name => $name } );
	my @data = map {
		+{
			"id"          => $_->id,
			"name"        => $_->name,
			"lastUpdated" => $_->last_updated
		}
	} $rs->all;

	$self->success( \@data );
}
# Look up a single division by its 'id' route parameter.  The response
# is still a (zero- or one-element) list for consistency with the other
# handlers.
sub show {
	my $self = shift;
	my $id   = $self->param('id');

	my $rs = $self->db->resultset("Division")->search( { id => $id } );
	my @data = map {
		+{
			"id"          => $_->id,
			"name"        => $_->name,
			"lastUpdated" => $_->last_updated
		}
	} $rs->all;

	$self->success( \@data );
}
# Rename an existing division.  Requires operator privileges; the JSON
# body must contain { name => ... }.  Responds with the updated record
# or an alert describing the failure.
sub update {
	my $self   = shift;
	my $id     = $self->param('id');
	my $params = $self->req->json;

	return $self->forbidden() unless &is_oper($self);

	my $division = $self->db->resultset('Division')->find( { id => $id } );
	return $self->not_found() unless defined($division);

	return $self->alert("parameters must be in JSON format.") unless defined($params);
	return $self->alert("Division name is required.") unless defined( $params->{name} );

	my $updated = $division->update( { name => $params->{name} } );
	if ( !$updated ) {
		return $self->alert("Division update failed.");
	}

	my $response = {
		id          => $updated->id,
		name        => $updated->name,
		lastUpdated => $updated->last_updated,
	};
	&log( $self, "Updated Division name '" . $updated->name . "' for id: " . $updated->id, "APICHANGE" );
	return $self->success( $response, "Division update was successful." );
}
# Create a new division from the JSON body ({ name => ... }).  Requires
# operator privileges; rejects duplicate names.
sub create {
	my $self = shift;
	my $params = $self->req->json;
	if ( !defined($params) ) {
		return $self->alert("parameters must be in JSON format, please check!");
	}
	if ( !&is_oper($self) ) {
		return $self->alert( { Error => " - You must be an ADMIN or OPER to perform this operation!" } );
	}
	my $name = $params->{name};
	if ( !defined($name) ) {
		return $self->alert("division 'name' is not given.");
	}

	#Check for duplicate division name
	my $existing_division = $self->db->resultset('Division')->search( { name => $name } )->get_column('name')->single();
	if ($existing_division) {
		return $self->alert("A division with name \"$name\" already exists.");
	}
	my $insert = $self->db->resultset('Division')->create( { name => $name } );
	# NOTE(review): DBIx::Class create() already inserts the row; the
	# explicit insert() below looks redundant -- confirm before removing.
	$insert->insert();
	my $response;
	# Re-read the row so the response reflects what was actually stored.
	my $rs = $self->db->resultset('Division')->find( { id => $insert->id } );
	if ( defined($rs) ) {
		$response->{id}   = $rs->id;
		$response->{name} = $rs->name;
		return $self->success($response);
	}
	return $self->alert("create division failed.");
}
# Delete a division by id.  Requires operator privileges and refuses to
# remove a division that any region still references.
sub delete {
	my $self = shift;
	my $id   = $self->param('id');

	return $self->forbidden() unless &is_oper($self);

	my $division = $self->db->resultset('Division')->find( { id => $id } );
	return $self->not_found() unless defined($division);

	my $in_use = $self->db->resultset('Region')->find( { division => $division->id } );
	if ( defined($in_use) ) {
		return $self->alert("This division is currently used by regions.");
	}

	if ( $division->delete() ) {
		return $self->success_message("Division deleted.");
	}
	return $self->alert("Division delete failed.");
}
# Delete a division by name.  Same rules as delete(): operator
# privileges required, and divisions referenced by regions are kept.
sub delete_by_name {
	my $self = shift;
	my $name = $self->param('name');

	return $self->forbidden() unless &is_oper($self);

	my $division = $self->db->resultset('Division')->find( { name => $name } );
	return $self->not_found() unless defined($division);

	my $in_use = $self->db->resultset('Region')->find( { division => $division->id } );
	if ( defined($in_use) ) {
		return $self->alert("This division is currently used by regions.");
	}

	if ( $division->delete() ) {
		return $self->success_message("Division deleted.");
	}
	return $self->alert("Division delete failed.");
}
1;
| alficles/incubator-trafficcontrol | traffic_ops/app/lib/API/Division.pm | Perl | apache-2.0 | 5,329 |
#!/usr/bin/perl
# SPDX-FileCopyrightText: 2021 Pragmatic Software <pragma78@gmail.com>
# SPDX-License-Identifier: MIT
use warnings;
use strict;
package Languages::php;
use parent 'Languages::_default';
# Configure the PHP language backend: PHP runs source files directly,
# so both sourcefile and execfile point at the same script.
sub initialize {
	my ( $self, %conf ) = @_;

	$self->{sourcefile}      = 'prog.php';
	$self->{execfile}        = 'prog.php';
	$self->{default_options} = '';

	# $options / $sourcefile are placeholders substituted by the launcher.
	$self->{cmdline} = 'php $options $sourcefile';
}
1;
| pragma-/pbot | applets/pbot-vm/host/lib/Languages/php.pm | Perl | mit | 428 |
=pod
=head1 NAME
EC_KEY_new, EC_KEY_get_flags, EC_KEY_set_flags, EC_KEY_clear_flags, EC_KEY_new_by_curve_name, EC_KEY_free, EC_KEY_copy, EC_KEY_dup, EC_KEY_up_ref, EC_KEY_get0_group, EC_KEY_set_group, EC_KEY_get0_private_key, EC_KEY_set_private_key, EC_KEY_get0_public_key, EC_KEY_set_public_key, EC_KEY_get_enc_flags, EC_KEY_set_enc_flags, EC_KEY_get_conv_form, EC_KEY_set_conv_form, EC_KEY_get_key_method_data, EC_KEY_insert_key_method_data, EC_KEY_set_asn1_flag, EC_KEY_precompute_mult, EC_KEY_generate_key, EC_KEY_check_key, EC_KEY_set_public_key_affine_coordinates - Functions for creating, destroying and manipulating B<EC_KEY> objects.
=head1 SYNOPSIS
#include <openssl/ec.h>
#include <openssl/bn.h>
EC_KEY *EC_KEY_new(void);
int EC_KEY_get_flags(const EC_KEY *key);
void EC_KEY_set_flags(EC_KEY *key, int flags);
void EC_KEY_clear_flags(EC_KEY *key, int flags);
EC_KEY *EC_KEY_new_by_curve_name(int nid);
void EC_KEY_free(EC_KEY *key);
EC_KEY *EC_KEY_copy(EC_KEY *dst, const EC_KEY *src);
EC_KEY *EC_KEY_dup(const EC_KEY *src);
int EC_KEY_up_ref(EC_KEY *key);
const EC_GROUP *EC_KEY_get0_group(const EC_KEY *key);
int EC_KEY_set_group(EC_KEY *key, const EC_GROUP *group);
const BIGNUM *EC_KEY_get0_private_key(const EC_KEY *key);
int EC_KEY_set_private_key(EC_KEY *key, const BIGNUM *prv);
const EC_POINT *EC_KEY_get0_public_key(const EC_KEY *key);
int EC_KEY_set_public_key(EC_KEY *key, const EC_POINT *pub);
point_conversion_form_t EC_KEY_get_conv_form(const EC_KEY *key);
void EC_KEY_set_conv_form(EC_KEY *eckey, point_conversion_form_t cform);
void *EC_KEY_get_key_method_data(EC_KEY *key,
void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
void EC_KEY_insert_key_method_data(EC_KEY *key, void *data,
void *(*dup_func)(void *), void (*free_func)(void *), void (*clear_free_func)(void *));
void EC_KEY_set_asn1_flag(EC_KEY *eckey, int asn1_flag);
int EC_KEY_precompute_mult(EC_KEY *key, BN_CTX *ctx);
int EC_KEY_generate_key(EC_KEY *key);
int EC_KEY_check_key(const EC_KEY *key);
int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x, BIGNUM *y);
=head1 DESCRIPTION
An EC_KEY represents a public key and (optionally) an associated private key. A new EC_KEY (with no associated curve) can be constructed by calling EC_KEY_new.
The reference count for the newly created EC_KEY is initially set to 1. A curve can be associated with the EC_KEY by calling
EC_KEY_set_group.
Alternatively a new EC_KEY can be constructed by calling EC_KEY_new_by_curve_name and supplying the nid of the associated curve. Refer to L<EC_GROUP_new(3)> for a description of curve names. This function simply wraps calls to EC_KEY_new and
EC_GROUP_new_by_curve_name.
Calling EC_KEY_free decrements the reference count for the EC_KEY object, and if it has dropped to zero then frees the memory associated
with it.
If B<key> is NULL nothing is done.
EC_KEY_copy copies the contents of the EC_KEY in B<src> into B<dest>.
EC_KEY_dup creates a new EC_KEY object and copies B<ec_key> into it.
EC_KEY_up_ref increments the reference count associated with the EC_KEY object.
EC_KEY_generate_key generates a new public and private key for the supplied B<eckey> object. B<eckey> must have an EC_GROUP object
associated with it before calling this function. The private key is a random integer (0 < priv_key < order, where order is the order
of the EC_GROUP object). The public key is an EC_POINT on the curve calculated by multiplying the generator for the curve by the
private key.
EC_KEY_check_key performs various sanity checks on the EC_KEY object to confirm that it is valid.
EC_KEY_set_public_key_affine_coordinates sets the public key for B<key> based on its affine co-ordinates, i.e. it constructs an EC_POINT
object based on the supplied B<x> and B<y> values and sets the public key to be this EC_POINT. It will also perform certain sanity checks
on the key to confirm that it is valid.
The functions EC_KEY_get0_group, EC_KEY_set_group, EC_KEY_get0_private_key, EC_KEY_set_private_key, EC_KEY_get0_public_key, and EC_KEY_set_public_key get and set the EC_GROUP object, the private key and the EC_POINT public key for the B<key> respectively.
The functions EC_KEY_get_conv_form and EC_KEY_set_conv_form get and set the point_conversion_form for the B<key>. For a description
of point_conversion_forms please refer to L<EC_POINT_new(3)>.
EC_KEY_insert_key_method_data and EC_KEY_get_key_method_data enable the caller to associate arbitrary additional data specific to the
elliptic curve scheme being used with the EC_KEY object. This data is treated as a "black box" by the ec library. The data to be stored by EC_KEY_insert_key_method_data is provided in the B<data> parameter, which must have associated functions for duplicating, freeing and "clear_freeing" the data item. If a subsequent EC_KEY_get_key_method_data call is issued, the functions for duplicating, freeing and "clear_freeing" the data item must be provided again, and they must be the same as they were when the data item was inserted.
EC_KEY_set_flags sets the flags in the B<flags> parameter on the EC_KEY object. Any flags that are already set are left set. The currently defined standard flags are EC_FLAG_NON_FIPS_ALLOW and EC_FLAG_FIPS_CHECKED. In addition there is the flag EC_FLAG_COFACTOR_ECDH which is specific to ECDH and is defined in ecdh.h. EC_KEY_get_flags returns the current flags that are set for this EC_KEY. EC_KEY_clear_flags clears the flags indicated by the B<flags> parameter. All other flags are left in their existing state.
EC_KEY_set_asn1_flag sets the asn1_flag on the underlying EC_GROUP object (if set). Refer to L<EC_GROUP_copy(3)> for further information on the asn1_flag.
EC_KEY_precompute_mult stores multiples of the underlying EC_GROUP generator for faster point multiplication. See also L<EC_POINT_add(3)>.
=head1 RETURN VALUES
EC_KEY_new, EC_KEY_new_by_curve_name and EC_KEY_dup return a pointer to the newly created EC_KEY object, or NULL on error.
EC_KEY_get_flags returns the flags associated with the EC_KEY object as an integer.
EC_KEY_copy returns a pointer to the destination key, or NULL on error.
EC_KEY_up_ref, EC_KEY_set_group, EC_KEY_set_private_key, EC_KEY_set_public_key, EC_KEY_precompute_mult, EC_KEY_generate_key, EC_KEY_check_key and EC_KEY_set_public_key_affine_coordinates return 1 on success or 0 on error.
EC_KEY_get0_group returns the EC_GROUP associated with the EC_KEY.
EC_KEY_get0_private_key returns the private key associated with the EC_KEY.
EC_KEY_get_conv_form return the point_conversion_form for the EC_KEY.
=head1 SEE ALSO
L<crypto(3)>, L<ec(3)>, L<EC_GROUP_new(3)>,
L<EC_GROUP_copy(3)>, L<EC_POINT_new(3)>,
L<EC_POINT_add(3)>,
L<EC_GFp_simple_method(3)>,
L<d2i_ECPKParameters(3)>
=cut
| vbloodv/blood | extern/openssl.orig/doc/crypto/EC_KEY_new.pod | Perl | mit | 6,829 |
# solve a sudoku using backtrack
use strict;
use warnings;
use Scalar::Util qw( looks_like_number );
use Time::HiRes qw( time );
use constant ROW_SIZE => 9;
# Solve a (partially filled) sudoku in place by depth-first backtracking.
# Takes a reference to a 9x9 array of digits (0 = empty) and returns the
# same reference; the caller detects success by checking that no empty
# cell remains (see next_empty).
sub solve {
	# Return the (row, col) of the first empty cell, scanning row-major,
	# or the empty list when the grid is completely filled.
	# NOTE(review): these named inner subs are package-level (compiled
	# once, not closures); safe here because all state is passed as
	# arguments.
	sub next_empty {
		my $sudoku = shift;
		for my $row (0 .. ROW_SIZE-1) {
			for my $col (0 .. ROW_SIZE-1) {
				if ($$sudoku[$row][$col] == 0) { return ($row, $col); }
			}
		}
		return ();
	}

	# True when $val can legally go at ($x, $y): it must not already
	# appear in the same row, the same column, or the enclosing 3x3 box.
	sub can_put {
		my ($sudoku, $x, $y, $val) = @_;
		for my $i (0 .. ROW_SIZE - 1) {
			if ($$sudoku[$x][$i] == $val) { return 0; }
			if ($$sudoku[$i][$y] == $val) { return 0; }
		}
		# Top-left corner of the 3x3 box containing ($x, $y).
		my $sq_x = $x - ($x % 3);
		my $sq_y = $y - ($y % 3);
		for my $row ($sq_x .. $sq_x + 2) {
			for my $col ($sq_y .. $sq_y + 2) {
				if ($$sudoku[$row][$col] == $val) {
					return 0;
				}
			}
		}
		return 1;
	}

	# Try each candidate digit in the next empty cell, recurse, and undo
	# the move when no candidate leads to a completely filled grid.
	my $sudoku = shift;
	my @spot = next_empty $sudoku;
	if (@spot) {
		my ($x, $y) = @spot;
		for my $val (1 .. 9) {
			if (can_put $sudoku, $x, $y, $val) {
				$$sudoku[$x][$y] = $val;
				my $new_sudoku = solve ($sudoku);
				my @new_spot = next_empty $new_sudoku;
				if (@new_spot == 0) {return $new_sudoku; } # solution found
			}
		}
		$$sudoku[$x][$y] = 0; # backtrack
	}
	return $sudoku;
}
# Build a 9x9 grid (list of row array-refs) from a puzzle string.
# Non-numeric characters are discarded, so separators in the input are
# tolerated; the remaining digits are sliced into rows of ROW_SIZE.
sub from_str {
	my $str = shift;

	my @digits = grep { looks_like_number($_) } split //, $str;

	my @grid;
	for my $row_start (map { $_ * ROW_SIZE } 0 .. ROW_SIZE - 1) {
		push @grid, [ @digits[ $row_start .. $row_start + ROW_SIZE - 1 ] ];
	}
	return @grid;
}
# Render a grid reference as text: each row on its own line, cells
# separated by single spaces, with a leading and trailing space
# (" 1 2 ... 9 \n").
sub to_str {
	my $grid = shift;
	return join '', map { " @$_ \n" } @$grid;
}
# Draw/refresh a textual progress bar on STDOUT.  Ends with "\r" (no
# newline), so successive calls overwrite the same line.
#   $step       - current 0-based step
#   $totalSteps - total number of steps
#   $resolution - roughly how many times over the run the bar redraws
#   $width      - bar width in characters
sub load_bar {
	my ($step, $totalSteps, $resolution, $width) = @_;
	# Only redraw every totalSteps/resolution steps; the int(...) != 0
	# guard skips drawing entirely when the run is shorter than the
	# requested resolution.
	if (int($totalSteps/$resolution) != 0 and
		$step % int($totalSteps/$resolution) == 0) {
		my $ratio = $step/$totalSteps;
		my $count = int($ratio * $width);
		printf("%d%% [",int($ratio * 100));
		for (0 .. $count) { printf("="); }
		for ($count+1 .. $width) { printf(" "); }
		printf("]\r");
		$|++; #flush
	}
}
# read sudokus into memory: slurp the whole input file in one read
# (-s gives the file size in bytes).
my $input = $ARGV[0];
open my $in, "<", $input or die "Can't open input file";
my $data;
read $in, $data, -s $input;
close $in;
my $result = "";
my $start_time = time();

# solve all sudokus -- one puzzle per input line -- collecting the
# rendered solutions and updating the progress bar as we go.
my @rows = split '\n', $data;
my $total = @rows;
for my $step (0 .. $#rows) {
	my @sudoku = from_str($rows[$step]);
	$result .= to_str( solve (\@sudoku)) . "\n";
	load_bar($step, $total, 20, 50);
}
my $diff = time() - $start_time;

# write solutions to file, named after the input with a "solved_" prefix
open my $out, ">", "solved_" . $input or die "Can't open output file";
print $out $result;
close $out;
print " -- Elapsed time: $diff s.\n";
| humbhenri/Sudoku_Poliglota | sudoku.pl | Perl | mit | 2,941 |
package MIP::Recipes::Download::Get_reference;
use 5.026;
use Carp;
use charnames qw{ :full :short };
use English qw{ -no_match_vars };
use File::Spec::Functions qw{ catfile };
use open qw{ :encoding(UTF-8) :std };
use Params::Check qw{ allow check last_error };
use utf8;
use warnings;
use warnings qw{ FATAL utf8 };
## CPANM
use autodie qw{ :all };
use Readonly;
## MIPs lib/
use MIP::Constants qw{ $NEWLINE $SPACE $UNDERSCORE };
BEGIN {
require Exporter;
use base qw{ Exporter };
# Functions and variables which can be optionally exported
our @EXPORT_OK = qw{ get_reference };
}
## Constants
Readonly my $READ_TIMEOUT_SEC => 20;
Readonly my $TIMEOUT_SEC => 20;
Readonly my $DOWNLOAD_TRIES => 12;
Readonly my $WAIT_RETRY_SEC => 300;
sub get_reference {
## Function : Write get reference recipe (download, decompress and validate)
## Returns : 1 on success; croaks if argument validation fails
## Arguments: $filehandle => Filehandle to write to
## : $recipe_name => Recipe name
## : $reference_dir => Reference directory
## : $reference_href => Reference hash {REF}
## : $quiet => Quiet (no output)
## : $verbose => Verbosity
my ($arg_href) = @_;
## Flatten argument(s)
my $filehandle;
my $recipe_name;
my $reference_dir;
my $reference_href;
## Default(s)
my $outdir_path;
my $quiet;
my $verbose;
## Params::Check template: validates %$arg_href and stores each value into
## the corresponding lexical declared above.
my $tmpl = {
filehandle => { defined => 1, required => 1, store => \$filehandle, },
outdir_path => {
default => $arg_href->{reference_dir},
store => \$outdir_path,
strict_type => 1,
},
recipe_name => {
defined => 1,
required => 1,
store => \$recipe_name,
strict_type => 1,
},
reference_dir => {
defined => 1,
required => 1,
store => \$reference_dir,
strict_type => 1,
},
reference_href => {
default => {},
defined => 1,
required => 1,
store => \$reference_href,
strict_type => 1,
},
quiet => {
allow => [ undef, 0, 1 ],
default => 1,
store => \$quiet,
strict_type => 1,
},
verbose => {
allow => [ undef, 0, 1 ],
default => 0,
store => \$verbose,
strict_type => 1,
},
};
check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};
use MIP::Validate::File qw{ check_file_md5sum };
use MIP::File::Decompression qw{ decompress_files };
use MIP::Program::Wget qw{ wget };
## Potential download files
## For each key present in the reference hash: download, optionally
## decompress, and md5-verify the file, writing shell commands to
## $filehandle rather than acting directly.
my @file_keys = qw{ file file_check
file_index file_index_check };
REFERENCE_FILE:
foreach my $key (@file_keys) {
next REFERENCE_FILE
if ( not exists $reference_href->{$key} );
## Install reference
my $file = $reference_href->{$key};
## Local file name comes from the matching "out<key>" entry.
my $outfile = $reference_href->{ q{out} . $key };
my $outfile_path = catfile( $reference_dir, $outfile );
my $url = $reference_href->{url_prefix} . $file;
my $user = $reference_href->{user};
## Download
say {$filehandle} q{## Download } . $recipe_name . $NEWLINE;
wget(
{
continue => 1,
filehandle => $filehandle,
outfile_path => $outfile_path,
read_timeout => $READ_TIMEOUT_SEC,
retry_connrefused => 1,
timeout => $TIMEOUT_SEC,
tries => $DOWNLOAD_TRIES,
wait_retry => $WAIT_RETRY_SEC,
quiet => $quiet,
url => $url,
user => $user,
verbose => $verbose,
}
);
say {$filehandle} $NEWLINE;
## Check if file needs to be decompress and write decompression if so
## NOTE(review): outdir_path defaults to the caller's reference_dir, so
## decompression output normally lands next to the download -- confirm
## when callers pass a distinct outdir_path.
decompress_files(
{
filehandle => $filehandle,
outdir_path => $outdir_path,
file_path => $outfile_path,
program =>
$reference_href->{ q{out} . $key . $UNDERSCORE . q{decompress} },
outfile_path => $outfile_path,
}
);
## Check file integrity of file
check_file_md5sum(
{
filehandle => $filehandle,
md5_file_path => $outfile_path,
check_method =>
$reference_href->{ q{out} . $key . $UNDERSCORE . q{method} },
}
);
}
return 1;
}
1;
| henrikstranneheim/MIP | lib/MIP/Recipes/Download/Get_reference.pm | Perl | mit | 4,881 |
#
# $Header: svn://svn/SWM/trunk/web/RegoFormObj.pm 11494 2014-05-06 05:56:45Z sliu $
#
package RegoFormObj;
use lib '..';
use BaseObject2;
our @ISA = qw(BaseObject2);
use strict;
use Defs;
use Utils;
use RegoFormSQL;
# Name of the backing database table for this object type.
sub _getTableName { return 'tblRegoForm'; }

# Name of the table's primary-key column.
sub _getKeyName { return 'intRegoFormID'; }
sub isNodeForm {
    # A "node" form was created above association level and has no
    # association attached (intAssocID of -1). Returns 1 or 0.
    my ($self) = @_;
    return (($self->getValue('intAssocID') == -1)
        and ($self->getValue('intCreatedLevel') > $Defs::LEVEL_ASSOC)) ? 1 : 0;
}

sub isLinkedForm {
    # A linked form points at a parent-body form. Returns 1 or 0.
    my ($self) = @_;
    return $self->getValue('intParentBodyFormID') ? 1 : 0;
}

sub isOwnForm {
    # True when the given entity created this form.
    # Returns undef when no entityID parameter is supplied.
    my ($self, %params) = @_;
    my $entityID = $params{'entityID'};
    return undef if !$entityID;
    return ($entityID == $self->getValue('intCreatedID')) ? 1 : 0;
}

sub isParentBodyForm {
    # True when the caller's level sits below the level the form was
    # created at. Returns undef when no level parameter is supplied.
    my ($self, %params) = @_;
    my $level = $params{'level'};
    return undef if !$level;
    return ($level < $self->getValue('intCreatedLevel')) ? 1 : 0;
}
# Convenience predicates: each checks the corresponding Y/N flag on the
# form record and returns the (boolean) result of the string comparison.
sub allowPlayer    { my ($self) = @_; return $self->getValue('ynPlayer')        eq 'Y'; }
sub allowCoach     { my ($self) = @_; return $self->getValue('ynCoach')         eq 'Y'; }
sub allowOfficial  { my ($self) = @_; return $self->getValue('ynOfficial')      eq 'Y'; }
sub allowMisc      { my ($self) = @_; return $self->getValue('ynMisc')          eq 'Y'; }
sub allowUmpire    { my ($self) = @_; return $self->getValue('ynMatchOfficial') eq 'Y'; }
sub allowVolunteer { my ($self) = @_; return $self->getValue('ynVolunteer')     eq 'Y'; }

sub allowTypes {
    # Allowed member record types, stored as a comma-separated string;
    # returns them as a list (empty list when unset).
    my ($self) = @_;
    my $csv = $self->getValue('strAllowedMemberRecordTypes') || '';
    return split /,/, $csv;
}
sub getListOfParentBodyForms {
# Load all parent-body RegoForm objects for a realm/association.
# Params : dbh (required), realmID (required), assocID (required),
#          formTypes (optional filter string).
# Returns: arrayref of RegoFormObj instances, or undef on missing params.
my $self = shift;
my (%params) = @_;
my $dbh = $params{'dbh'};
my $realmID = $params{'realmID'} || 0;
my $assocID = $params{'assocID'} || 0;
my $formTypes = $params{'formTypes'} || '';
return undef if !$dbh;
return undef if !$realmID or !$assocID;
# NOTE(review): only $realmID is bound as a placeholder; $assocID and
# $formTypes are interpolated inside getListOfParentBodyFormsSQL --
# verify they are validated/escaped there.
my $sql = getListOfParentBodyFormsSQL(realmID=>$realmID, formTypes=>$formTypes, assocID=>$assocID);
my @bindVars = ($realmID);
my $q = getQueryPreparedAndBound($dbh, $sql, \@bindVars);
$q->execute();
my @regoFormObjs = ();
# Re-load each row as a full object by primary key (one query per row).
while (my $dref = $q->fetchrow_hashref()) {
my $RegoFormObj = $self->load(db=>$dbh, ID=>$dref->{'intRegoFormID'});
push @regoFormObjs, $RegoFormObj;
}
$q->finish();
return \@regoFormObjs;
}
sub getFormEntityType {
    # Map the form's registration-type constant to the kind of entity it
    # registers: 'Member', 'Team', or '' when unrecognised.
    my ($self) = @_;
    my $type = $self->getValue('intRegoType');
    my $entity_type = '';
    if (   $type == $Defs::REGOFORM_TYPE_MEMBER_ASSOC
        or $type == $Defs::REGOFORM_TYPE_MEMBER_TEAM
        or $type == $Defs::REGOFORM_TYPE_MEMBER_CLUB
        or $type == $Defs::REGOFORM_TYPE_MEMBER_PROGRAM)
    {
        $entity_type = 'Member';
    }
    elsif ($type == $Defs::REGOFORM_TYPE_TEAM_ASSOC) {
        $entity_type = 'Team';
    }
    return $entity_type;
}

sub is_form_type {
    # 1 when the form's rego type equals the supplied type constant, else 0.
    my ($self, $type) = @_;
    $type ||= 0;
    return ($self->getValue('intRegoType') == $type) ? 1 : 0;
}
1;
| facascante/slimerp | fifs/web/RegoFormBuilder/RegoFormObj.pm | Perl | mit | 3,593 |
#!/usr/bin/perl
package SYNDICATION::SHOPPINGCOM;
use strict;
use lib "/backend/lib";
use DBINFO;
use ZOOVY;
use NAVCAT;
use NAVCAT::FEED;
use ZTOOLKIT;
use SYNDICATION;
sub new {
# Constructor: wraps a SYNDICATION object ($so) and precomputes the FTP
# upload URL for this account's Shopping.com feed file.
my ($class, $so) = @_;
my ($self) = {};
$self->{'_SO'} = $so;
# Tied hash gives key/value access to the syndication config
# (.ftp_user, .ftp_pass, .ftp_server, .ftp_dir).
tie my %s, 'SYNDICATION', THIS=>$so;
my $USERNAME = $so->username();
# my $FILE = "$USERNAME.xml";
# if ($so->profile() ne 'DEFAULT') { $FILE = sprintf("%s-%s.xml",$USERNAME,$so->profile()); }
# Feed file is named "<username>-<domain>.xml".
my $FILE = sprintf("%s-%s.xml",$USERNAME,$so->domain());
# NOTE(review): the FTP credentials are embedded in the stored '.url'
# value; they will be exposed wherever that value is logged or shown.
$so->set('.url',sprintf("ftp://%s:%s\@%s%s/%s",$s{'.ftp_user'},$s{'.ftp_pass'},$s{'.ftp_server'},$s{'.ftp_dir'},$FILE));
bless $self, 'SYNDICATION::SHOPPINGCOM';
untie %s;
return($self);
}
sub preflight {
    # Pre-run sanity check: report a missing FTP server in the export
    # configuration via the supplied message collector ($lm).
    my ($self, $lm) = @_;
    my $so = $self->{'_SO'};
    return unless $so->get('.ftp_server') eq '';
    $lm->pooshmsg("ERROR|+FTP Server not set. Please check your configuration");
}
sub header_products {
    # XML prologue for the product feed, stamped with the generation time.
    my ($self) = @_;
    my $generated = &ZTOOLKIT::pretty_date(time(), 1);
    my $xml = "<?xml version=\"1.0\"?>\n"
        . "<Products>\n"
        . "<!-- File generated $generated -->\n";
    return $xml;
}
sub footer_products {
    # Closing tag for the product feed document.
    my ($self) = @_;
    return "</Products>\n";
}

# Accessor for the wrapped SYNDICATION object.
sub so { return $_[0]->{'_SO'}; }
sub product {
# Emit one <Product> XML element for a single SKU.
# Params : $SKU, product object $P, message collector $plm, and
#          $OVERRIDES (per-instance field values, e.g. qty/links).
# Returns: the XML fragment, or '' when $plm->can_proceed() is false.
my ($self, $SKU, $P, $plm, $OVERRIDES) = @_;
my $c = '';
my $USERNAME = $self->so()->username();
my $DEBUG = 0;
if ($self->so()->is_debug($SKU)) {
$DEBUG++;
}
## check current inventory using inv and reserve values
if ($OVERRIDES->{'zoovy:qty_instock'} > 0) {
$OVERRIDES->{'shopping:inventory'} = 'Y';
}
## check if inventory is set to unlimited
else{
$OVERRIDES->{'shopping:inventory'} = 'N';
}
## Step 1: figure out the category
## Prefer an explicit shopping:category, then the navigation category.
my $CATEGORY = undef;
if (($P->fetch('shopping:category') ne '') && (defined $P->fetch('shopping:category'))) {
$CATEGORY = $P->fetch('shopping:category');
if ($DEBUG) {
$plm->pooshmsg("INFO|+Category($CATEGORY) loaded from shopping:category\n");
}
}
elsif (defined $OVERRIDES->{'navcat:meta'}) {
$CATEGORY = $OVERRIDES->{'navcat:meta'};
if ($DEBUG) {
$plm->pooshmsg("INFO|+Category($CATEGORY) loaded from navigation category (navcat:meta)\n");
}
}
else {
$plm->pooshmsg("ERROR|+Required category($CATEGORY) was not set and is undef.\n");
}
# NOTE(review): the assignment below unconditionally overwrites the
# condition fetched on the previous line -- confirm that forcing 'New'
# for every product is intended.
if (defined $P->fetch('zoovy:prod_condition')) { $OVERRIDES->{'shopping:prod_condition'} = $P->fetch('zoovy:prod_condition'); }
$OVERRIDES->{'shopping:prod_condition'} = 'New';
## Field map: each entry is either a literal value ('data') or a '|'
## separated list of keys to try, first in $OVERRIDES then via $P->fetch.
my @fields = ();
push @fields, { 'id'=>'mpn', try=>'shopping:prod_mfgid|zoovy:prod_mfgid', };
push @fields, { 'id'=>'upc', try=>'shopping:prod_upc|zoovy:prod_upc', };
push @fields, { 'id'=>'manufacturer', try=>'shopping:prod_mfg|zoovy:prod_mfg', };
push @fields, { 'id'=>'ProductName', try=>'shopping:prod_name|zoovy:prod_name', };
push @fields, { 'id'=>'ProductDescription', try=>'shopping:prod_desc|zoovy:prod_desc', };
push @fields, { 'id'=>'Price', try=>'shopping:base_price|zoovy:base_price', };
push @fields, { 'id'=>'Condition', try=>'shopping:prod_condition|zoovy:prod_condition', };
push @fields, { 'id'=>'Stock', data=>($OVERRIDES->{'zoovy:qty_instock'}>0)?'Y':'N' };
push @fields, { 'id'=>'ImageUrl', data=> &ZOOVY::mediahost_imageurl($USERNAME,$P->thumbnail($SKU),0,0,'FFFFFF',0,'jpg') };
# push @fields, { 'id'=>'Image', data=> &ZOOVY::mediahost_imageurl($USERNAME,$P->thumbnail($SKU),0,0,'FFFFFF',0,'jpg') };
push @fields, { 'id'=>'Category', data=>$CATEGORY };
push @fields, { 'id'=>'Zip', try=>'shopping:prod_origin_zip|zoovy:prod_origin_zip', };
## Shipping: explicit ground cost wins, then the generic cost field,
## otherwise fall back to weight-based (zone) shipping.
if ($P->fetch('shopping:ship_ground') ne '') {
push @fields, { 'id'=>'ShippingCost', try=>'shopping:ship_ground' };
}
elsif ($P->fetch('zoovy:ship_cost1') ne '') {
push @fields, { 'id'=>'ShippingCost', try=>'zoovy:ship_cost1' };
}
else {
## Zone based shipping.
## changed to pounds per ticket 149017 & docs
## (https://merchant.shopping.com/sc/docs/Feed_Uploading_Specifications.pdf)
#$P->fetch('zoovy:base_weight') = &ZSHIP::smart_weight_new($prodref->{'zoovy:base_weight'});
# smart_weight_new returns ounces; feed wants pounds to one decimal.
my $WEIGHT_IN_POUNDS = sprintf("%.1f",&ZSHIP::smart_weight_new($P->fetch('zoovy:base_weight'))/16);
push @fields, { 'id'=>'Weight', data=>$WEIGHT_IN_POUNDS };
if ($P->fetch('zoovy:prod_origin_zip') ne '') {
push @fields, { 'id'=>'Zip', try=>'shopping:prod_origin_zip,zoovy:prod_origin_zip' },
}
}
my $xml = '';
if ($plm->can_proceed()) {
$xml .= '<Product>';
$xml .= "<MerchantSKU>".$SKU."</MerchantSKU>\n";
# $xml .= "<ProductURL>".ZOOVY::incode($prodref->{'zoovy:link2'}."?meta=shopping-$pid&".$prodref->{'zoovy:analytics_data'})."</ProductURL>\n";
$xml .= "<ProductURL>".ZOOVY::incode($OVERRIDES->{'zoovy:link2'})."</ProductURL>\n";
foreach my $field (@fields) {
my $data = undef;
if (defined $field->{'data'}) { $data = $field->{'data'}; }
if (defined $field->{'try'}) {
foreach my $k (split(/\|/,$field->{'try'})) {
if (not defined $data) { $data = $OVERRIDES->{$k}; }
if (not defined $data) { $data = $P->fetch($k); }
}
}
# Fields with no resolvable value are omitted from the XML entirely.
next if (not defined $data);
## added stripUnicode, 20090127
#$xml .= "<$field>".&ZOOVY::incode($data)."</$field>";
$xml .= "<".$field->{'id'}.">".&ZOOVY::incode(&ZTOOLKIT::stripUnicode($data))."</".$field->{'id'}.">";
}
$xml .= "</Product>\n";
}
return($xml);
}
1;
| CommerceRack/backend | lib/SYNDICATION/SHOPPINGCOM.pm | Perl | mit | 5,324 |
package Perl6ish;
use strict;
use warnings;
our $VERSION = '0.02';
sub import {
# Re-export the Perl6-ish module bundle into the caller's namespace by
# evaluating the 'use' statements as if they were written in the
# caller's own package.
my $caller = caller;
# NOTE(review): the string eval's status is never checked ($@); a module
# that fails to load is silently ignored -- confirm this is intended.
eval <<CODI;
package $caller;
use Perl6::Perl 'perl';
use Perl6::Slurp;
use Perl6::Caller;
use Perl6::Take;
use Perl6::Say;
use Perl6::Contexts;
use Perl6::Junction qw/all any one none/;
use Perl6ish::Syntax::temp;
use Perl6ish::Syntax::state;
use Perl6ish::Syntax::constant;
use Perl6ish::Array;
use Perl6ish::Hash;
use Perl6ish::String;
use Perl6ish::Syntax::DotMethod;
use Perl6ish::Autobox;
CODI
return 1;
}
1;
__END__
=head1 NAME
Perl6ish - Some Perl6 programming in Perl5 code.
=head1 SYNOPSIS
use Perl6ish;
=head1 DESCRIPTION
Perl6ish allows you to write Perl5 code with some Perl6 look-and-feel.
It uses many good evil techniques to extend Perl5 syntax. Many of these
have already been done in the C<Perl6::*> namespace; some of them are
coded only in the Perl6ish distribution.
When you say C<use Perl6ish> in your code, it's exactly the same
as saying this:
use Perl6::Perl 'perl';
use Perl6::Slurp;
use Perl6::Caller;
use Perl6::Take;
use Perl6::Say;
use Perl6::Contexts;
use Perl6::Junction qw/all any one none/;
use Perl6ish::Syntax::temp;
use Perl6ish::Syntax::state;
use Perl6ish::Syntax::constant;
use Perl6ish::Array;
use Perl6ish::Hash;
use Perl6ish::String;
use Perl6ish::Syntax::DotMethod;
C<Perl6ish::Syntax::*> modules are syntax extensions. Variable
declarators, C<temp>, C<state> and C<constant>, are implemented under
this namespace. They can be used alone if you prefer not to load all
those moduels above all at once.
=head1 AUTHOR
Kang-min Liu E<lt>gugod@gugod.orgE<gt>
=head1 SEE ALSO
L<Rubyish>
=head1 LICENSE AND COPYRIGHT
Copyright (c) 2009, Kang-min Liu C<< <gugod@gugod.org> >>.
This is free software, licensed under:
The MIT (X11) License
=head1 DISCLAIMER OF WARRANTY
BECAUSE THIS SOFTWARE IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE SOFTWARE, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE SOFTWARE "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE SOFTWARE IS WITH
YOU. SHOULD THE SOFTWARE PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL
NECESSARY SERVICING, REPAIR, OR CORRECTION.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE SOFTWARE AS PERMITTED BY THE ABOVE LICENCE, BE
LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL,
OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE
THE SOFTWARE (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE SOFTWARE TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
=cut
| gitpan/Perl6ish | lib/Perl6ish.pm | Perl | mit | 3,134 |
#!/usr/bin/env perl
# -*- mode: perl -*-
# Author: Creative Commons Corporation, 2016.
# License: CC0. http://creativecommons.org/publicdomain/zero/1.0/
# To the extent possible under law, Creative Commons Corporation
# has waived all copyright and related or neighboring rights to ccexif.
#
# ccexif is a simple Unix command line tool to read and write Creative Commons
# license information in the Exif metadata of images.
#
# The format of this information is described in the Exif metadata proposal document.
# ccexif does not currently read, write or reconcile XMP license information.
#
# Dependency Install.
# ccexif requires Perl 5 and the ArgParse and ExifTool libraries. If the latter
# are not already installed they can be installed using cpan:
# cpan
# install Getopt::ArgParse
# install Image::ExifTool
# exit
#
# Reading Metadata: ./ccexif.pl person.jpg
# Writing Metadata: ./ccexif.pl --license by-sa --title person --workurl http://blah.com/person.jpg --author you --authorurl https://blah.com/author person.jpg
use strict;
use Getopt::ArgParse;
use Image::ExifTool qw(:Public);
use File::Copy qw(copy);
my %license_urls = (
'by' => 'https://creativecommons.org/licenses/by/4.0/',
'by-sa' => 'https://creativecommons.org/licenses/by-sa/4.0/',
'by-nd' => 'https://creativecommons.org/licenses/by-nd/4.0/',
'by-nc' => 'https://creativecommons.org/licenses/by-nc/4.0/',
'by-nc-sa' => 'https://creativecommons.org/licenses/by-nc-sa/4.0/',
'by-nc-nd' => 'https://creativecommons.org/licenses/by-nc-nd/4.0/'
);
my %license_names = (
'by' => 'Attribution',
'by-sa' => 'Attribution-ShareAlike',
'by-nd' => 'Attribution-NoDerivatives',
'by-nc' => 'Attribution-NonCommercial',
'by-nc-sa' => 'Attribution-NonCommercial-ShareAlike',
'by-nc-nd' => 'Attribution-NonCommercial-NoDerivatives'
);
my @license_ids = keys %license_urls;
my %license_urls_reverse = reverse %license_urls;
sub parse_args {
# Build the command-line parser and parse the supplied argument list.
# Returns a Getopt::ArgParse namespace object with accessors for each
# option (author, title, workurl, authorurl, license, filename, ...).
my @argv = @_;
my $parser = Getopt::ArgParse->new_parser(
prog => 'ccexif.pl',
description => "A simple tool to get/set JPEG Exif Creative Commons license metadata."
);
$parser->add_arg('--ignoreminorerrors', '-m', type => 'Bool',
help => 'ignore minor errors (e.g. "Bad PreviewIFD directory"');
$parser->add_arg('--author', '-a',
help => 'attribution for the author, e.g. A. N. Other');
$parser->add_arg('--title', '-t',
help => 'title of the work, e.g. My Cat');
$parser->add_arg('--workurl', '-w',
help => 'url for the work, e.g. http://anopics.cc/cat');
$parser->add_arg('--authorurl', '-u',
help => 'url for the author, e.g. http://anopics.cc/');
# License choices are restricted to the keys of %license_urls above.
$parser->add_arg('--license', '-l', choices => \@license_ids,
help => 'license identifier');
$parser->add_arg('filename', required => 1,
help => 'the file to modify (a backup is created)');
my $args = $parser->parse_args(@argv);
return $args;
}
sub get_author {
    # Resolve the author name and URL. Command-line flags win; otherwise
    # parse the Exif Artist field, which may embed a URL as "Name <url>".
    my ($metadata, $args) = @_;
    my $author     = $args->author;
    my $author_url = $args->authorurl;
    if (!$author) {
        my $artist = $metadata->GetValue('Artist');
        if ($artist) {
            # Regex ignores space, punctuation, anything after closing >
            if ($artist =~ /^(.+)\s*<([^>]+)>[^>]*$/) {
                ($author, $author_url) = ($1, $2);
            }
            else {
                $author = $artist;
            }
        }
    }
    return ($author, $author_url);
}
sub get_title {
    # Resolve the work title and URL. Command-line flags win; otherwise
    # parse the Exif ImageDescription field ("Title <url>" form allowed).
    my ($metadata, $args) = @_;
    my $title    = $args->title;
    my $work_url = $args->workurl;
    if (!$title) {
        my $description = $metadata->GetValue('ImageDescription');
        if ($description) {
            # Regex ignores space, punctuation, anything after closing >
            if ($description =~ /^(.+)\s*<([^>]+)>[^>]*$/) {
                ($title, $work_url) = ($1, $2);
            }
            else {
                $title = $description;
            }
        }
    }
    return ($title, $work_url);
}
sub get_license_id {
    # Resolve the license identifier: the --license flag wins; otherwise
    # extract the trailing "<id>" token from the Exif Copyright field
    # (e.g. "... visit <by-sa>."). Returns a false value when neither
    # source provides one.
    my ($metadata, $args) = @_;
    my $license_id = $args->license;
    if (!$license_id) {
        # Guard against an absent Copyright tag: GetValue returns undef,
        # and matching a regex against undef raises an "uninitialized
        # value" warning in the original code.
        my $copyright_field = $metadata->GetValue('Copyright') // '';
        if ($copyright_field =~ /<([^>]+)>\s*\.?\s*$/) {
            $license_id = $1;
        }
    }
    return $license_id;
}
sub set_author {
    # Write the Exif Artist tag. When an author URL is supplied the value
    # is stored as "Name <url>"; with no author nothing is written.
    my ($metadata, $author, $author_url) = @_;
    return unless $author;
    my $value = $author_url ? "$author <$author_url>" : $author;
    $metadata->SetNewValue('Artist', $value);
}
sub set_title {
    # Write the Exif ImageDescription tag. When a work URL is supplied
    # the value is stored as "Title <url>"; with no title nothing is set.
    my ($metadata, $title, $work_url) = @_;
    return unless $title;
    my $value = $work_url ? "$title <$work_url>" : $title;
    $metadata->SetNewValue('ImageDescription', $value);
}
sub set_copyright {
    # Compose and store the human-readable Copyright field from the
    # title, author and license id. The license name/url come from the
    # file-level %license_names / %license_urls tables.
    my ($metadata, $title, $author, $license_id) = @_;
    my $text = '';
    if ($title and $author) {
        $text = qq{"$title" by $author. };
    }
    elsif ($title) {
        $text = "$title. ";
    }
    elsif ($author) {
        $text = "By $author. ";
    }
    if ($license_id) {
        $text .= 'This work is licensed under the Creative Commons '
            . $license_names{$license_id}
            . ' 4.0 International License.'
            . ' To view a copy of this license, visit <'
            . $license_urls{$license_id} . '>.';
    }
    else {
        # NOTE(review): with no title/author this yields the fragment
        # "is All Rights Reserved." with no subject -- preserved as-is.
        $text .= 'is All Rights Reserved.';
    }
    $metadata->SetNewValue('Copyright', $text);
}
sub set_metadata {
    # Apply all metadata updates derived from the command-line arguments:
    # author, title, and the composed copyright statement.
    my ($exifTool, $args) = @_;
    my ($author, $author_url) = get_author($exifTool, $args);
    set_author($exifTool, $author, $author_url);
    my ($title, $work_url) = get_title($exifTool, $args);
    set_title($exifTool, $title, $work_url);
    set_copyright($exifTool, $title, $author, get_license_id($exifTool, $args));
}
sub display_metadata {
# Print the raw Exif fields and their parsed components to stdout.
# NOTE(review): when a tag is absent, GetValue/get_* return undef and the
# concatenations below emit "uninitialized value" warnings -- confirm
# whether missing tags should print an empty string instead.
my ($exifTool, $args) = @_;
my ($title, $work_url) = get_title($exifTool, $args);
my $title_field = $exifTool->GetValue('ImageDescription');
CORE::say "Exif ImageDescription: " . $title_field;
CORE::say "Title: " . $title;
CORE::say "Work URL: " . $work_url;
my $author_field = $exifTool->GetValue('Artist');
my ($author, $author_url) = get_author($exifTool, $args);
CORE::say "Exif Artist: " . $author_field;
CORE::say "Author: " . $author;
CORE::say "Attribution URL: " . $author_url;
my $copyright_field = $exifTool->GetValue('Copyright');
my $license_id = get_license_id($exifTool, $args);
CORE::say "Exif Copyright: " . $copyright_field;
CORE::say "License ID: " . $license_id;
}
# Configuration
my $args = parse_args(@ARGV);
my $filename = $args->filename;
CORE::say "Filename: " . $filename;
# NOTE(review): indirect-object syntax; Image::ExifTool->new is the
# recommended form (behavior is the same here).
my $exifTool = new Image::ExifTool;
if ($args->ignoreminorerrors) {
$exifTool->Options(IgnoreMinorErrors => 1);
}
# Update the metadata, if the license is set
if ($args->license) {
# ImageInfo loads the current tags so get_author/get_title can fall
# back to existing Exif values when flags are omitted.
my $info = $exifTool->ImageInfo($filename);
set_metadata($exifTool, $args);
# WriteInfo rewrites the file in place (ExifTool keeps a backup).
my $err = $exifTool->WriteInfo($filename);
my $error_message = $exifTool->GetValue('Error');
if ($error_message) {
CORE::say 'ERROR: ' . $error_message;
CORE::say "To ignore [minor] errors (e.g. \"Bad PreviewIFD directory\")"
. ": --ignoreminorerrors";
exit 2;
}
my $warning_message = $exifTool->GetValue('Warning');
if ($warning_message) {
CORE::say 'WARNING: ' . $warning_message;
}
}
# Display the (newly updated) metadata
# parse_args([]) supplies an empty args object so display reads only
# what is actually stored in the file.
my $info = $exifTool->ImageInfo($filename);
display_metadata($exifTool, parse_args([]));
| creativecommons/exif | ccexif.pl | Perl | cc0-1.0 | 8,519 |
use strict;
use Bio::EnsEMBL::Registry;
# Connect anonymously to the public Ensembl database server and register
# all available species adaptors.
my $registry = 'Bio::EnsEMBL::Registry';
$registry->load_registry_from_db
(
-host => 'ensembldb.ensembl.org',
-user => 'anonymous',
);
#Grab the adaptors
# 'funcgen' selects the functional-genomics database for Human.
my $efg_db = $registry->get_DBAdaptor('Human', 'funcgen');
my $probe_adaptor = $efg_db->get_ProbeAdaptor;
my $pfeature_adaptor = $efg_db->get_ProbeFeatureAdaptor;
#Grab a probe from the HG17 array
my $probe = $probe_adaptor->fetch_by_array_probe_probeset_name
('2005-05-10_HG17Tiling_Set', 'chr22P38797630');
print "Got ".$probe->class." probe ".$probe->get_probename."\n";
#Grab the feature associated with this probe
my @pfeatures = @{$pfeature_adaptor->fetch_all_by_Probe($probe)};
print "\nFound ".scalar(@pfeatures)." ProbeFeatures\n";
#Print some info about the features
# feature_Slice->name gives the genomic location of each mapping.
foreach my $pfeature ( @pfeatures ){
print "\nProbeFeature found at:\t".$pfeature->feature_Slice->name."\n";
}
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-functgenomics/scripts/examples/probe_example.pl | Perl | apache-2.0 | 934 |
package Web::Summarizer::Graph2;
use Moose;
use Web::Summarizer::Graph2::Definitions;
use Web::Summarizer::Graph2::GistGraph;
use Digest::MD5 qw/md5_hex/;
use Encode qw(encode_utf8);
use JSON;
use List::MoreUtils;
my $DEBUG = 0;
has 'graph' => (is => 'ro', isa => 'Graph', required => 1);
has 'edge2edgeFeatures' => (is => 'rw', isa => 'HashRef', default => sub { {} });
has 'edge2objectFeatures' => (is => 'rw', isa => 'HashRef', default => sub { {} });
#has 'feature2edge' => (is => 'rw', isa => 'HashRef', default => sub { {} });
sub specialize {
# Build a GistGraph specialized for a target URL by attaching the
# instance's features and candidate slot fillers to the template graph.
# Returns the activated GistGraph object.
my $this = shift;
my $url = shift;
my $instance_features = shift;
my $instance_fillers = shift;
my $instance_features_reference = shift;
# Is a deep copy necessary ? --> no because we don't update the graph !
my $instance_graph = $this->graph();
# Instantiate gist graph for the target URL
my $gist_graph = new Web::Summarizer::Graph2::GistGraph( url => $url, controller => $this , graph => $instance_graph ,
features => $instance_features ,
features_reference => $instance_features_reference,
fillers => $instance_fillers
);
# Now replace slot node with candidate fillers
# 1 - activate slot nodes (alternate paths + missing filler for slot nodes)
$gist_graph->activate_nodes();
return $gist_graph;
}
sub _is_edge_feature {
    # Edge features are identified by purely numeric ids; object feature
    # ids contain "::"-separated components. Returns 1 or 0.
    my ($feature_id) = @_;
    return $feature_id =~ m/^\d+$/ ? 1 : 0;
}
sub _raw_object_feature_id {
    # The raw id is the last "::"-separated component of a feature id.
    my ($feature_id) = @_;
    return (split /::/, $feature_id)[-1];
}
=pod
# Update graph weights
sub _update_graph_weights {
my $this = shift;
my $weights = shift;
my $update_ids = shift;
my $params = shift;
# map update ids to edges
my %edge2updated;
foreach my $update_id (@{ $update_ids }) {
if ( ! _is_edge_feature( $update_id ) ) {
$update_id = _raw_object_feature_id( $update_id );
}
# is this feature active for the current instance ?
if ( $this->features()->{ $Web::Summarizer::Graph2::Definitions::FEATURES_KEY_EDGE }->{ $update_id } || $this->features()->{ $Web::Summarizer::Graph2::Definitions::FEATURES_KEY_OBJECT } ) {
next;
}
my $edge_mapping = $this->feature2edge()->{ $update_id };
if ( ! defined( $edge_mapping ) ) {
die "Problem: missing edge mapping for feature $update_id ...";
}
if ( ! defined( $edge2updated{ $update_id } ) ) {
if ( $DEBUG ) {
print STDERR "Updating edge cost for feature $update_id ...\n";
}
$this->_update_edge_cost( $weights , $edge_mapping , $params );
$edge2updated{ $update_id } = 1;
}
}
if ( $DEBUG ) {
print STDERR "\n";
}
};
=cut
# return all possible feature ids (edge + object) for the specified edge
# Collect both feature families for an edge: the edge-level features and
# the (edge-qualified) object-level features. Returns a two-element list
# of hashrefs.
sub _get_features {
    my ($this, $edge, $params) = @_;
    my ($from, $to) = @{ $edge };    # endpoints unused here; kept for parity
    my $edge_features   = $this->_get_edge_features($edge);
    my $object_features = $this->_get_object_features($edge, $params);
    return ($edge_features, $object_features);
}
# get edge feature ids
sub _get_edge_features {
# Return the edge-level feature hashref for an edge, decoding it from
# the JSON stored in the graph's edge attribute and caching the result
# in edge2edgeFeatures keyed by _edge_key.
my $this = shift;
my $edge = shift;
my $edge_key = $this->_edge_key( $edge );
if ( ! defined( $this->edge2edgeFeatures()->{ $edge_key } ) ) {
# TODO: should we skip when hitting edges involving virtual nodes ?
my $edge_feature_data_json = $this->graph()->get_edge_attribute( @{ $edge } , $Web::Summarizer::Graph2::Definitions::EDGE_ATTRIBUTE_FEATURES );
# Edges without the attribute get an empty feature set (cached too).
my $edge_feature_data = {};
if ( $edge_feature_data_json ) {
$edge_feature_data = decode_json( $edge_feature_data_json );
}
$this->edge2edgeFeatures()->{ $edge_key } = $edge_feature_data;
}
return $this->edge2edgeFeatures()->{ $edge_key };
}
sub _map_object_feature {
    # Qualify an object feature id with its edge endpoints:
    # "<from>::<to>::object::<feature_id>".
    my ($this, $feature_id, $edge) = @_;
    my ($from, $to) = @{ $edge };
    return join '::', $from, $to, 'object', $feature_id;
}
sub _map_object_features {
    # Map a collection of raw object feature ids to their edge-qualified
    # form, preserving the collection type: an arrayref maps to an
    # arrayref of qualified ids, a hashref maps keys (values unchanged).
    my ($this, $feature_ids, $edge) = @_;
    if (ref($feature_ids) eq 'ARRAY') {
        return [ map { $this->_map_object_feature($_, $edge) } @{ $feature_ids } ];
    }
    my %mapped;
    while (my ($id, $value) = each %{ $feature_ids }) {
        $mapped{ $this->_map_object_feature($id, $edge) } = $value;
    }
    return \%mapped;
}
# get object feature ids
sub _get_object_features {
# Return the instance's object features qualified with this edge's
# endpoints. The per-edge caching below was deliberately disabled
# (commented out) -- presumably because object features vary per call's
# $params rather than per edge; TODO confirm before re-enabling.
my $this = shift;
my $edge = shift;
my $params = shift;
my $edge_key = $this->_edge_key( $edge );
# if ( ! defined( $this->edge2objectFeatures()->{ $edge_key } ) ) {
my $object_features = $params->{ 'object_features' };
# my $raw_object_features = $entry_featurized->{ $Web::Summarizer::Graph2::Definitions::FEATURES_KEY_OBJECT };
# 2 - object features
# --> simply copy in the current object's features
# $this->edge2objectFeatures()->{ $edge_key } = $this->_map_object_features( $object_features , $edge );
# }
# return $this->edge2objectFeatures()->{ $edge_key };
return $this->_map_object_features( $object_features , $edge );
}
sub _feature_weight {
    # Weight for a feature id, falling back to the configured default
    # when the weights table has no (defined) entry for it.
    my ($weights, $feature_id) = @_;
    return defined $weights->{$feature_id}
        ? $weights->{$feature_id}
        : $Web::Summarizer::Graph2::Definitions::WEIGHT_DEFAULT;
}
sub sigmoid {
    # Standard logistic function: 1 / (1 + e^-t), mapping R to (0, 1).
    my ($t) = @_;
    return 1 / (1 + exp(-$t));
}
sub _edge_key {
# Stable cache key for an edge. In debug mode the key is the readable
# "from::to" join; otherwise it is "e_" plus the MD5 hex digest of the
# UTF-8 encoded join (encode_utf8 avoids md5_hex's wide-character croak).
my $this = shift;
my $edge = shift;
if ( $DEBUG ) {
return join("::", @{ $edge });
}
return ( "e_" . md5_hex( encode_utf8( join("::", @{ $edge }) ) ) );
}
no Moose;
1;
| ypetinot/web-summarization | summarizers/graph-summarizer-2/src/Web/Summarizer/Graph2.pm | Perl | apache-2.0 | 5,811 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V8::Enums::FeedItemTargetStatusEnum;
# Possible statuses of a feed item target in the Google Ads API (v8).
use strict;
use warnings;
# Const::Exporter makes each name an exportable constant whose value is
# its own string identifier, matching the API's enum wire format.
use Const::Exporter enums => [
UNSPECIFIED => "UNSPECIFIED",
UNKNOWN => "UNKNOWN",
ENABLED => "ENABLED",
REMOVED => "REMOVED"
];
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V8/Enums/FeedItemTargetStatusEnum.pm | Perl | apache-2.0 | 826 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::ReachPlanService;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseService);
# Each service method below issues a POST through the BaseService's
# SUPER::call with the v10 REST path and the expected response class.

sub generate_product_mix_ideas {
    my ($self, $request_body) = @_;
    return $self->SUPER::call(
        'POST',
        'v10/customers/{+customerId}:generateProductMixIdeas',
        $request_body,
        'Google::Ads::GoogleAds::V10::Services::ReachPlanService::GenerateProductMixIdeasResponse'
    );
}

sub generate_reach_forecast {
    my ($self, $request_body) = @_;
    return $self->SUPER::call(
        'POST',
        'v10/customers/{+customerId}:generateReachForecast',
        $request_body,
        'Google::Ads::GoogleAds::V10::Services::ReachPlanService::GenerateReachForecastResponse'
    );
}

sub list_plannable_locations {
    my ($self, $request_body) = @_;
    return $self->SUPER::call(
        'POST',
        'v10:listPlannableLocations',
        $request_body,
        'Google::Ads::GoogleAds::V10::Services::ReachPlanService::ListPlannableLocationsResponse'
    );
}

sub list_plannable_products {
    my ($self, $request_body) = @_;
    return $self->SUPER::call(
        'POST',
        'v10:listPlannableProducts',
        $request_body,
        'Google::Ads::GoogleAds::V10::Services::ReachPlanService::ListPlannableProductsResponse'
    );
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/ReachPlanService.pm | Perl | apache-2.0 | 2,276 |
#!/usr/bin/perl
# Copyright 2014 Lana Garmire, lgarmire@cc.hawaii.edu
#
# Summarise Weka 10-fold cross-validation reports found in a directory
# into a single CSV stream on STDOUT: one record per "*10fold*" file,
# holding the per-class accuracy metrics and the confusion matrix.
use strict;
use warnings;

my $resDir = shift();
defined $resDir or die "Usage: $0 <results_directory>\n";

opendir(my $dh, $resDir) or die $!;

# CSV header: feature count, positive-class metrics, negative-class
# metrics, then the four confusion-matrix cells.
print("NumFeatures,TPRate_P,FPRate_P,Prec_P,Rec_P,F_P,");
print("AUC_P,TPRate_N,FPRate_N,Prec_N,Rec_N,F_N,AUC_N,");
print("TP,FN,FP,TN\n");

while (my $file = readdir($dh))
{
    next if ($file =~ m/^\./);         # skip hidden entries
    next if ($file =~ m/.*\.model/);   # skip serialised Weka models
    next unless ($file =~ m/10fold/);  # only cross-validation reports

    #print STDERR "$file\n";
    open(my $in, '<', $resDir."/".$file) or die $!;

    # Section-state flags while scanning the Weka text report.
    my $accFlag = 0;  # inside "Detailed Accuracy By Class"
    my $conFlag = 0;  # inside "Confusion Matrix"
    my $cvFlag  = 0;  # seen the "Stratified cross-validation" banner
    my $count   = 0;  # line counter within the current section

    # The number of features is encoded in the file name as "top<N>.".
    my @numFeatures = ($file =~ m/top(\d+)\./);
    #print STDERR "@numFeatures\n";
    print("$numFeatures[0],");

    while (my $line = <$in>)
    {
        chomp($line);
        if ($conFlag == 1)
        {
            $count++;
            if ($count == 3)
            {
                # First data row of the confusion matrix: TP FN.
                my @tokens = split(/\s+/, $line);
                print("$tokens[1],$tokens[2],");
            }
            elsif ($count == 4)
            {
                # Second data row: FP TN -- closes the CSV record.
                my @tokens = split(/\s+/, $line);
                print("$tokens[1],$tokens[2]");
                print("\n");
            }
        }
        elsif ($accFlag == 1)
        {
            $count++;
            # Rows 3 and 4 hold the positive- and negative-class metrics;
            # the printed fields are identical for both rows.
            if ($count == 3 || $count == 4)
            {
                my @tokens = split(/\s+/, $line);
                print("$tokens[1],$tokens[2],$tokens[3],");
                print("$tokens[4],$tokens[5],$tokens[6],");
            }
        }
        # Section banners arm the flags in order: CV -> accuracy -> matrix.
        if ($line eq "=== Stratified cross-validation ===")
        {
            $cvFlag = 1;
            next;
        }
        if ($cvFlag == 1 && $line eq "=== Detailed Accuracy By Class ===")
        {
            $accFlag = 1;
            next;
        }
        if ($accFlag == 1 && $line eq "=== Confusion Matrix ===")
        {
            $conFlag = 1;
            $count = 0;
            next;
        }
    }
    close($in);
}
closedir($dh);
| lanagarmire/MirMark | Core/parseWekaResults.pl | Perl | apache-2.0 | 2,032 |
# SOAP::WSDL-generated typelib element for the AdGroupAdService 'getResponse'
# message.  NOTE(review): machine-generated, order-sensitive Class::Std
# setup -- do not reorder the __set_* / _factory calls.
package Google::Ads::AdWords::v201402::AdGroupAdService::getResponse;
use strict;
use warnings;

{ # BLOCK to scope variables

# XML namespace of all v201402 Campaign Management service elements.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201402' }

# Element-level XSD metadata (name; nillable/minOccurs/maxOccurs/ref unset).
__PACKAGE__->__set_name('getResponse');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();

use base qw(
    SOAP::WSDL::XSD::Typelib::Element
    Google::Ads::SOAP::Typelib::ComplexType
);

# This element carries no XML attributes.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out attribute storage for the single 'rval' property.
my %rval_of :ATTR(:get<rval>);

# Register the 'rval' property: storage, type and XML element name.
__PACKAGE__->_factory(
    [ qw(        rval
    ) ],
    {
        'rval' => \%rval_of,
    },
    {
        'rval' => 'Google::Ads::AdWords::v201402::AdGroupAdPage',
    },
    {
        'rval' => 'rval',
    }
);

} # end BLOCK

} # end of BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201402::AdGroupAdService::getResponse
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
getResponse from the namespace https://adwords.google.com/api/adwords/cm/v201402.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * rval
$element->set_rval($data);
$element->get_rval();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201402::AdGroupAdService::getResponse->new($data);
Constructor. The following data structure may be passed to new():
{
rval => $a_reference_to, # see Google::Ads::AdWords::v201402::AdGroupAdPage
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201402/AdGroupAdService/getResponse.pm | Perl | apache-2.0 | 1,763 |
#! /usr/bin/perl -w
use strict;
my %strokes;
# Bitwise-OR any number of numeric vectors (array refs) together.
# The result is as long as the longest input; missing positions are
# treated as zero.  Returns a list in list context, an arrayref otherwise.
sub or_vectors {
    my ($first, @rest) = @_;

    my @merged = @$first;
    foreach my $vec (@rest) {
        foreach my $idx (0 .. $#$vec) {
            $merged[$idx] = 0 unless defined $merged[$idx];
            $merged[$idx] |= $vec->[$idx];
        }
    }

    return wantarray ? @merged : \@merged;
}
# Forward declaration so the prototyped recursive call parses correctly.
sub handle ($);

# Process one input line.  Directive lines (.stroke / .macrostroke /
# .compose / .include) are interpreted against the global %strokes table;
# anything else is echoed verbatim.
#
# FIX: the original matched every regex against $_ instead of the $line
# parameter -- it only worked because every caller happened to pass $_.
# All matches now use $line, and captures replace the $' postmatch
# variable (which imposes a global regex performance penalty).
sub handle ($) {
    my ($line) = @_;
    if ($line =~ m/^\.stroke\s+([\w.-]+)\s+(.*)/s) {
        my ($name, $rest) = ($1, $2);
        die "duplicate stroke '$name'" if exists $strokes{$name};
        # Remaining fields are hex byte values.
        $strokes{$name} = [map {hex} split /\s+/, $rest];
        print "# $line";
    } elsif ($line =~ m/^\.macrostroke\s+([\w.-]+)\s+(.*)/s) {
        my ($name, $rest) = ($1, $2);
        die "duplicate stroke '$name'" if exists $strokes{$name};
        # A macrostroke is the OR of previously defined strokes.
        $strokes{$name} = or_vectors map {
            die "unknown stroke '$_'" unless exists $strokes{$_};
            $strokes{$_};
        } split /\s+/, $rest;
        print "# $line";
    } elsif ($line =~ m/^\.compose\s+([\dabcdefABCDEF]+)\s*:\s*(.*)/s) {
        my $code = hex $1;
        my @elements = split /\s+/, $2;
        # OR all referenced strokes into one glyph bitmap.
        my @result;
        for my $element (@elements) {
            my $stroke = $strokes{$element};
            die "unknown stroke $element" unless defined $stroke;
            push @result, 0 until @result >= @$stroke;
            $result[$_] |= $stroke->[$_] for 0 .. $#$stroke;
        }
        printf "%02X", $code;
        printf " %02X", $_ for @result;
        print "\n";
    } elsif ($line =~ m/^\.include\s+(.*)/s) {
        my $filename = $1;
        chomp $filename;
        open my $fh, '<', $filename
            or die "$filename: open for reading: $!";
        # Recursively process the included file line by line.
        while (<$fh>) {
            handle $_;
        }
        close $fh;
    } else {
        # Non-directive lines pass through unchanged.
        print $line;
    }
}
# Command-line sanity checks: need at least one input file, at most two args.
die "No input?" unless @ARGV;
die "Too many arguments" if @ARGV > 2;

# Emit a provenance banner, then stream every input line through handle().
print "### !!! Generated from $ARGV[0] by compose-glyphs.pl\n";
print "\n";
while (<>) {
    handle $_;
}
| digwuren/pixitie | compose-glyphs.pl | Perl | apache-2.0 | 1,660 |
# Rose::DB::Object model for the 'market_place_state' lookup table.
package Forge::Model::R::Oos::MarketPlaceState;

use strict;

use base qw(Forge::Model::R::Oos);

__PACKAGE__->meta->setup(
    table => 'market_place_state',

    columns => [
        id      => { type => 'bigint', not_null => 1 },
        version => { type => 'bigint', not_null => 1 },
        code    => { type => 'varchar', length => 255, not_null => 1 },
        name    => { type => 'varchar', length => 255, not_null => 1 },
        note    => { type => 'varchar', length => 512, not_null => 1 },
    ],

    primary_key_columns => [ 'id' ],

    # 'code' is the natural business key for a state.
    unique_key => [ 'code' ],

    relationships => [
        # One state row is referenced by many market_place rows.
        market_place => {
            class      => 'Forge::Model::R::Oos::MarketPlace',
            column_map => { id => 'market_place_state_id' },
            type       => 'one to many',
        },
    ],
);

# Generates the Manager class with get_market_place_state* query methods.
__PACKAGE__->meta->make_manager_class('market_place_state');

1;
| ant-i/db-crud | lib/Forge/Model/R/Oos/MarketPlaceState.pm | Perl | apache-2.0 | 884 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
package Bio::EnsEMBL::IdMapping::StableIdGenerator::AedesAegypti;

# Package that implements incrementing and verification of Aedes aegypti
# stable IDs as used by the VectorBase project.

use strict;
use warnings;

use base qw(Bio::EnsEMBL::IdMapping::StableIdGenerator::EnsemblGeneric);

# Increment a stable ID by one.
#
# Picks out the first run of digits in the stable ID (whatever the type of
# ID), adds one, and splices the result back in, preserving the original
# zero-padded width.  The parsing of the stable ID is deliberately naive.
#
# Arg[1]   : String - a valid Aedes aegypti stable ID (see is_valid)
# Returns  : String - the incremented stable ID
# Throws   : if the ID is missing, fails is_valid(), or has no numeric part
#            NOTE(review): throw() is not imported in this file; it is
#            presumably made available when Bio::EnsEMBL::Utils::Exception
#            is loaded elsewhere in the Ensembl stack -- confirm.
sub increment_stable_id {
  my ( $self, $stable_id ) = @_;

  if ( !$self->is_valid($stable_id) ) {
    throw("Unknown or missing stable ID: $stable_id.");
  }

  # Capture prefix, digit run and suffix explicitly and guard against IDs
  # without a numeric part; the original used $1/$2/$3 without checking
  # whether the match succeeded, which would silently reuse stale captures.
  my ( $prefix, $digits, $suffix ) = $stable_id =~ /^(\D*)(\d+)(\D*)/;
  if ( !defined($digits) ) {
    throw("Stable ID contains no numeric part: $stable_id.");
  }

  # '%0*d' takes the field width from the argument list, preserving the
  # zero-padded width of the original numeric part (e.g. AAEL000009 ->
  # AAEL000010).
  return sprintf( "%s%0*d%s", $prefix, length($digits), $digits + 1, $suffix );
}

# A stable ID is a valid Aedes aegypti stable ID if it is defined and
# begins with the character string "AAEL".  Returns 1/0.
sub is_valid {
  my ( $self, $stable_id ) = @_;

  return 0 unless defined($stable_id) && $stable_id =~ /^AAEL/;
  return 1;
}

1;
| Ensembl/ensembl | modules/Bio/EnsEMBL/IdMapping/StableIdGenerator/AedesAegypti.pm | Perl | apache-2.0 | 2,239 |
=head1 LICENSE
See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=pod
=head1 NAME
Bio::EnsEMBL::Compara::RunnableDB::StableIdMapper
=cut
=head1 SYNOPSIS
# compute and store the stable_id mapping between trees of rel.63 and trees of rel.64:
time standaloneJob.pl Bio::EnsEMBL::Compara::RunnableDB::StableIdMapper \
-compara_db "mysql://ensadmin:${ENSADMIN_PSW}@compara3/mm14_compara_homology_64" \
-master_db "mysql://ensadmin:${ENSADMIN_PSW}@compara1/mm14_ensembl_compara_master" \
-prev_rel_db "mysql://ensro@compara1/lg4_ensembl_compara_63" -type t
=cut
=head1 DESCRIPTION
This RunnableDB computes and stores stable_id mapping of either for ProteinTrees or Families between releases.
=cut
=head1 CONTACT
Contact anybody in Compara.
=cut
package Bio::EnsEMBL::Compara::RunnableDB::StableIdMapper;
use strict;
use warnings;
use Bio::EnsEMBL::Compara::StableId::Adaptor;
use Bio::EnsEMBL::Compara::StableId::NamedClusterSetLink;
use Scalar::Util qw(looks_like_number);
use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');
sub fetch_input {
    my $self = shift @_;

    # Without a previous-release database there is nothing to map from,
    # so finish the job early instead of failing.
    my $prev_rel_db = $self->param('prev_rel_db');
    if(! $prev_rel_db) {
        $self->complete_early("Not running as 'prev_rel_db' not given in parameters\n");
    }

    $self->param_required('master_db');

    my $type = $self->param_required('type'); # must be 't' or 'f'

    # Release numbers default to the schema versions of the respective
    # databases when not passed in explicitly.
    my $curr_release = $self->param('release') || $self->compara_dba->get_MetaContainer->get_schema_version;
    my $prev_rel_dba = $self->get_cached_compara_dba('prev_rel_db');
    my $prev_release = $self->param('prev_release') || $prev_rel_dba->get_MetaContainer->get_schema_version;

    # Load the named cluster sets of both releases and link them together;
    # the link object is what run() operates on.
    my $adaptor = Bio::EnsEMBL::Compara::StableId::Adaptor->new();
    my $from_ncs = $adaptor->fetch_ncs($prev_release, $type, $prev_rel_dba->dbc());
    my $to_ncs = $adaptor->fetch_ncs($curr_release, $type, $self->compara_dba->dbc());
    my $ncsl = Bio::EnsEMBL::Compara::StableId::NamedClusterSetLink->new(-FROM => $from_ncs, -TO => $to_ncs);

    # Stash everything for run() / write_output().
    $self->param('adaptor', $adaptor);
    $self->param('ncsl', $ncsl);
    $self->param('prev_release', $prev_release); #replace it with whatever it is now
}
sub run {
    my $self = shift @_;

    # Release the DB connection before the (potentially long) in-memory step.
    $self->compara_dba()->dbc()->disconnect_if_idle();

    my $link = $self->param('ncsl');

    # Compute the assignment that maximises reuse of the previous release's
    # names, then apply it to the current ("to") cluster set.
    my $post_map = $link->maximum_name_reuse();
    $link->to->apply_map($post_map);
}
sub write_output {
    my $self = shift @_;

    my $adaptor = $self->param('adaptor');
    my $ncsl = $self->param('ncsl');

    # The stable-ID history is stored in the master database, which needs
    # read/write credentials.
    my $master_dbc = $self->get_cached_compara_dba('master_db')->dbc();
    $self->elevate_privileges($master_dbc);

    # Timestamp recorded alongside the history rows.
    my $time_when_started_storing = time();

    # Store the mapping and its history; wrap in eval so a failure can be
    # reported with a hint about master-database credentials.
    eval {
        $adaptor->store_map($ncsl->to, $self->compara_dba()->dbc());
        $adaptor->store_history($ncsl, $self->compara_dba()->dbc(), $time_when_started_storing, $master_dbc);
    };
    if($@) {
        die "Detected error during store. Check your database settings are correct for the master database (read/write): $@";
    }
}
1;
| Ensembl/ensembl-compara | modules/Bio/EnsEMBL/Compara/RunnableDB/StableIdMapper.pm | Perl | apache-2.0 | 3,613 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::perle::ids::snmp::mode::systemusage;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
# Declare the counters this mode exposes: CPU load, free memory and free
# flash-disk space, all under a single 'global' group whose sub-outputs
# are joined with ' - '.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'global', type => 0, message_separator => ' - ' },
    ];

    $self->{maps_counters}->{global} = [
        # CPU load as a 0-100 percentage gauge.
        { label => 'cpu-load', nlabel => 'cpu.utilization.percentage', set => {
                key_values => [ { name => 'cpu_load' } ],
                output_template => 'cpu load : %.2f %%',
                perfdatas => [
                    { value => 'cpu_load', template => '%.2f',
                      min => 0, max => 100, unit => '%' },
                ],
            }
        },
        # Free memory in bytes (output auto-scaled via output_change_bytes).
        { label => 'memory-free', nlabel => 'memory.free.bytes', set => {
                key_values => [ { name => 'memory_free' } ],
                output_template => 'memory free : %s %s',
                output_change_bytes => 1,
                perfdatas => [
                    { value => 'memory_free', template => '%d',
                      min => 0, unit => 'B' },
                ],
            }
        },
        # Free flash-disk space in bytes (output auto-scaled as well).
        { label => 'flashdisk-free', nlabel => 'flashdisk.free.bytes', set => {
                key_values => [ { name => 'flashdisk_free' } ],
                output_template => 'flash disk free : %s %s',
                output_change_bytes => 1,
                perfdatas => [
                    { value => 'flashdisk_free', template => '%d',
                      min => 0, unit => 'B' },
                ],
            }
        },
    ];
}
# Constructor: delegate to the counter-template base class, requesting the
# new (nlabel-based) perfdata naming scheme.  This mode adds no command-line
# options of its own.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(
        package => __PACKAGE__,
        %options,
        force_new_perfdata => 1,
    );
    bless $self, $class;

    $options{options}->add_options(arguments => {});

    return $self;
}
# Query the Perle IDS private MIB scalars and populate $self->{global}
# with cpu_load (%), memory_free (bytes) and flashdisk_free (bytes).
sub manage_selection {
    my ($self, %options) = @_;

    my $oid_cpu   = '.1.3.6.1.4.1.1966.22.44.1.24.0';
    my $oid_mem   = '.1.3.6.1.4.1.1966.22.44.1.25.0'; # e.g. "295928 Kbytes free"
    my $oid_flash = '.1.3.6.1.4.1.1966.22.44.1.26.0';

    my $snmp_result = $options{snmp}->get_leef(
        oids => [
            $oid_cpu, $oid_mem, $oid_flash
        ],
        nothing_quit => 1
    );

    # Values stay undef when the corresponding leaf is absent or unparsable.
    my ($cpu_load, $mem_free, $flashdisk_free);
    if (defined($snmp_result->{$oid_cpu}) && $snmp_result->{$oid_cpu} =~ /((?:\d+)(?:\.\d+)?)/) {
        $cpu_load = $1;
    }
    if (defined($snmp_result->{$oid_mem}) && $snmp_result->{$oid_mem} =~ /(\d+)/) {
        $mem_free = $1 * 1024;          # device reports KB; counters want bytes
    }
    if (defined($snmp_result->{$oid_flash}) && $snmp_result->{$oid_flash} =~ /(\d+)/) {
        $flashdisk_free = $1 * 1024;    # same KB -> bytes conversion
    }

    $self->{global} = {
        cpu_load       => $cpu_load,
        memory_free    => $mem_free,
        flashdisk_free => $flashdisk_free,
    };
}
1;
__END__
=head1 MODE
Check system usage.
=over 8
=item B<--filter-counters>
Only display some counters (regexp can be used).
Example: --filter-counters='^memory-free$'
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'memory-free' (B), 'cpu-load' (%), 'flashdisk-free' (B)
=back
=cut
| centreon/centreon-plugins | network/perle/ids/snmp/mode/systemusage.pm | Perl | apache-2.0 | 3,960 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package HiveRNASeq_conf;
use strict;
use warnings;
use feature 'say';
use base ('Bio::EnsEMBL::Analysis::Hive::Config::HiveBaseConfig_conf');
use Bio::EnsEMBL::ApiVersion qw/software_version/;
# Default configuration for the RNASeq pipeline.  Everything in the
# "CHANGE STUFF HERE" section must be filled in per run; the rest rarely
# needs editing.
sub default_options {
    my ($self) = @_;
    return {
        # inherit other stuff from the base class
        %{ $self->SUPER::default_options() },

        ##########################################################################
        #                                                                        #
        # CHANGE STUFF HERE                                                      #
        #                                                                        #
        ##########################################################################

        'pipeline_name' => '',

        # DB credentials: 'user_r' is the read-only user, 'user'/'password'
        # the read-write pair.
        'user_r' => '',
        'user' => '',
        'password' => '',
        'port' => '',

        species => '',

        'pipe_dbname' => $ENV{USER}.'_'.$self->o('pipeline_name').'_hive',
        'dna_dbname' => '',
        'blast_output_dbname' => $ENV{USER}.'_hive_'.$self->o('species').'_blast',
        'refine_output_dbname' => $ENV{USER}.'_hive_'.$self->o('species').'_refine',
        'rough_output_dbname' => $ENV{USER}.'_hive_'.$self->o('species').'_rough',

        'pipe_db_server' => '',
        'dna_db_server' => '',
        'blast_output_db_server' => '',
        'refine_output_db_server' => '',
        'rough_output_db_server' => '',

        # Relative genome paths are resolved against 'input_dir' in
        # pipeline_wide_parameters().
        'genome_file' => 'genome/genome.fa',
        'use_ucsc_naming' => 0,

        'clone_db_script_path' => $ENV{ENSCODE}.'/ensembl-analysis/scripts/clone_database.ksh',
        'create_type' => 'clone',

        'rnaseq_summary_file' => '',

        # External tool binaries / jars.
        'samtools' => 'samtools',
        'picard_lib_jar' => 'picard.jar',
        'short_read_aligner' => 'bwa',

        # Empty directory options fall back to defaults under output_dir
        # (see pipeline_wide_parameters).
        'input_dir' => '',
        output_dir => '',
        'merge_dir' => '',
        'sam_dir' => '',
        'sequence_dump_script' => $ENV{ENSCODE}.'/ensembl-analysis/scripts/sequence_dump.pl',

        # Use this option to change the delimiter for your summary data
        # file.
        summary_file_delimiter => '\t',
        summary_csv_table => 'csv_data',

        assembly_name => '',

        # Blast database for comparing the final models to.
        uniprotdb => '',
        # Index for the blast database.
        uniprotindex => '',
        blastp => 'blastall',
        # blast used, it can be either ncbi or wu, it is overriding the -type value from BLAST_PARAMS
        blast_type => 'ncbi',

        splicing_aligner => 'exonerate-0.9.0',

        # If your reads are unpaired you may want to run on slices to avoid
        # making overlong rough models. If you want to do this, specify a
        # slice length here otherwise it will default to whole chromosomes.
        slice_length => 10000000,

        # Regular expression to allow FastQ files to be correctly paired,
        # for example: file_1.fastq and file_2.fastq could be paired using
        # the expression "\S+_(\d)\.\S+". Need to identify the read number
        # in brackets; the name the read number (1, 2) and the
        # extension.
        pairing_regex => '\S+_(\d)\.\S+',
        paired => 1,

        # Do you want to make models for the each individual sample as well
        # as for the pooled samples (1/0)?
        single_tissue => 1,

        # What Read group tag would you like to group your samples
        # by? Default = ID
        read_group_tag => 'SM',
        read_id_tag => 'ID',

        use_threads => 3,
        # Minimum percentages of paired/mapped reads for a BAM to pass.
        read_min_paired => 50,
        read_min_mapped => 50,

        # Please assign some or all columns from the summary file to the
        # some or all of the following categories. Multiple values can be
        # separted with commas. ID, SM, DS, CN, is_paired, filename, read_length, is_13plus,
        # is_mate_1 are required. If pairing_regex can work for you, set is_mate_1 to -1.
        # You can use any other tag specified in the SAM specification:
        # http://samtools.github.io/hts-specs/SAMv1.pdf

        ####################################################################
        # This is just an example based on the file snippet shown below. It
        # will vary depending on how your data looks.
        ####################################################################
        file_columns => ['SM', 'ID', 'is_paired', 'filename', 'is_mate_1', 'read_length', 'is_13plus', 'CN', 'PL', 'DS'],

        ##########################################################################
        #                                                                        #
        # MOSTLY STAYS CONSTANT, MOSTLY                                          #
        #                                                                        #
        ##########################################################################

        # Connection hashes for the three output databases.
        'blast_output_db' => {
            -dbname => $self->o('blast_output_dbname'),
            -host => $self->o('blast_output_db_server'),
            -port => $self->o('port'),
            -user => $self->o('user'),
            -pass => $self->o('password'),
        },

        'refine_output_db' => {
            -dbname => $self->o('refine_output_dbname'),
            -host => $self->o('refine_output_db_server'),
            -port => $self->o('port'),
            -user => $self->o('user'),
            -pass => $self->o('password'),
        },

        'rough_output_db' => {
            -dbname => $self->o('rough_output_dbname'),
            -host => $self->o('rough_output_db_server'),
            -port => $self->o('port'),
            -user => $self->o('user'),
            -pass => $self->o('password'),
        },
    };
}
# Parameters visible to every analysis in the pipeline ("wide_*" keys).
# Optional directories default to locations under output_dir, and a
# relative genome path is resolved against input_dir.
sub pipeline_wide_parameters {
    my ($self) = @_;

    my $sam_dir   = $self->o('sam_dir')   || $self->o('output_dir').'/SAM';
    my $merge_dir = $self->o('merge_dir') || $self->o('output_dir').'/merge_out';

    my $genome_file = $self->o('genome_file');
    $genome_file = $self->o('input_dir').'/'.$genome_file unless $genome_file =~ m{^/};

    return {
        %{ $self->SUPER::pipeline_wide_parameters() }, # inherit other stuff from the base class

        wide_genome_file        => $genome_file,
        wide_input_dir          => $self->o('input_dir'),
        wide_output_dir         => $self->o('output_dir'),
        wide_merge_dir          => $merge_dir,
        wide_short_read_aligner => $self->o('short_read_aligner'),
        wide_samtools           => $self->o('samtools'),
        wide_output_sam_dir     => $sam_dir,
        wide_species            => $self->o('species'),
        wide_use_ucsc_naming    => $self->o('use_ucsc_naming'),
        wide_intron_bam_file    => $self->o('output_dir').'/introns',
    };
}
# Build the CREATE TABLE statement for the csv summary table and append it
# to the base class's creation commands.
sub pipeline_create_commands {
    my ($self) = @_;

    my $tables;
    # Columns stored as small integers rather than strings.
    # NOTE(review): 'paired' here does not match any entry of
    # 'file_columns' (which uses 'is_paired'), so 'is_paired' currently
    # ends up as VARCHAR(50) instead of SMALLINT -- confirm whether this
    # key should be 'is_paired'.
    my %small_columns = (
        paired => 1,
        read_length => 1,
        is_13plus => 1,
        is_mate_1 => 1,
    );
    # We need to store the values of the csv file to easily process it. It will be used at different stages
    foreach my $key (@{$self->default_options->{'file_columns'}}) {
        if (exists $small_columns{$key}) {
            $tables .= $key.' SMALLINT UNSIGNED NOT NULL,'
        }
        elsif ($key eq 'DS') {
            # Free-text description column gets the widest type.
            $tables .= $key.' VARCHAR(255) NOT NULL,'
        }
        else {
            $tables .= $key.' VARCHAR(50) NOT NULL,'
        }
    }
    # Index the sample and read-group id columns used by later queries.
    $tables .= ' KEY(SM), KEY(ID)';

    return [
        # inheriting database and hive tables' creation
        @{$self->SUPER::pipeline_create_commands},

        $self->db_cmd('CREATE TABLE '.$self->default_options->{'summary_csv_table'}." ($tables)"),
    ];
}
=head2 create_header_line
Arg [1] : Arrayref String, it will contains the values of 'file_columns'
Example : create_header_line($self->o('file_columns');
Description: It will create a RG line using only the keys present in your csv file
Returntype : String representing the RG line in a BAM file
Exceptions : None
=cut
# Build a BAM @RG header template containing only the read-group tags that
# appear in the csv columns.  Each present tag becomes "\tTAG:#TAG#" so the
# placeholders can be substituted later.  Returns the line with a trailing
# newline.
sub create_header_line {
    my ($items) = @_;

    # Tags permitted in a @RG line, in the order they should be emitted.
    my @read_tags = qw(ID SM DS CN DT FO KS LB PG PI PL PM PU);

    my %wanted = map { $_ => 1 } @$items;

    my $read_line = '@RG';
    for my $tag (@read_tags) {
        $read_line .= "\t$tag:#$tag#" if $wanted{$tag};
    }
    return $read_line . "\n";
}
## See diagram for pipeline structure
sub pipeline_analyses {
my ($self) = @_;
my $header_line = create_header_line($self->default_options->{'file_columns'});
my @analysis = (
{
-logic_name => 'checking_file_path',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-meadow_type => 'LOCAL',
-parameters => {
cmd => 'EXIT_CODE=0; for F in #wide_short_read_aligner# #wide_samtools# '.join (' ', $self->o('splicing_aligner'), $self->o('clone_db_script_path'), $self->o('sequence_dump_script'), $self->o('blastp')).'; do which "$F"; if [ "$?" == 1 ]; then EXIT_CODE=1;fi; done; for D in #wide_output_dir# #wide_input_dir# #wide_merge_dir# #wide_output_sam_dir# `dirname #wide_genome_file#`; do mkdir -p "$D"; done; exit $EXIT_CODE',
},
-input_ids => [{}],
-flow_into => {
'1->A' => ['create_rnaseq_genome_file'],
'A->1' => ['parse_summary_file'],
},
},
{
-logic_name => 'create_rnaseq_genome_file',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-rc_name => '1GB',
-parameters => {
cmd => 'if [ ! -e "#wide_genome_file#" ]; then perl '.$self->o('sequence_dump_script').' -dbhost '.$self->o('dna_db_server').' -dbuser '.$self->o('user_r').' -dbport '.$self->o('port').' -dbname '.$self->o('dna_dbname').' -coord_system_name '.$self->o('assembly_name').' -toplevel -onefile -header rnaseq -filename #wide_genome_file#;fi',
},
-flow_into => {
1 => [ 'index_genome_file'],
},
},
{
-logic_name => 'index_genome_file',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-rc_name => '5GB',
-parameters => {
cmd => 'if [ ! -e "#wide_genome_file#.ann" ]; then #wide_short_read_aligner# index -a bwtsw #wide_genome_file#;fi',
},
},
{
-logic_name => 'parse_summary_file',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveParseCsvIntoTable',
-meadow_type => 'LOCAL',
-parameters => {
column_names => $self->o('file_columns'),
sample_column => $self->o('read_group_tag'),
inputfile => $self->o('rnaseq_summary_file'),
delimiter => $self->o('summary_file_delimiter'),
csvfile_table => $self->o('summary_csv_table'),
pairing_regex => $self->o('pairing_regex'),
},
-flow_into => {
'2->A' => [ 'create_tissue_jobs'],
'A->1' => [ 'merged_bam_file' ],
},
},
{
-logic_name => 'create_tissue_jobs',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::JobFactory',
-parameters => {
inputquery => join(' ', 'SELECT', $self->o('read_group_tag'), ',', $self->o('read_id_tag'), ', is_paired', 'FROM', $self->o('summary_csv_table'), 'WHERE', $self->o('read_group_tag'), '= "#sample_name#"'),
column_names => [$self->o('read_group_tag'), $self->o('read_id_tag'), 'is_paired'],
},
-meadow_type => 'LOCAL',
-flow_into => {
'2->A' => ['create_bwa_jobs'],
'A->1' => ['merged_tissue_file'],
},
},
{
-logic_name => 'create_bwa_jobs',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCreateBWAJobs',
-parameters => {
sample_column => $self->o('read_group_tag'),
sample_id_column => $self->o('read_id_tag'),
csvfile_table => $self->o('summary_csv_table'),
column_names => $self->o('file_columns'),
use_threading => $self->o('use_threads'),
},
-meadow_type => 'LOCAL',
-flow_into => {
'2->A' => ['bwa', 'create_header_files'],
'A->1' => ['bwa2bam'],
},
},
{
-logic_name => 'create_header_files',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-meadow_type => 'LOCAL',
-parameters => {
cmd => 'if [ ! -e "#wide_output_dir#/#'.$self->o('read_id_tag').'#_header.h" ]; then printf "'.$header_line.'" > #wide_output_dir#/#'.$self->o('read_id_tag').'#_header.h; fi',
},
},
{
-logic_name => 'bwa',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBWA',
-flow_into => {
1 => [ ':////accu?filename=[]' ],
-1 => [ 'bwa_failed' ],
-2 => [ 'bwa_failed' ],
},
-rc_name => '5GB_multithread',
},
{
-logic_name => 'bwa_failed',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBWA',
-flow_into => {
1 => [ ':////accu?filename=[]' ],
},
-rc_name => '10GB_multithread',
},
{
-logic_name => 'bwa2bam',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBWA2BAM',
-parameters => {
sampe_options => '-A -a 200000',
samse_options => '',
min_paired => $self->o('read_min_paired'),
min_mapped => $self->o('read_min_mapped'),
header_file => '#wide_output_dir#/#'.$self->o('read_id_tag').'#_header.h',
bam_prefix => $self->o('read_id_tag'),
},
-flow_into => {
1 => [ ':////accu?filename=[]' ],
-1 => {'bwa2bam_10GB' => { filename => '#filename#', is_paired => '#is_paired#', $self->o('read_id_tag') => '#'.$self->o('read_id_tag').'#', $self->o('read_group_tag') => '#'.$self->o('read_group_tag').'#',}},
-2 => {'bwa2bam_10GB' => { filename => '#filename#', is_paired => '#is_paired#', $self->o('read_id_tag') => '#'.$self->o('read_id_tag').'#', $self->o('read_group_tag') => '#'.$self->o('read_group_tag').'#',}},
},
-rc_name => '5GB',
},
{
-logic_name => 'bwa2bam_10GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBWA2BAM',
-parameters => {
sampe_options => '-A -a 200000',
samse_options => '',
min_paired => $self->o('read_min_paired'),
min_mapped => $self->o('read_min_mapped'),
header_file => '#wide_output_dir#/#'.$self->o('read_id_tag').'#_header.h',
bam_prefix => $self->o('read_id_tag'),
},
-flow_into => {
1 => [ ':////accu?filename=[]' ],
},
-rc_name => '10GB',
},
{
-logic_name => 'create_rough_output_db',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCreateDatabase',
-parameters => {
source_db => $self->o('dna_db'),
target_db => $self->o('rough_output_db'),
create_type => $self->o('create_type'),
script_path => $self->o('clone_db_script_path'),
},
-meadow => 'LOCAL',
-input_ids => [{}],
},
{
-logic_name => 'create_refine_output_db',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCreateDatabase',
-parameters => {
source_db => $self->o('dna_db'),
target_db => $self->o('refine_output_db'),
create_type => $self->o('create_type'),
script_path => $self->o('clone_db_script_path'),
},
-meadow => 'LOCAL',
-input_ids => [{}],
},
{
-logic_name => 'create_blast_output_db',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCreateDatabase',
-parameters => {
source_db => $self->o('dna_db'),
target_db => $self->o('blast_output_db'),
create_type => $self->o('create_type'),
script_path => $self->o('clone_db_script_path'),
},
-meadow => 'LOCAL',
-input_ids => [{}],
},
{
-logic_name => 'merged_tissue_file',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveMergeBamFiles',
-parameters => {
java => 'java',
java_options => '-Xmx2g',
# If 0, do not use multithreading, faster but can use more memory.
# If > 0, tells how many cpu to use for samtools or just to use multiple cpus for picard
use_threading => $self->o('use_threads'),
# Path to MergeSamFiles.jar
picard_lib => $self->o('picard_lib_jar'),
# Use this default options for Picard: 'MAX_RECORDS_IN_RAM=20000000 CREATE_INDEX=true SORT_ORDER=coordinate ASSUME_SORTED=true VALIDATION_STRINGENCY=LENIENT'
# You will need to change the options if you want to use samtools for merging
options => 'MAX_RECORDS_IN_RAM=20000000 CREATE_INDEX=true SORT_ORDER=coordinate ASSUME_SORTED=true VALIDATION_STRINGENCY=LENIENT',
},
-rc_name => '3GB_multithread',
-flow_into => {
1 => [ ':////accu?filename=[]' ],
},
},
{
-logic_name => 'merged_bam_file',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveMergeBamFiles',
-parameters => {
java => 'java',
java_options => '-Xmx2g',
# If 0, do not use multithreading, faster but can use more memory.
# If > 0, tells how many cpu to use for samtools or just to use multiple cpus for picard
use_threading => $self->o('use_threads'),
# Path to MergeSamFiles.jar
picard_lib => $self->o('picard_lib_jar'),
# Use this default options for Picard: 'MAX_RECORDS_IN_RAM=20000000 CREATE_INDEX=true SORT_ORDER=coordinate ASSUME_SORTED=true VALIDATION_STRINGENCY=LENIENT'
# You will need to change the options if you want to use samtools for merging
options => 'MAX_RECORDS_IN_RAM=20000000 CREATE_INDEX=true SORT_ORDER=coordinate ASSUME_SORTED=true VALIDATION_STRINGENCY=LENIENT',
},
-rc_name => '3GB_multithread',
-flow_into => {
1 => ['create_header_intron', 'clean_sai_files'],
},
},
{
-logic_name => 'clean_sai_files',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-meadow_type => 'LOCAL',
-parameters => {
cmd => 'rm #wide_output_dir#/*.sai',
},
},
{
-logic_name => 'create_header_intron',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-meadow_type => 'LOCAL',
-parameters => {
cmd => '#wide_samtools# view -H #filename# | grep -v @SQ | grep -v @HD > #wide_output_dir#/merged_header.h',
},
-flow_into => {
'1->A' => [ 'create_top_level_input_ids'],
'A->1' => ['sam2bam'],
},
},
{
-logic_name => 'create_top_level_input_ids',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveSubmitAnalysis',
-rc_name => '1GB',
-parameters => {
iid_type => 'slice',
coord_system_name => 'toplevel',
slice => 1,
include_non_reference => 0,
top_level => 1,
target_db => $self->o('rough_output_db'),
},
-wait_for => ['create_rough_output_db'],
-flow_into => {
2 => {'dispatch_toplevel' => {'iid' => '#iid#', alignment_bam_file => '#filename#'}},
},
},
{
-logic_name => 'dispatch_toplevel',
-module => 'Bio::EnsEMBL::Hive::RunnableDB::SystemCmd',
-rc_name => '1GB',
-batch_size => 1000,
-parameters => {
cmd => 'EXIT_CODE=1; if [ "`echo #iid# | cut -d \':\' -f1`" = "chromosome" ]; then EXIT_CODE=2; else EXIT_CODE=0;fi; exit $EXIT_CODE',
return_codes_2_branches => {'2' => 2},
},
-flow_into => {
1 => ['rough_transcripts'],
2 => ['rough_transcripts_5GB'],
},
},
{
-logic_name => 'rough_transcripts',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBam2Genes',
-parameters => {
logic_name => 'rough_transcripts',
output_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
alignment_bam_file => '#wide_merge_dir#/merged.bam',
min_length => 300,
min_exons => 1,
max_intron_length => 200000,
min_single_exon_length => 1000,
min_span => 1.5,
paired => $self->o('paired'),
pairing_regex => $self->o('pairing_regex'),
},
-rc_name => '2GB_rough',
-flow_into => {
1 => ['create_bam2introns_input_ids'],
-1 => {'rough_transcripts_5GB' => {'iid' => '#iid#', alignment_bam_file => '#alignment_bam_file#'}},
-2 => {'rough_transcripts_5GB' => {'iid' => '#iid#', alignment_bam_file => '#alignment_bam_file#'}},
},
},
{
-logic_name => 'rough_transcripts_5GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBam2Genes',
-parameters => {
logic_name => 'rough_transcripts',
output_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
alignment_bam_file => '#wide_merge_dir#/merged.bam',
min_length => 300,
min_exons => 1,
max_intron_length => 200000,
min_single_exon_length => 1000,
min_span => 1.5,
paired => $self->o('paired'),
pairing_regex => $self->o('pairing_regex'),
},
-rc_name => '5GB_rough',
-flow_into => {
1 => ['create_bam2introns_input_ids'],
-1 => {'rough_transcripts_15GB' => {'iid' => '#iid#', alignment_bam_file => '#alignment_bam_file#'}},
-2 => {'rough_transcripts_15GB' => {'iid' => '#iid#', alignment_bam_file => '#alignment_bam_file#'}},
},
},
{
-logic_name => 'rough_transcripts_15GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBam2Genes',
-parameters => {
logic_name => 'rough_transcripts',
output_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
alignment_bam_file => '#wide_merge_dir#/merged.bam',
min_length => 300,
min_exons => 1,
max_intron_length => 200000,
min_single_exon_length => 1000,
min_span => 1.5,
paired => $self->o('paired'),
pairing_regex => $self->o('pairing_regex'),
},
-rc_name => '15GB_rough',
-flow_into => {
1 => ['create_bam2introns_input_ids'],
},
},
{
-logic_name => 'create_bam2introns_input_ids',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveSubmitAnalysis',
-parameters => {
iid_type => 'slice_to_feature_ids',
target_db => $self->o('rough_output_db'),
feature_type => 'gene',
logic_name => ['rough_transcripts'],
use_stable_ids => 1,
create_stable_ids => 1,
stable_id_prefix => 'RNASEQ',
},
-rc_name => '1GB_rough',
-batch_size => 100,
-flow_into => {
2 => ['bam2introns'],
},
},
{
-logic_name => 'bam2introns',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBam2Introns',
-parameters => {
program_file => $self->o('splicing_aligner'),
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
missmatch => 6,
word_length => 10,
saturate_threshold => 10000,
mask => 1,
percent_id => 97,
coverage => 90,
fullseq => 1,
max_transcript => 1000000,
batch_size => 10000,
bam_file => '#wide_merge_dir#/merged.bam',
},
-rc_name => '2GB_introns',
-flow_into => {
1 => [':////accu?filename=[]'],
2 => ['bam2introns'],
-1 => ['bam2introns_5GB'],
-2 => ['bam2introns_5GB'],
},
},
{
-logic_name => 'bam2introns_5GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBam2Introns',
-parameters => {
program_file => $self->o('splicing_aligner'),
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
missmatch => 6,
word_length => 10,
saturate_threshold => 10000,
mask => 1,
percent_id => 97,
coverage => 90,
fullseq => 1,
max_transcript => 1000000,
batch_size => 10000,
bam_file => '#wide_merge_dir#/merged.bam',
},
-rc_name => '5GB_introns',
-flow_into => {
1 => [':////accu?filename=[]'],
2 => ['bam2introns'],
-1 => ['bam2introns_10GB'],
-2 => ['bam2introns_10GB'],
},
},
{
-logic_name => 'bam2introns_10GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBam2Introns',
-parameters => {
program_file => $self->o('splicing_aligner'),
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
missmatch => 6,
word_length => 10,
saturate_threshold => 10000,
mask => 1,
percent_id => 97,
coverage => 90,
fullseq => 1,
max_transcript => 1000000,
batch_size => 10000,
bam_file => '#wide_merge_dir#/merged.bam',
},
-rc_name => '10GB_introns',
-flow_into => {
1 => [':////accu?filename=[]'],
2 => ['bam2introns'],
},
},
{
-logic_name => 'sam2bam',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveSam2Bam',
-parameters => {
regex => '.sam',
headerfile => '#wide_output_dir#/merged_header.h',
},
-rc_name => '2GB',
-flow_into => ['create_top_level_input_ids_again'],
},
{
-logic_name => 'create_top_level_input_ids_again',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveSubmitAnalysis',
-rc_name => '1GB',
-parameters => {
iid_type => 'slice',
coord_system_name => 'toplevel',
slice => 1,
include_non_reference => 0,
top_level => 1,
target_db => $self->o('rough_output_db'),
},
-flow_into => {
2 => ['create_refine_genes_jobs'],
},
},
{
-logic_name => 'create_refine_genes_jobs',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCreateRefineGenesJobs',
-parameters => {
single_tissue => $self->o('single_tissue'),
sample_column => $self->o('read_group_tag'),
sample_id_column => $self->o('read_id_tag'),
csvfile_table => $self->o('summary_csv_table'),
},
-rc_name => '1GB',
-batch_size => 100,
-flow_into => {
2 => ['refine_genes'],
},
},
{
-logic_name => 'refine_genes',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveRefineSolexaGenes',
-parameters => {
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
output_db => $self->o('refine_output_db'),
# write the intron features into the OUTPUT_DB along with the models
write_introns => 1,
# maximum number of times to loop when building all possible paths through the transcript
max_recursions => 100000,
# analysis logic_name for the dna_align_features to fetch from the INTRON_DB
# If left blank all features will be fetched
logicname => [],
# logic name of the gene models to fetch
model_ln => '',
# penalty for removing a retined intron
retained_intron_penalty => 2,
#Remove introns that overlap X introns
filter_on_overlap => 0,
# minimum size for an intron
min_intron_size => 30,
max_intron_size => 200000,
# biotype to give to single exon models if left blank single exons are ignored
# minimum single exon size (bp)
min_single_exon => 1000,
# minimum percentage of single exon length that is coding
single_exon_cds => 66,
# Intron with most support determines the splice sites for an internal exon
# lower scoring introns with different splice sites are rejected
strict_internal_splice_sites => 1,
# In some species alternate splice sites for end exons seem to be common
strict_internal_end_exon_splice_sites => 1,
# biotypes to give gene models if left blank these models will not get written to the output database
# best score - model with most supporting intron features
# all other possible models
# max number of other models to make - blank = all
other_num => '10',
# max number of other models to process - blank = all
max_num => '1000',
# biotype to label bad models ( otherwise they are not written )
# do you want to trim UTR
trim_utr => 1,
# config for trimming UTR
max_3prime_exons => 2,
max_3prime_length => 5000,
max_5prime_exons => 3,
max_5prime_length => 1000,
# % of average intron score that a UTR intron must have
reject_intron_cutoff => 5,
},
-rc_name => '2GB_refine',
-wait_for => ['create_refine_output_db'],
-flow_into => {
1 => ['blast_rnaseq'],
-1 => ['refine_genes_5GB'],
-2 => ['refine_genes_5GB'],
},
},
{
-logic_name => 'refine_genes_5GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveRefineSolexaGenes',
-parameters => {
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
output_db => $self->o('refine_output_db'),
# write the intron features into the OUTPUT_DB along with the models
write_introns => 1,
# maximum number of times to loop when building all possible paths through the transcript
max_recursions => 100000,
# analysis logic_name for the dna_align_features to fetch from the INTRON_DB
# If left blank all features will be fetched
logicname => [],
# logic name of the gene models to fetch
model_ln => '',
# penalty for removing a retined intron
retained_intron_penalty => 2,
#Remove introns that overlap X introns
filter_on_overlap => 0,
# minimum size for an intron
min_intron_size => 30,
max_intron_size => 200000,
# biotype to give to single exon models if left blank single exons are ignored
# minimum single exon size (bp)
min_single_exon => 1000,
# minimum percentage of single exon length that is coding
single_exon_cds => 66,
# Intron with most support determines the splice sites for an internal exon
# lower scoring introns with different splice sites are rejected
strict_internal_splice_sites => 1,
# In some species alternate splice sites for end exons seem to be common
strict_internal_end_exon_splice_sites => 1,
# biotypes to give gene models if left blank these models will not get written to the output database
# best score - model with most supporting intron features
# all other possible models
# max number of other models to make - blank = all
other_num => '10',
# max number of other models to process - blank = all
max_num => '1000',
# biotype to label bad models ( otherwise they are not written )
# do you want to trim UTR
trim_utr => 1,
# config for trimming UTR
max_3prime_exons => 2,
max_3prime_length => 5000,
max_5prime_exons => 3,
max_5prime_length => 1000,
# % of average intron score that a UTR intron must have
reject_intron_cutoff => 5,
},
-rc_name => '5GB_refine',
-flow_into => {
1 => ['blast_rnaseq'],
-1 => ['refine_genes_15GB'],
-2 => ['refine_genes_15GB_base'],
},
},
{
-logic_name => 'refine_genes_15GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveRefineSolexaGenes',
-parameters => {
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
output_db => $self->o('refine_output_db'),
# write the intron features into the OUTPUT_DB along with the models
write_introns => 1,
# maximum number of times to loop when building all possible paths through the transcript
max_recursions => 100000,
# analysis logic_name for the dna_align_features to fetch from the INTRON_DB
# If left blank all features will be fetched
logicname => [],
# logic name of the gene models to fetch
model_ln => '',
# penalty for removing a retined intron
retained_intron_penalty => 2,
#Remove introns that overlap X introns
filter_on_overlap => 0,
# minimum size for an intron
min_intron_size => 30,
max_intron_size => 200000,
# biotype to give to single exon models if left blank single exons are ignored
# minimum single exon size (bp)
min_single_exon => 1000,
# minimum percentage of single exon length that is coding
single_exon_cds => 66,
# Intron with most support determines the splice sites for an internal exon
# lower scoring introns with different splice sites are rejected
strict_internal_splice_sites => 1,
# In some species alternate splice sites for end exons seem to be common
strict_internal_end_exon_splice_sites => 1,
# biotypes to give gene models if left blank these models will not get written to the output database
# best score - model with most supporting intron features
# all other possible models
# max number of other models to make - blank = all
other_num => '10',
# max number of other models to process - blank = all
max_num => '1000',
# biotype to label bad models ( otherwise they are not written )
# do you want to trim UTR
trim_utr => 1,
# config for trimming UTR
max_3prime_exons => 2,
max_3prime_length => 5000,
max_5prime_exons => 3,
max_5prime_length => 1000,
# % of average intron score that a UTR intron must have
reject_intron_cutoff => 5,
},
-rc_name => '15GB_refine',
-flow_into => {
1 => ['blast_rnaseq'],
-1 => ['refine_genes_30GB_base'],
-2 => ['refine_genes_30GB_base'],
},
},
{
-logic_name => 'refine_genes_15GB_base',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveRefineSolexaGenes',
-parameters => {
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
output_db => $self->o('refine_output_db'),
# write the intron features into the OUTPUT_DB along with the models
write_introns => 1,
# maximum number of times to loop when building all possible paths through the transcript
max_recursions => 100000,
# analysis logic_name for the dna_align_features to fetch from the INTRON_DB
# If left blank all features will be fetched
logicname => [],
# logic name of the gene models to fetch
model_ln => '',
# penalty for removing a retined intron
retained_intron_penalty => 2,
#Remove introns that overlap X introns
filter_on_overlap => 0,
# minimum size for an intron
min_intron_size => 30,
max_intron_size => 200000,
# biotype to give to single exon models if left blank single exons are ignored
# minimum single exon size (bp)
min_single_exon => 1000,
# minimum percentage of single exon length that is coding
single_exon_cds => 66,
# Intron with most support determines the splice sites for an internal exon
# lower scoring introns with different splice sites are rejected
strict_internal_splice_sites => 1,
# In some species alternate splice sites for end exons seem to be common
strict_internal_end_exon_splice_sites => 1,
# biotypes to give gene models if left blank these models will not get written to the output database
# best score - model with most supporting intron features
# all other possible models
# max number of other models to make - blank = all
other_num => '10',
# max number of other models to process - blank = all
max_num => '1000',
# biotype to label bad models ( otherwise they are not written )
# do you want to trim UTR
trim_utr => 1,
# config for trimming UTR
max_3prime_exons => 2,
max_3prime_length => 5000,
max_5prime_exons => 3,
max_5prime_length => 1000,
# % of average intron score that a UTR intron must have
reject_intron_cutoff => 5,
},
-rc_name => '15GB_refine_base',
-flow_into => {
1 => ['blast_rnaseq'],
-1 => ['refine_genes_30GB_base'],
-2 => ['refine_genes_30GB_base'],
},
},
{
-logic_name => 'refine_genes_30GB',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveRefineSolexaGenes',
-parameters => {
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
output_db => $self->o('refine_output_db'),
# write the intron features into the OUTPUT_DB along with the models
write_introns => 1,
# maximum number of times to loop when building all possible paths through the transcript
max_recursions => 100000,
# analysis logic_name for the dna_align_features to fetch from the INTRON_DB
# If left blank all features will be fetched
logicname => [],
# logic name of the gene models to fetch
model_ln => '',
# penalty for removing a retined intron
retained_intron_penalty => 2,
#Remove introns that overlap X introns
filter_on_overlap => 0,
# minimum size for an intron
min_intron_size => 30,
max_intron_size => 200000,
# biotype to give to single exon models if left blank single exons are ignored
# minimum single exon size (bp)
min_single_exon => 1000,
# minimum percentage of single exon length that is coding
single_exon_cds => 66,
# Intron with most support determines the splice sites for an internal exon
# lower scoring introns with different splice sites are rejected
strict_internal_splice_sites => 1,
# In some species alternate splice sites for end exons seem to be common
strict_internal_end_exon_splice_sites => 1,
# biotypes to give gene models if left blank these models will not get written to the output database
# best score - model with most supporting intron features
# all other possible models
# max number of other models to make - blank = all
other_num => '10',
# max number of other models to process - blank = all
max_num => '1000',
# biotype to label bad models ( otherwise they are not written )
# do you want to trim UTR
trim_utr => 1,
# config for trimming UTR
max_3prime_exons => 2,
max_3prime_length => 5000,
max_5prime_exons => 3,
max_5prime_length => 1000,
# % of average intron score that a UTR intron must have
reject_intron_cutoff => 5,
},
-rc_name => '30GB_refine',
-flow_into => {
1 => ['blast_rnaseq'],
-2 => ['refine_genes_30GB_base'],
},
},
{
-logic_name => 'refine_genes_30GB_base',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveRefineSolexaGenes',
-parameters => {
input_db => $self->o('rough_output_db'),
dna_db => $self->o('dna_db'),
output_db => $self->o('refine_output_db'),
# write the intron features into the OUTPUT_DB along with the models
write_introns => 1,
# maximum number of times to loop when building all possible paths through the transcript
max_recursions => 100000,
# analysis logic_name for the dna_align_features to fetch from the INTRON_DB
# If left blank all features will be fetched
logicname => [],
# logic name of the gene models to fetch
model_ln => '',
# penalty for removing a retined intron
retained_intron_penalty => 2,
#Remove introns that overlap X introns
filter_on_overlap => 0,
# minimum size for an intron
min_intron_size => 30,
max_intron_size => 200000,
# biotype to give to single exon models if left blank single exons are ignored
# minimum single exon size (bp)
min_single_exon => 1000,
# minimum percentage of single exon length that is coding
single_exon_cds => 66,
# Intron with most support determines the splice sites for an internal exon
# lower scoring introns with different splice sites are rejected
strict_internal_splice_sites => 1,
# In some species alternate splice sites for end exons seem to be common
strict_internal_end_exon_splice_sites => 1,
# biotypes to give gene models if left blank these models will not get written to the output database
# best score - model with most supporting intron features
# all other possible models
# max number of other models to make - blank = all
other_num => '10',
# max number of other models to process - blank = all
max_num => '1000',
# biotype to label bad models ( otherwise they are not written )
# do you want to trim UTR
trim_utr => 1,
# config for trimming UTR
max_3prime_exons => 2,
max_3prime_length => 5000,
max_5prime_exons => 3,
max_5prime_length => 1000,
# % of average intron score that a UTR intron must have
reject_intron_cutoff => 5,
},
-rc_name => '30GB_refine_base',
-flow_into => {
1 => ['blast_rnaseq'],
},
},
{
-logic_name => 'blast_rnaseq',
-module => 'Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBlastRNASeqPep',
-parameters => {
input_db => $self->o('refine_output_db'),
output_db => $self->o('blast_output_db'),
dna_db => $self->o('dna_db'),
# path to index to fetch the sequence of the blast hit to calculate % coverage
indicate_index => $self->o('uniprotindex'),
uniprot_index => [$self->o('uniprotdb')],
blast_program => $self->o('blastp'),
type => $self->o('blast_type'),
config_settings => $self->get_config_settings('HiveBlast','HiveBlastGenscanPep'),
},
-rc_name => '2GB_blast',
-wait_for => ['create_blast_output_db'],
},
);
foreach my $analyses (@analysis) {
$analyses->{-meadow_type} = 'LSF' unless (exists $analyses->{-meadow_type});
$analyses->{-max_retry_count} = 1 unless (exists $analyses->{-max_retry_count});
}
return \@analysis;
}
# Resource classes for the LSF meadow.
#
# Maps every -rc_name used by the analyses above to an LSF requirement
# built by lsf_resource_builder(queue, memory_in_MB, \@db_servers,
# undef, $slots): the queue name, the memory limit, the database servers
# the jobs will connect to (so the builder can add rusage tokens for
# them) and, for the *_multithread classes, the number of slots.
#
# Returns a hashref; entries from the base class are inherited first so
# local definitions override them.
#
# FIX: the original hash declared the '5GB' key twice with identical
# values (the later entry silently overwrote the first); the duplicate
# has been removed.
sub resource_classes {
    my $self = shift;
    return {
        %{ $self->SUPER::resource_classes() }, # inherit other stuff from the base class
        '1GB' => { LSF => $self->lsf_resource_builder('normal', 1000, [$self->default_options->{'pipe_db_server'}])},
        '1GB_rough' => { LSF => $self->lsf_resource_builder('normal', 1000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}])},
        '2GB_rough' => { LSF => $self->lsf_resource_builder('normal', 2000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}])},
        '5GB_rough' => { LSF => $self->lsf_resource_builder('long', 5000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}])},
        '15GB_rough' => { LSF => $self->lsf_resource_builder('long', 15000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}])},
        '2GB_blast' => { LSF => $self->lsf_resource_builder('normal', 2000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'refine_output_db_server'}, $self->default_options->{'blast_output_db_server'}])},
        '2GB' => { LSF => $self->lsf_resource_builder('normal', 2000, [$self->default_options->{'pipe_db_server'}])},
        '4GB' => { LSF => $self->lsf_resource_builder('normal', 4000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'dna_db_server'}])},
        '5GB' => { LSF => $self->lsf_resource_builder('normal', 5000, [$self->default_options->{'pipe_db_server'}])},
        '2GB_introns' => { LSF => $self->lsf_resource_builder('normal', 2000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}])},
        '2GB_refine' => { LSF => $self->lsf_resource_builder('normal', 2000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}, $self->default_options->{'refine_output_db_server'}])},
        '5GB_introns' => { LSF => $self->lsf_resource_builder('long', 5000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}])},
        '10GB_introns' => { LSF => $self->lsf_resource_builder('long', 10000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}])},
        '3GB_multithread' => { LSF => $self->lsf_resource_builder('long', 3000, [$self->default_options->{'pipe_db_server'}], undef, 3)},
        '5GB_multithread' => { LSF => $self->lsf_resource_builder('normal', 5000, [$self->default_options->{'pipe_db_server'}], undef, ($self->default_options->{'use_threads'}+1))},
        '10GB_multithread' => { LSF => $self->lsf_resource_builder('long', 10000, [$self->default_options->{'pipe_db_server'}], undef, ($self->default_options->{'use_threads'}+1))},
        '20GB_multithread' => { LSF => $self->lsf_resource_builder('long', 20000, [$self->default_options->{'pipe_db_server'}], undef, ($self->default_options->{'use_threads'}+1))},
        '10GB' => { LSF => $self->lsf_resource_builder('long', 10000, [$self->default_options->{'pipe_db_server'}])},
        '5GB_refine' => { LSF => $self->lsf_resource_builder('long', 5000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}, $self->default_options->{'refine_output_db_server'}])},
        '15GB_refine' => { LSF => $self->lsf_resource_builder('long', 15000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}, $self->default_options->{'refine_output_db_server'}])},
        '15GB_refine_base' => { LSF => $self->lsf_resource_builder('basement', 15000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}, $self->default_options->{'refine_output_db_server'}])},
        '30GB_refine' => { LSF => $self->lsf_resource_builder('long', 30000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}, $self->default_options->{'refine_output_db_server'}])},
        '30GB_refine_base' => { LSF => $self->lsf_resource_builder('basement', 30000, [$self->default_options->{'pipe_db_server'}, $self->default_options->{'rough_output_db_server'}, $self->default_options->{'dna_db_server'}, $self->default_options->{'refine_output_db_server'}])},
    };
}
1;
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/Config/HiveRNASeq_conf.pm | Perl | apache-2.0 | 55,943 |
package App::Wubot::Reactor::HashLookup;
use Moose;
our $VERSION = '0.3.4'; # VERSION
use App::Wubot::Logger;
# React to a message: translate one field's value into another field's
# value via the configured lookup table.
#
# $config must supply 'source_field' (field whose value is the lookup
# key), 'target_field' (field to fill in) and 'lookup' (hashref mapping
# key => replacement).  A false/missing source value short-circuits; a
# key absent from the table leaves the message untouched.  The (possibly
# modified) message hashref is always returned.
sub react {
    my ( $self, $message, $config ) = @_;

    # Value of the source field is the lookup key; nothing to do when it
    # is missing or false.
    my $lookup_key = $message->{ $config->{source_field} }
        or return $message;

    # 'exists' (not 'defined') so an explicit undef mapping still wins.
    $message->{ $config->{target_field} } = $config->{lookup}->{$lookup_key}
        if exists $config->{lookup}->{$lookup_key};

    return $message;
}
__PACKAGE__->meta->make_immutable;
1;
__END__
=head1 NAME
App::Wubot::Reactor::HashLookup - map the value of one field to a value for another using a lookup table
=head1 VERSION
version 0.3.4
=head1 SYNOPSIS
- name: look up nicknames for friends
plugin: HashLookup
config:
source_field: username
target_field: nickname
lookup:
lebowski: dude
someguy: nickname
john.smith: john
=head1 DESCRIPTION
Look up the value for a target field in a configured hash using the
value of another field as the key.
=head1 SUBROUTINES/METHODS
=over 8
=item react( $message, $config )
The standard reactor plugin react() method.
=back
| gitpan/wubot | lib/App/Wubot/Reactor/HashLookup.pm | Perl | bsd-3-clause | 1,133 |
package DBIx::Class::ResultSet::BitField;
our $VERSION = '0.13';
use strict;
use warnings;
use base 'DBIx::Class::ResultSet';
use Carp;
# Build a resultset condition from symbolic bitfield item names.
#
# $search is either a hashref ({ item => bool, ... }, conditions joined
# with -and) or an arrayref ([ item => bool, ... ], joined with -or);
# anything else croaks.  Each key names a bitfield item, optionally
# prefixed with a relation alias (e.g. 'me.active'); a true value selects
# rows with that bit set, a false value rows with it clear.
#
# Returns a new resultset restricted with literal "<column> & <bit>" SQL.
sub search_bitfield {
    my ($self, $search) = @_;
    my $source = $self->result_source;
    # Throw-away unsaved row, used only to interrogate bitfield metadata.
    my $row = $self->new_result({});
    my %search = ref $search eq 'ARRAY' ? @{$search} : ref $search eq 'HASH' ? %{$search} : croak 'search_bitfield takes an arrayref or a hashref';
    # Arrayref input ORs the individual bit tests, hashref input ANDs them.
    my $type = ref $search eq 'ARRAY' ? '-or' : '-and';
    my $query = [];
    while(my($column, $value) = each %search) {
        # Strip an optional "alias." prefix, remembering it for the SQL.
        $column =~ s/^(.*?\.)?(.*)$/$2/;
        my $prefix = $1 || q{};
        # Ignore keys that are not bitfield items; otherwise get the name
        # of the storage column holding this item's bit.
        next unless(my $bitfield = $row->__is_bitfield_item($column));
        my $info = $source->column_info($bitfield);
        my $i = 0;
        # The item's bit value is 2**position within the declared order.
        foreach my $field (@{$info->{bitfield} || []}) {
            my $bit = 2**$i++;
            next unless($field eq $column);
            # Literal-SQL scalar refs testing the bit set/clear.
            if($value) {
                push(@{$query}, \qq($prefix$bitfield & $bit > 0));
            } else {
                push(@{$query}, \qq($prefix$bitfield & $bit = 0));
            }
        }
    }
    return $self->search({ $type => $query });
}
# Resultset update() that accepts symbolic bitfield values.
#
# For every column in $data whose column_info marks it as a bitfield, a
# non-integer value — a single item name or an arrayref of item names —
# is converted into the corresponding integer bitmask before delegating
# to the parent class update().  Unknown item names croak.  Integer
# values pass through untouched.
sub update {
    my ($self, $data, @rest) = @_;
    my $source = $self->result_source;
    # Scratch row used to resolve item names via its generated accessors.
    my $row = $self->new_result({});
    while(my ($k, $value) = each %{$data || {}}) {
        my $info = $source->column_info($k);
        # Only translate when the value is NOT already a plain integer.
        if($row->__is_bitfield($info) && ($value !~ /^\d+$/ || int($value) ne $value)) {
            if(ref $value eq 'ARRAY') {
                # Set every named bit on the scratch row.
                foreach my $bit (@{$value || []}) {
                    $row->can($bit) ? $row->$bit(1) : croak qq(bitfield item '$bit' does not exist);
                }
            } else {
                $row->can($value) ? $row->$value(1) : croak qq(bitfield item '$value' does not exist);
            }
            # Read back the accumulated integer bitmask via the accessor.
            my $accessor = $info->{accessor};
            $data->{$k} = $row->$accessor;
            # Fresh scratch row so bits don't leak into the next column.
            $row = $self->new_result({});
        }
    }
    return $self->next::method($data, @rest);
}
1;
__END__
=pod
=head1 NAME
DBIx::Class::ResultSet::BitField
=head1 VERSION
version 0.13
=head1 AUTHOR
Moritz Onken <onken@netcubed.de>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2009 by Moritz Onken.
This is free software, licensed under:
The (three-clause) BSD License
=cut
| gitpan/DBIx-Class-BitField | lib/DBIx/Class/ResultSet/BitField.pm | Perl | bsd-3-clause | 2,173 |
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::jvm::mode::memorydetailed;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Normalize the JVM's MemoryPool MBean names — which differ between
# garbage collectors (serial, CMS/ParNew 'Par ...', Parallel Scavenge
# 'PS ...') — onto the five generic pool categories used for threshold
# option names and perfdata labels.
my %mapping_memory = (
    'Eden Space' => 'eden',
    'Par Eden Space' => 'eden',
    'PS Eden Space' => 'eden',
    'Survivor Space' => 'survivor',
    'Par Survivor Space' => 'survivor',
    'PS Survivor Space' => 'survivor',
    'CMS Perm Gen' => 'permanent',
    'PS Perm Gen' => 'permanent',
    'Perm Gen' => 'permanent',
    'Code Cache' => 'code',
    'CMS Old Gen' => 'tenured',
    'PS Old Gen' => 'tenured',
    'Tenured Gen' => 'tenured',
);
# Constructor.
#
# Registers this mode's command-line options: one warning and one
# critical threshold (percent of the pool's max size) for each of the
# five memory pool categories (eden, survivor, tenured, permanent, code).
# Option values land in $self->{option_results} under the underscored
# names given in 'name'.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Mode version reported by the plugin framework.
    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                "warning-eden:s"        => { name => 'warning_eden' },
                                "critical-eden:s"       => { name => 'critical_eden' },
                                "warning-survivor:s"    => { name => 'warning_survivor' },
                                "critical-survivor:s"   => { name => 'critical_survivor' },
                                "warning-tenured:s"     => { name => 'warning_tenured' },
                                "critical-tenured:s"    => { name => 'critical_tenured' },
                                "warning-permanent:s"   => { name => 'warning_permanent' },
                                "critical-permanent:s"  => { name => 'critical_permanent' },
                                "warning-code:s"        => { name => 'warning_code' },
                                "critical-code:s"       => { name => 'critical_code' }
                                });
    return $self;
}
# Validate options after parsing.
#
# Runs the base-class initialisation, then registers every threshold
# with the perfdata helper (under its underscored label); an unparsable
# threshold string exits immediately with a usage error naming the
# offending command-line option.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    foreach my $label ('warning_eden', 'critical_eden', 'warning_survivor', 'critical_survivor', 'warning_tenured', 'critical_tenured', 'warning_permanent', 'critical_permanent', 'warning_code', 'critical_code') {
        if (($self->{perfdata}->threshold_validate(label => $label, value => $self->{option_results}->{$label})) == 0) {
            my ($label_opt) = $label;
            # Internal labels use '_' where the command line uses '-'.
            $label_opt =~ tr/_/-/;
            $self->{output}->add_option_msg(short_msg => "Wrong " . $label_opt . " threshold '" . $self->{option_results}->{$label} . "'.");
            $self->{output}->option_exit();
        }
    }
}
# Run the check.
#
# Fetches the Usage attribute of every java.lang:type=MemoryPool MBean,
# emits one perfdata series per pool (bytes used, thresholds converted
# from percent to bytes) and raises warning/critical whenever used/max
# crosses the configured percentage thresholds.
#
# Fixes over the previous revision:
#   - perfdata_add was passed the critical threshold under a DUPLICATE
#     'warning' key, so the warning value was clobbered and no critical
#     perfdata was ever emitted;
#   - get_perfdata_for_output now uses the same underscored labels the
#     thresholds were registered under in check_options ('warning-eden'
#     could never match 'warning_eden');
#   - the percent->bytes conversion uses the pool max as the total, not
#     the currently used bytes.
sub run {
    my ($self, %options) = @_;
    $self->{connector} = $options{custom};

    # Single JMX request covering all memory pools.
    $self->{request} = [
         { mbean => "java.lang:type=MemoryPool,name=*", attributes => [ { name => 'Usage' } ] }
    ];
    my $result = $self->{connector}->get_attributes(request => $self->{request}, nothing_quit => 1);

    $self->{output}->output_add(severity => 'OK',
                                short_msg => 'All memories within bounds');

    foreach my $key (keys %$result) {
        # Extract the pool name from the MBean object name.
        $key =~ /name=(.*?),type/;
        my $memtype = $1;
        my $usage = $result->{"java.lang:name=" . $memtype . ",type=MemoryPool"}->{Usage};
        my $prct = $usage->{used} / $usage->{max} * 100;

        $self->{output}->perfdata_add(label => $mapping_memory{$memtype}, unit => 'B',
                                      value => $usage->{used},
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning_' . $mapping_memory{$memtype}, total => $usage->{max}, cast_int => 1),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical_' . $mapping_memory{$memtype}, total => $usage->{max}, cast_int => 1),
                                      min => 0, max => $usage->{max});

        # Compare the percentage against the registered thresholds.
        my $exit = $self->{perfdata}->threshold_check(value => $prct,
                       threshold => [ { label => 'critical_' . $mapping_memory{$memtype}, exit_litteral => 'critical' },
                                      { label => 'warning_' . $mapping_memory{$memtype}, exit_litteral => 'warning' } ]);

        $self->{output}->output_add(long_msg => sprintf("%s usage %.2f%%", $memtype, $prct));
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("%s usage %.2f%% ", $memtype, $prct));
        }
    }

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check JVM Memory Pools :
Eden Space (heap) (-eden) : The pool from which memory is initially allocated for most objects.
Survivor Space (heap) (-survivor) : The pool containing objects that have survived the garbage collection of the Eden space.
Tenured Generation (heap) (-tenured) : The pool containing objects that have existed for some time in the survivor space.
Permanent Generation (non-heap) (-permanent) : The pool containing all the reflective data of the virtual machine itself, such as class and method objects.
Code Cache (non-heap) (-code) : The HotSpot Java VM also includes a code cache, containing memory that is used for compilation and storage of native code.
Example:
perl centreon_plugins.pl --plugin=apps::tomcat::jmx::plugin --custommode=jolokia --url=http://10.30.2.22:00/jolokia-war --mode=memory-detailed --warning-eden 60 --critical-eden 75 --warning-survivor 65 --critical-survivor 75
=over 8
=item B<--warning-eden>
Threshold warning of Heap 'Eden Space' memory usage
=item B<--critical-eden>
Threshold critical of Heap 'Eden Space' memory usage
=item B<--warning-tenured>
Threshold warning of Heap 'Tenured Generation' memory usage
=item B<--critical-tenured>
Threshold critical of Heap 'Tenured Generation' memory usage
=item B<--warning-survivor>
Threshold warning of Heap 'Survivor Space' memory usage
=item B<--critical-survivor>
Threshold critical of Heap 'Survivor Space' memory usage
=item B<--warning-permanent>
Threshold warning of NonHeap 'Permanent Generation' memory usage
=item B<--critical-permanent>
Threshold critical of NonHeap 'Permanent Generation' memory usage
=item B<--warning-code>
Threshold warning of NonHeap 'Code Cache' memory usage
=item B<--critical-code>
Threshold critical of NonHeap 'Code Cache' memory usage
=back
=cut
| bcournaud/centreon-plugins | centreon/common/jvm/mode/memorydetailed.pm | Perl | apache-2.0 | 7,561 |
#!/usr/bin/perl
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
use strict;
use CGI::Request;
use CGI::Carp qw(fatalsToBrowser);
use Time::HiRes qw(gettimeofday tv_interval);
use POSIX qw(strftime);
use DBI;
# list of test pages, JS to insert, httpbase, filebase, etc.
use PageData;
use vars qw(%params $req $cgi $dbh $pagedata
$gStartNow $gStartNowStr
$gResponseNow $gLogging);
# ---- main request dispatch -------------------------------------------
# Checkpoint the arrival time, parse the CGI request, then route it:
# no 'delay' -> show the options form; no 'id' -> start a new run;
# index past the end -> redirect to the report; otherwise serve the
# next content page and record the previous page's timing.
$gStartNow = [gettimeofday]; # checkpoint the time
$gStartNowStr = strftime "%Y%m%d%H%M%S", localtime;
$gLogging = 1;
# FIX: was "new CGI::Request" (indirect object syntax, which parses
# ambiguously); use an explicit class method call instead.
$req = CGI::Request->new; # get the HTTP/CGI request
$cgi = $req->cgi;
$pagedata = PageData->new;
setDefaultParams();
#XXXdebugcrap
#warn $params{index}, " ", $params{maxidx};
if (!defined($req->param('delay'))) {
    # give the user a form to pick options (but note that going
    # to "loader.pl?delay=1000" immediately starts the test run
    outputForm();
}
elsif (!$req->param('id')) {
    initialize(); # do redirect to start the cycle
}
elsif ($params{index} > $params{maxidx}) {
    redirectToReport(); # the test is over; spit out a summary
    markTestAsComplete(); # close the meta table entry
}
elsif (!isRequestStale()) {
    outputPage(); # otherwise, keep dishing out pages
    updateDataBase(); # client has the response; now write out stats to db
}
# cleanup
$req = undef;
$dbh->disconnect() if $dbh; # not strictly required (ignored in some cases anyways)
#logMessage(sprintf("Page load server responded in %3d msec, total time %3d msec, pid: %d",
# 1000*tv_interval($gStartNow, $gResponseNow), 1000*tv_interval($gStartNow), $$))
# if $gResponseNow; # log only when a test page has been dished out
exit 0;
#######################################################################
# Write a timestamped message to the error log, using an Apache-style
# "[Day Mon DD HH:MM:SS YYYY]" prefix. Suppressed entirely while the
# file-global $gLogging flag is false.
sub logMessage {
    return unless $gLogging;
    my $stamp = strftime("[%a %b %d %H:%M:%S %Y] ", localtime);
    print STDERR $stamp, @_, "\n";
}
# Decide whether this request's server timestamp (s_ts) is too old to
# belong to a live test run (e.g. a bookmarked or history URL).
# Returns undef while the request is still fresh; otherwise emits a
# complete "too old" HTML error page and returns 1.
sub isRequestStale {
    my $limit = 30*60; # 30 minutes, although if we never stalled on mac I'd make it 3 minutes
    my $ts = decodeHiResTime($params{s_ts});
    my $delta = tv_interval($ts, $gStartNow);
    return undef if $delta < $limit;
    # otherwise, punt this request
    print "Content-type: text/html\n\n";
    print <<"ENDOFHTML";
<html><head><title>Page Loading Times Test</title></head><body>
<p><b>The timestamp on the request is too old to continue:<br>
s_ts=$params{s_ts} was $delta seconds ago. Limit is $limit seconds.</b></p>
</body></html>
ENDOFHTML
    return 1; # it's stale
}
# First request of a run (delay given, but no id yet): create the run's
# meta table row and per-run data table, then redirect the client to
# echo.pl with all test parameters on the query string so the echo page
# can bounce back here and start the load cycle.
sub initialize {
    updateMetaTable();
    createDataSetTable();
    # start the test by bouncing off of an echo page
    my $script = $cgi->var("SCRIPT_NAME");
    my $server = $cgi->var("SERVER_NAME");
    my $proto = $ENV{SERVER_PORT} == 443 ? 'https://' : 'http://';
    my $me = $proto . $server . $script;
    # capture the directory portion of the script path in $1
    $script =~ /^(.*\/).*$/;
    my $loc = "Location: ". $proto . $server . $1 . "echo.pl?";
    for (qw(id index maxcyc delay replace nocache timeout)) {
        $loc .= "$_=$params{$_}\&";
    }
    # tell echo.pl where to send the client back to (this script)
    $loc .= "url=" . $me;
    print $loc, "\n\n";
}
# Test run finished: redirect the client to report.pl for a summary of
# the dataset identified by $params{id}.
sub redirectToReport {
    # n.b., can also add '&sort=1' to get a time sorted list
    my $proto = $ENV{SERVER_PORT} == 443 ? 'https://' : 'http://';
    my $loc = "Location: " . $proto . $cgi->var("SERVER_NAME");
    # capture the directory portion of the script path in $1
    $cgi->var("SCRIPT_NAME") =~ /^(.*\/).*$/;
    $loc .= $1 . "report.pl?id=" . $params{id};
    # To use for a tinderbox, comment out the line above and uncomment this:
    # $loc .= $1 . "dump.pl?id=" . $params{id} . "&purge=1";
    print $loc, "\n\n";
}
# Build a (mostly) unique run id: the epoch time rendered as eight hex
# digits, followed by a random two-digit hex suffix to disambiguate
# requests arriving in the same second.
sub generateTestId {
    my $salt = int(rand() * 256);
    return sprintf '%8X%02X', time(), $salt;
}
# Populate the global %params hash from CGI parameters, falling back to
# defaults, then derive the run-progress values from the request index:
# maxidx (total page loads in the run), curidx (index into the page
# list), curcyc (zero-based visit number).
sub setDefaultParams {
    $params{id} = $req->param('id') || generateTestId(); # "unique" id for this run
    $params{index} = $req->param('index') || 0; # request index for the test
    $params{maxcyc} = defined($req->param('maxcyc')) ?
        $req->param('maxcyc') : 3; # max visits (zero-based count)
    $params{delay} = $req->param('delay') || 1000; # setTimeout on the next request (msec)
    $params{replace} = $req->param('replace') || 0; # use Location.replace (1) or Location.href (0)
    $params{nocache} = $req->param('nocache') || 0; # serve content via uncacheable path
    $params{c_part} = $req->param('c_part') || 0; # client time elapsed; page head to onload (msec)
    $params{c_intvl} = $req->param('c_intvl') || 0; # client time elapsed; onload to onload event (msec)
    $params{c_ts} = $req->param('c_ts') || 0; # client timestamp (.getTime()) (msec)
    $params{content} = $req->param('content') || "UNKNOWN"; # name of content page for this data
    $params{s_ts} = $req->param('s_ts') || undef; # server timestamp; no default
    $params{timeout} = $req->param('timeout') || 30000; # msec; timer will cancel stalled page loading
    $params{maxidx} = ($params{maxcyc}+1) * $pagedata->length; # total pages loads to be done
    $params{curidx} = $params{index} % $pagedata->length; # current request index into page list
    $params{curcyc} = int(($params{index}-1) / $pagedata->length); # current "cycle" (visit)
}
# Serve the current content page: read the page file, replace PageData's
# magic marker with the timing/driver <script> block plus a <base href>,
# then print it together with the cookies and Content-type header.
sub outputPage {
    my $relpath = $pagedata->url($params{curidx});
    my $file = $pagedata->filebase . $relpath;
    # NOTE(review): bareword filehandle / 2-arg open; the handle is never
    # explicitly closed (released at script exit).
    open (HTML, "<$file") ||
        die "Can't open file: $file, $!";
    # Instrumentation block injected in place of the magic string:
    # client start time, encoded server timestamp, content name, the
    # shared client-side driver JS, and a safety timer that cancels a
    # stalled load after $params{timeout} msec.
    my $hook = "<script xmlns='http://www.w3.org/1999/xhtml'>\n";
    $hook .= "var g_moztest_Start = (new Date()).getTime();\n";
    $hook .= "var g_moztest_ServerTime='" . encodeHiResTime($gStartNow) . "';\n";
    $hook .= "var g_moztest_Content='" . $pagedata->name($params{curidx}) . "';\n";
    $hook .= $pagedata->clientJS; # ... and the main body
    $hook .= "var g_moztest_safetyTimer = ";
    $hook .= "window.setTimeout(moztest_safetyValve, " . $params{timeout} . ");";
    $hook .= "</script>\n";
    my $basepath = $pagedata->httpbase;
    # match the request's protocol for the <base> URL
    $basepath =~ s/^http:/https:/i
        if $ENV{SERVER_PORT} == 443;
    #warn "basepath: $basepath";
    # route through the uncacheable alias when nocache was requested
    $basepath =~ s#^(.*?)(/base/)$#$1/nocache$2# if ($params{nocache});
    $hook .= "<base href='". $basepath . $relpath .
        "' xmlns='http://www.w3.org/1999/xhtml' />";
    my $magic = $pagedata->magicString;
    my $content = "";
    while (<HTML>) {
        s/$magic/$hook/;
        $content .= $_;
    }
    my $contentTypeHeader;
    my $mimetype = $pagedata->mimetype($params{curidx});
    my $charset = $pagedata->charset($params{curidx});
    if ($charset) {
        $contentTypeHeader = qq{Content-type: $mimetype; charset="$charset"\n\n};
    } else {
        $contentTypeHeader = qq{Content-type: $mimetype\n\n};
    }
    #warn $contentTypeHeader; #XXXjrgm testing...
    # N.B., these two cookie headers are obsolete, since I pass server info in
    # JS now, to work around a bug in winEmbed with document.cookie. But
    # since I _was_ sending two cookies as part of the test, I have to keep
    # sending two cookies (at least for now, and it's not a bad thing to test)
    #XXX other headers to test/use?
    $gResponseNow = [gettimeofday]; # for logging
    { # turn on output autoflush, locally in this block
        print "Set-Cookie: moztest_SomeRandomCookie1=somerandomstring\n";
        print "Set-Cookie: moztest_SomeRandomCookie2=somerandomstring\n";
        print $contentTypeHeader;
        local $| = 1;
        print $content;
    }
    return;
}
# Serialize a Time::HiRes [seconds, microseconds] pair as "sec-usec" so
# it can round-trip through a URL parameter (see decodeHiResTime).
# Returns nothing when handed anything that is not a reference.
sub encodeHiResTime {
    my ($tv) = @_;
    return unless ref($tv);
    return join '-', $tv->[0], $tv->[1];
}
# Inverse of encodeHiResTime: turn a "sec-usec" string back into a
# two-element [seconds, microseconds] array reference.
sub decodeHiResTime {
    my ($encoded) = @_;
    my @pair = split /-/, $encoded;
    return \@pair;
}
# Compute the elapsed time, in integer milliseconds, between an encoded
# "sec-usec" timestamp (see encodeHiResTime) and the hi-res time pair
# $r_time, minus the client-side scheduling delay $params{delay}.
# Returns the string "NaN" when no timestamp was supplied.
sub elapsedMilliSeconds {
    my ($r_time, $timestr) = @_;
    return "NaN" unless $timestr;
    my $delta = tv_interval( [ split('-', $timestr) ], $r_time );
    # FIX: this was "my $delta = int(...)", re-declaring $delta and
    # triggering a '"my" variable masks earlier declaration' warning;
    # the computed value was the same, so reuse the existing variable.
    $delta = int(($delta*1000) - $params{delay}); # adjust for delay (in msec)
    return $delta;
}
# Persist results for the request just served: refresh the run's meta
# table row and, except for the very first request (c_part == -1, which
# carries no client timing yet), append a row to the per-run data table.
sub updateDataBase {
    connectToDataBase(); # (may already be cached)
    updateMetaTable();
    updateDataSetTable() unless $params{c_part} == -1; # the initial request
}
# Connect to the flat-file (DBD::CSV) database under ./db, caching the
# handle in the file-global $dbh so repeated calls are cheap.
sub connectToDataBase {
    # don't reconnect if already connected. (Other drivers provide this
    # for free I think, but not this one).
    if (!ref($dbh)) {
        # FIX: the attribute hash was previously passed as the username
        # argument; DBI->connect() expects ($dsn, $user, $pass, \%attr),
        # so RaiseError/AutoCommit were being silently ignored.
        $dbh = DBI->connect("DBI:CSV:f_dir=./db", undef, undef,
                            {RaiseError => 1, AutoCommit => 1})
            || die "Cannot connect: " . $DBI::errstr;
    }
}
#
# Holds the individual page load data for this id.
#
# (Of course, this should really be a single table for all datasets, but
# that was becoming punitively slow with DBD::CSV. I could have moved to
# a "real" database, but I didn't want to make that a requirement for
# installing this on another server and using this test (e.g., install a
# few modules and you can run this; no sql installation/maintenance required).
# At some point though, I may switch to some sql db, but hopefully still allow
# this to be used with a simple flat file db. (Hmm, maybe I should try a *dbm
# as a compromise (disk based but indexed)).
#
# Create the per-run table ("t<id>") that holds one row per page load
# for this test id. No-op when the backing CSV file already exists.
sub createDataSetTable {
    my $table = "t" . $params{id};
    return if -f "db/$table"; # don't create it if it exists
    logMessage("createDataSetTable:\tdb/$table");
    connectToDataBase(); # cached
    my ($sth, $sql);
    $sql = qq{
CREATE TABLE $table
(DATETIME CHAR(14),
ID CHAR(10),
INDEX INTEGER,
CUR_IDX INTEGER,
CUR_CYC INTEGER,
C_PART INTEGER,
S_INTVL INTEGER,
C_INTVL INTEGER,
CONTENT CHAR(128)
)
};
    $sth = $dbh->prepare($sql);
    $sth->execute();
    $sth->finish();
    return 1;
}
#
# holds the information about all test runs
#
# Create the shared meta table that records one row per test run
# (identity, progress, configuration, client info). No-op when the
# backing CSV file already exists.
sub createMetaTable {
    my $table = shift;
    return if -f "db/$table"; # don't create it if it exists
    logMessage("createMetaTable:\tdb/$table");
    my ($sth, $sql);
    $sql = qq{
CREATE TABLE $table
(DATETIME CHAR(14),
LASTPING CHAR(14),
ID CHAR(8),
INDEX INTEGER,
CUR_IDX INTEGER,
CUR_CYC INTEGER,
CUR_CONTENT CHAR(128),
STATE INTEGER,
BLESSED INTEGER,
MAXCYC INTEGER,
MAXIDX INTEGER,
REPLACE INTEGER,
NOCACHE INTEGER,
DELAY INTEGER,
REMOTE_USER CHAR(16),
HTTP_USER_AGENT CHAR(128),
REMOTE_ADDR CHAR(15),
USER_EMAIL CHAR(32),
USER_COMMENT CHAR(256)
)
};
    $sth = $dbh->prepare($sql);
    $sth->execute();
    $sth->finish();
    warn 'created meta table';
    return 1;
}
# Insert or refresh this run's row in tMetaTable: on the first request
# for an id the row is created via initMetaTableRecord(); on subsequent
# requests the LASTPING/progress columns are updated in place.
sub updateMetaTable {
    connectToDataBase(); # if not already connected
    my $table = "tMetaTable";
    createMetaTable($table); # just returns if already created
    my ($sth, $sql);
    $sql = qq{
SELECT INDEX, MAXCYC, MAXIDX, REPLACE, NOCACHE,
DELAY, REMOTE_USER, HTTP_USER_AGENT, REMOTE_ADDR
FROM $table
WHERE ID = '$params{id}'
};
    $sth = $dbh->prepare($sql);
    $sth->execute();
    my @dataset = ();
    # collect matching rows as hashes (column order matches the SELECT)
    while (my @data = $sth->fetchrow_array()) {
        push @dataset, {index => shift @data,
            maxcyc => shift @data,
            maxidx => shift @data,
            replace => shift @data,
            nocache => shift @data,
            delay => shift @data,
            remote_user => shift @data,
            http_user_agent => shift @data,
            remote_addr => shift @data
        };
    }
    $sth->finish();
    warn "More than one ID: $params{id} ??" if scalar(@dataset) > 1;
    if (scalar(@dataset) == 0) {
        # this is a new dataset and id
        initMetaTableRecord($table);
        return;
    }
    #XXX need to check that values are sane, and not update if they don't
    # match certain params. This should not happen in a normal test run.
    # However, if a test url was bookmarked or in history, I might get bogus
    # data collected after the fact. But I have a stale date set on the URL,
    # so that is good enough for now.
    # my $ref = shift @dataset; # check some $ref->{foo}
    $sql = qq{
UPDATE $table
SET LASTPING = ?,
INDEX = ?,
CUR_IDX = ?,
CUR_CYC = ?,
CUR_CONTENT = ?,
STATE = ?
WHERE ID = '$params{id}'
};
    $sth = $dbh->prepare($sql);
    $sth->execute($gStartNowStr,
        $params{index}-1, # (index-1) is complete; (index) in progress
        ($params{curidx}-1) % $pagedata->length,
        $params{curcyc},
        $params{content},
        'OPEN'
    );
    $sth->finish();
}
# Flip this run's meta table row to STATE = COMPLETE once the report
# redirect has been issued (end of the test cycle).
sub markTestAsComplete {
    connectToDataBase(); # if not already connected
    my $table = "tMetaTable";
    createMetaTable($table); # just returns if already created
    my ($sth, $sql);
    #XXX should probably check if this ID exists first
    $sql = qq{
UPDATE $table
SET STATE = "COMPLETE"
WHERE ID = '$params{id}'
};
    $sth = $dbh->prepare($sql);
    $sth->execute();
    $sth->finish();
}
# Insert the initial meta table row for a brand-new run id, recording
# the configuration parameters and the client's identity (as reported
# by the CGI environment).
sub initMetaTableRecord {
    # we know this record doesn't exist, so put in the initial values
    my $table = shift;
    my ($sth, $sql);
    $sql = qq{
INSERT INTO $table
(DATETIME,
LASTPING,
ID,
INDEX,
CUR_IDX,
CUR_CYC,
CUR_CONTENT,
STATE,
BLESSED,
MAXCYC,
MAXIDX,
REPLACE,
NOCACHE,
DELAY,
REMOTE_USER,
HTTP_USER_AGENT,
REMOTE_ADDR,
USER_EMAIL,
USER_COMMENT
)
VALUES (?,?,?,?,
?,?,?,?,
?,?,?,?,
?,?,?,?,
?,?,?)
};
    $sth = $dbh->prepare($sql);
    # bind values in the same order as the column list above
    $sth->execute($gStartNowStr,
        $gStartNowStr,
        $params{id},
        $params{index}-1,
        ($params{curidx}-1) % $pagedata->length,
        $params{curcyc},
        $params{content},
        "INIT",
        0,
        $params{maxcyc},
        $params{maxidx},
        $params{replace},
        $params{nocache},
        $params{delay},
        $cgi->var("REMOTE_USER"),
        $cgi->var("HTTP_USER_AGENT"),
        $cgi->var("REMOTE_ADDR"),
        "",
        ""
    );
    $sth->finish();
}
# Append one row of timing results to this run's data table ("t<id>").
# Expects connectToDataBase() to have been called (see updateDataBase).
sub updateDataSetTable {
    # FIX: dropped the dead "my $table = shift;" — no caller ever passes
    # an argument, and the variable was immediately masked by the
    # re-declaration below (a '"my" variable masks earlier declaration'
    # warning).
    my $table = "t" . $params{id};
    my ($sth, $sql);
    $sql = qq{
INSERT INTO $table
(DATETIME,
ID,
INDEX,
CUR_IDX,
CUR_CYC,
C_PART,
S_INTVL,
C_INTVL,
CONTENT
)
VALUES (?,?,?,?,
?,?,?,?,?)
};
    # server-side interval for this page, adjusted for the client delay
    my $s_intvl = elapsedMilliSeconds( $gStartNow, $params{s_ts} );
    $sth = $dbh->prepare($sql);
    $sth->execute($gStartNowStr,
        $params{id},
        $params{index}-1,
        ($params{curidx}-1) % $pagedata->length,
        $params{curcyc},
        $params{c_part},
        $s_intvl,
        $params{c_intvl},
        $req->param('content'),
    );
    $sth->finish();
}
# No 'delay' parameter present: emit the interactive HTML form that lets
# the user configure and launch a test run, a link to the SSL/non-SSL
# counterpart of this script, and links to browse the raw content pages.
sub outputForm {
    # program name (basename of $0), used as the form's action target
    my @prog = split('/', $0); my $prog = $prog[$#prog];
    print "Content-type: text/html\n\n";
    # amber background as a visual cue when running over SSL
    my $bgcolor = $ENV{SERVER_PORT} == 443 ? '#eebb66' : '#ffffff';
    print <<"ENDOFHTML";
<html>
<head>
<title>Page Loading Times Test</title>
</head>
<body bgcolor="$bgcolor">
<h3>Page Loading Times Test</h3>
<p>Questions: <a href="mailto:jrgm\@netscape.com">John Morrison</a>
ENDOFHTML
    print " - ";
    my $script = $cgi->var("SCRIPT_NAME");
    my $server = $cgi->var("SERVER_NAME");
    # pick the "other" protocol (i.e., test is inverted)
    my $proto = $ENV{SERVER_PORT} == 443 ? 'http://' : 'https://';
    my $other = $proto . $server . $script;
    if ($ENV{SERVER_PORT} == 443) {
        print "[ <a href='$other'>With no SSL</a> | <b>With SSL</b> ]<br>";
    } else {
        print "[ <b>With no SSL</b> | <a href='$other'>With SSL</a> ]<br>";
    }
    print <<"ENDOFHTML";
<form method="get" action="$prog" >
<table border="1" cellpadding="5" cellspacing="2">
<tr>
<td valign="top">
Page-load to Page-load Delay (msec):<br>
(Use 1000. Be nice.)
</td>
<td valign="top">
<select name="delay">
<option value="0">0
<option value="500">500
<option selected value="1000">1000
<option value="2000">2000
<option value="3000">3000
<option value="4000">4000
<option value="5000">5000
</select>
</td>
</tr>
<tr>
<td valign="top">
Number of test cycles to run:<br>
<br>
</td>
<td valign="top">
<select name="maxcyc">
<option value="0">1
<option value="1">2
<option value="2">3
<option value="3">4
<option value="4" selected>5
<option value="5">6
<option value="6">7
</select>
</td>
</tr>
<tr>
<td valign="top">
How long to wait before cancelling (msec):<br>
(Don't change this unless on a very slow link, or very slow machine.)
</td>
<td valign="top">
<select name="timeout">
<option value="15000">15000
<option selected value="30000">30000
<option value="45000">45000
<option value="60000">60000
<option value="90000">90000
</select>
</td>
</tr>
<tr>
<td valign="top">
<input type="reset" value="reset">
</td>
<td valign="top">
<input type="submit" value="submit">
</td>
</tr>
</table>
<hr>
<p>
You can visit the content that will be loaded, minus the embedded
javascript, by clicking on any of the links below.
</p>
<table border="1" cellpadding="5" cellspacing="2">
ENDOFHTML
    my $i;
    print "<tr>\n";
    my $base = $pagedata->httpbase;
    # serve content links over https when running over https
    $base =~ s/^http:/https:/i
        if $ENV{SERVER_PORT} == 443;
    for ($i=0; $i<$pagedata->length; $i++) {
        print "<td nowrap><a href='", $base, $pagedata->url($i), "'>";
        print $pagedata->name($i);
        print "</a>\n";
        # four content links per table row
        print "</tr><tr>\n" if (($i+1)%4 == 0);
    }
    print "</tr>" if (($i+1)%4 != 0);
    print "</table></form></body></html>\n";
    return;
}
| sergecodd/FireFox-OS | B2G/gecko/tools/page-loader/loader.pl | Perl | apache-2.0 | 19,294 |
#!/usr/bin/perl
# This script generates the character table for 'special' lookups
#
use strict;
use warnings;
use Getopt::Long;
################################################################################
################################################################################
### Character Table Definitions ###
################################################################################
################################################################################
# Per-byte lookup: characters that may START a 'special' (non-string,
# non-structural) JSON token, mapped to the JSONSL flag expression that
# will be emitted for that byte.
my @special_begin;
$special_begin[ord('-')] = 'JSONSL_SPECIALf_DASH';
$special_begin[ord('i')] = 'JSONSL__INF_PROXY';
$special_begin[ord('I')] = 'JSONSL__INF_PROXY';
$special_begin[ord('t')] = 'JSONSL_SPECIALf_TRUE';
$special_begin[ord('f')] = 'JSONSL_SPECIALf_FALSE';
$special_begin[ord('n')] = 'JSONSL_SPECIALf_NULL|JSONSL__NAN_PROXY';
$special_begin[ord('N')] = 'JSONSL__NAN_PROXY';
# every digit starts an unsigned number ...
$special_begin[ord($_)] = 'JSONSL_SPECIALf_UNSIGNED' for (0..9);
# ... except '0', which gets its own flag (leading-zero handling)
$special_begin[ord('0')] = 'JSONSL_SPECIALf_ZERO';
# Bytes with special meaning inside a string (escape and terminator)
my @strdefs;
$strdefs[ord('\\')] = 1;
$strdefs[ord('"')] = 1;
#Tokens which terminate a 'special' sequence. Basically all JSON tokens
#themselves
my @special_end;
{
    my @toks = qw([ { } ] " : \\ );
    push @toks, ',';
    $special_end[ord($_)] = 1 for (@toks);
}
#RFC 4627 allowed whitespace
my @wstable;
foreach my $x (0x20, 0x09, 0xa, 0xd) {
    $wstable[$x] = 1;
    $special_end[$x] = 1; # whitespace also terminates a special token
}
# Bytes that may appear in the BODY of a special token: digits, exponent
# markers, the letters of true/false/null, sign characters, and '.'.
my @special_body;
{
    foreach my $x (0..9) {
        $special_body[ord($x)] = 1;
    }
    foreach my $x ('E', 'e', 'a','l','s','u','-','+', '.') {
        $special_body[ord($x)] = 1;
    }
}
# Map of JSON escape letters (t, b, n, f, r) to the byte value each one
# decodes to.
my @unescapes;
$unescapes[ord('t')] = 0x09;
$unescapes[ord('b')] = 0x08;
$unescapes[ord('n')] = 0x0a;
$unescapes[ord('f')] = 0x0c;
$unescapes[ord('r')] = 0x0d;
# Characters which may legally follow a backslash in a JSON string
# (RFC 4627 escape set).
my @allowed_escapes;
{
    # FIX: was "@allowed_escapes[ord($_)] = 1" — a one-element array
    # slice used as a scalar lvalue, which triggers a "Scalar value
    # better written as $allowed_escapes[...]" warning under
    # "use warnings". Use the scalar element form.
    $allowed_escapes[ord($_)] = 1 foreach
        ('"', '\\', '/', 'b', 'f', 'n', 'r', 't', 'u');
}
# Bytes the string scanner must stop at / handle specially: the two
# string metacharacters plus low control bytes.
# NOTE(review): the control range is 0..19 (0x00-0x13), not 0..0x1f —
# TODO confirm this matches the consumer of the table.
my @string_passthrough;
$string_passthrough[ord($_)] = 1 for ('\\','"');
$string_passthrough[$_] = 1 for (0..19);
################################################################################
################################################################################
### CLI Options ###
################################################################################
################################################################################
# Map each CLI option name to [flag-slot, table-ref]; GetOptions writes
# into the flag slot, and the first table whose flag was set is emitted.
my %HMap = (
    special => [ undef, \@special_begin ],
    strings => [ undef, \@strdefs ],
    special_end => [ undef, \@special_end ],
    special_body => [undef, \@special_body ],
    whitespace => [ undef, \@wstable ],
    unescapes => [undef, \@unescapes],
    allowed_escapes => [ undef, \@allowed_escapes],
    string_passthrough => [ undef, \@string_passthrough ]
);
my $Table;
my %opthash;
# point each Getopt::Long target at the corresponding flag slot
while (my ($optname,$optarry) = each %HMap) {
    $opthash{$optname} = \$optarry->[0];
}
GetOptions(%opthash, escape_newlines => \my $EscapeNewlines);
# pick the (single) table whose option was passed on the command line
while (my ($k,$v) = each %HMap) {
    if ($v->[0]) {
        $Table = $v->[1];
        last;
    }
}
if (!$Table) {
    die("Please specify one of: " . join(",", keys %HMap));
}
################################################################################
################################################################################
### Logic ###
################################################################################
################################################################################
# Pretty-printed names for every 7-bit ASCII byte, used in the generated
# "/* <c> */" comments: control bytes get their standard abbreviations
# ("<NUL>", "<SOH>", ...), the space gets "<SP>", printable bytes
# 0x21-0x7e get "<c>" with the character itself, and 0x7f gets "<DEL>".
# Built programmatically instead of as a 128-entry literal.
my %PrettyMap;
{
    my @control_names = qw(
        NUL SOH STX ETX EOT ENQ ACK BEL BS HT LF VT FF CR SO SI
        DLE DC1 DC2 DC3 DC4 NAK SYN ETB CAN EM SUB ESC FS GS RS US
    );
    for my $code (0 .. $#control_names) {
        $PrettyMap{chr($code)} = '<' . $control_names[$code] . '>';
    }
    $PrettyMap{chr(0x20)} = '<SP>';
    for my $code (0x21 .. 0x7e) {
        $PrettyMap{chr($code)} = '<' . chr($code) . '>';
    }
    $PrettyMap{chr(0x7f)} = '<DEL>';
}
# Output accumulator state shared by add_to_grid/add_special below:
# @lines is the list of { begin, end, items } output rows, $cur the row
# currently being filled, $i the byte index being examined, $cur_col the
# number of cells on the current row, and $special_last records whether
# the previous byte was emitted on a row of its own.
my @lines;
my $cur = { begin => 0, items => [], end => 0 };
push @lines, $cur;
my $i = 0;
my $cur_col = 0;
my $special_last = 0;
# Append a plain 0/1 cell for byte $i to the current output row,
# starting a fresh row after a "special" entry or once 32 cells have
# accumulated. Mutates the file-scoped accumulator state above.
sub add_to_grid {
    my $v = shift;
    if ($special_last) {
        # previous byte had a row of its own; open a new grid row with $v
        $cur = { begin => $i, end => $i, items => [ $v ]};
        push @lines, $cur;
        $special_last = 0;
        $cur_col = 1;
        return;
    } else {
        push @{$cur->{items}}, $v;
        $cur->{end} = $i;
        $cur_col++;
    }
    if ($cur_col >= 32) {
        # row is full; start an empty row for byte $i+1
        $cur = {
        begin => $i+1, end => $i+1, items => [] };
        $cur_col = 0;
        push @lines, $cur;
    }
}
# Emit $v (a flag expression plus its pretty-printed char comment) as an
# output row of its own, and flag that the next plain cell must open a
# new row.
sub add_special {
    my $v = shift;
    push @lines, { items => [ $v ], begin => $i, end => $i };
    $special_last = 1;
}
# Walk the byte range and build the output rows: defined table entries
# whose byte has a pretty name get an annotated row of their own, and
# everything else becomes a 0/1 cell in the 32-column grid.
# NOTE(review): the loop condition is "$i < 255", so byte 0xff is never
# examined — TODO confirm the consumers of these tables expect this.
$special_last = 0;
for (; $i < 255; $i++) {
    my $v = $Table->[$i];
    if (defined $v) {
        my $char_pretty = $PrettyMap{chr($i)};
        if (defined $char_pretty) {
            $v = sprintf("$v /* %s */", $char_pretty);
            add_special($v);
        } else {
            # defined entry for a byte with no pretty name: plain cell
            add_to_grid(1);
        }
    } else {
        add_to_grid(0);
    }
}
# Print each non-empty row as "/* 0xNN */ v,v,... /* 0xMM */", with a
# trailing backslash when the output is destined for a C macro body.
foreach my $line (@lines) {
    my $items = $line->{items};
    if (@$items) {
        printf("/* 0x%02x */ %s, /* 0x%02x */",
            $line->{begin}, join(",", @$items), $line->{end});
        if ($EscapeNewlines) {
            print " \\";
        }
        print "\n";
    }
}
| snej/jsonsl | srcutil/genchartables.pl | Perl | mit | 7,204 |
package DDG::Goodie::SHA3;
# ABSTRACT: Computes the SHA-3 cryptographic hash function
use strict;
use DDG::Goodie;
use Digest::SHA3;
# DuckDuckGo instant-answer plumbing: answer type and caching flag
zci answer_type => "sha3";
zci is_cached => 1;
# Query words that activate this goodie: the fixed-length SHA-3 variants
# and the SHAKE extendable-output variants.
my @triggers = qw(sha3 sha3sum sha3-224 sha3-256 sha3-384 sha3-512
shake128 shake-128 shake256 shake-256);
triggers start => @triggers;
# Parse queries of the form "<alg>[-<bits>][sum] [hex|base64] <text>",
# hash <text> with the matching Digest::SHA3 function, and return the
# digest as a structured text answer.
handle query => sub {
    return unless $_ =~ /^(?<alg>sha3|shake)\-?(?<ver>128|224|256|384|512|)?(?:sum|)\s*
(?<enc>hex|base64|)\s+(?<str>.*)$/ix;
    my $alg = lc $+{'alg'};
    my $ver = $+{'ver'} || '512'; # SHA3-512 by default
    return if $alg eq 'sha3' && $ver eq '128'; # Special case to avoid the search 'sha3-128'
    # (128 is only valid for SHAKE)
    my $enc = lc $+{'enc'} || 'hex';
    my $str = $+{'str'} || '';
    $str =~ s/^hash\s+(.*\S+)/$1/; # Remove 'hash' in queries like 'sha3-224 hash this'
    $str =~ s/^of\s+(.*\S+)/$1/; # Remove 'of' in queries like 'sha3-256 hash of this'
    $str =~ s/^\"(.+)\"$/$1/; # remove quotes (e.g. sha3-384 "this string")
    return unless $str;
    # Resolve the Digest::SHA3 function by name (e.g. sha3_256_hex,
    # shake128_base64) and call it through a code reference.
    my $alg_name = $alg eq "sha3" ? $alg . '_' : $alg; # The functions prefix for sha3 is "sha3_"
    my $func_name = 'Digest::SHA3::' . $alg_name . $ver . '_' . $enc;
    my $func = \&$func_name;
    my $out = $func->($str);
    # By convention, CPAN Digest modules do not pad their Base64 output. So any
    # necessary padding will be implemented here
    my $modulo = length($out) % 4;
    my $pad = ($enc eq 'base64' && $modulo) ? 4 - $modulo : 0;
    $out .= '=' x $pad if ($pad);
    return $out, structured_answer => {
        data => {
            title => html_enc($out),
            subtitle => html_enc(uc($alg) . "-$ver $enc hash").": ".html_enc($str)
        },
        templates => {
            group => 'text'
        }
    };
};
1;
| Midhun-Jo-Antony/zeroclickinfo-goodies | lib/DDG/Goodie/SHA3.pm | Perl | apache-2.0 | 1,903 |
#!/usr/bin/perl
=pod
=head1 Tests for the globus gsi proxy_ssl code
Tests that exercise the functionality of creating an
ASN1 DER encoded PROXYCERTINFO extension to be placed
in an X509 certificate.
=cut
use strict;
use File::Basename;
use File::Compare;
use Test::More;
# Make the test binary findable next to this script or in the CWD
$ENV{PATH} = dirname($0) . ":.:" . $ENV{PATH};
my $test_prog = 'test_pci';
# NOTE(review): @tests and @todo are declared but never used
my @tests;
my @todo;
# Optionally wrap every invocation in valgrind when $VALGRIND is set,
# honoring extra flags from $VALGRIND_OPTIONS.
my $valgrind = "";
if (exists $ENV{VALGRIND})
{
    $valgrind = "valgrind --log-file=VALGRIND-$test_prog.log";
    if (exists $ENV{VALGRIND_OPTIONS})
    {
        $valgrind .= ' ' . $ENV{VALGRIND_OPTIONS};
    }
}
# Run one round-trip test: generate a DER-encoded PROXYCERTINFO with the
# given options, re-read and re-write it, then verify that both the DER
# output and the textual dump are identical across the two passes.
# Emits 4 Test::More assertions; scratch files are removed afterwards.
sub test_case
{
    my $test_index = shift; # numeric suffix for the scratch .der files
    my $options = shift;    # extra command-line options for test_pci
    my $testname = shift;   # base name used in the assertion labels
    ok(system("$valgrind $test_prog $options -out $test_prog.norm$test_index.der 1>$test_prog.log1.stdout") == 0, "$testname.norm");
    ok(system("$valgrind $test_prog -in $test_prog.norm$test_index.der -out $test_prog.log$test_index.der 1> $test_prog.log2.stdout") == 0, "$testname.log");
    ok(File::Compare::compare("$test_prog.log$test_index.der",
        "$test_prog.norm$test_index.der") == 0,
        "$testname.compareder");
    ok(File::Compare::compare("$test_prog.log1.stdout",
        "$test_prog.log2.stdout") == 0,
        "$testname.compare_stdout");
    &cleanup();
}
# Remove scratch files when the run is interrupted.
# NOTE(review): SIGKILL cannot be caught, so the 'KILL' entry has no
# effect — TODO confirm whether it was meant to be 'TERM'.
$SIG{'INT'} = 'cleanup';
$SIG{'QUIT'} = 'cleanup';
$SIG{'KILL'} = 'cleanup';
plan tests => 4*5; # 4 steps * 5 tests
test_case(1, "-path 10 -rest POLICYLANGUAGE POLICY", "path10-policy");
test_case(2, "-path 10", "path10");
test_case(3, "-path 0 -rest POLICYLANGUAGE POLICY", "path0-policy");
test_case(4, "-rest POLICYLANGUAGE POLICY", "policy");
test_case(5, "-out test_pci5.der", "default");
# Remove the scratch stdout/stderr capture files produced by test_case,
# quietly skipping any that do not exist.
sub cleanup
{
    foreach my $suffix (qw(log1.stdout log2.stdout log1.stderr log2.stderr)) {
        my $leftover = "$test_prog.$suffix";
        unlink($leftover) if -e $leftover;
    }
}
END {
    # make sure scratch files are removed however the script exits
    &cleanup();
}
| gridcf/gct | gsi/proxy/proxy_ssl/source/test/test_pci.pl | Perl | apache-2.0 | 2,125 |
#-----------------------------------------------------------
# officedocs.pl
# Plugin for Registry Ripper
#
# Change history
#
#
# References
#
#
# copyright 2008 H. Carvey
#-----------------------------------------------------------
package officedocs;
use strict;
# RegRipper plugin metadata: the hive this plugin applies to, which
# description accessors it provides, the OS mask, and its version stamp.
my %config = (hive => "NTUSER\.DAT",
    hasShortDescr => 1,
    hasDescr => 0,
    hasRefs => 0,
    osmask => 22,
    version => 20080324);
# Standard RegRipper plugin accessor interface
sub getConfig{return %config}
sub getShortDescr {
    return "Gets contents of user's Office doc MRU keys";
}
sub getDescr{}
sub getRefs {}
sub getHive {return $config{hive};}
sub getVersion {return $config{version};}
my $VERSION = getVersion();
# Plugin entry point, called by RegRipper with the path to an NTUSER.DAT
# hive. Detects the installed MS Office version, then reports the MRU
# (most-recently-used) document lists for Word, Excel and PowerPoint.
sub pluginmain {
    my $class = shift;
    my $ntuser = shift; # path to the NTUSER.DAT hive file
    ::logMsg("Launching officedocs v.".$VERSION);
    ::rptMsg("officedocs v.".$VERSION); # banner
    ::rptMsg("(".getHive().") ".getShortDescr()."\n"); # banner
    my $reg = Parse::Win32Registry->new($ntuser);
    my $root_key = $reg->get_root_key;
    ::rptMsg("officedocs v.".$VERSION);
    # First, let's find out which version of Office is installed
    my $version;
    my $tag = 0;
    my @versions = ("7\.0","8\.0", "9\.0", "10\.0", "11\.0","12\.0");
    foreach my $ver (@versions) {
        my $key_path = "Software\\Microsoft\\Office\\".$ver."\\Common\\Open Find";
        if (defined($root_key->get_subkey($key_path))) {
            # no "last": when several versions exist the highest one wins
            $version = $ver;
            $tag = 1;
        }
    }
    if ($tag) {
        ::rptMsg("MSOffice version ".$version." located.");
        my $key_path = "Software\\Microsoft\\Office\\".$version;
        my $of_key = $root_key->get_subkey($key_path);
        if ($of_key) {
            # Attempt to retrieve Word docs
            my @funcs = ("Open","Save As","File Save");
            foreach my $func (@funcs) {
                my $word = "Common\\Open Find\\Microsoft Office Word\\Settings\\".$func."\\File Name MRU";
                my $word_key = $of_key->get_subkey($word);
                if ($word_key) {
                    ::rptMsg($word);
                    ::rptMsg("LastWrite Time ".gmtime($word_key->get_timestamp())." (UTC)");
                    ::rptMsg("");
                    # NOTE(review): assumes the "Value" value exists; if it
                    # is missing, get_value() returns undef and get_data()
                    # would die — TODO confirm against sparse hives.
                    my $value = $word_key->get_value("Value")->get_data();
                    # the MRU list is stored as a NUL-separated string
                    my @data = split(/\00/,$value);
                    map{::rptMsg("$_");}@data;
                }
                else {
                    # ::rptMsg("Could not access ".$word);
                }
                ::rptMsg("");
            }
            # Attempt to retrieve Excel docs
            my $excel = 'Excel\\Recent Files';
            if (my $excel_key = $of_key->get_subkey($excel)) {
                ::rptMsg($key_path."\\".$excel);
                ::rptMsg("LastWrite Time ".gmtime($excel_key->get_timestamp())." (UTC)");
                my @vals = $excel_key->get_list_of_values();
                if (scalar(@vals) > 0) {
                    my %files;
                    # Retrieve values and load into a hash for sorting
                    foreach my $v (@vals) {
                        my $val = $v->get_name();
                        my $data = $v->get_data();
                        # numeric suffix of the value name ("File1" -> 1);
                        # note this lexical masks the outer $tag flag
                        my $tag = (split(/File/,$val))[1];
                        $files{$tag} = $val.":".$data;
                    }
                    # Print sorted content to report file
                    foreach my $u (sort {$a <=> $b} keys %files) {
                        my ($val,$data) = split(/:/,$files{$u},2);
                        ::rptMsg(" ".$val." -> ".$data);
                    }
                }
                else {
                    ::rptMsg($key_path.$excel." has no values.");
                }
            }
            else {
                ::rptMsg($key_path.$excel." not found.");
            }
            ::rptMsg("");
            # Attempt to retrieve PowerPoint docs
            my $ppt = 'PowerPoint\\Recent File List';
            if (my $ppt_key = $of_key->get_subkey($ppt)) {
                ::rptMsg($key_path."\\".$ppt);
                ::rptMsg("LastWrite Time ".gmtime($ppt_key->get_timestamp())." (UTC)");
                my @vals = $ppt_key->get_list_of_values();
                if (scalar(@vals) > 0) {
                    my %files;
                    # Retrieve values and load into a hash for sorting
                    foreach my $v (@vals) {
                        my $val = $v->get_name();
                        my $data = $v->get_data();
                        my $tag = (split(/File/,$val))[1];
                        $files{$tag} = $val.":".$data;
                    }
                    # Print sorted content to report file
                    foreach my $u (sort {$a <=> $b} keys %files) {
                        my ($val,$data) = split(/:/,$files{$u},2);
                        ::rptMsg(" ".$val." -> ".$data);
                    }
                }
                else {
                    ::rptMsg($key_path."\\".$ppt." has no values.");
                }
            }
            else {
                ::rptMsg($key_path."\\".$ppt." not found.");
            }
        }
        else {
            ::rptMsg("Could not access ".$key_path);
            ::logMsg("Could not access ".$key_path);
        }
    }
    else {
        ::logMsg("MSOffice version not found.");
        ::rptMsg("MSOffice version not found.");
    }
}
1; | mhmdfy/autopsy | RecentActivity/release/rr-full/plugins/officedocs.pl | Perl | apache-2.0 | 4,291 |
#------------------------------------------------------------------------------
# File: DNG.pm
#
# Description: Read DNG-specific information
#
# Revisions: 01/09/2006 - P. Harvey Created
#
# References: 1) http://www.adobe.com/products/dng/
#------------------------------------------------------------------------------
package Image::ExifTool::DNG;
use strict;
use vars qw($VERSION);
use Image::ExifTool qw(:DataAccess :Utils);
use Image::ExifTool::Exif;
use Image::ExifTool::MakerNotes;
use Image::ExifTool::CanonRaw;
$VERSION = '1.11';
# Forward declarations for the processing/writing routines defined below
sub ProcessOriginalRaw($$$);
sub ProcessAdobeData($$$);
sub ProcessAdobeMakN($$$);
sub ProcessAdobeCRW($$$);
sub ProcessAdobeRAF($$$);
sub ProcessAdobeMRW($$$);
sub ProcessAdobeSR2($$$);
sub ProcessAdobeIFD($$$);
sub WriteAdobeStuff($$$);
# data in OriginalRawFileData
# Tag table for the DNG OriginalRawFileData structure; the numeric keys
# are the field positions decoded by ProcessOriginalRaw.
%Image::ExifTool::DNG::OriginalRaw = (
    GROUPS => { 2 => 'Image' },
    PROCESS_PROC => \&ProcessOriginalRaw,
    NOTES => q{
This table defines tags extracted from the DNG OriginalRawFileData
information.
},
    0 => { Name => 'OriginalRawImage', Binary => 1 },
    1 => { Name => 'OriginalRawResource', Binary => 1 },
    2 => 'OriginalRawFileType',
    3 => 'OriginalRawCreator',
    4 => { Name => 'OriginalTHMImage', Binary => 1 },
    5 => { Name => 'OriginalTHMResource', Binary => 1 },
    6 => 'OriginalTHMFileType',
    7 => 'OriginalTHMCreator',
);
# Tag table for the "Adobe" DNGPrivateData block; each 4-byte key names
# a proprietary sub-block and routes it to the maker-specific processor.
%Image::ExifTool::DNG::AdobeData = ( #PH
    GROUPS => { 0 => 'MakerNotes', 1 => 'Adobe', 2 => 'Image' },
    PROCESS_PROC => \&ProcessAdobeData,
    WRITE_PROC => \&WriteAdobeStuff,
    NOTES => q{
This information is found in the "Adobe" DNGPrivateData.
The maker notes ('MakN') are processed by ExifTool, but some information may
have been lost by the Adobe DNG Converter. This is because the Adobe DNG
Converter (as of version 4.3) doesn't properly handle information referenced
from inside the maker notes that lies outside the original maker notes
block, and this information is lost when only the maker note block is copied
to the DNG image. While this isn't a big problem for most camera makes, it
is serious for some makes like Olympus.
Other entries in this table represent proprietary information that is
extracted from the original RAW image and restructured to a different (but
still proprietary) Adobe format.
},
    # populated from MakerNotes::Main by the fill-in block below
    MakN => [ ], # (filled in later)
    'CRW ' => {
        Name => 'AdobeCRW',
        SubDirectory => {
            TagTable => 'Image::ExifTool::CanonRaw::Main',
            ProcessProc => \&ProcessAdobeCRW,
            WriteProc => \&WriteAdobeStuff,
        },
    },
    'MRW ' => {
        Name => 'AdobeMRW',
        SubDirectory => {
            TagTable => 'Image::ExifTool::MinoltaRaw::Main',
            ProcessProc => \&ProcessAdobeMRW,
            WriteProc => \&WriteAdobeStuff,
        },
    },
    'SR2 ' => {
        Name => 'AdobeSR2',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Sony::SR2Private',
            ProcessProc => \&ProcessAdobeSR2,
        },
    },
    'RAF ' => {
        Name => 'AdobeRAF',
        SubDirectory => {
            TagTable => 'Image::ExifTool::FujiFilm::RAF',
            ProcessProc => \&ProcessAdobeRAF,
        },
    },
    'Pano' => {
        Name => 'AdobePano',
        SubDirectory => {
            TagTable => 'Image::ExifTool::PanasonicRaw::Main',
            ProcessProc => \&ProcessAdobeIFD,
        },
    },
    'Koda' => {
        Name => 'AdobeKoda',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Kodak::IFD',
            ProcessProc => \&ProcessAdobeIFD,
        },
    },
    'Leaf' => {
        Name => 'AdobeLeaf',
        SubDirectory => {
            TagTable => 'Image::ExifTool::Leaf::SubIFD',
            ProcessProc => \&ProcessAdobeIFD,
        },
    },
);
# fill in maker notes
# Copy the conditional maker-note tag definitions from
# @Image::ExifTool::MakerNotes::Main into the 'MakN' list of the AdobeData
# table above.  Hash entries are shallow-copied with the per-table keys
# (Groups, GotGroups, Table) removed so the copies can be re-bound to this
# table; non-hash entries (e.g. condition strings) are pushed through as-is.
{
my $tagInfo;
my $list = $Image::ExifTool::DNG::AdobeData{MakN};
foreach $tagInfo (@Image::ExifTool::MakerNotes::Main) {
unless (ref $tagInfo eq 'HASH') {
push @$list, $tagInfo;
next;
}
my %copy = %$tagInfo;
delete $copy{Groups};
delete $copy{GotGroups};
delete $copy{Table};
push @$list, \%copy;
}
}
#------------------------------------------------------------------------------
# Process DNG OriginalRawFileData information
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: the structure holds up to 8 sequential records (see the OriginalRaw
# tag table).  Even-bit-1 indices (2,3,6,7) are simple 4-byte codes; the
# others (0,1,4,5) are zlib-compressed data split into 64kB chunks preceded
# by a table of chunk offsets.
sub ProcessOriginalRaw($$$)
{
    my ($exifTool, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $start = $$dirInfo{DirStart};
    my $end = $start + $$dirInfo{DirLen};
    my $pos = $start;
    my ($index, $err);
    SetByteOrder('MM'); # pointers are always big-endian in this structure
    for ($index=0; $index<8; ++$index) {
        last if $pos + 4 > $end;
        my $val = Get32u($dataPt, $pos);
        $val or $pos += 4, next; # ignore zero values
        my $tagInfo = $exifTool->GetTagInfo($tagTablePtr, $index);
        $tagInfo or $err = "Missing DNG tag $index", last;
        if ($index & 0x02) {
            # extract a simple file type (tags 2, 3, 6 and 7)
            $val = substr($$dataPt, $pos, 4);
            $pos += 4;
        } else {
            # extract a compressed data block (tags 0, 1, 4 and 5)
            my $n = int(($val + 65535) / 65536);    # number of 64kB chunks
            my $hdrLen = 4 * ($n + 2);              # size of chunk-offset header
            $pos + $hdrLen > $end and $err = '', last;
            my $tag = $$tagInfo{Name};
            # only extract this information if requested (because it takes time)
            if ($exifTool->{OPTIONS}->{Binary} or
                $exifTool->{REQ_TAG_LOOKUP}->{lc($tag)})
            {
                unless (eval 'require Compress::Zlib') {
                    $err = 'Install Compress::Zlib to extract compressed images';
                    last;
                }
                my $i;
                $val = '';
                my $p2 = $pos + Get32u($dataPt, $pos + 4);
                for ($i=0; $i<$n; ++$i) {
                    # inflate this compressed block
                    my $p1 = $p2;
                    $p2 = $pos + Get32u($dataPt, $pos + ($i + 2) * 4);
                    if ($p1 >= $p2 or $p2 > $end) {
                        $err = 'Bad compressed RAW image';
                        last;
                    }
                    my $buff = substr($$dataPt, $p1, $p2 - $p1);
                    my ($v2, $stat);
                    my $inflate = Compress::Zlib::inflateInit();
                    $inflate and ($v2, $stat) = $inflate->inflate($buff);
                    if ($inflate and $stat == Compress::Zlib::Z_STREAM_END()) {
                        $val .= $v2;
                    } else {
                        $err = 'Error inflating compressed RAW image';
                        last;
                    }
                }
                $pos = $p2;
            } else {
                # not extracting the data, so just report its size and skip it
                $pos + $hdrLen > $end and $err = '', last;
                my $len = Get32u($dataPt, $pos + $hdrLen - 4);
                $pos + $len > $end and $err = '', last;
                # (removed a dead assignment here: $val was set from substr()
                # and then immediately overwritten by the placeholder below)
                $val = "Binary data $len bytes";
                $pos += $len; # skip over this block
            }
        }
        $exifTool->FoundTag($tagInfo, $val);
    }
    $exifTool->Warn($err || 'Bad OriginalRawFileData') if defined $err;
    return 1;
}
#------------------------------------------------------------------------------
# Process Adobe DNGPrivateData directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success
# Notes: the data begins with a 6-byte "Adobe\0" signature, followed by a
# sequence of records.  Each record has an 8-byte header (4-character type
# plus big-endian 32-bit data size) and is padded to an even byte boundary.
# When $$dirInfo{OutFile} is defined this routine rewrites the records
# instead of extracting them.
sub ProcessAdobeData($$$)
{
    my ($exifTool, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $dataPos = $$dirInfo{DataPos};
    my $pos = $$dirInfo{DirStart};
    my $end = $$dirInfo{DirLen} + $pos;
    my $outfile = $$dirInfo{OutFile};   # defined only when writing
    my $verbose = $exifTool->Options('Verbose');
    my $htmlDump = $exifTool->Options('HtmlDump');
    return 0 unless $$dataPt =~ /^Adobe\0/;
    unless ($outfile) {
        $exifTool->VerboseDir($dirInfo);
    }
    $htmlDump and $exifTool->HtmlDump($dataPos, 6, 'Adobe DNGPrivateData header');
    SetByteOrder('MM'); # always big endian
    $pos += 6;  # skip "Adobe\0" signature
    while ($pos + 8 <= $end) {
        my ($tag, $size) = unpack("x${pos}a4N", $$dataPt);
        $pos += 8;
        last if $pos + $size > $end;
        my $tagInfo = $$tagTablePtr{$tag};
        if ($htmlDump) {
            my $name = "Adobe$tag";
            $name =~ tr/ //d;
            $exifTool->HtmlDump($dataPos + $pos - 8, 8, "$name header", "Data Size: $size bytes");
            # dump non-EXIF format data
            unless ($tag =~ /^(MakN|SR2 )$/) {
                $exifTool->HtmlDump($dataPos + $pos, $size, "$name data");
            }
        }
        if ($verbose and not $outfile) {
            $tagInfo or $exifTool->VPrint(0, "$$exifTool{INDENT}Unsupported DNGAdobeData record: ($tag)\n");
            $exifTool->VerboseInfo($tag,
                ref $tagInfo eq 'HASH' ? $tagInfo : undef,
                DataPt => $dataPt,
                DataPos => $dataPos,
                Start => $pos,
                Size => $size,
            );
        }
        my $value;
        # (run-once loop: "last" is used below as a structured early exit)
        while ($tagInfo) {
            my ($subTable, $subName, $processProc);
            if (ref $tagInfo eq 'HASH') {
                unless ($$tagInfo{SubDirectory}) {
                    if ($outfile) {
                        # copy value across to outfile
                        $value = substr($$dataPt, $pos, $size);
                    } else {
                        $exifTool->HandleTag($tagTablePtr, $tag, substr($$dataPt, $pos, $size));
                    }
                    last;
                }
                $subTable = GetTagTable($tagInfo->{SubDirectory}->{TagTable});
                $subName = $$tagInfo{Name};
                $processProc = $tagInfo->{SubDirectory}->{ProcessProc};
            } else {
                # a list entry means conditional maker-note definitions
                $subTable = $tagTablePtr;
                $subName = 'AdobeMakN';
                $processProc = \&ProcessAdobeMakN;
            }
            my %dirInfo = (
                Base => $$dirInfo{Base},
                DataPt => $dataPt,
                DataPos => $dataPos,
                DataLen => $$dirInfo{DataLen},
                DirStart => $pos,
                DirLen => $size,
                DirName => $subName,
            );
            if ($outfile) {
                $dirInfo{Proc} = $processProc; # WriteAdobeStuff() calls this to do the actual writing
                $value = $exifTool->WriteDirectory(\%dirInfo, $subTable, \&WriteAdobeStuff);
                # use old directory if an error occurred
                defined $value or $value = substr($$dataPt, $pos, $size);
            } else {
                # override process proc for MakN
                $exifTool->ProcessDirectory(\%dirInfo, $subTable, $processProc);
            }
            last;
        }
        if (defined $value and length $value) {
            # add "Adobe" header if necessary
            $$outfile = "Adobe\0" unless $$outfile and length $$outfile;
            $$outfile .= $tag . pack('N', length $value) . $value;
            $$outfile .= "\0" if length($value) & 0x01; # pad if necessary
        }
        $pos += $size;
        ++$pos if $size & 0x01; # (darn padding)
    }
    # all records should consume the directory exactly
    # (removed stray debug values "$pos $end" from the warning message)
    $pos == $end or $exifTool->Warn('Adobe private data is corrupt');
    return 1;
}
#------------------------------------------------------------------------------
# Process Adobe CRW directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: data has 4 byte header (2 for byte order and 2 for entry count)
# - this routine would be as simple as ProcessAdobeMRW() below if Adobe hadn't
# pulled the bonehead move of reformatting the CRW information
# Each entry is a 6-byte header (16-bit tag, 32-bit size) followed directly
# by its value.  The entry headers are always big-endian; the values use the
# byte order given in the 4-byte directory header.  When $$dirInfo{OutFile}
# is set, the directory is rewritten with any new tag values applied.
sub ProcessAdobeCRW($$$)
{
my ($exifTool, $dirInfo, $tagTablePtr) = @_;
my $dataPt = $$dirInfo{DataPt};
my $start = $$dirInfo{DirStart};
my $end = $start + $$dirInfo{DirLen};
my $verbose = $exifTool->Options('Verbose');
my $buildMakerNotes = $exifTool->Options('MakerNotes');
my $outfile = $$dirInfo{OutFile};
my ($newTags, $oldChanged);
SetByteOrder('MM'); # always big endian
return 0 if $$dirInfo{DirLen} < 4;
my $byteOrder = substr($$dataPt, $start, 2);
return 0 unless $byteOrder =~ /^(II|MM)$/;
# initialize maker note data if building maker notes
$buildMakerNotes and Image::ExifTool::CanonRaw::InitMakerNotes($exifTool);
my $entries = Get16u($dataPt, $start + 2);
my $pos = $start + 4;
$exifTool->VerboseDir($dirInfo, $entries) unless $outfile;
if ($outfile) {
# get hash of new tags
$newTags = $exifTool->GetNewTagInfoHash($tagTablePtr);
# start the rewritten directory with a copy of the original 4-byte header
$$outfile = substr($$dataPt, $start, 4);
# remember the change count so we can detect whether anything was edited
$oldChanged = $exifTool->{CHANGED};
}
# loop through entries in Adobe CRW information
my $index;
for ($index=0; $index<$entries; ++$index) {
last if $pos + 6 > $end;
my $tag = Get16u($dataPt, $pos);
my $size = Get32u($dataPt, $pos + 2);
$pos += 6;
last if $pos + $size > $end;
my $value = substr($$dataPt, $pos, $size);
# low 14 bits are the CRW tag ID; upper bits encode type/location flags
my $tagID = $tag & 0x3fff;
my $tagType = ($tag >> 8) & 0x38; # get tag type
my $format = $Image::ExifTool::CanonRaw::crwTagFormat{$tagType};
my $count;
my $tagInfo = $exifTool->GetTagInfo($tagTablePtr, $tagID, \$value);
if ($tagInfo) {
$format = $$tagInfo{Format} if $$tagInfo{Format};
$count = $$tagInfo{Count};
}
# set count to 1 by default for values that were in the directory entry
if (not defined $count and $tag & 0x4000 and $format and $format ne 'string') {
$count = 1;
}
# set count from tagInfo count if necessary
if ($format and not $count) {
# set count according to format and size
my $fnum = $Image::ExifTool::Exif::formatNumber{$format};
my $fsiz = $Image::ExifTool::Exif::formatSize[$fnum];
$count = int($size / $fsiz);
}
$format or $format = 'undef';
# values use the byte order from the directory header
SetByteOrder($byteOrder);
my $val = ReadValue(\$value, 0, $format, $count, $size);
if ($outfile) {
if ($tagInfo) {
my $subdir = $$tagInfo{SubDirectory};
if ($subdir and $$subdir{TagTable}) {
# rewrite this subdirectory in place
my $name = $$tagInfo{Name};
my $newTagTable = GetTagTable($$subdir{TagTable});
return 0 unless $newTagTable;
my $subdirStart = 0;
#### eval Start ()
$subdirStart = eval $$subdir{Start} if $$subdir{Start};
my $dirData = \$value;
my %subdirInfo = (
Name => $name,
DataPt => $dirData,
DataLen => $size,
DirStart => $subdirStart,
DirLen => $size - $subdirStart,
Parent => $$dirInfo{DirName},
);
#### eval Validate ($dirData, $subdirStart, $size)
if (defined $$subdir{Validate} and not eval $$subdir{Validate}) {
$exifTool->Warn("Invalid $name data");
} else {
$subdir = $exifTool->WriteDirectory(\%subdirInfo, $newTagTable);
if (defined $subdir and length $subdir) {
if ($subdirStart) {
# add header before data directory
$value = substr($value, 0, $subdirStart) . $subdir;
} else {
$value = $subdir;
}
}
}
} elsif ($$newTags{$tagID}) {
# apply a user-specified new value for this tag
my $nvHash = $exifTool->GetNewValueHash($tagInfo);
if (Image::ExifTool::IsOverwriting($nvHash, $val)) {
my $newVal = Image::ExifTool::GetNewValues($nvHash);
my $verboseVal;
$verboseVal = $newVal if $verbose > 1;
# convert to specified format if necessary
if (defined $newVal and $format) {
$newVal = WriteValue($newVal, $format, $count);
}
if (defined $newVal) {
$exifTool->VerboseValue("- CanonRaw:$$tagInfo{Name}", $value);
$exifTool->VerboseValue("+ CanonRaw:$$tagInfo{Name}", $verboseVal);
$value = $newVal;
++$exifTool->{CHANGED};
}
}
}
}
# write out new value (always big-endian)
SetByteOrder('MM');
# (verified that there is no padding here)
$$outfile .= Set16u($tag) . Set32u(length($value)) . $value;
} else {
$exifTool->HandleTag($tagTablePtr, $tagID, $val,
Index => $index,
DataPt => $dataPt,
DataPos => $$dirInfo{DataPos},
Start => $pos,
Size => $size,
TagInfo => $tagInfo,
);
if ($buildMakerNotes) {
# build maker notes information if requested
Image::ExifTool::CanonRaw::BuildMakerNotes($exifTool, $tagID, $tagInfo,
\$value, $format, $count);
}
}
# (we lost the directory structure, but the second tag 0x0805
# should be in the ImageDescription directory)
$exifTool->{DIR_NAME} = 'ImageDescription' if $tagID == 0x0805;
SetByteOrder('MM');
$pos += $size;
}
# if nothing changed (or the rewrite failed), fall back to the old directory
if ($outfile and (not defined $$outfile or $index != $entries or
$exifTool->{CHANGED} == $oldChanged))
{
$exifTool->{CHANGED} = $oldChanged; # nothing changed
undef $$outfile; # rewrite old directory
}
if ($index != $entries) {
$exifTool->Warn('Truncated CRW notes');
} elsif ($pos < $end) {
$exifTool->Warn($end-$pos . ' extra bytes at end of CRW notes');
}
# finish building maker notes if necessary
if ($buildMakerNotes) {
SetByteOrder($byteOrder);
Image::ExifTool::CanonRaw::SaveMakerNotes($exifTool);
}
return 1;
}
#------------------------------------------------------------------------------
# Process Adobe MRW directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: data has 4 byte header (2 for byte order and 2 for entry count).
# A fake in-memory MRW file is synthesized so the standard Minolta MRW
# processor can be reused for both reading and writing.
sub ProcessAdobeMRW($$$)
{
    my ($exifTool, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $dirLen = $$dirInfo{DirLen};
    my $dirStart = $$dirInfo{DirStart};
    my $outfile = $$dirInfo{OutFile};
    # construct fake MRW file
    my $buff = "\0MRM" . pack('N', $dirLen - 4);
    # ignore leading byte order and directory count words
    $buff .= substr($$dataPt, $dirStart + 4, $dirLen - 4);
    # (direct method call instead of indirect object syntax)
    my $raf = File::RandomAccess->new(\$buff);
    my %dirInfo = ( RAF => $raf, OutFile => $outfile );
    my $rtnVal = Image::ExifTool::MinoltaRaw::ProcessMRW($exifTool, \%dirInfo);
    if ($outfile and defined $$outfile and length $$outfile) {
        # remove the fake 8-byte MRW header and restore the original
        # 4-byte Adobe byte-order/count header
        $$outfile = substr($$dataPt, $dirStart, 4) . substr($$outfile, 8);
    }
    return $rtnVal;
}
#------------------------------------------------------------------------------
# Process Adobe RAF directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: the data begins with a 2-byte byte-order mark, followed by
# length-prefixed records terminated by a zero-length entry
sub ProcessAdobeRAF($$$)
{
    my ($exifTool, $dirInfo, $tagTablePtr) = @_;
    return 0 if $$dirInfo{OutFile}; # (can't write this yet)
    my $dataPt = $$dirInfo{DataPt};
    my $pos = $$dirInfo{DirStart};
    my $dirEnd = $$dirInfo{DirLen} + $pos;
    my ($readIt, $warn);
    # set byte order according to first 2 bytes of Adobe RAF data
    if ($pos + 2 <= $dirEnd and SetByteOrder(substr($$dataPt, $pos, 2))) {
        $pos += 2;
    } else {
        $exifTool->Warn('Invalid DNG RAF data');
        return 0;
    }
    $exifTool->VerboseDir($dirInfo);
    # make fake RAF object for processing (same acronym, different meaning)
    # (direct method call instead of indirect object syntax)
    my $raf = File::RandomAccess->new($dataPt);
    my $num = '';
    # loop through all records in Adobe RAF data:
    # 0 - RAF table (not processed)
    # 1 - first RAF directory
    # 2 - second RAF directory (if available)
    for (;;) {
        last if $pos + 4 > $dirEnd;
        my $len = Get32u($dataPt, $pos);
        $pos += 4 + $len;   # step to next entry in Adobe RAF record
        $len or last;       # ends with an empty entry
        $readIt or $readIt = 1, next; # ignore first entry (RAF table)
        my %dirInfo = (
            RAF => $raf,
            DirStart => $pos - $len,
        );
        # tag extracted values with group1 names "RAF", "RAF2", ...
        $$exifTool{SET_GROUP1} = "RAF$num";
        $exifTool->ProcessDirectory(\%dirInfo, $tagTablePtr) or $warn = 1;
        delete $$exifTool{SET_GROUP1};
        $num = ($num || 1) + 1;
    }
    $warn and $exifTool->Warn('Possibly corrupt RAF information');
    return 1;
}
#------------------------------------------------------------------------------
# Process Adobe SR2 directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: data has 6 byte header (2 for byte order and 4 for original offset)
sub ProcessAdobeSR2($$$)
{
my ($exifTool, $dirInfo, $tagTablePtr) = @_;
return 0 if $$dirInfo{OutFile}; # (can't write this yet)
my $dataPt = $$dirInfo{DataPt};
my $start = $$dirInfo{DirStart};
my $len = $$dirInfo{DirLen};
return 0 if $len < 6;
# the original-offset word in the header is stored big-endian
SetByteOrder('MM');
my $originalPos = Get32u($dataPt, $start + 2);
# remainder of the data uses the byte order given by the first 2 bytes
return 0 unless SetByteOrder(substr($$dataPt, $start, 2));
$exifTool->VerboseDir($dirInfo);
my $dataPos = $$dirInfo{DataPos};
my $dirStart = $start + 6; # pointer to maker note directory
my $dirLen = $len - 6;
# initialize subdirectory information
# $fix is the difference between where the SR2 data sits now and where it
# sat in the original file, so offsets inside the IFD still resolve
my $fix = $dataPos + $dirStart - $originalPos;
my %subdirInfo = (
DirName => 'AdobeSR2',
Base => $$dirInfo{Base} + $fix,
DataPt => $dataPt,
DataPos => $dataPos - $fix,
DataLen => $$dirInfo{DataLen},
DirStart => $dirStart,
DirLen => $dirLen,
Parent => $$dirInfo{DirName},
);
if ($exifTool->Options('HtmlDump')) {
$exifTool->HtmlDump($dataPos + $start, 6, 'Adobe SR2 data');
}
# parse the SR2 directory
$exifTool->ProcessDirectory(\%subdirInfo, $tagTablePtr);
return 1;
}
#------------------------------------------------------------------------------
# Process Adobe-mutilated IFD directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: data has 2 byte header (byte order of the data)
sub ProcessAdobeIFD($$$)
{
my ($exifTool, $dirInfo, $tagTablePtr) = @_;
return 0 if $$dirInfo{OutFile}; # (can't write this yet)
my $dataPt = $$dirInfo{DataPt};
my $pos = $$dirInfo{DirStart};
my $dataPos = $$dirInfo{DataPos};
my $base = $$dirInfo{Base};
return 0 if $$dirInfo{DirLen} < 4;
my $dataOrder = substr($$dataPt, $pos, 2);
return 0 unless SetByteOrder($dataOrder); # validate byte order of data
# parse the mutilated IFD. This is similar to a TIFF IFD, except:
# - data follows directly after Count entry in IFD
# - byte order of IFD entries is always big-endian, but byte order of data changes
SetByteOrder('MM'); # IFD structure is always big-endian
my $entries = Get16u($dataPt, $pos + 2);
$exifTool->VerboseDir($dirInfo, $entries);
$pos += 4;
my $end = $pos + $$dirInfo{DirLen};
my $index;
for ($index=0; $index<$entries; ++$index) {
last if $pos + 8 > $end;
SetByteOrder('MM'); # directory entries always big-endian (doh!)
my $tagID = Get16u($dataPt, $pos);
my $format = Get16u($dataPt, $pos+2);
my $count = Get32u($dataPt, $pos+4);
# valid TIFF formats are 1-13; anything else means a corrupted IFD
if ($format < 1 or $format > 13) {
# warn unless the IFD was just padded with zeros
$format and $exifTool->Warn(
sprintf("Unknown format ($format) for $$dirInfo{DirName} tag 0x%x",$tagID));
return 0; # must be corrupted
}
my $size = $Image::ExifTool::Exif::formatSize[$format] * $count;
last if $pos + 8 + $size > $end;
my $formatStr = $Image::ExifTool::Exif::formatName[$format];
SetByteOrder($dataOrder); # data stored in native order
# value data is inline, immediately after the 8-byte entry header
my $val = ReadValue($dataPt, $pos + 8, $formatStr, $count, $size);
$exifTool->HandleTag($tagTablePtr, $tagID, $val,
Index => $index,
DataPt => $dataPt,
DataPos => $dataPos,
Start => $pos + 8,
Size => $size
);
$pos += 8 + $size;
}
if ($index < $entries) {
$exifTool->Warn("Truncated $$dirInfo{DirName} directory");
return 0;
}
return 1;
}
#------------------------------------------------------------------------------
# Process Adobe MakerNotes directory
# Inputs: 0) ExifTool object ref, 1) dirInfo ref, 2) tag table ref
# Returns: 1 on success, otherwise returns 0 and sets a Warning
# Notes: data has 6 byte header (2 for byte order and 4 for original offset)
# Handles both reading and (when $$dirInfo{OutFile} is set) rewriting of the
# maker notes copied into the DNG by the Adobe DNG Converter.  Offsets inside
# the notes still refer to their position in the original RAW file, so a
# base fixup is computed from the stored original offset.
sub ProcessAdobeMakN($$$)
{
my ($exifTool, $dirInfo, $tagTablePtr) = @_;
my $dataPt = $$dirInfo{DataPt};
my $start = $$dirInfo{DirStart};
my $len = $$dirInfo{DirLen};
my $outfile = $$dirInfo{OutFile};
return 0 if $len < 6;
# the original-offset word in the header is stored big-endian
SetByteOrder('MM');
my $originalPos = Get32u($dataPt, $start + 2);
return 0 unless SetByteOrder(substr($$dataPt, $start, 2));
$exifTool->VerboseDir($dirInfo) unless $outfile;
my $dataPos = $$dirInfo{DataPos};
my $dirStart = $start + 6; # pointer to maker note directory
my $dirLen = $len - 6;
# use the first 48 bytes to identify the maker note type via the
# conditional 'MakN' entries copied from MakerNotes::Main
my $hdr = substr($$dataPt, $dirStart, $dirLen < 48 ? $dirLen : 48);
my $tagInfo = $exifTool->GetTagInfo($tagTablePtr, 'MakN', \$hdr);
return 0 unless $tagInfo and $$tagInfo{SubDirectory};
my $subdir = $$tagInfo{SubDirectory};
my $subTable = GetTagTable($$subdir{TagTable});
# initialize subdirectory information
my %subdirInfo = (
DirName => 'MakerNotes',
Name => $$tagInfo{Name}, # needed for maker notes verbose dump
Base => $$dirInfo{Base},
DataPt => $dataPt,
DataPos => $dataPos,
DataLen => $$dirInfo{DataLen},
DirStart => $dirStart,
DirLen => $dirLen,
TagInfo => $tagInfo,
FixBase => $$subdir{FixBase},
EntryBased=> $$subdir{EntryBased},
Parent => $$dirInfo{DirName},
);
# look for start of maker notes IFD
my $loc = Image::ExifTool::MakerNotes::LocateIFD($exifTool,\%subdirInfo);
unless (defined $loc) {
$exifTool->Warn('Maker notes could not be parsed');
return 0;
}
if ($exifTool->Options('HtmlDump')) {
$exifTool->HtmlDump($dataPos + $start, 6, 'Adobe MakN data');
$exifTool->HtmlDump($dataPos + $dirStart, $loc, "$$tagInfo{Name} header") if $loc;
}
my $fix = 0;
unless ($$subdir{Base}) {
# adjust base offset for current maker note position
$fix = $dataPos + $dirStart - $originalPos;
$subdirInfo{Base} += $fix;
$subdirInfo{DataPos} -= $fix;
}
if ($outfile) {
# rewrite the maker notes directory
my $fixup = $subdirInfo{Fixup} = new Image::ExifTool::Fixup;
my $oldChanged = $$exifTool{CHANGED};
my $buff = $exifTool->WriteDirectory(\%subdirInfo, $subTable);
# nothing to do if error writing directory or nothing changed
unless (defined $buff and $exifTool->{CHANGED} != $oldChanged) {
$exifTool->{CHANGED} = $oldChanged;
return 1;
}
# deleting maker notes if directory is empty
unless (length $buff) {
$$outfile = '';
return 1;
}
# apply a one-time fixup to offsets
if ($subdirInfo{Relative}) {
# shift all offsets to be relative to new base
my $baseShift = $dataPos + $dirStart + $$dirInfo{Base} - $subdirInfo{Base};
$fixup->{Shift} += $baseShift;
} else {
# shift offsets to position of original maker notes
$fixup->{Shift} += $originalPos;
}
$fixup->{Shift} += $loc; # adjust for makernotes header
$fixup->ApplyFixup(\$buff); # fix up pointer offsets
# get copy of original Adobe header (6) and makernotes header ($loc)
my $header = substr($$dataPt, $start, 6 + $loc);
# add Adobe and makernotes headers to new directory
$$outfile = $header . $buff;
} else {
# parse the maker notes directory
$exifTool->ProcessDirectory(\%subdirInfo, $subTable, $$subdir{ProcessProc});
# extract maker notes as a block if specified
if ($exifTool->Options('MakerNotes') or
$exifTool->{REQ_TAG_LOOKUP}->{lc($$tagInfo{Name})})
{
my $val;
if ($$tagInfo{MakerNotes}) {
$subdirInfo{Base} = $$dirInfo{Base} + $fix;
$subdirInfo{DataPos} = $dataPos - $fix;
$subdirInfo{DirStart} = $dirStart;
$subdirInfo{DirLen} = $dirLen;
# rebuild the maker notes to identify all offsets that require fixing up
$val = Image::ExifTool::Exif::RebuildMakerNotes($exifTool, $subTable, \%subdirInfo);
defined $val or $exifTool->Warn('Error rebuilding maker notes (may be corrupt)');
} else {
# extract this directory as a block if specified
return 1 unless $$tagInfo{Writable};
}
# NOTE(review): hard-coded offset 20 appears to skip fixed headers
# preceding the maker note data -- confirm against the record layout
$val = substr($$dataPt, 20) unless defined $val;
$exifTool->FoundTag($tagInfo, $val);
}
}
return 1;
}
#------------------------------------------------------------------------------
# Write Adobe information (calls appropriate ProcessProc to do the actual work)
# Inputs: 0) ExifTool object ref, 1) source dirInfo ref, 2) tag table ref
# Returns: new data block (may be empty if directory is deleted) or undef on error
sub WriteAdobeStuff($$$)
{
    my ($exifTool, $dirInfo, $tagTablePtr) = @_;
    return 1 unless $exifTool;  # allow dummy access
    # delegate to the record-specific routine (default: the top-level Adobe
    # data processor), capturing its output in $newData via the OutFile ref
    my $newData;
    $$dirInfo{OutFile} = \$newData;
    my $processProc = $$dirInfo{Proc} || \&ProcessAdobeData;
    $processProc->($exifTool, $dirInfo, $tagTablePtr) or undef $newData;
    return $newData;
}
1; # end
__END__
=head1 NAME
Image::ExifTool::DNG.pm - Read DNG-specific information
=head1 SYNOPSIS
This module is used by Image::ExifTool
=head1 DESCRIPTION
This module contains routines required by Image::ExifTool to process
information in DNG (Digital Negative) images.
=head1 AUTHOR
Copyright 2003-2009, Phil Harvey (phil at owl.phy.queensu.ca)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 REFERENCES
=over 4
=item L<http://www.adobe.com/products/dng/>
=back
=head1 SEE ALSO
L<Image::ExifTool::TagNames/DNG Tags>,
L<Image::ExifTool::TagNames/EXIF Tags>,
L<Image::ExifTool(3pm)|Image::ExifTool>
=cut
| opf-attic/ref | tools/fits/0.6.1/tools/exiftool/perl/lib/Image/ExifTool/DNG.pm | Perl | apache-2.0 | 32,990 |
package Mouse::Meta::Role;
use Mouse::Util qw(:meta); # enables strict and warnings
use Mouse::Meta::Module;
our @ISA = qw(Mouse::Meta::Module);
sub method_metaclass;
# Build the bare metaclass object for a role.
# Initializes the role's own method/attribute/required-method/role stores,
# then blesses the argument hash into the (sub)class.
sub _construct_meta {
my $class = shift;
my %args = @_;
$args{methods} = {};
$args{attributes} = {};
$args{required_methods} = [];
$args{roles} = [];
my $self = bless \%args, ref($class) || $class;
# for metaclass subclasses, let the meta-metaclass initialize any extra
# attributes; skipped for Mouse::Meta::Role itself to avoid bootstrap
# recursion
if($class ne __PACKAGE__){
$self->meta->_initialize_object($self, \%args);
}
return $self;
}
sub create_anon_role{
    # Create a role with no package name (an anonymous role); all other
    # arguments are passed through to create().
    my ($self, @args) = @_;
    return $self->create(undef, @args);
}
# forward declarations -- bodies are installed elsewhere (presumably by
# Mouse's XS or pure-Perl backend); declared here so the names exist in
# this package -- TODO confirm
sub is_anon_role;
sub get_roles;
sub calculate_all_roles {
    # Return this role followed by every role it consumes, transitively,
    # with duplicates (by role name) removed in first-seen order.
    my $self = shift;
    my %seen;
    my @unique;
    for my $role ($self, map { $_->calculate_all_roles } @{ $self->get_roles }) {
        push @unique, $role unless $seen{ $role->name }++;
    }
    return @unique;
}
sub get_required_method_list{
    # Names of all methods this role requires its consumers to provide,
    # in insertion order.
    my $self = shift;
    return @{ $self->{required_methods} };
}
# Append method names to the role's required-method list.
# A name is skipped if it is already required or if the role itself already
# provides the method.  Note the postfix ++ inside the grep: it also
# de-duplicates repeated names within the @methods argument list itself.
sub add_required_methods {
my($self, @methods) = @_;
my %required = map{ $_ => 1 } @{$self->{required_methods}};
push @{$self->{required_methods}}, grep{ !$required{$_}++ && !$self->has_method($_) } @methods;
return;
}
sub requires_method {
    # True if $name appears in this role's required-method list.
    my($self, $name) = @_;
    my $matches = grep { $_ eq $name } @{ $self->{required_methods} };
    return $matches != 0;
}
sub add_attribute {
    # Record an attribute definition on this role.  The spec may be given
    # either as a single (hash) reference or as a list of key/value pairs.
    my ($self, $name, @spec) = @_;
    $self->{attributes}{$name} = @spec == 1 ? $spec[0] : { @spec };
    return;
}
sub apply {
    # Apply this role to a consumer (class, role, or instance metaclass)
    # by delegating to Mouse::Meta::Role::Application, loaded on demand.
    my ($self, $consumer, @args) = @_;
    require 'Mouse/Meta/Role/Application.pm';
    my $application = Mouse::Meta::Role::Application->new(@args);
    return $application->apply($self, $consumer);
}
sub combine {
    # Compose several roles into a single composite role; @role_specs is
    # handed to the composite constructor unchanged.
    my ($self, @role_specs) = @_;
    require 'Mouse/Meta/Role/Composite.pm';
    return Mouse::Meta::Role::Composite->new(roles => \@role_specs);
}
# forward declarations for the before/around/after modifier accessors --
# bodies are installed elsewhere (presumably by Mouse's XS or pure-Perl
# backend) -- TODO confirm
sub add_before_method_modifier;
sub add_around_method_modifier;
sub add_after_method_modifier;
sub get_before_method_modifiers;
sub get_around_method_modifiers;
sub get_after_method_modifiers;
# Register an 'override' method modifier on this role.
# A role may not both define a method and override it, so this throws if a
# local method of the same name already exists.
sub add_override_method_modifier{
    my($self, $method_name, $method) = @_;
    if($self->has_method($method_name)){
        # This error happens in the override keyword or during role composition,
        # so I added a message, "A local method of ...", only for compatibility (gfx)
        # (fixed grammar typo: "as been found" -> "has been found")
        $self->throw_error("Cannot add an override of method '$method_name' "
            . "because there is a local version of '$method_name'"
            . "(A local method of the same name has been found)");
    }
    $self->{override_method_modifiers}->{$method_name} = $method;
}
sub get_override_method_modifier {
    # Return the 'override' modifier registered for $method_name, or undef
    # if none exists.  (||= reproduces the original's autovivification of
    # the override_method_modifiers hash when it is missing.)
    my ($self, $method_name) = @_;
    my $modifiers = ($self->{override_method_modifiers} ||= {});
    return $modifiers->{$method_name};
}
sub does_role {
    # True if this role is, or transitively consumes, the named role.
    my ($self, $role_name) = @_;

    $self->throw_error("You must supply a role name to look for")
        if !defined $role_name;

    # accept either a role name or a role metaclass object
    $role_name = $role_name->name if ref $role_name;

    # if we are it,.. then return true
    return 1 if $role_name eq $self->name;

    # otherwise.. check our children
    for my $sub_role (@{ $self->get_roles }) {
        return 1 if $sub_role->does_role($role_name);
    }
    return 0;
}
1;
__END__
=head1 NAME
Mouse::Meta::Role - The Mouse Role metaclass
=head1 VERSION
This document describes Mouse version 1.12
=head1 DESCRIPTION
This class is a meta object protocol for Mouse roles,
which is a subset of Moose::Meta:::Role.
=head1 SEE ALSO
L<Moose::Meta::Role>
=cut
| takashabe/isucon3-qual | perl/local/lib/perl5/x86_64-linux/Mouse/Meta/Role.pm | Perl | mit | 3,429 |
package MooseX::Declare::Syntax::Keyword::Namespace;
BEGIN {
$MooseX::Declare::Syntax::Keyword::Namespace::AUTHORITY = 'cpan:FLORA';
}
{
$MooseX::Declare::Syntax::Keyword::Namespace::VERSION = '0.35';
}
# ABSTRACT: Declare outer namespace
use Moose;
use Carp qw( confess );
use MooseX::Declare::Util qw( outer_stack_push outer_stack_peek );
use namespace::clean -except => 'meta';
with qw(
MooseX::Declare::Syntax::KeywordHandling
);
sub parse {
    # Handle the 'namespace' keyword: consume "namespace Foo::Bar;" and
    # record Foo::Bar as the outer namespace for the rest of this file.
    my ($self, $ctx) = @_;

    my $file = $ctx->caller_file;
    confess "Nested namespaces are not supported yet"
        if outer_stack_peek $file;

    $ctx->skip_declarator;

    # the namespace name must follow the keyword
    my $namespace = $ctx->strip_word
        or confess "Expected a namespace argument to use from here on";

    confess "Relative namespaces are currently not supported"
        if $namespace =~ /^::/;

    # the statement must end immediately after the namespace name
    $ctx->skipspace;
    confess "Expected end of statement after namespace argument"
        unless $ctx->peek_next_char eq ';';

    outer_stack_push $file, $namespace;
}
1;
__END__
=pod
=encoding utf-8
=head1 NAME
MooseX::Declare::Syntax::Keyword::Namespace - Declare outer namespace
=head1 SYNOPSIS
use MooseX::Declare;
namespace Foo::Bar;
class ::Baz extends ::Qux with ::Fnording {
...
}
=head1 DESCRIPTION
The C<namespace> keyword allows you to declare an outer namespace under
which other namespaced constructs can be nested. The L</SYNOPSIS> is
effectively the same as
use MooseX::Declare;
class Foo::Bar::Baz extends Foo::Bar::Qux with Foo::Bar::Fnording {
...
}
=head1 METHODS
=head2 parse
Object->parse(Object $context)
Will skip the declarator, parse the namespace and push the namespace
in the file package stack.
=head1 CONSUMES
=over 4
=item *
L<MooseX::Declare::Syntax::KeywordHandling>
=back
=head1 SEE ALSO
=over 4
=item *
L<MooseX::Declare>
=back
=head1 AUTHORS
=over 4
=item *
Florian Ragwitz <rafl@debian.org>
=item *
Ash Berlin <ash@cpan.org>
=item *
Chas. J. Owens IV <chas.owens@gmail.com>
=item *
Chris Prather <chris@prather.org>
=item *
Dave Rolsky <autarch@urth.org>
=item *
Devin Austin <dhoss@cpan.org>
=item *
Hans Dieter Pearcey <hdp@cpan.org>
=item *
Justin Hunter <justin.d.hunter@gmail.com>
=item *
Matt Kraai <kraai@ftbfs.org>
=item *
Michele Beltrame <arthas@cpan.org>
=item *
Nelo Onyiah <nelo.onyiah@gmail.com>
=item *
nperez <nperez@cpan.org>
=item *
Piers Cawley <pdcawley@bofh.org.uk>
=item *
Rafael Kitover <rkitover@io.com>
=item *
Robert 'phaylon' Sedlacek <rs@474.at>
=item *
Stevan Little <stevan.little@iinteractive.com>
=item *
Tomas Doran <bobtfish@bobtfish.net>
=item *
Yanick Champoux <yanick@babyl.dyndns.org>
=back
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2011 by Florian Ragwitz.
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/MooseX/Declare/Syntax/Keyword/Namespace.pm | Perl | mit | 2,963 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is built by mktables from e.g. UnicodeData.txt.
# Any changes made here will be lost!
#
# Linebreak category 'Close_Punctuation'
#
return <<'END';
0029
005D
007D
0F3B
0F3D
169C
2046
207E
208E
232A
2769
276B
276D
276F
2771
2773
2775
27C6
27E7
27E9
27EB
27ED
27EF
2984
2986
2988
298A
298C
298E
2990
2992
2994
2996
2998
29D9
29DB
29FD
2E23
2E25
2E27
2E29
3001 3002
3009
300B
300D
300F
3011
3015
3017
3019
301B
301E 301F
FD3F
FE11 FE12
FE18
FE36
FE38
FE3A
FE3C
FE3E
FE40
FE42
FE44
FE48
FE50
FE52
FE5A
FE5C
FE5E
FF09
FF0C
FF0E
FF3D
FF5D
FF60 FF61
FF63 FF64
END
| leighpauls/k2cro4 | third_party/cygwin/lib/perl5/5.10/unicore/lib/lb/CL.pl | Perl | bsd-3-clause | 761 |
#============================================================= -*-perl-*-
#
# Template::Modules
#
# DESCRIPTION
#
# AUTHOR
# Andy Wardley <abw@wardley.org>
#
# COPYRIGHT
# Copyright (C) 1996-2007 Andy Wardley. All Rights Reserved.
#
# This module is free software; you can redistribute it and/or
# modify it under the same terms as Perl itself.
#
#========================================================================
=head1 NAME
Template::Modules - Template Toolkit Modules
=head1 Template Toolkit Modules
This documentation provides an overview of the different modules that
comprise the Template Toolkit.
=head2 Template
The L<Template> module is the front-end to the Template Toolkit for
Perl programmers.
use Template;
my $tt = Template->new();
$tt->process('hello.html', message => 'Hello World');
=head2 Template::Base
The L<Template::Base> module implements a base class from which the other
Template Toolkit modules are derived. It implements common functionality
for creating objects, error reporting, debugging, and so on.
=head2 Template::Config
The L<Template::Config> module defines the configuration of the Template
Toolkit for your system. It is an example of a I<factory module> which is
responsible for instantiating the various other modules used in the Template
Toolkit.
For example, the L<Template::Config> module defines the C<$STASH> package
variable which indicates which version of the L<Template::Stash> you are
using by default. If you elected to use the faster L<XS|Template::Stash::XS>
stash when you installed the Template Toolkit, then this will be set as:
$STASH = 'Template::Stash::XS';
Otherwise you'll get the regular L<Perl|Template::Stash> stash:
$STASH = 'Template::Stash';
This approach means that other parts of the Template Toolkit don't have to
worry about which stash you're using. They just ask the L<Template::Config>
module to create a stash of the right kind.
=head2 Template::Constants
The L<Template::Constants> defines a number of constants that are used by
the Template Toolkit.
For example, the C<:chomp> tagset defines the C<CHOMP_???> constants that
can be used with the C<PRE_CHOMP> and C<POST_CHOMP> configuration options.
use Template::Constants ':chomp';
my $tt = Template->new({
PRE_CHOMP => CHOMP_COLLAPSE,
});
=head2 Template::Context
The L<Template::Context> module defines a runtime context in which templates
are processed. A context keeps track of all the templates, variables, plugins,
and other resources that are available (either directly or through delegate
objects) and provides methods to fetch, store, and perform various operations
on them.
=head2 Template::Document
The L<Template::Document> module implements a compiled template document
object. This is generated by the L<Template::Parser> module.
=head2 Template::Exception
The L<Template::Exception> module implements an exception object which
is used for runtime error reporting.
=head2 Template::Filters
The L<Template::Filters> module implements a filter provider. It includes
the core collection of filters that can be used via the C<FILTER> directive.
=head2 Template::Iterator
The L<Template::Iterator> module implements a data iterator which steps
through each item in a list in turn. It is used by the C<FOREACH> directive.
Within a C<FOREACH> block, the C<loop> variable always references the
current iterator object.
[% FOREACH item IN list;
IF loop.first;
# first item in loop
ELSIF loop.last;
# last item in loop
ELSE;
# any other item in loop
END;
END
%]
=head2 Template::Namespace::Constants
The L<Template::Namespace::Constants> module is used internally to represent
constants. These can be resolved immediately at the point that a template is
compiled.
=head2 Template::Parser
The L<Template::Parser> module is used to parse a source template and turn it
into Perl code which can be executed.
=head2 Template::Plugin
The L<Template::Plugin> module is a base class for Template Toolkit plugins
that can be loaded on demand from within a template using the C<USE> directive.
=head2 Template::Plugins
The L<Template::Plugins> module is the plugins provider. It loads and prepares
plugins as and when they are requested from within a template.
=head2 Template::Provider
The L<Template::Provider> module is responsible for loading, compiling and
caching templates.
=head2 Template::Service
The L<Template::Service> module implements a service layer that sits just
behind the L<Template> module, and just in front of a L<Template::Context>. It
handles each request to process a template (forwarded from the L<Template>
module). It adds any headers and/or footers (specified via the C<PRE_PROCESS>
and C<POST_PROCESS> options), applies any wrapper (the C<WRAPPER> option) and
catches any errors returned (the C<ERROR> option).
=head2 Template::Stash
The L<Template::Stash> module is used to fetch and store template variables.
It implements all of the magic associated with the dot operator.
=head2 Template::Stash::XS
The L<Template::Stash::XS> module is a high-speed implementation of
L<Template::Stash> written in C.
=head2 Template::Test
The L<Template::Test> module is used to automate the Template Toolkit
test scripts.
=cut
# Local Variables:
# mode: perl
# perl-indent-level: 4
# indent-tabs-mode: nil
# End:
#
# vim: expandtab shiftwidth=4:
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/Template/Modules.pod | Perl | mit | 5,503 |
#!/usr/bin/perl -w
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Michael J. Wouters
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Extracts a single RINEX observation type from a RINEX file
# Modification history
# 2015-08-20 MJW First version
# 2016-08-09 MJW Handle more than 12 observations properly. Handle blank entries in RINEX observation records
# 2017-02-07 MJW Bug fixes for multiline observations with missing observations.
# 2018-05-15 MJW Extract more than one observation
#
# Usage: xrnxobs.pl observation_name[,observation_name,...] rinex_obs_file
# Reads a RINEX v2 observation file, rewrites the header so that it declares
# only the requested observation types, and emits only those observation
# columns for each satellite record. Output goes to STDOUT. RINEX v3 input is
# detected but rejected.
use POSIX qw(floor ceil);
# NOTE(review): $fin is read before the argument-count check below, so with
# too few arguments it is simply undef until the usage message exits.
$fin=$ARGV[1];
if ($#ARGV!=1){
print "Usage: xrnxobs.pl observation_name[,observation_name,...] rinex_obs_file\n";
exit;
}
# NOTE(review): 2-arg open with no error check; a missing file silently
# produces empty output.
open(IN,"<$fin");
# First header line carries the RINEX version number; echo it unchanged.
$line=<IN>;
print $line;
@verinfo = split /\s+/,$line;
$rnxver=0;
if ($verinfo[1] =~ /2\.\d+/){
$rnxver=2;
}
elsif ($verinfo[1] =~ /3\.\d+/){
$rnxver=3;
print "Sorry,RINEX V3 is not supported yet.\n";
exit;
}
if ($rnxver == 0){
print "Can't determine the RINEX version.\n";
exit;
}
# Requested observation names, comma-separated on the command line.
@obs = split /,/,$ARGV[0];
# Header pass: copy all header lines through, but rewrite the
# "# / TYPES OF OBSERV" line to list only the requested observations.
while ($line=<IN>){
if ($line=~/TYPES OF OBSERV/){
for ($i=0;$i<=$#obs;$i++){
if (!($line =~ /$obs[$i]/)){
print "The observation $obs[$i] is not present\n";
exit;
}
}
# First field is the observation count, then the type mnemonics.
@obstypes= split " ",( substr $line,0,60);
$nobs=$obstypes[0];
# Record the zero-based column index of each requested observation.
for ($i=0;$i<=$#obs;$i++){
for ($n=1;$n<=$nobs;$n++){
if ($obstypes[$n] eq $obs[$i]){
$obscol[$i]=$n-1;
last;
}
}
}
# Emit the replacement header line, padded to the 60-char field width.
printf("%6d",$#obs + 1);
for ($i=0;$i<=$#obs;$i++){
printf("%4s%2s"," ",$obs[$i]);
}
printf(" " x (60 - ($#obs+1+1)*6));
printf("# / TYPES OF OBSERV \n");
}
else{
print $line;
}
last if ($line =~/END OF HEADER/);
}
# Data pass: for each epoch record, echo the epoch line(s) and then emit only
# the requested observation columns for each satellite.
while ($line=<IN>){
if ($rnxver == 2){
print $line;
next if $line=~/COMMENT/;
# Epoch lines are recognised by GPS/GLONASS SV identifiers (G/R).
next if (!($line=~/G|R/));
$line =~ s/^\s+//;
chomp $line;
@obsinfo = split " ",$line;
$svnlist = $obsinfo[7];
@svns = split /G|R/,$svnlist;
# More than 12 SVs: the SV list continues on extra epoch lines.
if ($svns[0] > 12){
$todo= floor($svns[0]/12);
for ($i=0;$i<$todo;$i++){
$line=<IN>; # lazy
print $line;
chomp $line;
$line =~ s/^\s+//;
$svnlist .= $line;
}
@svns = split /G|R/,$svnlist;
}
for ($i=0;$i<$svns[0];$i++){
$nlines = ceil($nobs/5); # could be more than one line of measurements per SV
$line = "";
for ($l=0;$l<$nlines;$l++){ # so read lines, concatenating them
$ll=<IN>;
chomp $ll;
# To simplify parsing, pad each line out to 80 characters
$ll = sprintf("%-80s",$ll);
$line .= $ll;
}
# Decompose the string, FORTRAN-like
$len = length($line);
# Each field should be 16 characters long
# Round up the length to the next multiple of 16
$nfields = ceil($len/16);
for ($o=0;$o<=$#obs;$o++){
for ($f=0;$f<$nfields;$f++){
$val = substr $line, $f*16, (($f+1)*16<=$len?16:$len-$f*16);
if ($f == $obscol[$o]){
# Is it a number ?
if ($val =~/\d+\.\d+/){
printf "%16s",$val;
}
else{ # nahh
printf "%16s"," "; # blank it
}
last;
}
}
}
printf("\n");
}
}
elsif ($rnxver == 3){
}
}
close(IN);
| openttp/openttp | software/gpscv/common/bin/xrnxobs.pl | Perl | mit | 4,036 |
#!/usr/bin/perl
# Mojolicious::Lite web service: proxies a Z39.50 search (via ZOOM) against a
# configured bibliographic base and returns the matching records as MARCXML.
# Query params: base (required, key into config.yaml), one of
# isbn/ean/title/author, optional format and maxRecords.
use ZOOM;
use MARC::Record;
#use MARC::File::XML;
use MARC::File::XML ( BinaryEncoding => 'utf8', RecordFormat => 'UNIMARC' );
use MARC::File::USMARC;
use YAML qw(LoadFile);
use Data::Dumper;
use Mojolicious::Lite;
# NOTE(review): Switch.pm is a deprecated source filter; consider replacing
# with a plain if/elsif chain or a dispatch table.
use Switch;
# read config
my $config = YAML::LoadFile('config.yaml');
#z3950 queries
# PQF "use attribute" prefixes for the supported access points.
use constant {
TITLE => "\@attr 1=4",
ISBN => "\@attr 1=7",
AUTHOR => "\@attr 1=1003",
EAN => "\@attr 1=1016",
};
get '/' => sub {
my $self = shift;
my $base = $self->param('base') || return $self->render(text => 'Missing base param!', status => 400);
my $format = $self->param('format') || 'USMARC';
my $maxRecords = $self->param('maxRecords') || 10;
return $self->render(text => 'Invalid base supplied!', status => 400) unless (exists $config->{bases}->{$base});
# building query
my %query = ();
$query{'isbn'} = $self->param('isbn') if $self->param('isbn');
$query{'ean'} = $self->param('ean') if $self->param('ean');
$query{'title'} = $self->param('title') if $self->param('title');
$query{'author'} = $self->param('author') if $self->param('author');
# Query handling
# NOTE(review): switch() over a hash relies on Switch.pm's membership
# semantics; only one criterion ends up in $querystr even if several were
# supplied — verify this matches the intended precedence.
my $querystr;
switch (%query) {
case 'isbn' { $querystr = "@{[ISBN]} $query{isbn}" }
case 'ean' { $querystr = "@{[EAN]} $query{ean}" }
case 'title' { $querystr = "@{[TITLE]} \"$query{title}\"" }
case 'author' { $querystr = "@{[AUTHOR]} \"$query{author}\"" }
}
return $self->render(text => 'No valid query params given!', status => 400) unless ($querystr);
# connecting to external base
# NOTE(review): indirect-object syntax; ZOOM::Connection->new(...) is safer.
my $conn = new ZOOM::Connection($config->{bases}->{$base}->{host},
$config->{bases}->{$base}->{port},
databaseName => $config->{bases}->{$base}->{db},
preferredRecordSyntax => $format,
user => $config->{bases}->{$base}->{user},
pass => $config->{bases}->{$base}->{pass},
charset => "UTF-8"); # request unicode encoding
$self->app->log->debug("Logged in to server $config->{bases}->{$base}->{host}: "
. $conn->option("serverImplementationName"));
if ($conn->errcode() != 0) {
$self->app->log->error("Error connecting to external base: " . $conn->errmsg() );
# NOTE(review): single-quoted '\n' below is a literal backslash-n, not a
# newline — confirm whether that is intended in the response body.
return $self->render(text => 'Error connecting to external base:\n' . $conn->errmsg(), status => 500);
}
my $rs = $conn->search_pqf($querystr);
my $n = $rs->size();
$self->app->log->debug("Querystring: $querystr.");
$self->app->log->debug("Number of records found: $n. maxRecords: $maxRecords");
return $self->render(text => 'No records found.', status => 404) unless ($n);
# Convert each raw USMARC record to MARCXML, up to maxRecords.
my $xml = MARC::File::XML::header();
for my $i (1 .. $n) {
my $rec = $rs->record($i-1);
my $raw = $rec->raw();
# force leader pos 09 to be 'a', meaning that encoding is indeed unicode, as requested above
substr($raw, 9, 1, 'a');
my $marc = MARC::Record->new_from_usmarc($raw);
$xml .= MARC::File::XML::record( $marc );
last if $i >= $maxRecords;
}
$xml .= MARC::File::XML::footer();
#print $xml;
$conn->destroy();
$self->render(text => $xml, status => 200, format => 'xml');
};
app->types->type(xml => 'application/xml; charset=UTF-8');
app->secrets($config->{appsecret});
app->log->level('error');
app->start;
| digibib/api4marc | api4marc.pl | Perl | mit | 3,282 |
package Devel::NYTProf::SubInfo; # sub_subinfo
use strict;
use warnings;
use Carp;
use List::Util qw(sum min max);
use Data::Dumper;
use Devel::NYTProf::Util qw(
trace_level
);
use Devel::NYTProf::Constants qw(
NYTP_SIi_FID NYTP_SIi_FIRST_LINE NYTP_SIi_LAST_LINE
NYTP_SIi_CALL_COUNT NYTP_SIi_INCL_RTIME NYTP_SIi_EXCL_RTIME
NYTP_SIi_SUB_NAME NYTP_SIi_PROFILE
NYTP_SIi_REC_DEPTH NYTP_SIi_RECI_RTIME NYTP_SIi_CALLED_BY
NYTP_SIi_elements
NYTP_SCi_CALL_COUNT
NYTP_SCi_INCL_RTIME NYTP_SCi_EXCL_RTIME NYTP_SCi_RECI_RTIME
NYTP_SCi_REC_DEPTH NYTP_SCi_CALLING_SUB
NYTP_SCi_elements
);
# extra constants for private elements
use constant {
NYTP_SIi_meta => NYTP_SIi_elements + 1,
NYTP_SIi_cache => NYTP_SIi_elements + 2,
};
# Simple read-only accessors over the sub-info array; the indices are the
# NYTP_SIi_* constants imported above. fid() falls back to 0 when the slot
# is false (sub has no known file id).
sub fid { shift->[NYTP_SIi_FID] || 0 }
sub first_line { shift->[NYTP_SIi_FIRST_LINE] }
sub last_line { shift->[NYTP_SIi_LAST_LINE] }
sub calls { shift->[NYTP_SIi_CALL_COUNT] }
sub incl_time { shift->[NYTP_SIi_INCL_RTIME] }
sub excl_time { shift->[NYTP_SIi_EXCL_RTIME] }
sub subname { shift->[NYTP_SIi_SUB_NAME] }
# Sub name with every leading package part stripped (greedy, so only the
# final component after the last '::' remains).
sub subname_without_package {
my $subname = shift->[NYTP_SIi_SUB_NAME];
$subname =~ s/.*:://;
return $subname;
}
sub profile { shift->[NYTP_SIi_PROFILE] }
# Package part of the sub name (everything before the last '::').
sub package { (my $pkg = shift->subname) =~ s/^(.*)::.*/$1/; return $pkg }
sub recur_max_depth { shift->[NYTP_SIi_REC_DEPTH] }
sub recur_incl_time { shift->[NYTP_SIi_RECI_RTIME] }
# general purpose hash - mainly a hack to help kill off Reader.pm
sub meta { shift->[NYTP_SIi_meta()] ||= {} }
# general purpose cache
sub cache { shift->[NYTP_SIi_cache()] ||= {} }
# { fid => { line => [ count, incl_time ] } }
# Returns { fid => { line => [ count, incl_time, ... ] } } describing where
# this sub was called from. The $merge_evals flag is accepted but not yet
# implemented (warns if set).
sub caller_fid_line_places {
my ($self, $merge_evals) = @_;
carp "caller_fid_line_places doesn't merge evals yet" if $merge_evals;
# shallow clone to remove fid 0 is_sub hack
my %tmp = %{ $self->[NYTP_SIi_CALLED_BY] || {} };
delete $tmp{0};
return \%tmp;
}
# Returns a hashref whose keys are the names of all subs that called this
# one (values are undef; only key existence matters).
sub called_by_subnames {
my ($self) = @_;
my $callers = $self->caller_fid_line_places || {};
my %subnames;
for my $sc (map { values %$_ } values %$callers) {
my $caller_subnames = $sc->[NYTP_SCi_CALLING_SUB];
@subnames{ keys %$caller_subnames } = (); # viv keys
}
return \%subnames;
}
# True if this sub looks like an XS sub (first and last line both 0);
# undef if the first line is unknown.
sub is_xsub {
my $self = shift;
# XXX should test == 0 but some xsubs still have undef first_line etc
# XXX shouldn't include opcode
my $first = $self->first_line;
return undef if not defined $first;
return 1 if $first == 0 && $self->last_line == 0;
return 0;
}
# True if this entry represents a perl opcode (CORE::... name, no lines).
sub is_opcode {
my $self = shift;
return 0 if $self->first_line or $self->last_line;
return 1 if $self->subname =~ m/(?:^CORE::|::CORE:)\w+$/;
return 0;
}
# True if the sub name contains an __ANON__ component.
sub is_anon {
shift->subname =~ m/::__ANON__\b/;
}
# Coarse classification used for reporting: 'opcode', 'xsub' or 'perl'.
sub kind {
my $self = shift;
return 'opcode' if $self->is_opcode;
return 'xsub' if $self->is_xsub;
return 'perl';
}
# FileInfo object for the file this sub is defined in, or undef if the
# sub has no known fid.
sub fileinfo {
my $self = shift;
my $fid = $self->fid;
if (!$fid) {
return undef; # sub not have a known fid
}
$self->profile->fileinfo_of($fid);
}
sub clone { # shallow
my $self = shift;
return bless [ @$self ] => ref $self;
}
# Undef-tolerant minimum. A defined value always beats an undefined one;
# the result is undef only when both arguments are undef.
sub _min {
    my ($x, $y) = @_;
    return $y if not defined $x;   # covers both-undef case too
    return $x if not defined $y;
    return min($x, $y);
}
# Undef-tolerant maximum. A defined value always beats an undefined one;
# the result is undef only when both arguments are undef.
sub _max {
    my ($x, $y) = @_;
    return $y if not defined $x;   # covers both-undef case too
    return $x if not defined $y;
    return max($x, $y);
}
# Move this sub's "defined in" association from one FileInfo to another.
# Either argument may be undef (treated as fid 0). Only acts when the sub's
# current fid matches $remove_fi.
sub _alter_fileinfo {
my ($self, $remove_fi, $new_fi) = @_;
my $remove_fid = ($remove_fi) ? $remove_fi->fid : 0;
my $new_fid = ( $new_fi) ? $new_fi->fid : 0;
if ($self->fid == $remove_fid) {
$self->[NYTP_SIi_FID] = $new_fid;
$remove_fi->_remove_sub_defined($self) if $remove_fi;
$new_fi->_add_new_sub_defined($self) if $new_fi;
}
}
# Re-attribute "called from" records from one file to another: call-site
# entries under $remove_fi's fid are deleted and, if $new_fi is given,
# merged into the entries under its fid.
sub _alter_called_by_fileinfo {
my ($self, $remove_fi, $new_fi) = @_;
my $remove_fid = ($remove_fi) ? $remove_fi->fid : 0;
my $new_fid = ( $new_fi) ? $new_fi->fid : 0;
# remove mentions of $remove_fid from called-by details
# { fid => { line => [ count, incl, excl, ... ] } }
if (my $called_by = $self->[NYTP_SIi_CALLED_BY]) {
my $cb = delete $called_by->{$remove_fid};
if ($cb && $new_fid) {
my $new_cb = $called_by->{$new_fid} ||= {};
warn sprintf "_alter_called_by_fileinfo: %s from fid %d to fid %d\n",
$self->subname, $remove_fid, $new_fid
if trace_level() >= 4;
# merge $cb into $new_cb
while ( my ($line, $cb_li) = each %$cb ) {
my $dst_line_info = $new_cb->{$line} ||= [];
_merge_in_caller_info($dst_line_info, delete $cb->{$line},
tag => "$line:".$self->subname,
);
}
}
}
}
# merge details of another sub into this one
# there are very few cases where this is sane thing to do
# it's meant for merging things like anon-subs in evals
# e.g., "PPI::Node::__ANON__[(eval 286)[PPI/Node.pm:642]:4]"
# Merge the accumulated profile data of $donor into this sub-info:
# line ranges, call counts, times, recursion depth and per-call-site
# records are combined. The donor's name is remembered in meta so reports
# can show what was merged. %opts: src_keep => 1 leaves the donor's
# call-site records in place instead of deleting them as they are merged.
sub merge_in {
my ($self, $donor, %opts) = @_;
my $self_subname = $self->subname;
my $donor_subname = $donor->subname;
warn sprintf "Merging sub %s into %s (%s)\n",
$donor_subname, $self_subname, join(" ", %opts)
if trace_level() >= 4;
# see also "case NYTP_TAG_SUB_CALLERS:" in load_profile_data_from_stream()
push @{ $self->meta->{merged_sub_names} }, $donor->subname;
# Widen the line range to cover both subs; sum the counters.
$self->[NYTP_SIi_FIRST_LINE] = _min($self->[NYTP_SIi_FIRST_LINE], $donor->[NYTP_SIi_FIRST_LINE]);
$self->[NYTP_SIi_LAST_LINE] = _max($self->[NYTP_SIi_LAST_LINE], $donor->[NYTP_SIi_LAST_LINE]);
$self->[NYTP_SIi_CALL_COUNT] += $donor->[NYTP_SIi_CALL_COUNT];
$self->[NYTP_SIi_INCL_RTIME] += $donor->[NYTP_SIi_INCL_RTIME];
$self->[NYTP_SIi_EXCL_RTIME] += $donor->[NYTP_SIi_EXCL_RTIME];
$self->[NYTP_SIi_REC_DEPTH] = max($self->[NYTP_SIi_REC_DEPTH], $donor->[NYTP_SIi_REC_DEPTH]);
# adding reci_rtime is correct only if one sub doesn't call the other
$self->[NYTP_SIi_RECI_RTIME] += $donor->[NYTP_SIi_RECI_RTIME]; # XXX
# { fid => { line => [ count, incl_time, ... ] } }
my $dst_called_by = $self ->[NYTP_SIi_CALLED_BY] ||= {};
my $src_called_by = $donor->[NYTP_SIi_CALLED_BY] || {};
$opts{opts} ||= "merge in $donor_subname";
# iterate over src and merge into dst
while (my ($fid, $src_line_hash) = each %$src_called_by) {
my $dst_line_hash = $dst_called_by->{$fid};
# merge lines in %$src_line_hash into %$dst_line_hash
for my $line (keys %$src_line_hash) {
my $dst_line_info = $dst_line_hash->{$line} ||= [];
my $src_line_info = $src_line_hash->{$line};
delete $src_line_hash->{$line} unless $opts{src_keep};
_merge_in_caller_info($dst_line_info, $src_line_info, %opts);
}
}
return;
}
# Merge one call-site record (NYTP_SCi_* array) into another: counts and
# times are summed, recursion depths are max'd, and the calling-sub name
# sets are unioned. $dst_line_info is initialised to zeros if empty.
# %opts: tag => string used only to label trace/warn output.
sub _merge_in_caller_info {
my ($dst_line_info, $src_line_info, %opts) = @_;
my $tag = ($opts{tag}) ? " $opts{tag}" : "";
if (!@$src_line_info) {
carp sprintf "_merge_in_caller_info%s skipped (empty donor)", $tag
if trace_level();
return;
}
if (trace_level() >= 5) {
carp sprintf "_merge_in_caller_info%s merging from $src_line_info -> $dst_line_info:", $tag;
warn sprintf " . %s\n", _fmt_sc($src_line_info);
warn sprintf " + %s\n", _fmt_sc($dst_line_info);
}
if (!@$dst_line_info) {
@$dst_line_info = (0) x NYTP_SCi_elements;
$dst_line_info->[NYTP_SCi_CALLING_SUB] = undef;
}
# merge @$src_line_info into @$dst_line_info
$dst_line_info->[$_] += $src_line_info->[$_] for (
NYTP_SCi_CALL_COUNT, NYTP_SCi_INCL_RTIME, NYTP_SCi_EXCL_RTIME,
);
$dst_line_info->[NYTP_SCi_REC_DEPTH] = max($dst_line_info->[NYTP_SCi_REC_DEPTH],
$src_line_info->[NYTP_SCi_REC_DEPTH]);
# ug, we can't really combine recursive incl_time, but this is better than undef
$dst_line_info->[NYTP_SCi_RECI_RTIME] = max($dst_line_info->[NYTP_SCi_RECI_RTIME],
$src_line_info->[NYTP_SCi_RECI_RTIME]);
my $src_cs = $src_line_info->[NYTP_SCi_CALLING_SUB]|| {};
my $dst_cs = $dst_line_info->[NYTP_SCi_CALLING_SUB]||={};
$dst_cs->{$_} = $src_cs->{$_} for keys %$src_cs;
warn sprintf " = %s\n", _fmt_sc($dst_line_info)
if trace_level() >= 5;
return;
}
# Format a call-site record for trace output, e.g. "calls 3, by Foo::bar".
sub _fmt_sc {
my ($sc) = @_;
return "(empty)" if !@$sc;
my $dst_cs = $sc->[NYTP_SCi_CALLING_SUB]||{};
my $by = join " & ", sort keys %$dst_cs;
sprintf "calls %d%s",
$sc->[NYTP_SCi_CALL_COUNT], ($by) ? ", by $by" : "";
}
# List of fids this sub was called from (count in scalar context).
sub caller_fids {
my ($self, $merge_evals) = @_;
my $callers = $self->caller_fid_line_places($merge_evals) || {};
my @fids = keys %$callers;
return @fids; # count in scalar context
}
sub caller_count { return scalar shift->caller_places; } # XXX deprecate later
# array of [ $fid, $line, $sub_call_info ], ...
# Call sites ordered by fid then line number.
sub caller_places {
my ($self, $merge_evals) = @_;
my $callers = $self->caller_fid_line_places || {};
my @callers;
for my $fid (sort { $a <=> $b } keys %$callers) {
my $lines_hash = $callers->{$fid};
for my $line (sort { $a <=> $b } keys %$lines_hash) {
push @callers, [ $fid, $line, $lines_hash->{$line} ];
}
}
return @callers; # scalar: number of distinct calling locations
}
# Make profile data deterministic for the test suite: zero all timings,
# normalize "(eval N)" sequence numbers in anon-sub names to "(eval 0)",
# and renumber call-site lines from AutoLoader/Exporter (whose line numbers
# vary across perl versions) to 1,2,3,... Returns the (possibly rewritten)
# sub name. NYTPROF_TEST_SKIP_EVAL_NORM disables the eval normalization.
sub normalize_for_test {
my $self = shift;
my $profile = $self->profile;
# normalize eval sequence numbers in anon sub names to 0
$self->[NYTP_SIi_SUB_NAME] =~ s/ \( ((?:re_)?) eval \s \d+ \) /(${1}eval 0)/xg
if $self->[NYTP_SIi_SUB_NAME] =~ m/__ANON__/
&& not $ENV{NYTPROF_TEST_SKIP_EVAL_NORM};
# zero subroutine inclusive time
$self->[NYTP_SIi_INCL_RTIME] = 0;
$self->[NYTP_SIi_EXCL_RTIME] = 0;
$self->[NYTP_SIi_RECI_RTIME] = 0;
# { fid => { line => [ count, incl, excl, ... ] } }
my $callers = $self->[NYTP_SIi_CALLED_BY] || {};
# calls from modules shipped with perl cause problems for tests
# because the line numbers vary between perl versions, so here we
# edit the line number of calls from these modules
for my $fid (keys %$callers) {
next if not $fid;
my $fileinfo = $profile->fileinfo_of($fid) or next;
next if $fileinfo->filename !~ /(AutoLoader|Exporter)\.pm$/;
# normalize the lines X,Y,Z to 1,2,3
my %lines = %{ delete $callers->{$fid} };
my @lines = @lines{sort { $a <=> $b } keys %lines};
$callers->{$fid} = { map { $_ => shift @lines } 1..@lines };
}
for my $sc (map { values %$_ } values %$callers) {
# zero per-call-location subroutine inclusive time
$sc->[NYTP_SCi_INCL_RTIME] =
$sc->[NYTP_SCi_EXCL_RTIME] =
$sc->[NYTP_SCi_RECI_RTIME] = 0;
if (not $ENV{NYTPROF_TEST_SKIP_EVAL_NORM}) {
# normalize eval sequence numbers in anon sub names to 0
my $names = $sc->[NYTP_SCi_CALLING_SUB]||{};
for my $subname (keys %$names) {
(my $newname = $subname) =~ s/ \( ((?:re_)?) eval \s \d+ \) /(${1}eval 0)/xg;
next if $newname eq $subname;
warn "Normalizing $subname to $newname overwrote other calling-sub data\n"
if $names->{$newname};
$names->{$newname} = delete $names->{$subname};
}
}
}
return $self->[NYTP_SIi_SUB_NAME];
}
# Write a human-readable dump of this sub-info to $fh: a summary line, one
# "called_by" line per call site, and one "merge_donor" line per merged sub.
# $separator joins fields; $prefix starts every line. NOTE(review): $path is
# accepted but never used here — confirm whether callers rely on it.
# (The name shadows the CORE::dump builtin; calls must be method calls.)
sub dump {
my ($self, $separator, $fh, $path, $prefix) = @_;
my ($fid, $l1, $l2, $calls) = @{$self}[
NYTP_SIi_FID, NYTP_SIi_FIRST_LINE, NYTP_SIi_LAST_LINE, NYTP_SIi_CALL_COUNT
];
my @values = @{$self}[
NYTP_SIi_INCL_RTIME, NYTP_SIi_EXCL_RTIME,
NYTP_SIi_REC_DEPTH, NYTP_SIi_RECI_RTIME
];
printf $fh "%s[ %s:%s-%s calls %s times %s ]\n",
$prefix,
map({ defined($_) ? $_ : 'undef' } $fid, $l1, $l2, $calls),
join(" ", map { defined($_) ? $_ : 'undef' } @values);
my @caller_places = $self->caller_places;
for my $cp (@caller_places) {
my ($fid, $line, $sc) = @$cp;
my @sc = @$sc;
# Flatten the calling-sub name set into a |-separated string for output.
$sc[NYTP_SCi_CALLING_SUB] = join "|", keys %{ $sc[NYTP_SCi_CALLING_SUB] };
printf $fh "%s%s%s%d:%d%s[ %s ]\n",
$prefix,
'called_by', $separator,
$fid, $line, $separator,
join(" ", map { defined($_) ? $_ : 'undef' } @sc);
}
# where a sub has had others merged into it, list them
my $merge_subs = $self->meta->{merged_sub_names} || [];
for my $ms (sort @$merge_subs) {
printf $fh "%s%s%s%s\n",
$prefix, 'merge_donor', $separator, $ms;
}
}
# vim:ts=8:sw=4:et
1;
| amidoimidazol/bio_info | Beginning Perl for Bioinformatics/lib/Devel/NYTProf/SubInfo.pm | Perl | mit | 13,101 |
package ExtUtils::MM_NW5;
=head1 NAME
ExtUtils::MM_NW5 - methods to override UN*X behaviour in ExtUtils::MakeMaker
=head1 SYNOPSIS
use ExtUtils::MM_NW5; # Done internally by ExtUtils::MakeMaker if needed
=head1 DESCRIPTION
See ExtUtils::MM_Unix for a documentation of the methods provided
there. This package overrides the implementation of these methods, not
the semantics.
=over
=cut
use strict;
use ExtUtils::MakeMaker::Config;
use File::Basename;
our $VERSION = '6.63_02';
require ExtUtils::MM_Win32;
our @ISA = qw(ExtUtils::MM_Win32);
use ExtUtils::MakeMaker qw( &neatvalue );
$ENV{EMXSHELL} = 'sh'; # to run `commands`
my $BORLAND = $Config{'cc'} =~ /\bbcc\b/i;
my $GCC = $Config{'cc'} =~ /\bgcc\b/i;
=item os_flavor
We're Netware in addition to being Windows.
=cut
# Platform identification: NetWare builds report both the inherited Win32
# flavours and 'Netware'.
sub os_flavor {
my $self = shift;
return ($self->SUPER::os_flavor, 'Netware');
}
=item init_platform
Add Netware macros.
LIBPTH, BASE_IMPORT, NLM_VERSION, MPKTOOL, TOOLPATH, BOOT_SYMBOL,
NLM_SHORT_NAME, INCLUDE, PATH, MM_NW5_REVISION
=item platform_constants
Add Netware macros initialized above to the Makefile.
=cut
# Initialise the NetWare-specific Makefile macros (LIBPTH, BASE_IMPORT,
# NLM_VERSION, MPKTOOL, TOOLPATH, BOOT_SYMBOL, NLM_SHORT_NAME, INCLUDE,
# PATH, MM_NW5_VERSION) on top of the Win32 setup done by the superclass.
sub init_platform {
my($self) = shift;
# To get Win32's setup.
$self->SUPER::init_platform;
# incpath is copied to makefile var INCLUDE in constants sub, here just
# make it empty
my $libpth = $Config{'libpth'};
# NOTE(review): no /g modifier — only the FIRST space in libpth becomes a
# ';'. Confirm whether all spaces were meant to be replaced.
$libpth =~ s( )(;);
$self->{'LIBPTH'} = $libpth;
$self->{'BASE_IMPORT'} = $Config{'base_import'};
# Additional import file specified from Makefile.pl
if($self->{'base_import'}) {
$self->{'BASE_IMPORT'} .= ', ' . $self->{'base_import'};
}
$self->{'NLM_VERSION'} = $Config{'nlm_version'};
$self->{'MPKTOOL'} = $Config{'mpktool'};
$self->{'TOOLPATH'} = $Config{'toolpath'};
# Bootstrap symbol: package separators are not valid in symbol names.
(my $boot = $self->{'NAME'}) =~ s/:/_/g;
$self->{'BOOT_SYMBOL'}=$boot;
# If the final binary name is greater than 8 chars,
# truncate it here.
if(length($self->{'BASEEXT'}) > 8) {
$self->{'NLM_SHORT_NAME'} = substr($self->{'BASEEXT'},0,8);
}
# Get the include path and replace the spaces with ;
# Copy this to makefile as INCLUDE = d:\...;d:\;
($self->{INCLUDE} = $Config{'incpath'}) =~ s/([ ]*)-I/;/g;
# Set the path to CodeWarrior binaries which might not have been set in
# any other place
$self->{PATH} = '$(PATH);$(TOOLPATH)';
$self->{MM_NW5_VERSION} = $VERSION;
}
# Emit the Makefile macro definitions for this platform: the Win32 set from
# the parent class followed by one "MACRO = value" line for each
# NetWare-specific macro that init_platform() actually defined.
sub platform_constants {
    my ($self) = @_;

    my @nw5_macros = qw(
        LIBPTH BASE_IMPORT NLM_VERSION MPKTOOL TOOLPATH BOOT_SYMBOL
        NLM_SHORT_NAME INCLUDE PATH MM_NW5_VERSION
    );

    # Start with the inherited (Win32) constants, then append ours.
    my $make_frag = $self->SUPER::platform_constants;
    $make_frag .= join '',
        map  { "$_ = $self->{$_}\n" }
        grep { defined $self->{$_} } @nw5_macros;

    return $make_frag;
}
=item const_cccmd
=cut
# Return (and cache in $self->{CONST_CCCMD}) the CCCMD Makefile fragment
# used to compile C sources; empty string when no linking is needed.
sub const_cccmd {
my($self,$libperl)=@_;
return $self->{CONST_CCCMD} if $self->{CONST_CCCMD};
return '' unless $self->needs_linking();
return $self->{CONST_CCCMD} = <<'MAKE_FRAG';
CCCMD = $(CC) $(CCFLAGS) $(INC) $(OPTIMIZE) \
$(PERLTYPE) $(MPOLLUTE) -o $@ \
-DVERSION=\"$(VERSION)\" -DXS_VERSION=\"$(XS_VERSION)\"
MAKE_FRAG
}
=item static_lib
=cut
# Build the Makefile rules that produce the static library
# ($(INST_STATIC)) from $(OBJECT), using the archiver flags appropriate to
# the detected compiler (Borland / GCC / CodeWarrior default).
sub static_lib {
my($self) = @_;
return '' unless $self->has_link_code;
my $m = <<'END';
$(INST_STATIC): $(OBJECT) $(MYEXTLIB) $(INST_ARCHAUTODIR)$(DFSEP).exists
$(RM_RF) $@
END
# If this extension has it's own library (eg SDBM_File)
# then copy that to $(INST_STATIC) and add $(OBJECT) into it.
# NOTE(review): the heredoc below is single-quoted, so "$self->{CP}" is
# emitted literally into the Makefile rather than interpolated — confirm
# whether $(CP) was intended here.
$m .= <<'END' if $self->{MYEXTLIB};
$self->{CP} $(MYEXTLIB) $@
END
# Archiver arguments differ per toolchain; %s below is filled via sprintf.
my $ar_arg;
if( $BORLAND ) {
$ar_arg = '$@ $(OBJECT:^"+")';
}
elsif( $GCC ) {
$ar_arg = '-ru $@ $(OBJECT)';
}
else {
$ar_arg = '-type library -o $@ $(OBJECT)';
}
$m .= sprintf <<'END', $ar_arg;
$(AR) %s
$(NOECHO) $(ECHO) "$(EXTRALIBS)" > $(INST_ARCHAUTODIR)\extralibs.ld
$(CHMOD) 755 $@
END
$m .= <<'END' if $self->{PERL_SRC};
$(NOECHO) $(ECHO) "$(EXTRALIBS)" >> $(PERL_SRC)\ext.libs
END
return $m;
}
=item dynamic_lib
Defines how to produce the *.so (or equivalent) files.
=cut
# Build the Makefile rules that produce the loadable NLM
# ($(INST_DYNAMIC)): generates the linker .def file, optional MPK xdc data,
# and handles NLM names longer than 8 characters via NLM_SHORT_NAME.
sub dynamic_lib {
my($self, %attribs) = @_;
return '' unless $self->needs_linking(); #might be because of a subdir
return '' unless $self->has_link_code;
my($otherldflags) = $attribs{OTHERLDFLAGS} || ($BORLAND ? 'c0d32.obj': '');
my($inst_dynamic_dep) = $attribs{INST_DYNAMIC_DEP} || "";
# NOTE(review): $ldfrom is assigned but never used below.
my($ldfrom) = '$(LDFROM)';
(my $boot = $self->{NAME}) =~ s/:/_/g;
# NOTE(review): this heredoc is single-quoted, so the '.$otherldflags.'
# and '.$inst_dynamic_dep.' fragments are emitted literally into the
# Makefile instead of being interpolated — confirm against the upstream
# source (which builds this string by concatenation).
my $m = <<'MAKE_FRAG';
# This section creates the dynamically loadable $(INST_DYNAMIC)
# from $(OBJECT) and possibly $(MYEXTLIB).
OTHERLDFLAGS = '.$otherldflags.'
INST_DYNAMIC_DEP = '.$inst_dynamic_dep.'
# Create xdc data for an MT safe NLM in case of mpk build
$(INST_DYNAMIC): $(OBJECT) $(MYEXTLIB) $(BOOTSTRAP) $(INST_ARCHAUTODIR)$(DFSEP).exists
$(NOECHO) $(ECHO) Export boot_$(BOOT_SYMBOL) > $(BASEEXT).def
$(NOECHO) $(ECHO) $(BASE_IMPORT) >> $(BASEEXT).def
$(NOECHO) $(ECHO) Import @$(PERL_INC)\perl.imp >> $(BASEEXT).def
MAKE_FRAG
# MPK (multi-processor kernel) builds need xdc thread-safety data.
if ( $self->{CCFLAGS} =~ m/ -DMPK_ON /) {
$m .= <<'MAKE_FRAG';
$(MPKTOOL) $(XDCFLAGS) $(BASEEXT).xdc
$(NOECHO) $(ECHO) xdcdata $(BASEEXT).xdc >> $(BASEEXT).def
MAKE_FRAG
}
# Reconstruct the X.Y.Z version.
my $version = join '.', map { sprintf "%d", $_ }
$] =~ /(\d)\.(\d{3})(\d{2})/;
$m .= sprintf ' $(LD) $(LDFLAGS) $(OBJECT:.obj=.obj) -desc "Perl %s Extension ($(BASEEXT)) XS_VERSION: $(XS_VERSION)" -nlmversion $(NLM_VERSION)', $version;
# Taking care of long names like FileHandle, ByteLoader, SDBM_File etc
if($self->{NLM_SHORT_NAME}) {
# In case of nlms with names exceeding 8 chars, build nlm in the
# current dir, rename and move to auto\lib.
$m .= q{ -o $(NLM_SHORT_NAME).$(DLEXT)}
} else {
$m .= q{ -o $(INST_AUTODIR)\\$(BASEEXT).$(DLEXT)}
}
# Add additional lib files if any (SDBM_File)
$m .= q{ $(MYEXTLIB) } if $self->{MYEXTLIB};
$m .= q{ $(PERL_INC)\Main.lib -commandfile $(BASEEXT).def}."\n";
if($self->{NLM_SHORT_NAME}) {
$m .= <<'MAKE_FRAG';
if exist $(INST_AUTODIR)\$(NLM_SHORT_NAME).$(DLEXT) del $(INST_AUTODIR)\$(NLM_SHORT_NAME).$(DLEXT)
move $(NLM_SHORT_NAME).$(DLEXT) $(INST_AUTODIR)
MAKE_FRAG
}
$m .= <<'MAKE_FRAG';
$(CHMOD) 755 $@
MAKE_FRAG
return $m;
}
1;
__END__
=back
=cut
| amidoimidazol/bio_info | Beginning Perl for Bioinformatics/lib/ExtUtils/MM_NW5.pm | Perl | mit | 6,571 |
# Copyright (c) 2008 ToI-Planning, All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# $Id$
package PreloadTemplate;
use strict;
# MT pre_run callback: preload template variables for the current blog on
# every app request, except in the tags-help mode.
sub pre_run {
my ($cb, $app) = @_;
if ($app->mode ne 'preload_template_tags_help') {
&_preload($app->blog);
}
}
# Build (once per request, keyed by blog id) the combined variable set from
# all 'preload'-type templates visible to $blog (system, website and blog
# scope), cache it via $app->request, and wrap MT::Template::build so every
# subsequent template build sees those variables in its stash.
sub _preload {
my $app = MT->instance;
my ($blog) = @_;
my $key = 'preloaded_params:' . ($blog ? $blog->id : 0);
# Scope chain: system (0), the blog itself, and its parent website.
my @blog_ids = (0);
if ($blog) {
push(@blog_ids, $blog->id);
if (my $website = $blog->website) {
push(@blog_ids, $website->id);
}
}
# Already preloaded during this request — nothing to do.
return 1 if $app->request($key);
require MT::Template;
my @tmpls = MT::Template->load(
{
'blog_id' => \@blog_ids,
'type' => 'preload',
},
{
sort => [
{ column => 'blog_id', desc => 'ASC' },
{ column => 'name', desc => 'ASC' },
],
},
);
my $all_param = {};
foreach my $tmpl (@tmpls) {
next unless $tmpl;
# Per-template param cache (note: this $key shadows the outer one).
my $key = 'preloaded_tmpl_params:' . $tmpl->id;
my $param = $app->request($key);
if (! $param) {
# Evaluating the template populates its param set as a side effect.
$tmpl->output;
$param = $tmpl->param;
$app->request($key, $param);
}
foreach my $k (keys(%$param)) {
$all_param->{$k} = $param->{$k};
}
# A template can stop later (lower-priority) templates from applying.
last if $tmpl->context->stash('preload_template_stop_propagation');
}
$app->request($key, $all_param);
require MT::Template;
# Monkey-patch MT::Template::build so the preloaded vars reach every
# saved (id-bearing) template built later in this request.
no warnings 'redefine';
my $template_build = \&MT::Template::build;
*MT::Template::build = sub {
my $tmpl = shift;
my $ctx = shift || $tmpl->context;
if ($tmpl->id) {
my $param = $app->request($key);
# NOTE(review): iterating (%$param) yields keys AND values, so
# values are also installed as stash keys — probably should be
# keys %$param. Confirm before changing.
foreach my $k (%$param) {
$ctx->{__stash}{vars}{$k} = $param->{$k};
}
}
$template_build->($tmpl, $ctx, @_);
};
}
# Fetch the per-request cache of preloaded variables for $blog (hashref;
# empty hashref when _preload has not run or found nothing).
sub _preload_param {
my $app = MT->instance;
my ($blog) = @_;
$app->request('preloaded_params:' . ($blog ? $blog->id : 0)) || {};
}
# MT post_load callback for template objects: ensure preloading has run,
# temporarily retype 'preload' templates to 'custom' during delete mode
# (so the stock delete handler accepts them), and attach the preloaded
# params to the loaded template.
sub post_load_template {
my ($cb, $obj) = @_;
my $app = MT->instance;
&_preload($obj->blog);
if (
$app->can('mode')
&& $app->mode eq 'delete'
&& $obj->type eq 'preload'
) {
$obj->type('custom');
}
$obj->param(&_preload_param($obj->blog));
}
# Template-param callback for the template listing screen: adds a
# "Preload Templates" section to template_type_loop (rows built via
# $hasher) and injects a quickfilter tab for it into the sidebar HTML.
sub param_list_template {
my ($cb, $app, $param, $tmpl) = @_;
my $plugin = MT->component('PreloadTemplate');
my $tmpl_loop = $param->{'template_type_loop'};
my $blog_id = $app->param('blog_id') || 0;
my $terms = { blog_id => $blog_id };
my $args = { sort => 'name' };
# Row builder invoked by $app->listing for each template object.
my $hasher = sub {
my ( $obj, $row ) = @_;
my $template_type;
my $type = $row->{type} || '';
# NOTE(review): "my $x = ... if COND" is undefined behaviour in Perl
# ($tblog may retain a stale value when the condition is false) —
# safer as: my $tblog; $tblog = MT::Blog->load(...) if ...;
my $tblog = MT::Blog->load( $obj->blog_id ) if $obj->blog_id;
$template_type = 'preload';
$row->{use_cache} = ( $tblog && $tblog->include_cache && ($obj->cache_expire_type || 0) != 0 ) ? 1 : 0;
$row->{use_ssi} = ( $tblog && $tblog->include_system && $obj->include_with_ssi ) ? 1 : 0;
$row->{template_type} = $template_type;
$row->{type} = 'entry' if $type eq 'individual';
my $published_url = $obj->published_url;
$row->{published_url} = $published_url if $published_url;
};
my $tmpl_type = 'preload';
$app->param('filter_key', 'preload_templates');
my $tmpl_param = {};
$terms->{type} = 'preload';
$tmpl_param = $app->listing(
{
type => 'template',
terms => $terms,
args => $args,
no_limit => 1,
no_html => 1,
code => $hasher,
}
);
$tmpl_param->{template_type} = $tmpl_type;
$tmpl_param->{template_type_label} = $plugin->translate("Preload Templates");
push @$tmpl_loop, $tmpl_param;
# Append the quickfilter tab markup to the existing sidebar HTML.
my $quickfilters = $tmpl->getElementById('quickfilters');
$quickfilters->innerHTML($quickfilters->innerHTML . <<__EOF__);
<ul>
<li id="preload-tab" class="first-child last-child">
<a class="first-child last-child" onclick="showTable('preload-listing');" href="#preload">@{[ $plugin->translate('Preload Templates') ]}</a>
</li>
</ul>
__EOF__
}
# Return the first token of $tmpl whose "name" attribute equals $name
# (case-insensitively); an empty list when nothing matches.
#
# FIX: the original ignored its $name parameter and always matched the
# literal 'listing_header'. Every current caller passes 'listing_header',
# so honouring the parameter is backward-compatible and makes the helper
# reusable for other tag names.
sub __getElementsByTagName {
    my $tmpl = shift;
    my ($name) = @_;
    my $wanted = lc $name;
    my $tokens = $tmpl->tokens;
    foreach my $t (@$tokens) {
        if (ref $t && ref $t->attributes && lc($t->getAttribute('name')) eq $wanted) {
            return $t;
        }
    }
    ();
}
# Insert this plugin's extra listing header right after the stock one on
# the template-table screen.
sub param_template_table {
    my ($cb, $app, $param, $tmpl) = @_;

    my $plugin   = MT->component('PreloadTemplate');
    my $addition = $plugin->load_tmpl('template_table.tmpl');

    my ($header_orig)   = __getElementsByTagName($tmpl,     'listing_header');
    my ($header_extra)  = __getElementsByTagName($addition, 'listing_header');
    $tmpl->insertAfter($header_extra, $header_orig);
}
# Adjust the template-edit screen for preload templates: force the type to
# 'preload' and replace the "useful links" sidebar entries.
sub param_edit_template {
    my ($cb, $app, $param, $tmpl) = @_;
    my $plugin = MT->component('PreloadTemplate');

    my $type = $param->{'type'};
    if (my $id = $app->param('id')) {
        # FIX: renamed the inner lexical; it used to shadow $tmpl (the page
        # template object used below), inviting confusion.
        if (my $tmpl_obj = MT->model('template')->load($id)) {
            $type = $tmpl_obj->type;
        }
    }
    # FIX: default missing values to '' so the string comparisons cannot
    # raise "uninitialized value" warnings when 'subtype'/'type' are absent.
    $type = '' unless defined $type;
    my $subtype = $app->param('subtype');
    $subtype = '' unless defined $subtype;
    return if $type ne 'preload' && $subtype ne 'preload';

    $param->{'type'} = 'preload';
    my $links = $tmpl->getElementById('useful-links');
    $links->innerHTML(<<__EOH__);
<li><a href="<mt:var name="script_url">?__mode=list_template&blog_id=<mt:var name="blog_id">#preload" class="icon-left icon-related"><__trans phrase="List [_1] templates" params="@{[ $plugin->translate('Preload') ]}"></a></li>
<li><a href="<mt:var name="script_url">?__mode=list_template&blog_id=<mt:var name="blog_id">" class="icon-left icon-related"><__trans phrase="List all templates"></a></li>
__EOH__
}
# Handler for the tag-defining template tag: register a new block tag
# (named by the "name" attribute) whose body is the token stream captured
# at definition time. Optionally records per-tag help text.
sub _hdlr_define_tag {
my ($ctx, $args, $cond) = @_;
my $app = MT->instance;
my $plugin = MT->component('PreloadTemplate');
my $name = $args->{'name'}
or return $ctx->error("No name");
# MT tag lookup is case-insensitive, so register the lowercased name.
my $tag_name = lc($name);
my $tokens = $ctx->stash('tokens');
my $tags = $plugin->registry('tags');
if (my $help = $args->{help}) {
my $tmpl = $ctx->stash('template');
# %t is later substituted with the tag name by tags_help().
$tags->{'help_url'} = $app->config->CGIPath . $app->config->AdminScript . '?__mode=preload_template_tags_help&template_id=' . $tmpl->id . '&tag=%t';
# Lazily create the helps stash on first use.
my $helps = $ctx->stash('preload_template_helps')
|| $ctx->stash('preload_template_helps', {});
$helps->{$tag_name} = $help;
}
# Register the handler both in the plugin registry and directly in the
# live context, so the tag is usable in the same template run.
$tags->{'block'}{$tag_name} = $ctx->{__handlers}{$tag_name} = sub {
my ($ctx, $args, $cond) = @_;
local $ctx->{__stash}{tokens} = $tokens;
# NOTE(review): $ctx is also passed as an explicit first argument even
# though slurp() is invoked as a method -- confirm this matches the
# expected signature of MT::Template::Context::slurp.
$ctx->slurp($ctx, $args);
};
}
# Template tag handler: flag the current context so that preloading stops
# evaluating any further preload templates; renders nothing.
sub _hdlr_stop_propagation {
    my ($ctx, $args) = @_;
    $ctx->stash('preload_template_stop_propagation', 1);
    return '';
}
# App mode handler for __mode=preload_template_tags_help: return the
# inline help text registered for a tag, or redirect to the generic
# help URL with the tag name substituted in.
sub tags_help {
my $app = shift;
my $plugin = MT->component('PreloadTemplate');
my $tags = $plugin->registry('tags');
my $help_url = $tags->{'help_url'};
# Re-evaluate the template so its tag definitions populate the
# 'preload_template_helps' stash on the template's context.
my $tmpl = MT->model('template')->load($app->param('template_id'));
$tmpl->output;
my $helps = $tmpl->context->stash('preload_template_helps');
if ($helps->{$app->param('tag')}) {
return $helps->{$app->param('tag')};
}
else {
# Replace the %t placeholder with the requested tag name.
$help_url =~ s/%t/$app->param('tag')/ge;
MT->instance->redirect($help_url);
}
}
1;
| usualoma/movable-type-plugin-preload-template | lib/PreloadTemplate.pm | Perl | mit | 7,634 |
use bytes;
#=====================================================================================
# Lisp.pm
# by Shinsuke MORI
# Last change : 27 December 1996
#=====================================================================================
# µ¡ ǽ : ÇÛÎó¤ò¥ê¥¹¥È¤È¤ß¤Ê¤¹¤³¤È¤ÇÄêµÁ¤Ç¤¤ë LISP ¤Î´Ø¿ô·²
#
# ¼Â Îã : ¤Ê¤·
#
# Ãí°ÕÅÀ : ¤Ê¤·
#-------------------------------------------------------------------------------------
# uniq
#-------------------------------------------------------------------------------------
# uniq(ARRAY)
#
# Function : remove duplicate elements from ARRAY
#
# Example  : @array = uniq(@array);
#
# Note     : the original order of the elements is preserved.
# Remove duplicates from the argument list, keeping first occurrences in
# their original order. Returns the deduplicated list.
sub uniq {
    my %seen;
    return grep { !$seen{$_}++ } @_;
}
#-------------------------------------------------------------------------------------
# plus
#-------------------------------------------------------------------------------------
# plus(ARRAY)
#
# Function : compute the sum of the elements of ARRAY.
#
# Example  : $sum = plus(@array);
#
# Note     : none
# Sum every element of the argument list and return the total.
#
# FIX: the original "$sum += $_ while ($_ = shift);" stopped at the first
# *false* element, so plus(1, 0, 2) returned 1 instead of 3 (and an empty
# string or undef in the list truncated the sum the same way).
sub plus{
    my $sum = 0;
    $sum += $_ for @_;
    return $sum;
}
#-------------------------------------------------------------------------------------
# return
#-------------------------------------------------------------------------------------
1;
#=====================================================================================
# END
#=====================================================================================
| tkd53/KKConv | lib/perl/Lisp.pm | Perl | mit | 1,643 |
use utf8;
package Chinook::Schema;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
use strict;
use warnings;
use base 'DBIx::Class::Schema';
__PACKAGE__->load_namespaces;
# Created by DBIx::Class::Schema::Loader v0.07035 @ 2013-04-19 13:38:48
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:hynOLzSCGBogCMlqmMFxig
# You can replace this text with custom code or comments, and it will be preserved on regeneration
# Schema entry point for the Chinook database: load_namespaces() picks up
# all Chinook::Schema::Result::* and Chinook::Schema::ResultSet::* classes
# automatically. (Comment added below the md5sum marker so regeneration
# checks are unaffected.)
1;
| rpillar/Chinook | lib/Chinook/Schema.pm | Perl | mit | 460 |
%query: log2(i,o).
/* author: Michael Codish
date: March 2003
log2(X,Y) :- Y equals the integer log base 2 of X.
Namely, the largest integer smaller or equal to the
log base 2 of X
*/
% log2(X,Y) :- Y is the integer (floor) log base 2 of X, with both numbers
% written as Peano numerals (0, s(0), s(s(0)), ...).
log2(X,Y) :- log2(X,0,s(0),Y).
% log2(X,H,A,Y) :- computes Y equals the integer log base 2 of X.
% H accumulates "half" of X and A accumulates the number of times
% X was halved.
% Strip two successors from X per step, growing Half by one: Half ~ X/2.
log2(s(s(X)),Half,Acc,Y) :- log2(X,s(Half),Acc,Y).
% X exhausted (0 or 1): recurse on the accumulated half, counting one more
% halving in the accumulator.
log2(X,s(s(Half)),Acc,Y) :- small(X), log2(Half,s(0),s(Acc),Y).
% Both X and the remaining half are 0 or 1: the halving count is the answer.
log2(X,Half,Y,Y) :- small(X),small(Half).
% small(N) :- N is the numeral 0 or 1.
small(0).
small(s(0)).
/*
?- log2(s(s(s(s(s(s(s(s(0)))))))), Y).
Y = s(s(s(0)))
?- log2(s(s(s(s(s(s(s(s(s(s(0)))))))))), Y).
Y = s(s(s(0)))
*/
| ComputationWithBoundedResources/ara-inference | doc/tpdb_trs/Logic_Programming/lpexamples/log2b.pl | Perl | mit | 705 |
#
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
require 5.6.0;
use strict;
use warnings;
use Thrift;
use Recomm::Types;
use Facebook::FB303::FacebookService;
# HELPER FUNCTIONS AND STRUCTURES
package Recomm::RecommEngine_query_args;
# Thrift-generated wrapper for the arguments of RecommEngine.query().
# Regenerate from the .thrift IDL instead of editing by hand.
use base qw(Class::Accessor);
Recomm::RecommEngine_query_args->mk_accessors( qw( request ) );
sub new {
my $classname = shift;
my $self = {};
my $vals = shift || {};
$self->{request} = undef;
if (UNIVERSAL::isa($vals,'HASH')) {
if (defined $vals->{request}) {
$self->{request} = $vals->{request};
}
}
return bless ($self, $classname);
}
sub getName {
return 'RecommEngine_query_args';
}
# Deserialize this struct from $input (a Thrift protocol object); returns
# the number of bytes consumed.
sub read {
my ($self, $input) = @_;
my $xfer = 0;
my $fname;
my $ftype = 0;
my $fid = 0;
$xfer += $input->readStructBegin(\$fname);
while (1)
{
$xfer += $input->readFieldBegin(\$fname, \$ftype, \$fid);
if ($ftype == TType::STOP) {
last;
}
SWITCH: for($fid)
{
/^1$/ && do{ if ($ftype == TType::STRUCT) {
$self->{request} = new Recomm::RecommendationRequest();
$xfer += $self->{request}->read($input);
} else {
$xfer += $input->skip($ftype);
}
last; };
$xfer += $input->skip($ftype);
}
$xfer += $input->readFieldEnd();
}
$xfer += $input->readStructEnd();
return $xfer;
}
# Serialize this struct to $output (a Thrift protocol object); returns the
# number of bytes written.
sub write {
my ($self, $output) = @_;
my $xfer = 0;
$xfer += $output->writeStructBegin('RecommEngine_query_args');
if (defined $self->{request}) {
$xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
$xfer += $self->{request}->write($output);
$xfer += $output->writeFieldEnd();
}
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
}
package Recomm::RecommEngine_query_result;
# Thrift-generated wrapper for the return value of RecommEngine.query().
# Regenerate from the .thrift IDL instead of editing by hand.
use base qw(Class::Accessor);
Recomm::RecommEngine_query_result->mk_accessors( qw( success ) );
sub new {
my $classname = shift;
my $self = {};
my $vals = shift || {};
$self->{success} = undef;
if (UNIVERSAL::isa($vals,'HASH')) {
if (defined $vals->{success}) {
$self->{success} = $vals->{success};
}
}
return bless ($self, $classname);
}
sub getName {
return 'RecommEngine_query_result';
}
# Deserialize this struct from $input (a Thrift protocol object); returns
# the number of bytes consumed.
sub read {
my ($self, $input) = @_;
my $xfer = 0;
my $fname;
my $ftype = 0;
my $fid = 0;
$xfer += $input->readStructBegin(\$fname);
while (1)
{
$xfer += $input->readFieldBegin(\$fname, \$ftype, \$fid);
if ($ftype == TType::STOP) {
last;
}
SWITCH: for($fid)
{
/^0$/ && do{ if ($ftype == TType::STRUCT) {
$self->{success} = new Recomm::RecommendationResponse();
$xfer += $self->{success}->read($input);
} else {
$xfer += $input->skip($ftype);
}
last; };
$xfer += $input->skip($ftype);
}
$xfer += $input->readFieldEnd();
}
$xfer += $input->readStructEnd();
return $xfer;
}
# Serialize this struct to $output (a Thrift protocol object); returns the
# number of bytes written.
sub write {
my ($self, $output) = @_;
my $xfer = 0;
$xfer += $output->writeStructBegin('RecommEngine_query_result');
if (defined $self->{success}) {
$xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
$xfer += $self->{success}->write($output);
$xfer += $output->writeFieldEnd();
}
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
}
package Recomm::RecommEngineIf;
# Thrift-generated abstract interface for the RecommEngine service;
# concrete handlers must override query().
use strict;
use base qw(Facebook::FB303::FacebookServiceIf);
sub query{
my $self = shift;
my $request = shift;
die 'implement interface';
}
package Recomm::RecommEngineRest;
use strict;
use base qw(Facebook::FB303::FacebookServiceRest);
# REST shim: extract the 'request' field from the incoming parameter hash
# and delegate to the wrapped handler implementation.
sub query{
    my ($self, $rest_params) = @_;
    # FIX: the original redeclared "my $request" on the next line, masking
    # the parameter of the same name and emitting a '"my" variable masks
    # earlier declaration' warning. Distinct names keep the behaviour
    # without the warning.
    my $request = ($rest_params->{'request'}) ? $rest_params->{'request'} : undef;
    return $self->{impl}->query($request);
}
package Recomm::RecommEngineClient;
# Thrift-generated client stub: serializes query() calls onto the output
# protocol and decodes the reply from the input protocol.
use base qw(Facebook::FB303::FacebookServiceClient);
use base qw(Recomm::RecommEngineIf);
sub new {
my ($classname, $input, $output) = @_;
my $self = {};
$self = $classname->SUPER::new($input, $output);
return bless($self,$classname);
}
sub query{
my $self = shift;
my $request = shift;
$self->send_query($request);
return $self->recv_query();
}
# Write the query() call message (method name + argument struct) and flush.
sub send_query{
my $self = shift;
my $request = shift;
$self->{output}->writeMessageBegin('query', TMessageType::CALL, $self->{seqid});
my $args = new Recomm::RecommEngine_query_args();
$args->{request} = $request;
$args->write($self->{output});
$self->{output}->writeMessageEnd();
$self->{output}->getTransport()->flush();
}
# Read the reply for query(); rethrows server-side exceptions, returns the
# 'success' payload, or dies if the reply carries no result.
sub recv_query{
my $self = shift;
my $rseqid = 0;
my $fname;
my $mtype = 0;
$self->{input}->readMessageBegin(\$fname, \$mtype, \$rseqid);
if ($mtype == TMessageType::EXCEPTION) {
my $x = new TApplicationException();
$x->read($self->{input});
$self->{input}->readMessageEnd();
die $x;
}
my $result = new Recomm::RecommEngine_query_result();
$result->read($self->{input});
$self->{input}->readMessageEnd();
if (defined $result->{success} ) {
return $result->{success};
}
die "query failed: unknown result";
}
package Recomm::RecommEngineProcessor;
# Thrift-generated server-side dispatcher: reads an incoming message and
# routes it to the matching process_* method on this class.
use strict;
use base qw(Facebook::FB303::FacebookServiceProcessor);
sub process {
my ($self, $input, $output) = @_;
my $rseqid = 0;
my $fname = undef;
my $mtype = 0;
$input->readMessageBegin(\$fname, \$mtype, \$rseqid);
my $methodname = 'process_'.$fname;
if (!$self->can($methodname)) {
# Unknown method: drain the request and reply with an exception.
$input->skip(TType::STRUCT);
$input->readMessageEnd();
my $x = new TApplicationException('Function '.$fname.' not implemented.', TApplicationException::UNKNOWN_METHOD);
$output->writeMessageBegin($fname, TMessageType::EXCEPTION, $rseqid);
$x->write($output);
$output->writeMessageEnd();
$output->getTransport()->flush();
return;
}
$self->$methodname($rseqid, $input, $output);
return 1;
}
# Decode query() arguments, invoke the handler, and write back the reply.
sub process_query {
my ($self, $seqid, $input, $output) = @_;
my $args = new Recomm::RecommEngine_query_args();
$args->read($input);
$input->readMessageEnd();
my $result = new Recomm::RecommEngine_query_result();
$result->{success} = $self->{handler}->query($args->request);
$output->writeMessageBegin('query', TMessageType::REPLY, $seqid);
$result->write($output);
$output->writeMessageEnd();
$output->getTransport()->flush();
}
1;
| enyun/misc | idl/gen-perl/Recomm/RecommEngine.pm | Perl | mit | 6,448 |
% Load the file reader and the PDDL problem/domain parsers plus shared helpers.
:-[readFile, parseProblem, parseDomain, common].
% Load the weighted-A* forward-search planner with the h_add heuristic.
:-['wa-star', forward, h_add].
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_ec/test/pddl_tests/orig_pddl_parser/forward-wa-star-h_add.pl | Perl | mit | 80 |
:-consult('../../Game').
% Register the two players used by this test.
addPlayers :- addPlayer(marco), addPlayer(anthony).
% Give each player four battleship cells.
% NOTE(review): assumes ships/5 arguments are (player, X, Y, hits, type) --
% confirm against the Game module.
addShips :- assertz(ships(marco, 1, 1, 0, battleship)), assertz(ships(marco, 1, 2, 0, battleship)), assertz(ships(marco, 1, 3, 0, battleship)),
assertz(ships(marco, 1, 4, 0, battleship)), assertz(ships(anthony, 1, 1, 0, battleship)), assertz(ships(anthony, 1, 2, 0, battleship)),
assertz(ships(anthony, 1, 3, 0, battleship)), assertz(ships(anthony, 1, 4, 0, battleship)).
/* We destroy all the boats. */
test :- addPlayers, addShips, not(startGame), not(shot(1,1)), not(shot(1,2)), not(shot(1,3)), not(shot(1,4)). | H4305/battleship | Game/Tests/5. Victory/VictoryMessage.pl | Perl | mit | 595 |
#!/usr/bin/env perl
#===============================================================================
#
# FILE: list-stack.pl
#
# DESCRIPTION: example matching stack braces
#
# AUTHOR: stelf
# ORGANIZATION: Practical Perl Programing at FMI/Sofia University
# VERSION: 1.0
# CREATED: 03/13/2015 10:24:56 AM
#
#===============================================================================
use strict;
use warnings;
use utf8;
use v5.012;
my $bstr = $ARGV[0] || "ds[dsds[(asd)]]";
say 'parsing ', $bstr;

# map every opening bracket to its matching closing bracket
my %bmatch = qw/( ) [ ] { }/;

# stack of currently-open brackets
my @stack;

my @chars = split //, $bstr;
for my $idx (0 .. $#chars) {
    my $c   = $chars[$idx];
    my $pos = $idx + 1;    # 1-based position, used in error messages

    if (exists $bmatch{$c}) {
        # opening bracket: remember it
        push @stack, $c;
    }
    elsif (grep { $_ eq $c } values %bmatch) {
        # closing bracket: it must pair with the most recent opener
        scalar @stack or die "too many closing";
        $bmatch{pop @stack} eq $c or die "mismatch at [$pos]";
    }
}

# any opener left unmatched is an error
scalar @stack and die "mismatch";
say "OK";
| stelf/fmi-perl | examples/02-list/list-stack.pl | Perl | cc0-1.0 | 1,184 |
package Google::Ads::AdWords::v201809::FeedItemService::get;
# SOAP::WSDL-generated element class for the FeedItemService "get" request;
# regenerate from the WSDL instead of editing by hand.
use strict;
use warnings;
{ # BLOCK to scope variables
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' }
__PACKAGE__->__set_name('get');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();
use base qw(
SOAP::WSDL::XSD::Typelib::Element
Google::Ads::SOAP::Typelib::ComplexType
);
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# Single attribute: the Selector determining which FeedItems to return.
my %selector_of :ATTR(:get<selector>);
__PACKAGE__->_factory(
[ qw( selector
) ],
{
'selector' => \%selector_of,
},
{
'selector' => 'Google::Ads::AdWords::v201809::Selector',
},
{
'selector' => 'selector',
}
);
} # end BLOCK
} # end of BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::FeedItemService::get
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
get from the namespace https://adwords.google.com/api/adwords/cm/v201809.
Returns a list of FeedItems that meet the selector criteria. @param selector Determines which FeedItems to return. If empty all FeedItems are returned. @return The list of FeedItems. @throws ApiException Indicates a problem with the request.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * selector
$element->set_selector($data);
$element->get_selector();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201809::FeedItemService::get->new($data);
Constructor. The following data structure may be passed to new():
{
selector => $a_reference_to, # see Google::Ads::AdWords::v201809::Selector
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/FeedItemService/get.pm | Perl | apache-2.0 | 2,000 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::protocols::udp::mode::connection;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use IO::Socket::INET;
use IO::Select;
# Constructor: declare the command-line options understood by this mode.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
# NOTE(review): check_options() below validates --warning/--critical but
# those options are never declared here, so users cannot actually pass
# them; this looks like leftover boilerplate -- confirm and either
# declare or drop the validation.
$options{options}->add_options(arguments =>
{
"hostname:s" => { name => 'hostname' },
"port:s" => { name => 'port', },
"timeout:s" => { name => 'timeout', default => '3' },
});
return $self;
}
# Validate parsed options: thresholds must parse and the mandatory
# --hostname/--port options must be present; exits with a usage error
# otherwise.
sub check_options {
my ($self, %options) = @_;
$self->SUPER::init(%options);
# NOTE(review): 'warning'/'critical' are never declared in new(), so
# these values are always undef here; the validation is effectively a
# no-op -- confirm intent.
if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{option_results}->{warning})) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{option_results}->{warning} . "'.");
$self->{output}->option_exit();
}
if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{option_results}->{critical})) == 0) {
$self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{option_results}->{critical} . "'.");
$self->{output}->option_exit();
}
if (!defined($self->{option_results}->{hostname})) {
$self->{output}->add_option_msg(short_msg => "Need to specify '--hostname' option");
$self->{output}->option_exit();
}
if (!defined($self->{option_results}->{port})) {
$self->{output}->add_option_msg(short_msg => "Need to specify '--port' option");
$self->{output}->option_exit();
}
}
# Probe the target UDP port: send a datagram and wait up to --timeout
# seconds for an ICMP "port unreachable" reply. Receiving one means the
# port is closed (CRITICAL); silence is treated as open (OK).
sub run {
    my ($self, %options) = @_;

    my $icmp_sock = IO::Socket::INET->new(Proto => "icmp");
    if (!defined($icmp_sock)) {
        $self->{output}->add_option_msg(short_msg => "Cannot create socket: $!");
        $self->{output}->option_exit();
    }
    my $read_set = IO::Select->new();
    $read_set->add($icmp_sock);

    my $sock = IO::Socket::INET->new(
        PeerAddr => $self->{option_results}->{hostname},
        PeerPort => $self->{option_results}->{port},
        Proto    => 'udp',
    );
    # FIX: the original called send() without checking the constructor's
    # return value; an unresolvable hostname crashed the plugin with
    # "Can't call method send on an undefined value".
    if (!defined($sock)) {
        $self->{output}->add_option_msg(short_msg => "Cannot create socket: $!");
        $self->{output}->option_exit();
    }
    $sock->send("Hello");
    close($sock);

    my ($new_readable) = IO::Select->select($read_set, undef, undef, $self->{option_results}->{timeout});
    # FIX: IO::Select->select returns an empty list on timeout, leaving
    # $new_readable undef; dereferencing it died under "use strict" on the
    # success path. Fall back to an empty arrayref.
    my $icmp_arrived = 0;
    foreach $sock (@{$new_readable || []}) {
        if ($sock == $icmp_sock) {
            $icmp_arrived = 1;
            $icmp_sock->recv(my $buffer, 50, 0);
        }
    }
    close($icmp_sock);

    if ($icmp_arrived == 1) {
        $self->{output}->output_add(severity => 'CRITICAL',
                                    short_msg => sprintf("Connection failed on port %s", $self->{option_results}->{port}));
    } else {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => sprintf("Connection success on port %s", $self->{option_results}->{port}));
    }
    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check UDP connection
=over 8
=item B<--hostname>
IP Addr/FQDN of the host
=item B<--port>
Port used
=item B<--timeout>
Connection timeout in seconds (Default: 3)
=back
=cut
| Sims24/centreon-plugins | apps/protocols/udp/mode/connection.pm | Perl | apache-2.0 | 4,025 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Component::Tools::NewJobButton;
use strict;
use warnings;
use parent qw(EnsEMBL::Web::Component::Tools);
# Render the "New job" button, linking back to the current tool page with
# the input form expanded.
sub content {
    my $self = shift;
    my $url  = $self->hub->url({ 'function' => undef, 'expand_form' => 'true' });
    return $self->create_button($url, 'top-margin');
}
# Build the HTML anchor for a "New job" button pointing at $button_url,
# with $button_class appended to the standard "button" class.
sub create_button {
    my ($self, $button_url, $button_class) = @_;
    return sprintf('<a class="button %s" href="%s">New job</a>', $button_class, $button_url);
}
1;
| Ensembl/public-plugins | tools/modules/EnsEMBL/Web/Component/Tools/NewJobButton.pm | Perl | apache-2.0 | 1,207 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% This file is part of VivoMind Prolog Unicode Resources
% SPDX-License-Identifier: CC0-1.0
%
% VivoMind Prolog Unicode Resources is free software distributed using the
% Creative Commons CC0 1.0 Universal (CC0 1.0) - Public Domain Dedication
% license
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Last modified: March 29, 2012
% Facts for Unicode general category Cf (format control characters):
% soft hyphen, Arabic number signs, zero-width/joiner and bidi controls,
% invisible math operators, interlinear annotation anchors, musical score
% formatting controls, and the Plane 14 tag characters (U+E0001,
% U+E0020-U+E007F).
unicode_category_(0x00AD, 'Cf').
unicode_category_(0x0600, 'Cf').
unicode_category_(0x0601, 'Cf').
unicode_category_(0x0602, 'Cf').
unicode_category_(0x0603, 'Cf').
unicode_category_(0x0604, 'Cf').
unicode_category_(0x06DD, 'Cf').
unicode_category_(0x070F, 'Cf').
unicode_category_(0x200B, 'Cf').
unicode_category_(0x200C, 'Cf').
unicode_category_(0x200D, 'Cf').
unicode_category_(0x200E, 'Cf').
unicode_category_(0x200F, 'Cf').
unicode_category_(0x202A, 'Cf').
unicode_category_(0x202B, 'Cf').
unicode_category_(0x202C, 'Cf').
unicode_category_(0x202D, 'Cf').
unicode_category_(0x202E, 'Cf').
unicode_category_(0x2060, 'Cf').
unicode_category_(0x2061, 'Cf').
unicode_category_(0x2062, 'Cf').
unicode_category_(0x2063, 'Cf').
unicode_category_(0x2064, 'Cf').
unicode_category_(0x206A, 'Cf').
unicode_category_(0x206B, 'Cf').
unicode_category_(0x206C, 'Cf').
unicode_category_(0x206D, 'Cf').
unicode_category_(0x206E, 'Cf').
unicode_category_(0x206F, 'Cf').
unicode_category_(0xFEFF, 'Cf').
unicode_category_(0xFFF9, 'Cf').
unicode_category_(0xFFFA, 'Cf').
unicode_category_(0xFFFB, 'Cf').
unicode_category_(0x110BD, 'Cf').
unicode_category_(0x1D173, 'Cf').
unicode_category_(0x1D174, 'Cf').
unicode_category_(0x1D175, 'Cf').
unicode_category_(0x1D176, 'Cf').
unicode_category_(0x1D177, 'Cf').
unicode_category_(0x1D178, 'Cf').
unicode_category_(0x1D179, 'Cf').
unicode_category_(0x1D17A, 'Cf').
unicode_category_(0xE0001, 'Cf').
unicode_category_(0xE0020, 'Cf').
unicode_category_(0xE0021, 'Cf').
unicode_category_(0xE0022, 'Cf').
unicode_category_(0xE0023, 'Cf').
unicode_category_(0xE0024, 'Cf').
unicode_category_(0xE0025, 'Cf').
unicode_category_(0xE0026, 'Cf').
unicode_category_(0xE0027, 'Cf').
unicode_category_(0xE0028, 'Cf').
unicode_category_(0xE0029, 'Cf').
unicode_category_(0xE002A, 'Cf').
unicode_category_(0xE002B, 'Cf').
unicode_category_(0xE002C, 'Cf').
unicode_category_(0xE002D, 'Cf').
unicode_category_(0xE002E, 'Cf').
unicode_category_(0xE002F, 'Cf').
unicode_category_(0xE0030, 'Cf').
unicode_category_(0xE0031, 'Cf').
unicode_category_(0xE0032, 'Cf').
unicode_category_(0xE0033, 'Cf').
unicode_category_(0xE0034, 'Cf').
unicode_category_(0xE0035, 'Cf').
unicode_category_(0xE0036, 'Cf').
unicode_category_(0xE0037, 'Cf').
unicode_category_(0xE0038, 'Cf').
unicode_category_(0xE0039, 'Cf').
unicode_category_(0xE003A, 'Cf').
unicode_category_(0xE003B, 'Cf').
unicode_category_(0xE003C, 'Cf').
unicode_category_(0xE003D, 'Cf').
unicode_category_(0xE003E, 'Cf').
unicode_category_(0xE003F, 'Cf').
unicode_category_(0xE0040, 'Cf').
unicode_category_(0xE0041, 'Cf').
unicode_category_(0xE0042, 'Cf').
unicode_category_(0xE0043, 'Cf').
unicode_category_(0xE0044, 'Cf').
unicode_category_(0xE0045, 'Cf').
unicode_category_(0xE0046, 'Cf').
unicode_category_(0xE0047, 'Cf').
unicode_category_(0xE0048, 'Cf').
unicode_category_(0xE0049, 'Cf').
unicode_category_(0xE004A, 'Cf').
unicode_category_(0xE004B, 'Cf').
unicode_category_(0xE004C, 'Cf').
unicode_category_(0xE004D, 'Cf').
unicode_category_(0xE004E, 'Cf').
unicode_category_(0xE004F, 'Cf').
unicode_category_(0xE0050, 'Cf').
unicode_category_(0xE0051, 'Cf').
unicode_category_(0xE0052, 'Cf').
unicode_category_(0xE0053, 'Cf').
unicode_category_(0xE0054, 'Cf').
unicode_category_(0xE0055, 'Cf').
unicode_category_(0xE0056, 'Cf').
unicode_category_(0xE0057, 'Cf').
unicode_category_(0xE0058, 'Cf').
unicode_category_(0xE0059, 'Cf').
unicode_category_(0xE005A, 'Cf').
unicode_category_(0xE005B, 'Cf').
unicode_category_(0xE005C, 'Cf').
unicode_category_(0xE005D, 'Cf').
unicode_category_(0xE005E, 'Cf').
unicode_category_(0xE005F, 'Cf').
unicode_category_(0xE0060, 'Cf').
unicode_category_(0xE0061, 'Cf').
unicode_category_(0xE0062, 'Cf').
unicode_category_(0xE0063, 'Cf').
unicode_category_(0xE0064, 'Cf').
unicode_category_(0xE0065, 'Cf').
unicode_category_(0xE0066, 'Cf').
unicode_category_(0xE0067, 'Cf').
unicode_category_(0xE0068, 'Cf').
unicode_category_(0xE0069, 'Cf').
unicode_category_(0xE006A, 'Cf').
unicode_category_(0xE006B, 'Cf').
unicode_category_(0xE006C, 'Cf').
unicode_category_(0xE006D, 'Cf').
unicode_category_(0xE006E, 'Cf').
unicode_category_(0xE006F, 'Cf').
unicode_category_(0xE0070, 'Cf').
unicode_category_(0xE0071, 'Cf').
unicode_category_(0xE0072, 'Cf').
unicode_category_(0xE0073, 'Cf').
unicode_category_(0xE0074, 'Cf').
unicode_category_(0xE0075, 'Cf').
unicode_category_(0xE0076, 'Cf').
unicode_category_(0xE0077, 'Cf').
unicode_category_(0xE0078, 'Cf').
unicode_category_(0xE0079, 'Cf').
unicode_category_(0xE007A, 'Cf').
unicode_category_(0xE007B, 'Cf').
unicode_category_(0xE007C, 'Cf').
unicode_category_(0xE007D, 'Cf').
unicode_category_(0xE007E, 'Cf').
unicode_category_(0xE007F, 'Cf').
| LogtalkDotOrg/logtalk3 | library/unicode_data/unicode_categories/unicode_category_cf_other_format.pl | Perl | apache-2.0 | 5,148 |
% (c) 1996-2016 Michael Leuschel
% see https://github.com/leuschel/logen for more details
:- module(tools, [string_concatenate/3,
print_error/1,
print_message/1,
same_len/2, is_list_skel/1,
convert_cli_into_atom/2,
get_modulename_filename/2,
get_tail_filename/2]).
% is_list_skel(X) :- X is a sufficiently instantiated proper list skeleton.
is_list_skel(X) :- nonvar(X), (X=[] -> true ; (X=[_|T], is_list_skel(T))).
% same_len(L1,L2) :- the two lists have equal length (elements unconstrained).
same_len([],[]).
same_len([_|T],[_|T2]) :- same_len(T,T2).
% Write Msg to standard error, prefixed with '!'.
print_error(Msg) :-
format(user_error, "!~w~n", [Msg]).
% Write Msg to standard output, prefixed with '%'.
print_message(Msg) :-
format(user, "%~w~n", [Msg]).
% Pick the dialect-specific implementation of read_from_chars/2.
:- if(current_prolog_flag(dialect, ciao)).
:- use_module(ciao_tools,[read_from_chars/2]).
:- else.
:- use_module(sics_tools,[read_from_chars/2]).
:- endif.
% Parse a command-line goal (list of chars) into a term.
convert_cli_into_atom(CLIGOAL,Atom) :- read_from_chars(CLIGOAL,Atom).
% string_concatenate(S1,S2,S3) :- S3 is the atom concatenation of S1 and S2.
string_concatenate(S1, S2, S3) :-
name(S1, S1S),
name(S2, S2S),
append(S1S,S2S,S3S),
name(S3,S3S).
:- use_module(library(lists)).
% :- assert_must_succeed(tools:get_modulename_filename('/aaaa/bbb/cc/d.app','d')).
% Extract the module name (basename without extension) from a path;
% '_' when the tail has no '.' separator.
get_modulename_filename(Path,Module) :-
get_tail_filename(Path,Tail),
(split_last(Tail, '.', M, _) -> Module=M ; Module='_').
% :- assert_must_succeed(tools:get_tail_filename('/aaaa/bbb/cc/d.app','d.app')).
% Extract the last path component; the whole path when it has no '/'.
get_tail_filename(Path,Tail) :- (split_last(Path, '/', _, T) -> Tail=T ; Tail=Path).
% split_last(Atom,Sep,Head,Tail) :- split Atom at the LAST occurrence of
% the single-character separator Sep; fails if Sep does not occur.
% NOTE(review): the variable is named SepACode but atom_chars/2 yields a
% character, not a character code -- the code is consistent, only the
% name is misleading.
split_last(Atom, Sep, Head, Tail) :-
atom_chars(Atom,ListAscii), atom_chars(Sep,[SepACode]),
split_last2(ListAscii,SepACode,[],[],HeadA, TailA),
atom_chars(Head,HeadA), atom_chars(Tail,TailA).
% split_last2(Chars,Sep,CurSplit,Head,ResH,ResT) :- worker accumulating the
% current segment (reversed) in CurSplit and previous segments in Head.
split_last2([],Sep,CurSplit,[Sep|Head],ResH,ResT) :-
reverse(CurSplit,ResT),
reverse(Head,ResH).
split_last2([Sep|Tail],Sep,CurSplit,Head,ResH,ResT) :- !,
append([Sep|CurSplit],Head,NewHead),
split_last2(Tail,Sep,[],NewHead,ResH,ResT).
split_last2([H|Tail],Sep,CurSplit,Head,ResH,ResT) :-
split_last2(Tail,Sep,[H|CurSplit],Head,ResH,ResT). | leuschel/logen | tools/tools.pl | Perl | apache-2.0 | 1,844 |
package Dancer::Introduction;    # FIX: the package statement was missing its terminating semicolon
# ABSTRACT: A gentle introduction to Dancer
=head1 DESCRIPTION
Dancer is a free and open source micro web application framework written in
Perl.
=head1 INSTALL
Installation of Dancer is simple:
perl -MCPAN -e 'install Dancer'
Thanks to the magic of cpanminus, if you do not have CPAN.pm configured, or just
want a quickfire way to get running, the following should work, at least on
Unix-like systems:
wget -O - http://cpanmin.us | sudo perl - Dancer
(If you don't have root access, omit the 'sudo', and cpanminus will install
Dancer and prereqs into C<~/perl5>.)
=head1 SETUP
Create a web application using the dancer script:
dancer -a MyApp
Run the web application:
cd MyApp
bin/app.pl
You can read the output of C<bin/app.pl --help> to change any settings such as
the port number.
View the web application at:
http://localhost:3000
=head1 USAGE
When Dancer is imported to a script, that script becomes a webapp, and at this
point, all the script has to do is declare a list of B<routes>. A route
handler is composed by an HTTP method, a path pattern and a code block.
C<strict> and C<warnings> pragmas are also imported with Dancer.
The code block given to the route handler has to return a string which will be
used as the content to render to the client.
Routes are defined for a given HTTP method. For each method
supported, a keyword is exported by the module.
The following is an example of a route definition. The route is defined for the
method 'get', so only GET requests will be honoured by that route:
get '/hello/:name' => sub {
# do something
return "Hello ".param('name');
};
=head2 HTTP METHODS
Here are some of the standard HTTP methods which you can use to define your
route handlers.
=over 8
=item B<GET>
The GET method retrieves information (when defining a route
handler for the GET method, Dancer automatically defines a
route handler for the HEAD method, in order to honour HEAD
requests for each of your GET route handlers).
To define a GET action, use the B<get> keyword.
=item B<POST>
The POST method is used to create a resource on the
server.
To define a POST action, use the B<post> keyword.
=item B<PUT>
The PUT method is used to update an existing resource.
To define a PUT action, use the B<put> keyword.
=item B<DELETE>
The DELETE method requests that the origin server delete
the resource identified by the Request-URI.
To define a DELETE action, use the B<del> keyword.
=back
To define a route for multiple methods you can also use the special keyword
B<any>. This example illustrates how to define a route for both GET and
POST methods:
any ['get', 'post'] => '/myaction' => sub {
# code
};
Or even, a route handler that would match any HTTP methods:
any '/myaction' => sub {
# code
};
=head2 ROUTE HANDLERS
The route action is the code reference declared. It can access parameters
through the `params' keyword, which returns a hashref.
This hashref is a merge of the route pattern matches and the request params.
You can have more details about how params are built and how to access them in
the L<Dancer::Request> documentation.
=head2 NAMED MATCHING
A route pattern can contain one or more tokens (a word prefixed with ':'). Each
token found in a route pattern is used as a named-pattern match. Any match will
be set in the params hashref.
get '/hello/:name' => sub {
"Hey ".param('name').", welcome here!";
};
Tokens can be optional, for example:
get '/hello/:name?' => sub {
"Hello there " . (param('name') || "whoever you are!");
};
=head2 WILDCARDS MATCHING
A route can contain a wildcard (represented by a '*'). Each wildcard match will
be returned in an arrayref, accessible via the `splat' keyword.
get '/download/*.*' => sub {
my ($file, $ext) = splat;
# do something with $file.$ext here
};
=head2 REGULAR EXPRESSION MATCHING
A route can be defined with a Perl regular expression.
In order to tell Dancer to consider the route as a real regexp, the route must
be defined explicitly with C<qr{}>, like the following:
get qr{/hello/([\w]+)} => sub {
my ($name) = splat;
return "Hello $name";
};
=head2 CONDITIONAL MATCHING
Routes may include some matching conditions (on the useragent and the hostname
at the moment):
get '/foo', {agent => 'Songbird (\d\.\d)[\d\/]*?'} => sub {
'foo method for songbird'
}
get '/foo' => sub {
'all browsers except songbird'
}
=head2 PREFIX
A prefix can be defined for each route handler, like this:
prefix '/home';
From here, any route handler is defined to /home/*
get '/page1' => sub {}; # will match '/home/page1'
You can unset the prefix value
prefix '/'; # or: prefix undef;
get '/page1' => sub {}; # will match '/page1'
Alternatively, to prevent you from ever forgetting to undef the prefix,
you can use lexical prefix like this:
prefix '/home' => sub {
get '/page1' => sub {}; # will match '/home/page1'
}; ## prefix reset to previous value on exit
get '/page1' => sub {}; # will match '/page1'
=head1 ACTION SKIPPING
An action can choose not to serve the current request and ask Dancer to process
the request with the next matching route.
This is done with the B<pass> keyword, like in the following example
get '/say/:word' => sub {
return pass if (params->{word} =~ /^\d+$/);
"I say a word: ".params->{word};
};
get '/say/:number' => sub {
"I say a number: ".params->{number};
};
=head2 DEFAULT ERROR PAGES
When an error is rendered (the action responded with a status code different
than 200), Dancer first looks in the public directory for an HTML file matching
the error code (eg: 500.html or 404.html).
If such a file exists, it's used to render the error, otherwise, a default
error page will be rendered on the fly.
=head2 EXECUTION ERRORS
When an error occurs during the route execution, Dancer will render an error
page with the HTTP status code 500.
It's possible either to display the content of the error message or to hide it
with a generic error page.
This is a choice left to the end-user and can be set with the
B<show_errors> setting.
Note that you can also choose to consider all warnings in your route handlers
as errors when the setting B<warnings> is set to 1.
=head1 HOOKS
=head2 Before hooks
Before hooks are evaluated before each request within the context of the
request and can modify the request and response. It's possible to define
variables which will be accessible in the action blocks with the keyword 'var'.
hook 'before' => sub {
var note => 'Hi there';
request->path_info('/foo/oversee')
};
get '/foo/*' => sub {
my ($match) = splat; # 'oversee';
vars->{note}; # 'Hi there'
};
For another example, this can be used along with session support to easily
give non-logged-in users a login page:
hook 'before' => sub {
if (!session('user') && request->path_info !~ m{^/login}) {
# Pass the original path requested along to the handler:
var requested_path => request->path_info;
request->path_info('/login');
}
};
The request keyword returns the current Dancer::Request object representing the
incoming request. See the documentation of the L<Dancer::Request> module for
details.
=head2 After hooks
C<after> hooks are evaluated after the response has been built by a route
handler, and can alter the response itself, just before it's sent to the
client.
The hook is given the response object as its first argument:
hook 'after' => sub {
my $response = shift;
$response->{content} = 'after hook got here!';
};
=head2 Before template hook
C<before_template_render> hooks are called whenever a template is going to be
processed, they are passed the tokens hash which they can alter.
hook 'before_template_render' => sub {
my $tokens = shift;
$tokens->{foo} = 'bar';
};
The tokens hash will then be passed to the template with all the modifications
performed by the hook. This is a good way to setup some global vars you like
to have in all your templates, like the name of the user logged in or a
section name.
=head1 CONFIGURATION AND ENVIRONMENTS
Configuring a Dancer application can be done in many ways. The easiest one (and
maybe the dirtiest) is to put all your settings statements at the top of
your script, before calling the dance() method.
Other ways are possible, you can write all your setting calls in the file
`appdir/config.yml'. For this, you must have installed the YAML module, and of
course, write the conffile in YAML.
That's better than the first option, but it's still not
perfect as you can't switch easily from an environment to another without
rewriting the config.yml file.
The better way is to have one config.yml file with default global settings,
like the following:
# appdir/config.yml
logger: 'file'
layout: 'main'
And then write as many environment files as you like in appdir/environments.
That way, the appropriate environment config file will be loaded according to
the running environment (if none is specified, it will be 'development').
Note that you can change the running environment using the --environment
command line switch.
Typically, you'll want to set the following values in a development config
file:
# appdir/environments/development.yml
log: 'debug'
startup_info: 1
show_errors: 1
And in a production one:
# appdir/environments/production.yml
log: 'warning'
startup_info: 0
show_errors: 0
=head2 load
You can use the load method to include additional routes into your application:
get '/go/:value', sub {
# foo
};
load 'more_routes.pl';
# then, in the file more_routes.pl:
get '/yes', sub {
'orly?';
};
B<load> is just a wrapper for B<require>, but you can also specify a list of
routes files:
load 'login_routes.pl', 'session_routes.pl', 'misc_routes.pl';
=head2 Accessing configuration data
A Dancer application can access the information from its config file easily with
the config keyword:
get '/appname' => sub {
return "This is " . config->{appname};
};
=head1 Importing just the syntax
If you want to use more complex file hierarchies, you can import just the
syntax of Dancer.
package App;
use Dancer; # App may contain generic routes
use App::User::Routes; # user-related routes
Then in App/User/Routes.pm:
use Dancer ':syntax';
get '/user/view/:id' => sub {
...
};
=head1 LOGGING
It's possible to log messages sent by the application. In the current version,
only one method is possible for logging messages but future releases may add
additional logging methods, for instance logging to syslog.
In order to enable the logging system for your application, you first have to
start the logger engine in your config.yml
logger: 'file'
Then you can choose which kind of messages you want to actually log:
log: 'debug' # will log debug, warning, error and info messages
log: 'info' # will log info, warning and error messages
log: 'warning' # will log warning and error messages
log: 'error' # will log error messages
A directory appdir/logs will be created and will host one logfile per
environment. The log message contains the time it was written, the PID of the
current process, the message and the caller information (file and line).
To log messages, use the debug, info, warning and error functions. For
instance:
debug "This is a debug message";
=head1 USING TEMPLATES
=head1 VIEWS
It's possible to render the action's content with a template; this is called a
view. The `appdir/views' directory is the place where views are located.
You can change this location by changing the setting 'views', for instance if
your templates are located in the 'templates' directory, do the following:
set views => path(dirname(__FILE__), 'templates');
By default, the internal template engine is used (L<Dancer::Template::Simple>)
but you may want to upgrade to Template::Toolkit. If you do so, you have to
enable this engine in your settings as explained in
L<Dancer::Template::TemplateToolkit>. If you do so, you'll also have to import
the L<Template> module in your application code. Note that Dancer configures
the Template::Toolkit engine to use <% %> brackets instead of its default
[% %] brackets, although you can change this in your config file.
All views must have a '.tt' extension. This may change in the future.
In order to render a view, just call the 'template' keyword at the end of the
action by giving the view name and the HASHREF of tokens to interpolate in the
view (note that the request, session and route params are automatically
accessible in the view, named request, session and params):
use Dancer;
use Template;
get '/hello/:name' => sub {
template 'hello' => { number => 42 };
};
And the appdir/views/hello.tt view can contain the following code:
<html>
<head></head>
<body>
<h1>Hello <% params.name %></h1>
<p>Your lucky number is <% number %></p>
<p>You are using <% request.user_agent %></p>
<% IF session.user %>
<p>You're logged in as <% session.user %></p>
<% END %>
</body>
</html>
=head2 LAYOUTS
A layout is a special view, located in the 'layouts' directory (inside the
views directory) which must have a token named `content'. That token marks the
place where to render the action view. This lets you define a global layout
for your actions. Any tokens that you defined when you called the 'template'
keyword are available in the layouts, as well as the standard session,
request, and params tokens. This allows you to insert per-page content into
the HTML boilerplate, such as page titles, current-page tags for navigation,
etc.
Here is an example of a layout: views/layouts/main.tt:
<html>
<head><% page_title %></head>
<body>
<div id="header">
...
</div>
<div id="content">
<% content %>
</div>
</body>
</html>
This layout can be used like the following:
use Dancer;
set layout => 'main';
get '/' => sub {
template 'index' => { page_title => "Your website Homepage" };
};
Of course, if a layout is set, it can also be disabled for a specific action,
like the following:
use Dancer;
set layout => 'main';
get '/nolayout' => sub {
template 'some_ajax_view',
{ tokens_var => "42" },
{ layout => 0 };
};
=head1 STATIC FILES
=head2 STATIC DIRECTORY
Static files are served from the ./public directory. You can specify a
different location by setting the 'public' option:
set public => path(dirname(__FILE__), 'static');
Note that the public directory name is not included in the URL. A file
./public/css/style.css is made available as example.com/css/style.css.
=head2 STATIC FILE FROM A ROUTE HANDLER
It's possible for a route handler to send a static file, as follows:
get '/download/*' => sub {
my $params = shift;
my ($file) = @{ $params->{splat} };
send_file $file;
};
Or even if you want your index page to be a plain old index.html file, just do:
get '/' => sub {
send_file '/index.html'
};
=head1 SETTINGS
It's possible to change nearly every parameter of the application via the
settings mechanism.
A setting is key/value pair assigned by the keyword B<set>:
set setting_name => 'setting_value';
More usefully, settings can be defined in a YAML configuration file.
Environment-specific settings can also be defined in environment-specific files
(for instance, you might want extra logging in development). See the cookbook for examples.
See L<Dancer::Config> for complete details about supported settings.
=head1 SERIALIZERS
When writing a webservice, data serialization/deserialization is a common issue
to deal with. Dancer can automatically handle that for you, via a serializer.
When setting up a serializer, a new behaviour is enabled for any route
handler you define: any response that is a reference will be rendered as a
serialized string, via the current serializer.
Here is an example of a route handler that will return a HashRef
use Dancer;
set serializer => 'JSON';
get '/user/:id/' => sub {
{ foo => 42,
number => 100234,
list => [qw(one two three)],
}
};
As soon as the content is a reference - and a serializer is set, which is not
the case by default - Dancer renders the response via the current
serializer.
Hence, with the JSON serializer set, the route handler above would result in a
content like the following:
{"number":100234,"foo":42,"list":["one","two","three"]}
The following serializers are available, be aware they dynamically depend on
Perl modules you may not have on your system.
=over 4
=item B<JSON>
requires L<JSON>
=item B<YAML>
requires L<YAML>
=item B<XML>
requires L<XML::Simple>
=item B<Mutable>
will try to find the appropriate serializer using the B<Content-Type> and
B<Accept-type> header of the request.
=back
=head1 EXAMPLE
This is a possible webapp created with Dancer:
#!/usr/bin/perl
# make this script a webapp
use Dancer;
# declare routes/actions
get '/' => sub {
"Hello World";
};
get '/hello/:name' => sub {
"Hello ".param('name');
};
# run the webserver
Dancer->dance;
=cut
| sonar-perl/sonar-perl | perl/Dancer/lib/Dancer/Introduction.pod | Perl | apache-2.0 | 17,813 |
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::lmsensors::mode::voltage;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
my $oid_SensorDesc = '.1.3.6.1.4.1.2021.13.16.4.1.2'; # voltage entry description
my $oid_SensorValue = '.1.3.6.1.4.1.2021.13.16.4.1.3'; # voltage entry value (mV)
# Constructor: registers the command-line options understood by this mode
# and prepares the list that will hold the selected sensor indexes.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    # Command-line arguments accepted by this mode.
    my %arguments = (
        "warning:s"         => { name => 'warning' },
        "critical:s"        => { name => 'critical' },
        "name"              => { name => 'use_name' },
        "sensor:s"          => { name => 'sensor' },
        "regexp"            => { name => 'use_regexp' },
        "regexp-isensitive" => { name => 'use_regexpi' },
    );
    $options{options}->add_options(arguments => \%arguments);

    # Indexes of the sensors retained by manage_selection().
    $self->{Sensor_id_selected} = [];

    return $self;
}
# Validate the --warning/--critical thresholds supplied on the command
# line; exits with an explicit message when one cannot be parsed.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    for my $label ('warning', 'critical') {
        my $value = $self->{option_results}->{$label};
        # threshold_validate() returns 0 on a malformed threshold.
        next if ($self->{perfdata}->threshold_validate(label => $label, value => $value) != 0);
        $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $value . "'.");
        $self->{output}->option_exit();
    }
}
# Entry point of the mode: polls the lm-sensors voltage table over SNMP,
# checks every selected sensor against the thresholds and emits both the
# plugin status line and the perfdata.
sub run {
    my ($self, %options) = @_;
    # $options{snmp} carries the connected SNMP session object.
    $self->{snmp} = $options{snmp};
    $self->{hostname} = $self->{snmp}->get_hostname();
    $self->{snmp_port} = $self->{snmp}->get_port();
    # Fill $self->{Sensor_id_selected} from --sensor/--name/--regexp.
    $self->manage_selection();
    $self->{snmp}->load(oids => [$oid_SensorDesc, $oid_SensorValue], instances => $self->{Sensor_id_selected});
    my $SensorValueResult = $self->{snmp}->get_leef(nothing_quit => 1);
    # Global OK message only when potentially checking several sensors
    # (no --sensor filter, or a regexp filter matching more than one).
    if (!defined($self->{option_results}->{sensor}) || defined($self->{option_results}->{use_regexp})) {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => 'All Voltages are ok.');
    }
    foreach my $SensorId (sort @{$self->{Sensor_id_selected}}) {
        my $SensorDesc = $SensorValueResult->{$oid_SensorDesc . '.' . $SensorId};
        # The raw SNMP value is in millivolts; convert to volts.
        my $SensorValue = $SensorValueResult->{$oid_SensorValue . '.' . $SensorId} / 1000;
        my $exit = $self->{perfdata}->threshold_check(value => $SensorValue, threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
        $self->{output}->output_add(long_msg => sprintf("Sensor '%s' Volt: %s",
                                                        $SensorDesc, $SensorValue));
        # Promote the message to the short output on a threshold breach,
        # or when a single named sensor was requested explicitly.
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1) || (defined($self->{option_results}->{sensor}) && !defined($self->{option_results}->{use_regexp}))) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("Sensor '%s' Volt: %s",
                                                             $SensorDesc, $SensorValue));
        }
        my $label = 'sensor_voltage';
        my $extra_label = '';
        # Disambiguate perfdata labels when several sensors are reported.
        $extra_label = '_' . $SensorId . "_" . $SensorDesc if (!defined($self->{option_results}->{sensor}) || defined($self->{option_results}->{use_regexp}));
        $self->{output}->perfdata_add(label => $label . $extra_label, unit => 'V',
                                      value => $SensorValue,
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'));
    }
    $self->{output}->display();
    $self->{output}->exit();
}
# Walks the SNMP sensor-description table and records the indexes of the
# sensors matching the --sensor/--name/--regexp/--regexp-isensitive
# options into $self->{Sensor_id_selected}. Exits when nothing matches.
sub manage_selection {
    my ($self, %options) = @_;
    my $result = $self->{snmp}->get_table(oid => $oid_SensorDesc, nothing_quit => 1);
    foreach my $key ($self->{snmp}->oid_lex_sort(keys %$result)) {
        # The sensor index is the last numeric component of the OID.
        next if ($key !~ /\.([0-9]+)$/);
        my $SensorId = $1;
        my $SensorDesc = $result->{$key};
        # --sensor without --name: filter on the numeric index.
        # NOTE(review): the index is matched as an unanchored regexp, so
        # --sensor 1 also selects ids 10, 11, ... — confirm intended.
        next if (defined($self->{option_results}->{sensor}) && !defined($self->{option_results}->{use_name}) && !defined($self->{option_results}->{use_regexp}) && !defined($self->{option_results}->{use_regexpi})
            && $SensorId !~ /$self->{option_results}->{sensor}/i);
        # --name --regexp --regexp-isensitive: case-insensitive regexp on the description.
        next if (defined($self->{option_results}->{use_name}) && defined($self->{option_results}->{use_regexp}) && defined($self->{option_results}->{use_regexpi})
            && $SensorDesc !~ /$self->{option_results}->{sensor}/i);
        # --name --regexp: case-sensitive regexp on the description.
        next if (defined($self->{option_results}->{use_name}) && defined($self->{option_results}->{use_regexp}) && !defined($self->{option_results}->{use_regexpi})
            && $SensorDesc !~ /$self->{option_results}->{sensor}/);
        # --name alone: exact description match.
        next if (defined($self->{option_results}->{use_name}) && !defined($self->{option_results}->{use_regexp}) && !defined($self->{option_results}->{use_regexpi})
            && $SensorDesc ne $self->{option_results}->{sensor});
        push @{$self->{Sensor_id_selected}}, $SensorId;
    }
    if (scalar(@{$self->{Sensor_id_selected}}) <= 0) {
        if (defined($self->{option_results}->{sensor})) {
            $self->{output}->add_option_msg(short_msg => "No Sensors found for '" . $self->{option_results}->{sensor} . "'.");
        } else {
            $self->{output}->add_option_msg(short_msg => "No Sensors found.");
        };
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check LM-Sensors: Voltage Sensors
=over 8
=item B<--warning>
Threshold warning (Volt)
=item B<--critical>
Threshold critical (Volt)
=item B<--sensor>
Set the Sensor Desc (number expected) ex: 1, 2,... (empty means 'check all sensors').
=item B<--name>
Allows you to use the Sensor Desc name with option --sensor instead of the Sensor Desc oid index.
=item B<--regexp>
Allows you to use a regexp to filter the sensor description (with option --name).
=item B<--regexp-isensitive>
Allows you to use a non case-sensitive regexp (with --regexp).
=back
=cut
| bcournaud/centreon-plugins | apps/lmsensors/mode/voltage.pm | Perl | apache-2.0 | 7,233 |
# Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use strict;
use warnings;
use Data::Dumper;
use Bio::AlignIO;
use Bio::EnsEMBL::Registry;
# Auto-configure the registry
# (anonymous connection to the public Ensembl mirror; port 5306).
Bio::EnsEMBL::Registry->load_registry_from_db(
    -host=>"ensembldb.ensembl.org", -user=>"anonymous",
    -port=>'5306');
# Get the Compara Adaptor for MethodLinkSpeciesSets
my $mlssa = Bio::EnsEMBL::Registry->get_adaptor(
    "Multi", "compara", "MethodLinkSpeciesSet");
# Fetch the EPO multiple-alignment set for the "mammals" species set.
my $mlss = $mlssa->fetch_by_method_link_type_species_set_name("EPO", "mammals");
print "# method_link_species_set_id : ", $mlss->dbID, "\n";
# $mlss->species_set_obj->genome_dbs() brings back a list ref of genome_db objects
# Print one "name<TAB>dbID" line per genome in the species set.
foreach my $genome_db (@{ $mlss->species_set_obj->genome_dbs() }){
    print join("\t", $genome_db->name, $genome_db->dbID), "\n";
}
| dbolser-ebi/ensembl-compara | docs/workshop/API_workshop_exercises/MethodLinkSpeciesSet_2.pl | Perl | apache-2.0 | 1,407 |
#!/usr/bin/perl -w
#
# Copyright 2012, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example shows how to use the validate only header to check for errors.
# No objects will be created, but exceptions will still be returned.
#
# Tags: AdGroupAdService.mutate
# Author: David Torres <api.davidtorres@gmail.com>
use strict;
use lib "../../../lib";
use Google::Ads::AdWords::Client;
use Google::Ads::AdWords::Logging;
use Google::Ads::AdWords::v201402::AdGroupAd;
use Google::Ads::AdWords::v201402::AdGroupAdOperation;
use Google::Ads::AdWords::v201402::TextAd;
use Cwd qw(abs_path);
# Replace with valid values of your account.
my $ad_group_id = "INSERT_AD_GROUP_ID_HERE";
# Example main subroutine.
# Submits a text ad with the validateOnly header set: nothing is created
# server-side, but any validation fault is still returned by the API.
sub validate_text_ad {
  my ($client, $ad_group_id) = @_;

  # Faults are inspected manually below instead of aborting the script.
  $client->set_die_on_faults(0);

  # Ask the API to validate the request without persisting anything.
  $client->set_validate_only(1);

  # Candidate text ad to validate.
  my $ad = Google::Ads::AdWords::v201402::TextAd->new({
      headline => "Luxury Cruise to Mars",
      description1 => "Visit the Red Planet in style.",
      description2 => "Low-gravity fun for all astronauts in orbit.",
      displayUrl => "www.example.com/cruises",
      url => "http://www.example.com"
  });
  my $ad_group_ad = Google::Ads::AdWords::v201402::AdGroupAd->new({
      adGroupId => $ad_group_id,
      ad => $ad
  });

  # Wrap the ad in an ADD operation.
  my $add_operation = Google::Ads::AdWords::v201402::AdGroupAdOperation->new({
      operand => $ad_group_ad,
      operator => "ADD"
  });

  # Run the validation-only mutate call.
  my $outcome = $client->AdGroupAdService()->mutate({
      operations => [$add_operation]
  });

  # A SOAP fault object means validation failed; anything else is success.
  if ($outcome->isa("SOAP::WSDL::SOAP::Typelib::Fault11")) {
    printf "Validation failed for reason: %s\n", $outcome->get_faultstring();
  } else {
    print "The ad is valid!\n";
  }

  return 1;
}
# Don't run the example if the file is being included.
# (A bare "return" at file scope is only legal when the file is loaded
# via require/do; when executed directly the condition below is false.)
if (abs_path($0) ne abs_path(__FILE__)) {
  return 1;
}
# Log SOAP XML request, response and API errors.
Google::Ads::AdWords::Logging::enable_all_logging();
# Get AdWords Client, credentials will be read from ~/adwords.properties.
my $client = Google::Ads::AdWords::Client->new({version => "v201402"});
# By default examples are set to die on any server returned fault.
$client->set_die_on_faults(1);
# Call the example; $ad_group_id is configured at the top of the script.
validate_text_ad($client, $ad_group_id);
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | examples/v201402/campaign_management/validate_text_ad.pl | Perl | apache-2.0 | 2,932 |
# Request parameter object for the AWS KMS ListRetirableGrants API call
# (Paws-generated style; see the POD below for attribute details).
package Paws::KMS::ListRetirableGrants;
use Moose;
# Maximum number of grants to return (1-100; server default is 50).
has Limit => (is => 'ro', isa => 'Int');
# Pagination marker taken from a previous truncated response.
has Marker => (is => 'ro', isa => 'Str');
# ARN of the retiring principal whose grants are listed (required).
has RetiringPrincipal => (is => 'ro', isa => 'Str', required => 1);
use MooseX::ClassAttribute;
# Class-level wiring consumed by the Paws caller machinery.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'ListRetirableGrants');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::KMS::ListGrantsResponse');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::KMS::ListRetirableGrants - Arguments for method ListRetirableGrants on Paws::KMS
=head1 DESCRIPTION
This class represents the parameters used for calling the method ListRetirableGrants on the
AWS Key Management Service service. Use the attributes of this class
as arguments to method ListRetirableGrants.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to ListRetirableGrants.
As an example:
$service_obj->ListRetirableGrants(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
=head1 ATTRIBUTES
=head2 Limit => Int
Use this parameter to specify the maximum number of items to return.
When this value is present, AWS KMS does not return more than the
specified number of items, but it might return fewer.
This value is optional. If you include a value, it must be between 1
and 100, inclusive. If you do not include a value, it defaults to 50.
=head2 Marker => Str
Use this parameter in a subsequent request after you receive a response
with truncated results. Set it to the value of C<NextMarker> from the
truncated response you just received.
=head2 B<REQUIRED> RetiringPrincipal => Str
The retiring principal for which to list grants.
To specify the retiring principal, use the Amazon Resource Name (ARN)
of an AWS principal. Valid AWS principals include AWS accounts (root),
IAM users, federated users, and assumed role users. For examples of the
ARN syntax for specifying a principal, see AWS Identity and Access
Management (IAM) in the Example ARNs section of the I<Amazon Web
Services General Reference>.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method ListRetirableGrants in L<Paws::KMS>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/KMS/ListRetirableGrants.pm | Perl | apache-2.0 | 2,674 |
package Mojolicious::Plugin::ID::OAuth::Mailru;
use Mojo::Base 'Mojolicious::Plugin';
use Mojo::Parameters;
use Mojo::ByteStream;
use Mojo::UserAgent;
use JSON::XS;
use Data::Dumper;
our $VERSION = '0.01';
# Documentaion
# http://api.mail.ru/docs/guides/oauth/sites/
# http://api.mail.ru/docs/reference/rest/users-getinfo/
# Plugin entry point: validates the mail.ru OAuth configuration, installs
# the /oauth/mailru/ routes (authorization redirect plus code-for-token
# exchange callback) and registers the mailru_get_info helper.
sub register {
    my ($self, $app, $conf) = @_;
    die "Not Found client_id or client_secret" if(ref($conf) ne 'HASH');
    # Silently do nothing when the mail.ru section is not configured.
    return if(!exists $conf->{'oauth'});
    return if(!exists $conf->{'oauth'}->{'mailru'});
    return if(!exists $conf->{'oauth'}->{'mailru'}->{'client_id'});
    return if(!exists $conf->{'oauth'}->{'mailru'}->{'client_secret'});
    return if(!exists $conf->{'oauth'}->{'mailru'}->{'secret_key'});
    my $client_id = $conf->{'oauth'}->{'mailru'}->{'client_id'};
    my $client_secret = $conf->{'oauth'}->{'mailru'}->{'client_secret'};
    my $return_url = $conf->{'oauth'}->{'mailru'}->{'return_url'} || '/';
    my $secret_key = $conf->{'oauth'}->{'mailru'}->{'secret_key'};
    die "Not found client_id" if(!$client_id);
    die "Not found client_secret" if(!$client_secret);
    die "Not found secret_key" if(!$secret_key);
    # mail.ru client ids are numeric, secrets are 32-char hex strings.
    if ($client_id !~ m/^\d+$/){
        die "Invalid client_id";
        return;
    }
    if (length($client_secret) != 32){
        die "Invalid client_secret";
        return;
    }
    # Static receiver page required by the mail.ru JS connect library.
    $app->routes->any([qw(GET)]=>'/oauth/mailru/receiver.html'=>sub {
        my ($self) = @_;
        $self->render(text=>'<html><body><script src="http://connect.mail.ru/js/loader.js"></script><script>mailru.loader.require(\'receiver\', function(){ mailru.receiver.init();})</script></body></html>');
        return;
    });
    # Main OAuth route: without a "state" param it redirects the browser
    # to the mail.ru authorization page; with one it acts as the callback
    # that exchanges the authorization code for an access token.
    $app->routes->any([qw(GET)]=>'/oauth/mailru/'=>sub {
        my ($self) = @_;
        my $state = $self->param('state');
        if($state){
            my $error = $self->param('error');
            my $code = $self->param('code');
            if($error){
                $self->app->log->error('error authorization server OAuth server:'.$error);
                $self->redirect_to('/');
                return;
            }
            # Rebuild the redirect_uri exactly as sent in the first leg.
            my $redirect_uri;
            my $host = $self->req->headers->host;
            my $is_secure = $self->req->is_secure;
            if($is_secure){
                $redirect_uri = "https://$host/oauth/mailru/";
            }
            else{
                $redirect_uri = "http://$host/oauth/mailru/";
            }
            my $ua = Mojo::UserAgent->new;
            my $json = JSON::XS->new()->utf8(1);
            # Exchange the authorization code for an access token.
            my $tx = $ua->post('https://connect.mail.ru/oauth/token'=>form=>{
                redirect_uri=>$redirect_uri,
                grant_type=>'authorization_code',
                code=>$code,
                client_id=>$client_id,
                client_secret=>$client_secret,
            });
            if($tx->error){
                my ($err, $code) = $tx->error;
                $self->render_exception("https://connect.mail.ru/oauth/token (message:$err, http code:$code)");
                return;
            }
            my $res = $tx->success;
            my $content = $res->body;
            eval{
                $content = $json->decode($content);
            };
            if($@){
                die "error parse json $@";
            }
            # NOTE(review): this guard looks inverted — with "&&" it can
            # never fire for a non-hash response, and a hash missing the
            # token also skips it; "||" was probably intended. The length
            # check below still catches a missing access_token.
            if(ref($content) ne 'HASH' && exists $content->{'access_token'} && !$content->{'access_token'}){
                die "Invalid access_token";
            }
            my $access_token = $content->{'access_token'};
            my $expires_in = $content->{'expires_in'};
            my $oauth_id = $content->{'x_mailru_vid'};
            if(length($access_token) != 32){
                die "invalid param access_token";
            }
            # Fetch the user profile and hand it to the app via the flash.
            my $get_info = $self->mailru_get_info(access_token=>$access_token, client_id=>$client_id, secret_key=>$secret_key, oauth_id=>$oauth_id);
            $get_info->{'expires_in'} = $expires_in;
            $self->flash({'plugin.id.oauth'=>$get_info});
            $self->redirect_to($return_url);
            return;
        }
        else{
            my $redirect_uri;
            my $host = $self->req->headers->host;
            my $is_secure = $self->req->is_secure;
            if($is_secure){
                $redirect_uri = "https://$host/oauth/mailru/";
            }
            else{
                $redirect_uri = "http://$host/oauth/mailru/";
            }
            # First leg: send the user to the mail.ru authorization page.
            my $url = "https://connect.mail.ru/oauth/authorize?";
            my $params = Mojo::Parameters->new;
            $params = $params->append(response_type=>'code');
            $params = $params->append(redirect_uri=>$redirect_uri);
            $params = $params->append(client_id=>$client_id);
            $params = $params->append(state=>'mailru');
            $self->redirect_to($url.$params);
        }
        return;
    });
    $app->helper(mailru_get_info=>\&_mailru_get_info);
}
# Fetches profile data for the authenticated user from the mail.ru REST
# API (users.getInfo) and normalizes it into a plain hash.
#
# Named parameters:
#   access_token - 32-char OAuth session key
#   client_id    - application id (app_id)
#   secret_key   - application secret used to sign the request
#   oauth_id     - mail.ru user id (x_mailru_vid)
#
# Returns a hashref with keys: access_token, server, oauth_id, login,
# email, birthday (YYYY-MM-DD), gender, lastname, firstname. Dies on a
# malformed response; renders an exception page on transport errors.
sub _mailru_get_info {
    my $self = shift;
    my %param = @_;
    if(length($param{'access_token'}) != 32){
        die "Invalid access_token";
    }
    my @params = ();
    push(@params, "method=users.getInfo");
    push(@params, "uids=$param{oauth_id}");
    push(@params, "session_key=$param{access_token}");
    push(@params, "secure=1");
    push(@params, "app_id=$param{client_id}");
    @params = sort{$a cmp $b} @params;
    my $params = join('&',@params);
    # Per the mail.ru signing rules the sig is the md5 of the sorted
    # parameters concatenated WITHOUT separators, plus the secret key.
    my $stream = Mojo::ByteStream->new(join('',@params).$param{'secret_key'});
    my $sig = $stream->md5_sum;
    my $json = JSON::XS->new()->utf8(1);
    my $ua = Mojo::UserAgent->new;
    my $tx = $ua->get("http://www.appsmail.ru/platform/api?$params&sig=$sig");
    $self->app->log->debug("http://www.appsmail.ru/platform/api?$params&sig=$sig");
    if($tx->error){
        my ($err, $code) = $tx->error;
        # BUGFIX: was "$code /= 'none'" — a numeric division by a string
        # that numifies to 0, i.e. an "Illegal division by zero" die.
        # Use defined-or assignment to default a missing HTTP status.
        $code //= 'none';
        $self->render_exception("error appsmail.ru http status:$code message:$err");
        return;
    }
    my $res = $tx->success;
    my $content = $res->body;
    eval{
        $content = $json->decode($content);
    };
    if($@){
        die "error parse json $@";
    }
    my %result;
    $result{'access_token'} = $param{'access_token'};
    $result{'server'} = 'mailru';
    $result{'login'} = undef;
    $result{'email'} = undef;
    $result{'birthday'} = undef;
    $result{'gender'} = undef;
    $result{'lastname'} = undef;
    $result{'firstname'} = undef;
    # BUGFIX: the original test was "!ref($content) eq 'ARRAY' && ..."
    # which compares the boolean negation against the string 'ARRAY' and
    # can therefore never be true (and "&&" inverted the intent). The API
    # must return an array whose first element is a non-empty hash.
    if(ref($content) ne 'ARRAY' || ref($content->[0]) ne 'HASH' || !%{$content->[0]}){
        $self->render_exception("http://www.appsmail.ru/platform/api (not HASH param)");
        return;
    }
    if(exists $content->[0]->{'uid'} && $content->[0]->{'uid'} =~ m/^[0-9]+$/){
        my $val = $content->[0]->{'uid'};
        $result{'oauth_id'} = $val;
    }
    else{
        die "http://www.appsmail.ru/platform/api (not oauth_id param)";
    }
    if(exists $content->[0]->{'email'} && $content->[0]->{'email'}){
        my $val = $content->[0]->{'email'};
        $val =~ s/^\s+|\s+$//g;
        $val = lc($val);
        $result{'email'} = $val;
    }
    # Derive a login from the local part of the email address.
    if(exists $content->[0]->{'email'} && $content->[0]->{'email'}){
        my $val = $content->[0]->{'email'};
        $val =~ s/^\s+|\s+$//g;
        if($val =~ m/^(?<login>([\w]+[\.\_\-]?[\w]+))\@/){
            $val = $+{'login'};
            $val = lc($val);
            $val =~ s/\./-/g;
            $result{'login'} = $val;
        }
    }
    if(exists $content->[0]->{'birthday'} && $content->[0]->{'birthday'}){
        my $val = $content->[0]->{'birthday'};
        $val =~ s/^\s+|\s+$//g;
        # mail.ru returns DD.MM.YYYY; convert to ISO YYYY-MM-DD.
        if($val =~ m/^(?<day>(\d{2}))\.(?<month>(\d{2}))\.(?<year>(\d{4}))$/){
            my $day = $+{'day'};
            my $month = $+{'month'};
            my $year = $+{'year'};
            $result{'birthday'} = "$year-$month-$day";
        }
    }
    if(exists $content->[0]->{'sex'}){
        # mail.ru encodes sex as 1 = female, 0 = male.
        my $val = $content->[0]->{'sex'};
        if($val){
            $result{'gender'} = 'female';
        }
        else{
            $result{'gender'} = 'male';
        }
    }
    if(exists $content->[0]->{'last_name'}){
        $result{'lastname'} = $content->[0]->{'last_name'};
    }
    if(exists $content->[0]->{'first_name'}){
        $result{'firstname'} = $content->[0]->{'first_name'};
    }
    return \%result;
}
1;
| kostya-ten/Mojolicious-Plugin-ID | lib/Mojolicious/Plugin/ID/OAuth/Mailru.pm | Perl | apache-2.0 | 7,137 |
package OpenXPKI::Test::QA::Role::WorkflowCreateCert;
use Moose::Role;
=head1 NAME
OpenXPKI::Test::QA::Role::WorkflowCreateCert - Moose role that extends L<OpenXPKI::Test>
with a quick way to create certificates
=head1 DESCRIPTION
Please note that this role requires two other roles to be applied:
L<OpenXPKI::Test::QA::Role::SampleConfig> and
L<OpenXPKI::Test::QA::Role::Workflows>, i.e.:
my $oxitest = OpenXPKI::Test->new(
with => [ qw( SampleConfig Workflow WorkflowCreateCert ],
...
);
You could also omit C<OpenXPKI::Test::QA::Role::SampleConfig> if you set up a
realm configuration with a I<certificate_signing_request_v2> workflow that is
compatible to the OpenXPKI default one.
=cut
# CPAN modules
use Test::More;
use Test::Exception;
# Project modules
use OpenXPKI::Server::Context;
use OpenXPKI::Test::QA::Role::Workflows::CertParams;
use OpenXPKI::Serialization::Simple;
requires 'also_init';
requires 'create_workflow'; # effectively requires 'OpenXPKI::Test::QA::Role::Workflows'
requires 'session';
before 'init_server' => sub {
    my ($self) = @_;
    # Prepend, so that any user supplied "also_init" entries can rely on
    # these two modules already being present.
    unshift @{ $self->also_init }, qw( crypto_layer volatile_vault );
};
=head1 METHODS
This role adds the following methods to L<OpenXPKI::Test>:
=head2 create_cert
Runs a L<lives_ok|Test::Exception/lives_ok> test that creates a certificate via
API by starting the workflow I<certificate_signing_request_v2>.
Returns a I<HashRef> with some certificate info:
{
req_key => ...,
identifier => ...,
profile => ...,
}
Please note that if used in conjunction with L<OpenXPKI::Test::QA::Role::Server>
the workflow is still directly created by accessing the API methods, i.e. there
is NO socket communication to the running server daemon.
=cut
# Drive the "certificate_signing_request_v2" workflow from start to SUCCESS
# and return a HashRef with req_key / identifier / profile (see POD above).
sub create_cert {
    my ($self, @args) = @_;
    # Validate and normalize the caller-supplied certificate parameters.
    my $params = OpenXPKI::Test::QA::Role::Workflows::CertParams->new(@args);
    my $serializer = OpenXPKI::Serialization::Simple->new();
    # The workflow takes slightly different paths depending on the profile.
    my $is_server_profile = $params->profile eq "tls_server";
    my $is_client_profile = $params->profile eq "tls_client";
    my %cert_subject_parts = (
        # IP addresses instead of host names will make DNS lookups fail quicker
        hostname => $params->hostname,
        $is_server_profile ? (
            hostname2 => $params->hostname2,
            port => 8080,
        ) : (),
        $is_client_profile ? (
            application_name => $params->application_name,
        ) : (),
    );
    # Filled in at the end of the subtest and returned to the caller.
    my $cert_info = {};
    subtest "Create certificate (hostname ".$params->hostname.")" => sub {
        # change PKI realm, user and role to get permission to create workflow
        my $sess_data = $self->session->data;
        die "Cannot create certificate if session data is not set" unless $sess_data->has_pki_realm;
        my $old_user = $sess_data->user;
        my $old_role = $sess_data->role;
        $self->set_user($sess_data->pki_realm => "raop");
        # NOTE(review): $result is declared but never used.
        my $result;
        lives_and {
            # Start the CSR workflow and walk it through its UI states.
            my $wftest = $self->create_workflow(
                "certificate_signing_request_v2" => {
                    cert_profile => $params->profile,
                    cert_subject_style => "00_basic_style",
                }
            );
            $wftest->state_is('SETUP_REQUEST_TYPE');
            # Request a server-generated RSA key, encrypted with a client password.
            $wftest->execute(
                csr_provide_server_key_params => {
                    key_alg => "rsa",
                    enc_alg => 'aes256',
                    key_gen_params => $serializer->serialize( { KEY_LENGTH => 2048 } ),
                    password_type => 'client',
                    csr_type => 'pkcs10'
                },
            );
            $wftest->state_is('ENTER_SUBJECT');
            $wftest->execute(
                csr_edit_subject => {
                    cert_subject_parts => $serializer->serialize( \%cert_subject_parts ),
                },
            );
            # Server profiles have an extra SAN entry step (left empty here).
            if ($is_server_profile) {
                $wftest->state_is('ENTER_SAN');
                $wftest->execute(
                    csr_edit_san => {
                        cert_san_parts => $serializer->serialize( { } ),
                    },
                );
            }
            $wftest->state_is('ENTER_CERT_INFO');
            $wftest->execute(
                'csr_edit_cert_info' => {
                    cert_info => $serializer->serialize( {
                        requestor_gname => $params->requestor_gname,
                        requestor_name => $params->requestor_name,
                        requestor_email => $params->requestor_email,
                    } )
                },
            );
            $wftest->state_is('SUBJECT_COMPLETE') or BAIL_OUT;
            # Test FQDNs should not validate so we need a policy exception request
            # (on rare cases the responsible router might return a valid address, so we check)
            my $msg = $self->api2_command(
                get_workflow_info => { id => $wftest->id, with_ui_info => 1 }
            );
            my $actions = $msg->{state}->{option};
            # Which state we expect next depends on whether a policy
            # violation was raised for the unresolvable test host names.
            my $intermediate_state;
            if (grep { /^csr_enter_policy_violation_comment$/ } @$actions) {
                diag "Test FQDNs do not resolve - handling policy violation" if $ENV{TEST_VERBOSE};
                $wftest->execute(
                    csr_enter_policy_violation_comment => { policy_comment => 'This is just a test' },
                );
                $intermediate_state = 'PENDING_POLICY_VIOLATION';
            }
            else {
                diag "For whatever reason test FQDNs do resolve - submitting request" if $ENV{TEST_VERBOSE};
                $wftest->execute(
                    csr_submit => {},
                );
                $intermediate_state = 'PENDING';
            }
            $wftest->state_is('ENTER_KEY_PASSWORD');
            # Dummy password protecting the generated key (test only).
            $wftest->execute(
                csr_ask_client_password => { _password => "m4#bDf7m3abd" },
            );
            # if ($self->notbefore) {
            #     $test->execute_ok('csr_edit_validity', {
            #         notbefore => $self->notbefore,
            #         notafter => $self->notafter,
            #     });
            #     $test->state_is( ??? );
            # }
            $wftest->state_is($intermediate_state);
            # Approve as "raop" which finishes the workflow.
            $wftest->execute(
                csr_approve_csr => {},
            );
            $wftest->state_is('SUCCESS') or BAIL_OUT;
            # Collect the identifiers of the freshly issued certificate.
            my $temp = $self->api2_command(
                get_workflow_info => { id => $wftest->id }
            );
            $cert_info = {
                req_key => $temp->{workflow}->{context}->{csr_serial},
                identifier => $temp->{workflow}->{context}->{cert_identifier},
                profile => $temp->{workflow}->{context}->{cert_profile},
            };
        } "successfully run workflow";
        # Restore the original session user and role.
        $sess_data->user($old_user);
        $sess_data->role($old_role);
    };
    return $cert_info;
}
1;
| oliwel/openxpki | qatest/lib/OpenXPKI/Test/QA/Role/WorkflowCreateCert.pm | Perl | apache-2.0 | 7,067 |
#!/usr/bin/perl -w
###########################################################################
## ##
## Carnegie Mellon University ##
## Copyright (c) 2005 ##
## All Rights Reserved. ##
## ##
## Permission is hereby granted, free of charge, to use and distribute ##
## this software and its documentation without restriction, including ##
## without limitation the rights to use, copy, modify, merge, publish, ##
## distribute, sublicense, and/or sell copies of this work, and to ##
## permit persons to whom this work is furnished to do so, subject to ##
## the following conditions: ##
## 1. The code must retain the above copyright notice, this list of ##
## conditions and the following disclaimer. ##
## 2. Any modifications must be clearly marked as such. ##
## 3. Original authors' names are not deleted. ##
## 4. The authors' names are not used to endorse or promote products ##
## derived from this software without specific prior written ##
## permission. ##
## ##
## CARNEGIE MELLON UNIVERSITY AND THE CONTRIBUTORS TO THIS WORK ##
## DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ##
## ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT ##
## SHALL CARNEGIE MELLON UNIVERSITY NOR THE CONTRIBUTORS BE LIABLE ##
## FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ##
## WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ##
## AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ##
## ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF ##
## THIS SOFTWARE. ##
## ##
###########################################################################
## ##
## Convert ascii to binary ##
## ##
###########################################################################
use strict;
use warnings;
$| = 1;                  # unbuffered output
binmode(STDOUT);         # raw binary on stdout
# Consume the format argument so that the magic <> below does not try to
# open the word "short"/"byte" as an input file (the original left it in
# @ARGV, which made <> attempt exactly that).
my $format = shift @ARGV;
# pack() template: 's' = native signed 16-bit short, 'c' = signed 8-bit byte.
my $bintype;
if (defined $format and $format eq "short") {
    $bintype = 's';
}
elsif (defined $format and $format eq "byte") {
    $bintype = 'c';
}
else {
    # Previously an unknown/missing argument left $bintype undef and pack()
    # failed with an unhelpful message; fail early with a usage line instead.
    die "usage: a2bin (short|byte) [file ...]\n";
}
# Read one ASCII number per line and emit it in the requested binary format.
while (my $line = <>) {
    print pack($bintype, $line);
}
| saikrishnarallabandi/Festival-Speech-Synthesis-System | flite/tools/a2bin.pl | Perl | apache-2.0 | 2,911 |
# Send a friend (buddy) message through the WebQQ channel API.
# On failure the message is re-queued while its TTL is positive; on final
# success/failure the per-message callback and the global send_message
# event are fired.
sub Mojo::Webqq::_send_friend_message{
    my($self,$msg) = @_;
    # Invoked with the decoded JSON response of the HTTP POST below.
    my $callback = sub{
        my $json = shift;
        $msg->parse_send_status_msg( $json );
        if(!$msg->is_success and $msg->ttl > 0){
            # Debug string (Chinese): "message [id] failed to send, retrying;
            # current TTL: ..."
            $self->debug("消息[ " .$msg->id . " ]发送失败,尝试重新发送,当前TTL: " . $msg->ttl);
            # Put the message back on the queue for another attempt.
            $self->message_queue->put($msg);
            return;
        }
        else{
            # Fire the per-message callback first, then the global event.
            if(ref $msg->cb eq 'CODE'){
                $msg->cb->(
                    $self,
                    $msg,
                );
            }
            $self->emit(send_message =>
                $msg,
            );
        }
    };
    my $api_url = ($self->security?'https':'http') . '://d1.web2.qq.com/channel/send_buddy_msg2';
    # The Referer header is required by the WebQQ endpoint.
    my $headers = {
        Referer => 'http://d1.web2.qq.com/proxy.html?v=20151105001&callback=1&id=2',
        json => 1,
    };
    # Translate the parsed message parts into the wire format: plain text
    # stays a string, faces become ["face", numeric-id]. Other part types
    # contribute nothing (the if/elsif yields an empty list for them).
    my @content = map {
        if($_->{type} eq "txt"){$_->{content}}
        elsif($_->{type} eq "face"){["face",0+$_->{id}]}
    } @{$msg->raw_content};
    #for(my $i=0;$i<@content;$i++){
    #    if(ref $content[$i] eq "ARRAY"){
    #        if(ref $content[$i] eq "ARRAY"){
    #            splice @content,$i+1,0," ";
    #        }
    #        else{
    #            $content[$i+1] = " " . $content[$i+1];
    #        }
    #    }
    #}
    # Append the mandatory font descriptor ("宋体" = SimSun typeface).
    my $content = [@content,["font",{name=>"宋体",size=>10,style=>[0,0,0],color=>"000000"}]];
    my %s = (
        to => $msg->receiver_id,
        face => $self->user->face || 570,
        content => $self->to_json($content),
        msg_id => $msg->id,
        clientid => $self->clientid,
        psessionid => $self->psessionid,
    );
    # The request body is a single form field "r" holding the JSON payload.
    $self->http_post(
        $api_url,
        $headers,
        form=>{r=>$self->to_json(\%s)},
        $callback,
    );
}
1;
| sjdy521/Mojo-Webqq | lib/Mojo/Webqq/Message/Remote/_send_friend_message.pm | Perl | bsd-2-clause | 1,830 |
#!/usr/bin/perl
# sortfrags2.pl fragments.fa -- splits reading-frame-labeled data into
# files, one per known reading frame (the output stem is derived from the input name)
die "Usage: sortfrags2.pl <labeled fragments> \n"
    unless @ARGV == 1;

my $filename = $ARGV[0];
# Derive the output stem by stripping a FASTA-style extension. The dots are
# now escaped -- the original patterns treated '.' as a wildcard.
(my $stem = $filename) =~ s/\.(?:fasta|fna|fa)$//;

# Three-argument open with error check (was unchecked two-argument open).
open my $FILE, '<', $filename
    or die "sortfrags2: can't open $filename !";

# One output file per reading-frame class 0..6.
my @out;
for my $class (0 .. 6) {
    open $out[$class], '>', "$stem.$class.fa"
        or die "sortfrags2: can't create $stem.$class.fa: $!";
}

# Slurp the input as '>'-delimited FASTA records.
local $/ = '>';
my @fragments = <$FILE>;
close $FILE;

foreach my $record (@fragments) {
    chop($record);             # removes trailing ">"
    my @fields = split '\n', $record;
    next unless @fields;       # skip the empty leading record before the first '>'
    my $header = $fields[0];
    my $entry = ">" . join("\n", @fields) . "\n";
    # A header may in principle match more than one pattern, so every test
    # is applied independently, exactly as in the original.
    print { $out[0] } $entry if $header =~ m/gen.*gen.*gene 1 rf 0/;
    print { $out[1] } $entry if $header =~ m/gen.*gen.*gene 1 rf 1/;
    print { $out[2] } $entry if $header =~ m/gen.*gen.*gene 1 rf 2/;
    print { $out[3] } $entry if $header =~ m/gen.*gen.*gene -1 rf 0/ || $header =~ m/gen.*gen.*gene -1 rf 3/;
    print { $out[4] } $entry if $header =~ m/gen.*gen.*gene -1 rf 1/ || $header =~ m/gen.*gen.*gene -1 rf 4/;
    print { $out[5] } $entry if $header =~ m/gen.*gen.*gene -1 rf 2/;
    print { $out[6] } $entry if $header =~ m/gen.*gen.*gene 0/;
}

# Close all outputs explicitly so buffered write errors are reported
# (the original never closed them).
close $out[$_] or die "sortfrags2: error closing $stem.$_.fa: $!" for 0 .. 6;
| wltrimbl/genecall_test | src/sortfrags2.pl | Perl | bsd-2-clause | 1,510 |
###########################################################################
#
# This file is partially auto-generated by the DateTime::Locale generator
# tools (v0.10). This code generator comes with the DateTime::Locale
# distribution in the tools/ directory, and is called generate-modules.
#
# This file was generated from the CLDR JSON locale data. See the LICENSE.cldr
# file included in this distribution for license details.
#
# Do not edit this file directly unless you are sure the part you are editing
# is not created by the generator.
#
###########################################################################
=pod
=encoding UTF-8
=head1 NAME
DateTime::Locale::rm - Locale data examples for the rm locale.
=head1 DESCRIPTION
This pod file contains examples of the locale data available for the
Romansh locale.
=head2 Days
=head3 Wide (format)
glindesdi
mardi
mesemna
gievgia
venderdi
sonda
dumengia
=head3 Abbreviated (format)
gli
ma
me
gie
ve
so
du
=head3 Narrow (format)
G
M
M
G
V
S
D
=head3 Wide (stand-alone)
glindesdi
mardi
mesemna
gievgia
venderdi
sonda
dumengia
=head3 Abbreviated (stand-alone)
gli
ma
me
gie
ve
so
du
=head3 Narrow (stand-alone)
G
M
M
G
V
S
D
=head2 Months
=head3 Wide (format)
schaner
favrer
mars
avrigl
matg
zercladur
fanadur
avust
settember
october
november
december
=head3 Abbreviated (format)
schan.
favr.
mars
avr.
matg
zercl.
fan.
avust
sett.
oct.
nov.
dec.
=head3 Narrow (format)
S
F
M
A
M
Z
F
A
S
O
N
D
=head3 Wide (stand-alone)
schaner
favrer
mars
avrigl
matg
zercladur
fanadur
avust
settember
october
november
december
=head3 Abbreviated (stand-alone)
schan.
favr.
mars
avr.
matg
zercl.
fan.
avust
sett.
oct.
nov.
dec.
=head3 Narrow (stand-alone)
S
F
M
A
M
Z
F
A
S
O
N
D
=head2 Quarters
=head3 Wide (format)
1. quartal
2. quartal
3. quartal
4. quartal
=head3 Abbreviated (format)
1. quartal
2. quartal
3. quartal
4. quartal
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
1. quartal
2. quartal
3. quartal
4. quartal
=head3 Abbreviated (stand-alone)
1. quartal
2. quartal
3. quartal
4. quartal
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide (format)
avant Cristus
suenter Cristus
=head3 Abbreviated (format)
av. Cr.
s. Cr.
=head3 Narrow (format)
av. Cr.
s. Cr.
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = mardi, ils 5 da favrer 2008
1995-12-22T09:05:02 = venderdi, ils 22 da december 1995
-0010-09-15T04:44:23 = sonda, ils 15 da settember -10
=head3 Long
2008-02-05T18:30:30 = 5 da favrer 2008
1995-12-22T09:05:02 = 22 da december 1995
-0010-09-15T04:44:23 = 15 da settember -10
=head3 Medium
2008-02-05T18:30:30 = 05-02-2008
1995-12-22T09:05:02 = 22-12-1995
-0010-09-15T04:44:23 = 15-09--10
=head3 Short
2008-02-05T18:30:30 = 05-02-08
1995-12-22T09:05:02 = 22-12-95
-0010-09-15T04:44:23 = 15-09--10
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Short
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = mardi, ils 5 da favrer 2008 18:30:30 UTC
1995-12-22T09:05:02 = venderdi, ils 22 da december 1995 09:05:02 UTC
-0010-09-15T04:44:23 = sonda, ils 15 da settember -10 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 5 da favrer 2008 18:30:30 UTC
1995-12-22T09:05:02 = 22 da december 1995 09:05:02 UTC
-0010-09-15T04:44:23 = 15 da settember -10 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 05-02-2008 18:30:30
1995-12-22T09:05:02 = 22-12-1995 09:05:02
-0010-09-15T04:44:23 = 15-09--10 04:44:23
=head3 Short
2008-02-05T18:30:30 = 05-02-08 18:30
1995-12-22T09:05:02 = 22-12-95 09:05
-0010-09-15T04:44:23 = 15-09--10 04:44
=head2 Available Formats
=head3 E (ccc)
2008-02-05T18:30:30 = ma
1995-12-22T09:05:02 = ve
-0010-09-15T04:44:23 = so
=head3 EHm (E HH:mm)
2008-02-05T18:30:30 = ma 18:30
1995-12-22T09:05:02 = ve 09:05
-0010-09-15T04:44:23 = so 04:44
=head3 EHms (E HH:mm:ss)
2008-02-05T18:30:30 = ma 18:30:30
1995-12-22T09:05:02 = ve 09:05:02
-0010-09-15T04:44:23 = so 04:44:23
=head3 Ed (E d.)
2008-02-05T18:30:30 = ma 5.
1995-12-22T09:05:02 = ve 22.
-0010-09-15T04:44:23 = so 15.
=head3 Ehm (E h:mm a)
2008-02-05T18:30:30 = ma 6:30 PM
1995-12-22T09:05:02 = ve 9:05 AM
-0010-09-15T04:44:23 = so 4:44 AM
=head3 Ehms (E h:mm:ss a)
2008-02-05T18:30:30 = ma 6:30:30 PM
1995-12-22T09:05:02 = ve 9:05:02 AM
-0010-09-15T04:44:23 = so 4:44:23 AM
=head3 Gy (G y)
2008-02-05T18:30:30 = s. Cr. 2008
1995-12-22T09:05:02 = s. Cr. 1995
-0010-09-15T04:44:23 = av. Cr. -10
=head3 GyMMM (G y MMM)
2008-02-05T18:30:30 = s. Cr. 2008 favr.
1995-12-22T09:05:02 = s. Cr. 1995 dec.
-0010-09-15T04:44:23 = av. Cr. -10 sett.
=head3 GyMMMEd (G y MMM d, E)
2008-02-05T18:30:30 = s. Cr. 2008 favr. 5, ma
1995-12-22T09:05:02 = s. Cr. 1995 dec. 22, ve
-0010-09-15T04:44:23 = av. Cr. -10 sett. 15, so
=head3 GyMMMd (G y MMM d)
2008-02-05T18:30:30 = s. Cr. 2008 favr. 5
1995-12-22T09:05:02 = s. Cr. 1995 dec. 22
-0010-09-15T04:44:23 = av. Cr. -10 sett. 15
=head3 H (H)
2008-02-05T18:30:30 = 18
1995-12-22T09:05:02 = 9
-0010-09-15T04:44:23 = 4
=head3 HHmm (HH:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head3 HHmmss (HH:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Hm (H:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 9:05
-0010-09-15T04:44:23 = 4:44
=head3 Hms (HH:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Hmsv (HH:mm:ss v)
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Hmv (HH:mm v)
2008-02-05T18:30:30 = 18:30 UTC
1995-12-22T09:05:02 = 09:05 UTC
-0010-09-15T04:44:23 = 04:44 UTC
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 MEd (E, d.M.)
2008-02-05T18:30:30 = ma, 5.2.
1995-12-22T09:05:02 = ve, 22.12.
-0010-09-15T04:44:23 = so, 15.9.
=head3 MMM (LLL)
2008-02-05T18:30:30 = favr.
1995-12-22T09:05:02 = dec.
-0010-09-15T04:44:23 = sett.
=head3 MMMEd (E d. MMM)
2008-02-05T18:30:30 = ma 5. favr.
1995-12-22T09:05:02 = ve 22. dec.
-0010-09-15T04:44:23 = so 15. sett.
=head3 MMMMEd (E d. MMMM)
2008-02-05T18:30:30 = ma 5. favrer
1995-12-22T09:05:02 = ve 22. december
-0010-09-15T04:44:23 = so 15. settember
=head3 MMMMd (d. MMMM)
2008-02-05T18:30:30 = 5. favrer
1995-12-22T09:05:02 = 22. december
-0010-09-15T04:44:23 = 15. settember
=head3 MMMd (d. MMM)
2008-02-05T18:30:30 = 5. favr.
1995-12-22T09:05:02 = 22. dec.
-0010-09-15T04:44:23 = 15. sett.
=head3 MMd (d.MM.)
2008-02-05T18:30:30 = 5.02.
1995-12-22T09:05:02 = 22.12.
-0010-09-15T04:44:23 = 15.09.
=head3 MMdd (dd.MM.)
2008-02-05T18:30:30 = 05.02.
1995-12-22T09:05:02 = 22.12.
-0010-09-15T04:44:23 = 15.09.
=head3 Md (d.M.)
2008-02-05T18:30:30 = 5.2.
1995-12-22T09:05:02 = 22.12.
-0010-09-15T04:44:23 = 15.9.
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 h (h a)
2008-02-05T18:30:30 = 6 PM
1995-12-22T09:05:02 = 9 AM
-0010-09-15T04:44:23 = 4 AM
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 PM
1995-12-22T09:05:02 = 9:05 AM
-0010-09-15T04:44:23 = 4:44 AM
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 PM
1995-12-22T09:05:02 = 9:05:02 AM
-0010-09-15T04:44:23 = 4:44:23 AM
=head3 hmsv (h:mm:ss a v)
2008-02-05T18:30:30 = 6:30:30 PM UTC
1995-12-22T09:05:02 = 9:05:02 AM UTC
-0010-09-15T04:44:23 = 4:44:23 AM UTC
=head3 hmv (h:mm a v)
2008-02-05T18:30:30 = 6:30 PM UTC
1995-12-22T09:05:02 = 9:05 AM UTC
-0010-09-15T04:44:23 = 4:44 AM UTC
=head3 mmss (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y)
2008-02-05T18:30:30 = 2008
1995-12-22T09:05:02 = 1995
-0010-09-15T04:44:23 = -10
=head3 yM (y-M)
2008-02-05T18:30:30 = 2008-2
1995-12-22T09:05:02 = 1995-12
-0010-09-15T04:44:23 = -10-9
=head3 yMEd (E, y-M-d)
2008-02-05T18:30:30 = ma, 2008-2-5
1995-12-22T09:05:02 = ve, 1995-12-22
-0010-09-15T04:44:23 = so, -10-9-15
=head3 yMM (MM.y)
2008-02-05T18:30:30 = 02.2008
1995-12-22T09:05:02 = 12.1995
-0010-09-15T04:44:23 = 09.-10
=head3 yMMM (MMM y)
2008-02-05T18:30:30 = favr. 2008
1995-12-22T09:05:02 = dec. 1995
-0010-09-15T04:44:23 = sett. -10
=head3 yMMMEd (E, d. MMM y)
2008-02-05T18:30:30 = ma, 5. favr. 2008
1995-12-22T09:05:02 = ve, 22. dec. 1995
-0010-09-15T04:44:23 = so, 15. sett. -10
=head3 yMMMM (MMMM y)
2008-02-05T18:30:30 = favrer 2008
1995-12-22T09:05:02 = december 1995
-0010-09-15T04:44:23 = settember -10
=head3 yMMMd (y MMM d)
2008-02-05T18:30:30 = 2008 favr. 5
1995-12-22T09:05:02 = 1995 dec. 22
-0010-09-15T04:44:23 = -10 sett. 15
=head3 yMMdd (dd.MM.y)
2008-02-05T18:30:30 = 05.02.2008
1995-12-22T09:05:02 = 22.12.1995
-0010-09-15T04:44:23 = 15.09.-10
=head3 yMd (y-MM-dd)
2008-02-05T18:30:30 = 2008-02-05
1995-12-22T09:05:02 = 1995-12-22
-0010-09-15T04:44:23 = -10-09-15
=head3 yQQQ (QQQ y)
2008-02-05T18:30:30 = 1. quartal 2008
1995-12-22T09:05:02 = 4. quartal 1995
-0010-09-15T04:44:23 = 3. quartal -10
=head3 yQQQQ (QQQQ y)
2008-02-05T18:30:30 = 1. quartal 2008
1995-12-22T09:05:02 = 4. quartal 1995
-0010-09-15T04:44:23 = 3. quartal -10
=head2 Miscellaneous
=head3 Prefers 24 hour time?
Yes
=head3 Local first day of the week
1 (glindesdi)
=head1 SUPPORT
See L<DateTime::Locale>.
=cut
| jkb78/extrajnm | local/lib/perl5/DateTime/Locale/rm.pod | Perl | mit | 10,595 |
:- use_module(library(jpl)).
jpl_versions_demo :-
jpl_call('org.jpl7.JPL', version_string, [], Vj),
jpl_c_lib_version(Vc),
jpl_pl_lib_version(Vp),
nl,
write('prolog library version: '), write( Vp), nl,
write(' java library version: '), write( Vj), nl,
write(' c library version: '), write( Vc), nl,
( Vp == Vj,
Vj == Vc
-> write('BINGO! you appear to have the same version of each library installed'), nl
; write('WHOOPS! you appear not to have the same version of each library installed'), nl
),
nl.
% this directive runs the above demo
:- jpl_versions_demo.
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/share/swipl/doc/packages/examples/jpl/prolog/jpl_versions_demo.pl | Perl | mit | 647 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use strict;
use warnings;
package LucyX::Search::MockMatcher;
use Lucy;
our $VERSION = '0.005000';
$VERSION = eval $VERSION;
# Named-argument constructor wrapping the XS-level _new().
#   doc_ids - required ARRAY ref of matching doc ids (sorted, per the POD)
#   scores  - optional ARRAY ref of scores, parallel to doc_ids
sub new {
    my ( $either, %args ) = @_;
    # NOTE(review): confess() is called but 'use Carp' does not appear in
    # this file -- presumably it is made available indirectly; verify.
    confess("Missing doc_ids") unless ref( $args{doc_ids} ) eq 'ARRAY';
    my $doc_ids = Lucy::Object::I32Array->new( ints => $args{doc_ids} );
    my $size = $doc_ids->get_size;
    my $scores;
    if ( ref( $args{scores} ) eq 'ARRAY' ) {
        confess("Mismatch between scores and doc_ids array sizes")
            unless scalar @{ $args{scores} } == $size;
        # Pack the scores as native 32-bit floats for the C layer.
        $scores = Clownfish::Blob->new(
            pack( "f$size", @{ $args{scores} } ) );
    }
    return $either->_new(
        doc_ids => $doc_ids,
        scores => $scores,
    );
}
1;
__END__
=head1 NAME
LucyX::Search::MockMatcher - Matcher with arbitrary docs and scores.
=head1 DESCRIPTION
Used for testing combining L<Matchers|Lucy::Search::Matcher> such as
ANDMatcher, MockMatcher allows arbitrary match criteria to be supplied,
obviating the need for clever index construction to cover corner cases.
MockMatcher is a testing and demonstration class; it is unsupported.
=head1 CONSTRUCTORS
=head2 new( [I<labeled params>] )
=over
=item *
B<doc_ids> - A sorted array of L<doc_ids|Lucy::Docs::DocIDs>.
=item *
B<scores> - An array of scores, one for each doc_id.
=back
=cut
| rbevers/lucy | perl/lib/LucyX/Search/MockMatcher.pm | Perl | apache-2.0 | 2,119 |
# Generated SOAP::WSDL type class for the AppConversion.AppConversionType
# enumeration (a restricted xsd:string); see the POD below for details.
package Google::Ads::AdWords::v201409::AppConversion::AppConversionType;
use strict;
use warnings;
# XML namespace of the schema that defines this simpleType.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409'};
# derivation by restriction
use base qw(
SOAP::WSDL::XSD::Typelib::Builtin::string);
1;
__END__
=pod
=head1 NAME
=head1 DESCRIPTION
Perl data type class for the XML Schema defined simpleType
AppConversion.AppConversionType from the namespace https://adwords.google.com/api/adwords/cm/v201409.
This clase is derived from
SOAP::WSDL::XSD::Typelib::Builtin::string
. SOAP::WSDL's schema implementation does not validate data, so you can use it exactly
like it's base type.
# Description of restrictions not implemented yet.
=head1 METHODS
=head2 new
Constructor.
=head2 get_value / set_value
Getter and setter for the simpleType's value.
=head1 OVERLOADING
Depending on the simple type's base type, the following operations are overloaded
Stringification
Numerification
Boolification
Check L<SOAP::WSDL::XSD::Typelib::Builtin> for more information.
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/AppConversion/AppConversionType.pm | Perl | apache-2.0 | 1,101 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Perl-side stub for the XS-implemented Lucy::Search::RangeQuery class;
# it only declares the package and its version (the behavior lives in C).
package Lucy::Search::RangeQuery;
use Lucy;
our $VERSION = '0.005000';
$VERSION = eval $VERSION;    # numify (strips "_" from dev releases)
1;
__END__
| rectang/lucy | perl/lib/Lucy/Search/RangeQuery.pm | Perl | apache-2.0 | 894 |
package AsposeSlidesCloud::Object::SlideComments;
require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;
use base "AsposeSlidesCloud::Object::BaseObject";
#
#
#
#NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
#
my $swagger_types = {
'List' => 'ARRAY[SlideComment]',
'SelfUri' => 'ResourceUri',
'AlternateLinks' => 'ARRAY[ResourceUri]',
'Links' => 'ARRAY[ResourceUri]'
};
my $attribute_map = {
'List' => 'List',
'SelfUri' => 'SelfUri',
'AlternateLinks' => 'AlternateLinks',
'Links' => 'Links'
};
# new object
# Construct a SlideComments container from named constructor arguments.
# All four known attributes are always present in the resulting object;
# unsupplied ones hold undef, exactly as the generated constructor does.
sub new {
    my ($class, %args) = @_;
    my %self = map { $_ => $args{$_} }
               qw(List SelfUri AlternateLinks Links);
    return bless \%self, $class;
}
# get swagger type of the attribute
sub get_swagger_types {
    # File-scoped map: attribute name => declared Swagger type string.
    return $swagger_types;
}
# get attribute mappping
sub get_attribute_map {
    # File-scoped map: Perl attribute name => JSON wire field name.
    return $attribute_map;
}
1;
| aspose-slides/Aspose.Slides-for-Cloud | SDKs/Aspose.Slides-Cloud-SDK-for-Perl/lib/AsposeSlidesCloud/Object/SlideComments.pm | Perl | mit | 1,215 |
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
use strict;
package TLSProxy::ServerKeyExchange;
use vars '@ISA';
push @ISA, 'TLSProxy::Message';
# Construct a ServerKeyExchange message object via the Message base class
# and initialise the DHE-specific fields; the payload is decoded later by
# parse().
sub new
{
    my $class = shift;
    my ($server,
        $data,
        $records,
        $startoffset,
        $message_frag_lens) = @_;
    my $self = $class->SUPER::new(
        $server,
        TLSProxy::Message::MT_SERVER_KEY_EXCHANGE,
        $data,
        $records,
        $startoffset,
        $message_frag_lens);
    #DHE
    # Fields are empty strings until parse() fills them in.
    $self->{p} = "";
    $self->{g} = "";
    $self->{pub_key} = "";
    $self->{sig} = "";
    return $self;
}
# Decode the DHE ServerKeyExchange body. Each field (p, g, pub_key and the
# optional signature) is a 2-byte big-endian length followed by that many
# bytes of data.
sub parse
{
    my $self = shift;
    #Minimal SKE parsing. Only supports DHE at the moment (if its not DHE
    #the parsing data will be trash...which is ok as long as we don't try to
    #use it)
    # Prime p: length at offset 0, value directly after.
    my $p_len = unpack('n', $self->data);
    my $ptr = 2;
    my $p = substr($self->data, $ptr, $p_len);
    $ptr += $p_len;
    # Generator g.
    my $g_len = unpack('n', substr($self->data, $ptr));
    $ptr += 2;
    my $g = substr($self->data, $ptr, $g_len);
    $ptr += $g_len;
    # Server's public DH value.
    my $pub_key_len = unpack('n', substr($self->data, $ptr));
    $ptr += 2;
    my $pub_key = substr($self->data, $ptr, $pub_key_len);
    $ptr += $pub_key_len;
    #We assume its signed
    # unpack() yields undef when the data ends here, so an absent signature
    # leaves $sig as the empty string.
    my $sig_len = unpack('n', substr($self->data, $ptr));
    my $sig = "";
    if (defined $sig_len) {
        $ptr += 2;
        $sig = substr($self->data, $ptr, $sig_len);
        $ptr += $sig_len;
    }
    # Store the decoded fields via the read/write accessors below.
    $self->p($p);
    $self->g($g);
    $self->pub_key($pub_key);
    $self->sig($sig);
}
#Rebuild the on-the-wire message data from the stored DHE fields. Each
#field is emitted as a 2-byte big-endian length followed by its bytes; the
#signature block is only written when a signature is present.
sub set_message_contents
{
    my $self = shift;
    my $data = '';
    $data .= pack('n', length($_)) . $_ for $self->p, $self->g, $self->pub_key;
    if (length($self->sig) > 0) {
        $data .= pack('n', length($self->sig)) . $self->sig;
    }
    $self->data($data);
}
#Read/write accessors
#DHE
# Read/write accessor for the DH prime "p".
sub p
{
    my $self = shift;
    $self->{p} = shift if @_;
    return $self->{p};
}
# Read/write accessor for the DH generator "g".
sub g
{
    my $self = shift;
    $self->{g} = shift if @_;
    return $self->{g};
}
# Read/write accessor for the server's public DH value.
sub pub_key
{
    my $self = shift;
    $self->{pub_key} = shift if @_;
    return $self->{pub_key};
}
# Read/write accessor for the signature bytes (empty string when unsigned).
sub sig
{
    my $self = shift;
    $self->{sig} = shift if @_;
    return $self->{sig};
}
1;
| openweave/openweave-core | third_party/openssl/openssl/util/TLSProxy/ServerKeyExchange.pm | Perl | apache-2.0 | 2,739 |
#line 1
package Module::Install::Makefile;
use strict 'vars';
use ExtUtils::MakeMaker ();
use Module::Install::Base ();
use Fcntl qw/:flock :seek/;
use vars qw{$VERSION @ISA $ISCORE};
BEGIN {
# Version/inheritance bookkeeping for the Module::Install plugin system;
# $ISCORE marks this extension as part of the Module::Install core.
$VERSION = '1.00';
@ISA = 'Module::Install::Base';
$ISCORE = 1;
}
# Self-accessor: returns the invocant so commands can chain off ->Makefile.
sub Makefile { my $self = shift; return $self; }
# Tracks how often prompt() has been called from the same file/line with
# the same message, so runaway prompt loops can be detected.
my %seen = ();
# Wrapper around ExtUtils::MakeMaker::prompt that forces default answers
# in non-interactive/automated sessions and aborts on apparent infinite
# prompt loops.
sub prompt {
shift;
# Infinite loop protection
my @c = caller();
if ( ++$seen{"$c[1]|$c[2]|$_[0]"} > 3 ) {
die "Caught an potential prompt infinite loop ($c[1]|$c[2]|$_[0])";
}
# In automated testing or non-interactive session, always use defaults
# NOTE(review): '-! -t STDIN' applies unary minus to the negated tty
# test; it is truth-equivalent to '! -t STDIN' (upstream quirk).
if ( ($ENV{AUTOMATED_TESTING} or -! -t STDIN) and ! $ENV{PERL_MM_USE_DEFAULT} ) {
local $ENV{PERL_MM_USE_DEFAULT} = 1;
# goto preserves caller context for MakeMaker's prompt
goto &ExtUtils::MakeMaker::prompt;
} else {
goto &ExtUtils::MakeMaker::prompt;
}
}
# Store a cleaned up version of the MakeMaker version,
# since we need to behave differently in a variety of
# ways based on the MM version.
# (eval normalizes underscore dev-release versions like 6.55_03.)
my $makemaker = eval $ExtUtils::MakeMaker::VERSION;
# If we are passed a param, do a "newer than" comparison.
# Otherwise, just return the MakeMaker version.
sub makemaker {
( @_ < 2 or $makemaker >= eval($_[1]) ) ? $makemaker : 0
}
# Ripped from ExtUtils::MakeMaker 6.56, and slightly modified
# as we only need to know here whether the attribute is an array
# or a hash or something else (which may or may not be appendable).
# Consulted by makemaker_args() below to decide how to merge repeated
# settings of the same WriteMakefile attribute.
my %makemaker_argtype = (
C => 'ARRAY',
CONFIG => 'ARRAY',
# CONFIGURE => 'CODE', # ignore
DIR => 'ARRAY',
DL_FUNCS => 'HASH',
DL_VARS => 'ARRAY',
EXCLUDE_EXT => 'ARRAY',
EXE_FILES => 'ARRAY',
FUNCLIST => 'ARRAY',
H => 'ARRAY',
IMPORTS => 'HASH',
INCLUDE_EXT => 'ARRAY',
LIBS => 'ARRAY', # ignore ''
MAN1PODS => 'HASH',
MAN3PODS => 'HASH',
META_ADD => 'HASH',
META_MERGE => 'HASH',
PL_FILES => 'HASH',
PM => 'HASH',
PMLIBDIRS => 'ARRAY',
PMLIBPARENTDIRS => 'ARRAY',
PREREQ_PM => 'HASH',
CONFIGURE_REQUIRES => 'HASH',
SKIP => 'ARRAY',
TYPEMAPS => 'ARRAY',
XS => 'HASH',
# VERSION => ['version',''], # ignore
# _KEEP_AFTER_FLUSH => '',
clean => 'HASH',
depend => 'HASH',
dist => 'HASH',
dynamic_lib=> 'HASH',
linkext => 'HASH',
macro => 'HASH',
postamble => 'HASH',
realclean => 'HASH',
test => 'HASH',
tool_autosplit => 'HASH',
# special cases where you can use makemaker_append
CCFLAGS => 'APPENDABLE',
DEFINE => 'APPENDABLE',
INC => 'APPENDABLE',
LDDLFLAGS => 'APPENDABLE',
LDFROM => 'APPENDABLE',
);
# Set or merge arguments destined for WriteMakefile(). Repeated settings
# of the same key are merged according to %makemaker_argtype: ARRAY keys
# are pushed onto, HASH keys are shallow-merged, APPENDABLE keys are
# space-joined via makemaker_append, anything else is overwritten (with
# a warning when a previous value existed).
sub makemaker_args {
my ($self, %new_args) = @_;
my $args = ( $self->{makemaker_args} ||= {} );
foreach my $key (keys %new_args) {
if ($makemaker_argtype{$key}) {
if ($makemaker_argtype{$key} eq 'ARRAY') {
# Promote a pre-existing scalar value to an arrayref first
$args->{$key} = [] unless defined $args->{$key};
unless (ref $args->{$key} eq 'ARRAY') {
$args->{$key} = [$args->{$key}]
}
push @{$args->{$key}},
ref $new_args{$key} eq 'ARRAY'
? @{$new_args{$key}}
: $new_args{$key};
}
elsif ($makemaker_argtype{$key} eq 'HASH') {
# Shallow-merge new subkeys over existing ones
$args->{$key} = {} unless defined $args->{$key};
foreach my $skey (keys %{ $new_args{$key} }) {
$args->{$key}{$skey} = $new_args{$key}{$skey};
}
}
elsif ($makemaker_argtype{$key} eq 'APPENDABLE') {
$self->makemaker_append($key => $new_args{$key});
}
}
else {
if (defined $args->{$key}) {
warn qq{MakeMaker attribute "$key" is overriden; use "makemaker_append" to append values\n};
}
$args->{$key} = $new_args{$key};
}
}
return $args;
}
# For MakeMaker attributes that take multiple space-separated values
# (CCFLAGS, DEFINE, INC, ...), append value(s) to the current string.
sub makemaker_append {
    my ($self, $name, @values) = @_;
    my $args = $self->makemaker_args;
    my @parts = defined $args->{$name} ? ($args->{$name}) : ();
    $args->{$name} = join ' ', @parts, @values;
}
sub build_subdirs {
    # Register additional subdirectories in MakeMaker's DIR list so
    # make recurses into them.
    my ($self, @dirs) = @_;
    my $subdirs = $self->makemaker_args->{DIR} ||= [];
    push @$subdirs, $_ for @dirs;
}
sub clean_files {
    # Add files/globs to the Makefile 'clean' target, merging with any
    # FILES entry that is already present and dropping empty strings.
    my ($self, @files) = @_;
    my $clean = $self->makemaker_args->{clean} ||= {};
    my @kept = grep { length $_ } ( $clean->{FILES} || () ), @files;
    $clean->{FILES} = join ' ', @kept;
}
sub realclean_files {
    # Add files/globs to the Makefile 'realclean' target, merging with
    # any FILES entry that is already present and dropping empty strings.
    my ($self, @files) = @_;
    my $realclean = $self->makemaker_args->{realclean} ||= {};
    my @kept = grep { length $_ } ( $realclean->{FILES} || () ), @files;
    $realclean->{FILES} = join ' ', @kept;
}
sub libs {
    # Set the LIBS linker flags; accepts an arrayref or a single string
    # (which is wrapped in an arrayref).
    my ($self, $libs) = @_;
    $libs = [ $libs ] unless ref $libs;
    $self->makemaker_args( LIBS => $libs );
}
sub inc {
    # Set the INC (compiler include path) MakeMaker attribute.
    my ($self, $include) = @_;
    $self->makemaker_args( INC => $include );
}
# Intentionally empty File::Find callback stub, retained for backwards
# compatibility with code that references it.
sub _wanted_t {
}
# Recursively collect test globs under $dir (default 't'): every
# directory containing at least one .t file contributes a "dir/*.t"
# entry, merged with any globs already registered via tests().
sub tests_recursive {
my $self = shift;
my $dir = shift || 't';
unless ( -d $dir ) {
die "tests_recursive dir '$dir' does not exist";
}
my %tests = map { $_ => 1 } split / /, ($self->tests || '');
require File::Find;
# Note: stores one "$File::Find::dir/*.t" glob per directory, not
# individual file names.
File::Find::find(
sub { /\.t$/ and -f $_ and $tests{"$File::Find::dir/*.t"} = 1 },
$dir
);
$self->tests( join ' ', sort keys %tests );
}
# Assemble all collected metadata/prerequisites into WriteMakefile()
# arguments, invoke ExtUtils::MakeMaker, and post-process the generated
# Makefile. Takes no arguments; dies on misconfiguration.
sub write {
    my $self = shift;
    die "&Makefile->write() takes no arguments\n" if @_;

    # Check the current Perl version
    my $perl_version = $self->perl_version;
    if ( $perl_version ) {
        eval "use $perl_version; 1"
            or die "ERROR: perl: Version $] is installed, "
                 . "but we need version >= $perl_version";
    }

    # Make sure we have a new enough MakeMaker
    require ExtUtils::MakeMaker;

    if ( $perl_version and $self->_cmp($perl_version, '5.006') >= 0 ) {
        # MakeMaker can complain about module versions that include
        # an underscore, even though its own version may contain one!
        # Hence the funny regexp to get rid of it. See RT #35800
        # for details.
        # BUGFIX: the match must run in list context; without the parens
        # $v received the match's boolean result (1) rather than the
        # captured MakeMaker version number.
        my ($v) = $ExtUtils::MakeMaker::VERSION =~ /^(\d+\.\d+)/;
        $self->build_requires(     'ExtUtils::MakeMaker' => $v );
        $self->configure_requires( 'ExtUtils::MakeMaker' => $v );
    } else {
        # Allow legacy-compatibility with 5.005 by depending on the
        # most recent EU:MM that supported 5.005.
        $self->build_requires(     'ExtUtils::MakeMaker' => 6.42 );
        $self->configure_requires( 'ExtUtils::MakeMaker' => 6.42 );
    }

    # Generate the MakeMaker params
    my $args = $self->makemaker_args;
    $args->{DISTNAME} = $self->name;
    $args->{NAME}     = $self->module_name || $self->name;
    $args->{NAME}     =~ s/-/::/g;
    $args->{VERSION}  = $self->version or die <<'EOT';
ERROR: Can't determine distribution version. Please specify it
explicitly via 'version' in Makefile.PL, or set a valid $VERSION
in a module, and provide its file path via 'version_from' (or
'all_from' if you prefer) in Makefile.PL.
EOT

    # (removed leftover '$DB::single = 1;' debugger breakpoint)

    # Work out which tests to run: explicit list, ExtraTests plugin, or
    # author-mode xt tests.
    if ( $self->tests ) {
        my @tests = split ' ', $self->tests;
        my %seen;
        $args->{test} = {
            TESTS => (join ' ', grep {!$seen{$_}++} @tests),
        };
    } elsif ( $Module::Install::ExtraTests::use_extratests ) {
        # Module::Install::ExtraTests doesn't set $self->tests and does its own tests via harness.
        # So, just ignore our xt tests here.
    } elsif ( -d 'xt' and ($Module::Install::AUTHOR or $ENV{RELEASE_TESTING}) ) {
        $args->{test} = {
            TESTS => join( ' ', map { "$_/*.t" } grep { -d $_ } qw{ t xt } ),
        };
    }
    if ( $] >= 5.005 ) {
        $args->{ABSTRACT} = $self->abstract;
        $args->{AUTHOR}   = join ', ', @{$self->author || []};
    }
    if ( $self->makemaker(6.10) ) {
        $args->{NO_META}   = 1;
        #$args->{NO_MYMETA} = 1;
    }
    if ( $self->makemaker(6.17) and $self->sign ) {
        $args->{SIGN} = 1;
    }
    unless ( $self->is_admin ) {
        delete $args->{SIGN};
    }
    if ( $self->makemaker(6.31) and $self->license ) {
        $args->{LICENSE} = $self->license;
    }

    # Flatten [module => version] pairs from requires() into PREREQ_PM
    my $prereq = ($args->{PREREQ_PM} ||= {});
    %$prereq = ( %$prereq,
        map { @$_ } # flatten [module => version]
        map { @$_ }
        grep $_,
        ($self->requires)
    );

    # Remove any reference to perl, PREREQ_PM doesn't support it
    delete $args->{PREREQ_PM}->{perl};

    # Merge both kinds of requires into BUILD_REQUIRES
    my $build_prereq = ($args->{BUILD_REQUIRES} ||= {});
    %$build_prereq = ( %$build_prereq,
        map { @$_ } # flatten [module => version]
        map { @$_ }
        grep $_,
        ($self->configure_requires, $self->build_requires)
    );

    # Remove any reference to perl, BUILD_REQUIRES doesn't support it
    delete $args->{BUILD_REQUIRES}->{perl};

    # Delete bundled dists from prereq_pm, add it to Makefile DIR
    my $subdirs = ($args->{DIR} || []);
    if ($self->bundles) {
        my %processed;
        foreach my $bundle (@{ $self->bundles }) {
            my ($mod_name, $dist_dir) = @$bundle;
            delete $prereq->{$mod_name};
            $dist_dir = File::Basename::basename($dist_dir); # dir for building this module
            if (not exists $processed{$dist_dir}) {
                if (-d $dist_dir) {
                    # List as sub-directory to be processed by make
                    push @$subdirs, $dist_dir;
                }
                # Else do nothing: the module is already present on the system
                $processed{$dist_dir} = undef;
            }
        }
    }

    # Older MakeMakers don't understand BUILD_REQUIRES; fold it into
    # PREREQ_PM instead.
    unless ( $self->makemaker('6.55_03') ) {
        %$prereq = (%$prereq,%$build_prereq);
        delete $args->{BUILD_REQUIRES};
    }

    if ( my $perl_version = $self->perl_version ) {
        eval "use $perl_version; 1"
            or die "ERROR: perl: Version $] is installed, "
                 . "but we need version >= $perl_version";

        if ( $self->makemaker(6.48) ) {
            $args->{MIN_PERL_VERSION} = $perl_version;
        }
    }

    if ($self->installdirs) {
        warn qq{old INSTALLDIRS (probably set by makemaker_args) is overriden by installdirs\n} if $args->{INSTALLDIRS};
        $args->{INSTALLDIRS} = $self->installdirs;
    }

    # Drop keys whose values are undefined before handing off to MakeMaker
    my %args = map {
        ( $_ => $args->{$_} ) } grep {defined($args->{$_} )
    } keys %$args;

    # Let the admin side adjust the dist PREOP hook
    my $user_preop = delete $args{dist}->{PREOP};
    if ( my $preop = $self->admin->preop($user_preop) ) {
        foreach my $key ( keys %$preop ) {
            $args{dist}->{$key} = $preop->{$key};
        }
    }

    my $mm = ExtUtils::MakeMaker::WriteMakefile(%args);
    $self->fix_up_makefile($mm->{FIRST_MAKEFILE} || 'Makefile');
}
# Post-process the generated Makefile in place: wrap it with the
# preamble/postamble and patch perl invocation paths so the bundled
# ./inc directory is on @INC when tests and scripts run.
sub fix_up_makefile {
my $self = shift;
my $makefile_name = shift;
my $top_class = ref($self->_top) || '';
my $top_version = $self->_top->VERSION || '';
my $preamble = $self->preamble
? "# Preamble by $top_class $top_version\n"
. $self->preamble
: '';
my $postamble = "# Postamble by $top_class $top_version\n"
. ($self->postamble || '');
local *MAKEFILE;
open MAKEFILE, "+< $makefile_name" or die "fix_up_makefile: Couldn't open $makefile_name: $!";
# flock may be unimplemented on some platforms; failures are ignored
eval { flock MAKEFILE, LOCK_EX };
# Slurp the entire Makefile
my $makefile = do { local $/; <MAKEFILE> };
# Add 'inc' to the test harness include path and perl command lines
$makefile =~ s/\b(test_harness\(\$\(TEST_VERBOSE\), )/$1'inc', /;
$makefile =~ s/( -I\$\(INST_ARCHLIB\))/ -Iinc$1/g;
$makefile =~ s/( "-I\$\(INST_LIB\)")/ "-Iinc"$1/g;
$makefile =~ s/^(FULLPERL = .*)/$1 "-Iinc"/m;
$makefile =~ s/^(PERL = .*)/$1 "-Iinc"/m;
# Module::Install will never be used to build the Core Perl
# Sometimes PERL_LIB and PERL_ARCHLIB get written anyway, which breaks
# PREFIX/PERL5LIB, and thus, install_share. Blank them if they exist
$makefile =~ s/^PERL_LIB = .+/PERL_LIB =/m;
#$makefile =~ s/^PERL_ARCHLIB = .+/PERL_ARCHLIB =/m;
# Perl 5.005 mentions PERL_LIB explicitly, so we have to remove that as well.
$makefile =~ s/(\"?)-I\$\(PERL_LIB\)\1//g;
# XXX - This is currently unused; not sure if it breaks other MM-users
# $makefile =~ s/^pm_to_blib\s+:\s+/pm_to_blib :: /mg;
# Rewrite the file in place with the preamble/postamble wrapped around it
seek MAKEFILE, 0, SEEK_SET;
truncate MAKEFILE, 0;
print MAKEFILE "$preamble$makefile$postamble" or die $!;
close MAKEFILE or die $!;
1;
}
sub preamble {
    # Accessor/mutator for the Makefile preamble: new text is PREPENDED
    # to whatever has been set so far; returns the accumulated text.
    my ($self, $text) = @_;
    if (defined $text) {
        my $existing = $self->{preamble};
        $self->{preamble} = $text . (defined $existing ? $existing : '');
    }
    return $self->{preamble};
}
sub postamble {
    # Accessor/mutator for the Makefile postamble: new text is APPENDED.
    # Falls back to the admin-side postamble when none has been set yet.
    my ($self, $text) = @_;
    $self->{postamble} = $self->admin->postamble unless $self->{postamble};
    $self->{postamble} .= $text if defined $text;
    return $self->{postamble};
}
1;
__END__
#line 541
| dpaneda/nginx | syslog_patch/test/inc/Module/Install/Makefile.pm | Perl | bsd-2-clause | 11,802 |
package DDG::Goodie::TwelveOclock;
# ABSTRACT: Determine whether 12:00 is midnight or noon.
use strict;
use DDG::Goodie;
zci answer_type => "twelve_oclock";
zci is_cached => 1;
primary_example_queries "is 12:00am noon?", "is 1200pm midnight?";
secondary_example_queries "when is noon?", "when is midnight?";
description "Succinct explanation of what this instant answer does";
name "TwelveOclock";
code_url "https://github.com/duckduckgo/zeroclickinfo-spice/blob/master/lib/DDG/Spice/TwelveOclock.pm";
category "reference";
topics "everyday";
attribution github => ['duckduckgo', 'DuckDuckGo'];
# Triggers
triggers any => "midnight", "noon";
# 12am; 12:00PM; when is noon?; when is midnight
# Matches either an explicit 12 o'clock with a meridian (captured as
# 'mer') or a direct "when is noon/midnight" question (captured as 'q').
my $question_re = qr/(?:\b12(?:00|:00)?\s?(?<mer>[ap]m)\b|^when is (?<q>noon|midnight)\??$)/;
# Maps a meridian to its 12 o'clock name; the reverse merge below also
# maps each name back to its meridian.
my %answers = (
am => 'midnight',
pm => 'noon',
);
%answers = (%answers, reverse %answers); # Point both ways to answer either direction.
# Handle statement
# Answers whether 12:00am/pm is midnight or noon. If the user also
# guessed ("is 12:00am noon?"), prefixes the answer with "Yes, "/"No, ".
handle query => sub {
my $query = lc shift;
$query =~ s/\.//g; # Strip any dots.
return unless ($query =~ $question_re);
# 'mer' is set when the query contained an explicit 12 o'clock time
my $included_mer = $+{'mer'};
my $meridian = $included_mer || $answers{$+{'q'}}; # No included meridian implies straight-forward question.
my $to_show = $answers{$meridian};
return unless $to_show;
my $guess_result = '';
if ($included_mer) {
# If they included a meridian with their 12 o'clock, we need to figure out if they were guessing.
my $noon = ($query =~ qr/\bnoon\b/);
my $midnight = ($query =~ qr/\bmidnight\b/);
# It's only a guess if they mention only one or the other.
my $guess = ($noon && !$midnight) ? 'noon' : ($midnight && !$noon) ? 'midnight' : '';
# If they guessed, we need to answer the question they asked.
$guess_result = (!$guess) ? '' : ($guess eq $to_show) ? 'Yes, ' : 'No, ';
}
my $answer = $guess_result . '12:00' . $meridian . ' is ' . $to_show . '.';
return $answer,
structured_answer => {
input => [],
operation => 'Midnight or noon',
result => $answer
};
};
1;
| digit4lfa1l/zeroclickinfo-goodies | lib/DDG/Goodie/TwelveOclock.pm | Perl | apache-2.0 | 2,151 |
use Win32::OLE 'in';
use Win32::OLE::Const 'Microsoft WMI Scripting';
my $ComputerName = ".";
my $NameSpace = "root/cimv2";
my $Locator=Win32::OLE->new("WbemScripting.SWbemLocator");
my $WbemServices = $Locator->ConnectServer($ComputerName, $NameSpace);
# Builds a hash pairing a WMI property's ValueMap codes with their
# human-readable Values (e.g. numeric status code => status text).
sub maakHash{
my ($prop,$Classnaam)=@_;
# Fetch the class definition including localized/amended qualifiers
my $Class = $WbemServices->Get($Classnaam,wbemFlagUseAmendedQualifiers);
my $Qualifiers = $Class->Properties_($prop)->{Qualifiers_};
my %hash=();
# Hash slice: zip the parallel ValueMap and Values arrays together
@hash{@{$Qualifiers->Item("ValueMap")->{Value}}} = @{$Qualifiers->Item("Values")->{Value}};
return %hash;
}
# Initialize the code=>text lookup hashes for the adapter properties
%Availability = maakHash("Availability","Win32_NetworkAdapter");
%NetConnectionStatus = maakHash("NetConnectionStatus","Win32_NetworkAdapter");
# BUGFIX: removed a stray closing parenthesis before the semicolon that
# made this line a syntax error.
my $Query="SELECT * FROM Win32_NetworkAdapter WHERE NetConnectionStatus>=0";
my $AdapterInstances = $WbemServices->Execquery($Query); #(*)
# Walk every adapter with a known connection status, sorted by its
# connection name, printing general info, hardware resources and IP
# configuration for each one.
foreach $AdapterInstance (sort {uc($a->{NetConnectionID}) cmp uc($b->{NetConnectionID})} in $AdapterInstances) {
print "******************************************************** \n";
printf "%s: %s\n", "Connection Name", $AdapterInstance->{NetConnectionID};
printf "%s: %s\n", "Adapter name", $AdapterInstance->{Name};
printf "%s: %s\n", "Device availability", $Availability{$AdapterInstance->{Availability}};
printf "%s: %s\n", "Adapter type", $AdapterInstance->{AdapterType};
printf "%s: %s\n", "Adapter state", $NetConnectionStatus{$AdapterInstance->{NetConnectionStatus}};
printf "%s: %s\n", "MAC address", $AdapterInstance->{MACAddress};
printf "%s: %s\n", "Adapter service name", $AdapterInstance->{ServiceName};
printf "%s: %s\n", "Last reset", $AdapterInstance->{TimeOfLastReset};
# Resource information: IRQ, DMA, I/O ports and memory ranges allocated
# to this adapter
$Query="ASSOCIATORS OF {Win32_NetworkAdapter='$AdapterInstance->{Index}'}
WHERE AssocClass=Win32_AllocatedResource";
my $AdapterResourceInstances = $WbemServices->ExecQuery ($Query);
foreach $AdaptResInstance (in $AdapterResourceInstances) {
my $className=$AdaptResInstance->{Path_}->{Class};
printf "%s: %s\n", "IRQ resource", $AdaptResInstance->{IRQNumber} if $className eq "Win32_IRQResource";
printf "%s: %s\n", "DMA channel", $AdaptResInstance->{DMAChannel} if $className eq "Win32_DMAChannel";
printf "%s: %s\n", "I/O Port", $AdaptResInstance->{Caption} if $className eq "Win32_PortResource";
printf "%s: %s\n", "Memory address", $AdaptResInstance->{Caption} if $className eq "Win32_DeviceMemoryAddress";
}
# IP settings live on the companion Win32_NetworkAdapterConfiguration
# instance sharing the same Index; skip adapters without IP enabled.
my $AdapterInstance = $WbemServices->Get ("Win32_NetworkAdapterConfiguration=$AdapterInstance->{Index}");
next unless $AdapterInstance->{IPEnabled};
if ($AdapterInstance->{DHCPEnabled}) {
printf "%s: %s\n", "DHCP expires", $AdapterInstance->{DHCPLeaseExpires};
printf "%s: %s\n", "DHCP obtained", $AdapterInstance->{DHCPLeaseObtained};
printf "%s: %s\n", "DHCP server", $AdapterInstance->{DHCPServer};
}
printf "%s: %s\n", "IP address(es)", (join ",",@{$AdapterInstance->{IPAddress}});
printf "%s: %s\n", "IP mask(s)", (join ",",@{$AdapterInstance->{IPSubnet}});
printf "%s: %s\n", "IP connection metric", $AdapterInstance->{IPConnectionMetric};
printf "%s: %s\n", "Default Gateway(s)",(join ",",@{$AdapterInstance->{DefaultIPGateway}});
printf "%s: %s\n", "Dead gateway detection enabled", $AdapterInstance->{DeadGWDetectEnabled};
printf "%s: %s\n", "DNS registration enabled", $AdapterInstance->{DomainDNSRegistrationEnabled};
printf "%s: %s\n", "DNS FULL registration enabled", $AdapterInstance->{FullDNSRegistrationEnabled};
printf "%s: %s\n", "DNS search order", (join ",",@{$AdapterInstance->{DNSServerSearchOrder}});
printf "%s: %s\n", "DNS domain", $AdapterInstance->{DNSDomain};
printf "%s: %s\n", "DNS domain suffix search order", $AdapterInstance->{DNSDomainSuffixSearchOrder};
printf "%s: %s\n", "DNS enabled for WINS resolution", $AdapterInstance->{DNSEnabledForWINSResolution};
}
# Alternative approach without a WQL query: in the line marked (*) above,
# replace ->Execquery with:
#my $AdapterInstances = $WbemServices->InstancesOf("Win32_NetworkAdapter");
#foreach $AdapterInstance (sort {uc($a->{NetConnectionID}) cmp uc($b->{NetConnectionID})} in $AdapterInstances) {
# next unless defined $AdapterInstance->{NetConnectionStatus};
#....
| VDBBjorn/Besturingssystemen-III | Labo/reeks4/Reeks4_34.pl | Perl | mit | 4,414 |
package Bio::KBase::NexusEmulation::TokenManager;
use strict;
use Errno;
use Data::UUID;
use File::Slurp;
use Crypt::OpenSSL::RSA;
use DB_File;
use URI;
use Data::Dumper;
use LWP::UserAgent;
use JSON::XS;
use base 'Class::Accessor';
# Class::Accessor-generated read/write accessors for configuration
# (lifetime, paths, key length, URL base) and runtime state (DB handles).
__PACKAGE__->mk_accessors(qw(token_lifetime dir db tied db_hash key_length url_base));
# Example of the pipe-delimited Globus-style token format this module emulates:
#un=kbasetest|tokenid=3be5a452-0d97-11e2-81d0-12313809f035|expiry=1380831397|client_id=kbasetest|token_type=Bearer|SigningSubject=https://nexus.api.globusonline.org/goauth/keys/efc9fd6e-0ba9-11e2-81d0-12313809f035|sig=7ae1687147d52a5717f5ebc15a64cda67f8648332944220d1e578f847fd1899ed5abd7b7bd4b4e9b568bd959f35517b5722e12f044e173bd23337103643279330b26c897a89e21f44e27ead4bb75ab510dca9f08734b7aa6bc7ab4554821fd70a90a8151f44968cc510e6a64b3b5ff2f7736c199e8a711e151c7422f7d8816db
=head1 NAME
TokenManager
=head1 SYNOPSIS
$mgr = TokenManager->new($storage_dir);
$mgr->token_lifetime(86400);
$key_id = $mgr->default_key();
$signed_token = $mgr->create_and_sign_token($key, "username");
=head1 DESCRIPTION
The TokenManager keeps a small database of RSA signing keys for use in
emulating the Globus Nexus authentication service. This database resides in a
Berkeley DB file in the storage directory specified by the constructor.
=head2 CONSTRUCTOR METHODS
=over 4
=item $mgr = TokenManager->new($storage_dir)
Create a new TokenManager instance using the given storage directory. If the
directory does not exist, one will be created.
=cut
# Construct a TokenManager rooted at $storage_dir (created on demand).
# $url_base is the externally visible base URL embedded in each token's
# SigningSubject. Dies if the key database cannot be opened or created.
sub new
{
my($class, $storage_dir, $url_base) = @_;
if (!$url_base)
{
die "TokenManager::new: url_base parameter must be provided";
}
if (! -d $storage_dir)
{
mkdir($storage_dir) or die "Cannot mkdir $storage_dir: $!";
}
my $db = "$storage_dir/storage.db";
my $tie;
my $hash = {};
# Try opening an existing database first; only create on ENOENT so
# other failures (permissions, corruption) remain fatal.
$tie = tie %$hash, 'DB_File', $db, O_RDWR, 0644, $DB_HASH;
if (!$tie && $!{ENOENT})
{
$tie = tie %$hash, 'DB_File', $db, O_RDWR | O_CREAT, 0644, $DB_HASH;
if (!$tie)
{
die "Cannot create database: $!";
}
}
my $self = { dir => $storage_dir,
db => $db,
tied => $tie,
db_hash => $hash,
key_length => 1024,
url_base => $url_base,
# Tokens are valid for one year by default
token_lifetime => 86400 * 365,
};
return bless $self, $class;
}
=back
=head2 ACCESS METHODS
=over 4
=item $lifetime = $mgr->token_lifetime()
=item $mgr->token_lifetime(86400)
Get or set the token lifetime (in seconds).
=item $mgr->default_key()
Return the id of the default key. Create one if it does not exist.
=cut
sub default_key
{
    # Return the id of the default signing key, creating and persisting
    # a fresh one on first use.
    my ($self) = @_;

    my $key_id = $self->db_hash->{default_key};
    unless ($key_id) {
        $key_id = $self->create_signing_key();
        $self->db_hash->{default_key} = $key_id;
        $self->tied->sync;
    }
    return $key_id;
}
=item $pubkey = $mgr->public_key($key_id)
Return the public key text for the given key id.
=cut
# Return the public key PEM text for the given key id. Dies when the id
# is malformed or unknown.
sub public_key
{
    my($self, $key_id) = @_;

    # Key ids are UUIDs; reject anything else before using it as a DB key.
    $key_id =~ /^[-0-9a-fA-F]+$/ or die "Invalid key id $key_id";

    my $str = $self->db_hash->{"pub.$key_id"};
    # FIX: removed a leftover debug print to STDERR that logged every
    # key lookup (and its result) on each call.
    $str or die "Key $key_id not found";
    return $str;
}
=back
=head2 KEY AND SIGNING METHODS
=over 4
=item $key = $mgr->create_signing_key()
Create a new signing key and return its key ID. Writes the
public and private keys to the database.
=cut
# Generate a new RSA keypair of the configured length, store both halves
# in the database keyed by a fresh UUID, and return that UUID.
sub create_signing_key
{
my($self) = @_;
my $key_id = Data::UUID->new->create_str();
my $key = Crypt::OpenSSL::RSA->generate_key($self->key_length);
$self->db_hash->{"pub.$key_id"} = $key->get_public_key_string();
$self->db_hash->{"priv.$key_id"} = $key->get_private_key_string();
# Flush to disk so the key survives a crash
$self->tied->sync();
return $key_id;
}
=item $token = $mgr->create_signed_token("username")
Create and sign a new token for the given username.
=cut
# Build a Globus/Nexus-style bearer token for $user, signed with the
# default key. $client_id defaults to $user; $override_user, when given,
# is appended as an extra override_user field. Returns a hashref whose
# access_token field holds the pipe-delimited signed token string.
sub create_signed_token
{
my($self, $user, $client_id, $override_user) = @_;
my $now = time;
my $key = $self->default_key;
my $token_type = 'Bearer';
# Verifiers fetch the public key from this URL
my $signing_subject = $self->url_base . "/goauth/keys/$key";
my $expires_in = $self->token_lifetime;
my $expiry = $now + $expires_in;
$client_id = $client_id ? $client_id : $user;
my $token_id = Data::UUID->new->create_str;
my $token_data = { un => $user,
tokenid => $token_id,
expiry => $expiry,
client_id => $client_id,
token_type => $token_type,
this_is_globus => "globus_style_token",
SigningSubject => $signing_subject,
};
# Serialize as pipe-separated key=value pairs, then append the RSA
# signature of exactly that string as the trailing sig field.
my $token_str = join("|", map { "$_=$token_data->{$_}" } qw(un tokenid expiry client_id token_type SigningSubject this_is_globus));
$token_str .= "|override_user=$override_user" if $override_user;
my $sig = $self->sign($key, $token_str);
$token_str .= "|sig=$sig";
my $val = {
access_token => $token_str,
client_id => $client_id,
expires_in => $expires_in,
expiry => $expiry,
issued_on => $now,
lifetime => $expires_in,
scopes => [],
token_id => $token_id,
token_type => $token_type,
user_name => $user,
};
return $val;
}
=item $hex_str = $mgr->sign($key, $plaintext)
Look up the private key in the database for $key and use it to sign the given
plaintext. Return the hex form of the signature.
=cut
sub sign
{
    # Sign $plaintext with the stored private key named by $key_id and
    # return the signature as a lowercase hex string.
    my ($self, $key_id, $plaintext) = @_;

    my $priv = $self->db_hash->{"priv.$key_id"}
        or die "Cannot find key $key_id in database";

    my $rsa = Crypt::OpenSSL::RSA->new_private_key($priv);
    return unpack("H*", $rsa->sign($plaintext));
}
=item $ok = $mgr->validate($token, $user)
Validate the given token to be valid for the given user. The token is assumed to have been one that
B<this> token manager issued; in other words, the base URL for the signing subject
in the token must be the same as our URL base.
=cut
# Validate $token for $user. Locally issued tokens are checked by
# re-signing the payload with our private key; tokens whose signing key
# is unknown are treated as external and verified against the public key
# published at their SigningSubject URL. Returns 1 on success, 0 on
# failure (undef/empty on some external-verification paths).
sub validate
{
my($self, $token, $user) = @_;
my @parts = map { [ split(/=/, $_, 2) ] } split(/\|/, $token);
# The signed text is every field except the trailing sig field,
# re-joined in the original order.
my $to_sign = join("|", map { join("=", @$_) } grep { $_->[0] ne "sig" } @parts);
my %parts = map { $_->[0] => $_->[1]} @parts;
# print Dumper($to_sign, \%parts);
my $subj = $parts{SigningSubject};
my $surl = URI->new($subj);
$surl->path('');
if ($surl ne $self->url_base)
{
# NOTE(review): a mismatched signing subject is logged but deliberately
# NOT rejected here; unknown subjects fall through to the external
# verification branch below.
print STDERR "validate failed (ignored) on $surl ne $self->{url_base}\n";
#return 0;
}
my($key_id) = $subj =~ m,/goauth/keys/(\S+)$,;
#$key_id or die "No key found in $subj\n";
my $key = $self->db_hash->{"priv.$key_id"};
if (!$key_id || !$key)
{
#
# This is an external token. Validate it.
#
# Fetch the issuer's published public key and verify the signature.
print STDERR "OKing external token from $subj...\n";
my $ua = LWP::UserAgent->new();
my $res = $ua->get($subj);
if ($res->is_success)
{
my $data = decode_json($res->content);
if (!$data->{valid})
{
print STDERR "public key is invalid\n";
return 0;
}
my $pubkey = $data->{pubkey};
my $rsa = Crypt::OpenSSL::RSA->new_public_key($pubkey);
$rsa->use_sha1_hash();
# The embedded sig field is hex; convert back to raw bytes
my $binary_sig = pack('H*', $parts{sig});
my $verify = $rsa->verify($to_sign, $binary_sig);
print STDERR "verified: $verify\n";
return $verify;
}
else
{
print STDERR "Error getting pbukey from $subj " . $res->content;
return 0;
}
}
# Local token: RSA signing here is deterministic, so re-signing the
# payload must reproduce the embedded signature exactly.
my $sig = $self->sign($key_id, $to_sign);
if ($sig ne $parts{sig})
{
print STDERR "signature did not match\n$sig\n$parts{sig}";
return 0;
}
if ($parts{expiry} < time)
{
print STDERR "token expired\n";
return 0;
}
if ($parts{un} ne $user)
{
print STDERR "user does not match token\n";
return 0;
}
return 1;
}
=item $ok = $mgr->validate_and_get_user($token)
Validate the given token and return its user id. The token is assumed to have been one that
B<this> token manager issued; in other words, the base URL for the signing subject
in the token must be the same as our URL base.
=cut
# Like validate(), but restricted to tokens issued by THIS manager
# (foreign signing subjects are rejected outright). Returns the token's
# username on success, undef on any failure.
sub validate_and_get_user
{
my($self, $token) = @_;
my @parts = map { [ split(/=/, $_, 2) ] } split(/\|/, $token);
# The signed text excludes only the trailing sig field
my $to_sign = join("|", map { join("=", @$_) } grep { $_->[0] ne "sig" } @parts);
my %parts = map { $_->[0] => $_->[1]} @parts;
# print Dumper($to_sign, \%parts);
my $subj = $parts{SigningSubject};
my $surl = URI->new($subj);
$surl->path('');
if ($surl ne $self->url_base)
{
print STDERR "validate failed on $surl ne $self->{url_base}\n";
return undef;
}
my($key) = $subj =~ m,/goauth/keys/(\S+)$,;
$key or die "No key found in $subj\n";
# Re-sign the payload; it must reproduce the embedded signature exactly
my $sig = $self->sign($key, $to_sign);
if ($sig ne $parts{sig})
{
print STDERR "signature did not match\n$sig\n$parts{sig}";
return undef;
}
if ($parts{expiry} < time)
{
print STDERR "token expired\n";
return undef;
}
return $parts{un};
}
1;
| kbase/nexus_emulation | lib/Bio/KBase/NexusEmulation/TokenManager.pm | Perl | mit | 8,758 |
=begin comment
OEML - REST API
This section will provide necessary information about the `CoinAPI OEML REST API` protocol. <br/> This API is also available in the Postman application: <a href=\"https://postman.coinapi.io/\" target=\"_blank\">https://postman.coinapi.io/</a> <br/><br/> Implemented Standards: * [HTTP1.0](https://datatracker.ietf.org/doc/html/rfc1945) * [HTTP1.1](https://datatracker.ietf.org/doc/html/rfc2616) * [HTTP2.0](https://datatracker.ietf.org/doc/html/rfc7540)
The version of the OpenAPI document: v1
Contact: support@coinapi.io
Generated by: https://openapi-generator.tech
=end comment
=cut
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# Do not edit the class manually.
# Ref: https://openapi-generator.tech
#
package WWW::OpenAPIClient::Role;
use utf8;
use Moose::Role;
use namespace::autoclean;
use Class::Inspector;
use Log::Any qw($log);
use WWW::OpenAPIClient::ApiFactory;
# Moose attributes for the flattened client role.
has base_url => ( is => 'ro',
required => 0,
isa => 'Str',
documentation => 'Root of the server that requests are sent to',
);
# Lazily built factory that constructs the per-endpoint API objects.
has api_factory => ( is => 'ro',
isa => 'WWW::OpenAPIClient::ApiFactory',
builder => '_build_af',
lazy => 1,
documentation => 'Builds an instance of the endpoint API class',
);
has tokens => ( is => 'ro',
isa => 'HashRef',
required => 0,
default => sub { {} },
documentation => 'The auth tokens required by the application - basic, OAuth and/or API key(s)',
);
# Shared configuration object; BUILD pushes the auth tokens into it.
has _cfg => ( is => 'ro',
isa => 'WWW::OpenAPIClient::Configuration',
default => sub { WWW::OpenAPIClient::Configuration->new() },
);
has version_info => ( is => 'ro',
isa => 'HashRef',
default => sub { {
app_name => 'OEML - REST API',
app_version => 'v1',
generator_class => 'org.openapitools.codegen.languages.PerlClientCodegen',
} },
documentation => 'Information about the application version and the codegen codebase version'
);
# Moose post-constructor hook: pushes auth tokens into the shared
# configuration, then flattens every endpoint API's public methods onto
# the consuming class via per-API delegating attributes.
sub BUILD {
my $self = shift;
$self->_cfg->accept_tokens( $self->tokens ) if keys %{$self->tokens};
# ignore these symbols imported into API namespaces
my %outsiders = map {$_ => 1} qw( croak );
my %delegates;
# collect the methods callable on each API
foreach my $api_name ($self->api_factory->apis_available) {
my $api_class = $self->api_factory->classname_for($api_name);
my $methods = Class::Inspector->methods($api_class, 'expanded'); # not Moose, so use CI instead
# keep only public methods defined in the API class itself
my @local_methods = grep {! /^_/} grep {! $outsiders{$_}} map {$_->[2]} grep {$_->[1] eq $api_class} @$methods;
push( @{$delegates{$_}}, {api_name => $api_name, api_class => $api_class} ) for @local_methods;
}
# remove clashes
# (a method offered by more than one API cannot be delegated unambiguously)
foreach my $method (keys %delegates) {
if ( @{$delegates{$method}} > 1 ) {
my ($apis) = delete $delegates{$method};
}
}
# build the flattened API
# Each endpoint becomes a lazy '<name>_api' attribute which 'handles'
# the methods unique to that endpoint.
foreach my $api_name ($self->api_factory->apis_available) {
my $att_name = sprintf "%s_api", lc($api_name);
my $api_class = $self->api_factory->classname_for($api_name);
my @delegated = grep { $delegates{$_}->[0]->{api_name} eq $api_name } keys %delegates;
$log->debugf("Adding API: '%s' handles %s", $att_name, join ', ', @delegated);
$self->meta->add_attribute( $att_name => (
is => 'ro',
isa => $api_class,
default => sub {$self->api_factory->get_api($api_name)},
lazy => 1,
handles => \@delegated,
) );
}
}
sub _build_af {
    # Lazy builder for api_factory; forwards base_url only when one was
    # supplied to the constructor.
    my $self = shift;
    my %args = $self->base_url ? ( base_url => $self->base_url ) : ();
    return WWW::OpenAPIClient::ApiFactory->new(%args);
}
=head1 NAME
WWW::OpenAPIClient::Role - a Moose role for the OEML - REST API
=head2 OEML - REST API version: v1
=head1 VERSION
Automatically generated by the Perl OpenAPI Generator project:
=over 4
=item Build package: org.openapitools.codegen.languages.PerlClientCodegen
=item Codegen version:
=back
=head2 A note on Moose
This role is the only component of the library that uses Moose. See
WWW::OpenAPIClient::ApiFactory for non-Moosey usage.
=head1 SYNOPSIS
The Perl Generator in the OpenAPI Generator project builds a library of Perl modules to interact with
a web service defined by a OpenAPI Specification. See below for how to build the
library.
This module provides an interface to the generated library. All the classes,
objects, and methods (well, not quite *all*, see below) are flattened into this
role.
package MyApp;
use Moose;
with 'WWW::OpenAPIClient::Role';
package main;
my $api = MyApp->new({ tokens => $tokens });
my $pet = $api->get_pet_by_id(pet_id => $pet_id);
=head2 Structure of the library
The library consists of a set of API classes, one for each endpoint. These APIs
implement the method calls available on each endpoint.
Additionally, there is a set of "object" classes, which represent the objects
returned by and sent to the methods on the endpoints.
An API factory class is provided, which builds instances of each endpoint API.
This Moose role flattens all the methods from the endpoint APIs onto the consuming
class. It also provides methods to retrieve the endpoint API objects, and the API
factory object, should you need it.
For documentation of all these methods, see AUTOMATIC DOCUMENTATION below.
=head2 Configuring authentication
In the normal case, the OpenAPI Spec will describe what parameters are
required and where to put them. You just need to supply the tokens.
my $tokens = {
# basic
username => $username,
password => $password,
# oauth
access_token => $oauth_token,
# keys
$some_key => { token => $token,
prefix => $prefix,
in => $in, # 'head||query',
},
$another => { token => $token,
prefix => $prefix,
in => $in, # 'head||query',
},
...,
};
my $api = MyApp->new({ tokens => $tokens });
Note these are all optional, as are C<prefix> and C<in>, and depend on the API
you are accessing. Usually C<prefix> and C<in> will be determined by the code generator from
the spec and you will not need to set them at run time. If not, C<in> will
default to 'head' and C<prefix> to the empty string.
The tokens will be placed in a L<WWW::OpenAPIClient::Configuration> instance
as follows, but you don't need to know about this.
=over 4
=item C<< $cfg->{username} >>
String. The username for basic auth.
=item C<< $cfg->{password} >>
String. The password for basic auth.
=item C<< $cfg->{api_key} >>
Hashref. Keyed on the name of each key (there can be multiple tokens).
$cfg->{api_key} = {
secretKey => 'aaaabbbbccccdddd',
anotherKey => '1111222233334444',
};
=item C<< $cfg->{api_key_prefix} >>
Hashref. Keyed on the name of each key (there can be multiple tokens). Note not
all api keys require a prefix.
$cfg->{api_key_prefix} = {
secretKey => 'string',
anotherKey => 'same or some other string',
};
=item C<< $config->{access_token} >>
String. The OAuth access token.
=back
=head1 METHODS
=head2 C<base_url>
The generated code has the C<base_url> already set as a default value. This method
returns the current value of C<base_url>.
=head2 C<api_factory>
Returns an API factory object. You probably won't need to call this directly.
$self->api_factory('Pet'); # returns a WWW::OpenAPIClient::PetApi instance
$self->pet_api; # the same
=head1 MISSING METHODS
Most of the methods on the API are delegated to individual endpoint API objects
(e.g. Pet API, Store API, User API etc). Where different endpoint APIs use the
same method name (e.g. C<new()>), these methods can't be delegated. So you need
to call C<$api-E<gt>pet_api-E<gt>new()>.
In principle, every API is susceptible to the presence of a few, random, undelegatable
method names. In practice, because of the way method names are constructed, it's
unlikely in general that any methods will be undelegatable, except for:
new()
class_documentation()
method_documentation()
To call these methods, you need to get a handle on the relevant object, either
by calling C<$api-E<gt>foo_api> or by retrieving an object, e.g.
C<$api-E<gt>get_pet_by_id(pet_id =E<gt> $pet_id)>. They are class methods, so
you could also call them on class names.
=head1 BUILDING YOUR LIBRARY
See the homepage C<https://openapi-generator.tech> for full details.
But briefly, clone the git repository, build the codegen codebase, set up your build
config file, then run the API build script. You will need git, Java 7 or 8 and Apache
maven 3.0.3 or better already installed.
The config file should specify the project name for the generated library:
{"moduleName":"WWW::MyProjectName"}
Your library files will be built under C<WWW::MyProjectName>.
$ git clone https://github.com/openapitools/openapi-generator
$ cd openapi-generator
$ mvn package
$ java -jar modules/openapi-generator-cli/target/openapi-generator-cli.jar generate \
-i [URL or file path to JSON OpenAPI API spec] \
-g perl \
-c /path/to/config/file.json \
-o /path/to/output/folder
Bang, all done. Run the C<autodoc> script in the C<bin> directory to see the API
you just built.
=head1 AUTOMATIC DOCUMENTATION
You can print out a summary of the generated API by running the included
C<autodoc> script in the C<bin> directory of your generated library. A few
output formats are supported:
Usage: autodoc [OPTION]
-w wide format (default)
-n narrow format
-p POD format
-H HTML format
-m Markdown format
-h print this help message
-c your application class
The C<-c> option allows you to load and inspect your own application. A dummy
namespace is used if you don't supply your own class.
=head1 DOCUMENTATION FROM THE OpenAPI Spec
Additional documentation for each class and method may be provided by the OpenAPI
spec. If so, this is available via the C<class_documentation()> and
C<method_documentation()> methods on each generated object class, and the
C<method_documentation()> method on the endpoint API classes:
my $cmdoc = $api->pet_api->method_documentation->{$method_name};
my $odoc = $api->get_pet_by_id(pet_id => $pet_id)->class_documentation;
my $omdoc = $api->get_pet_by_id(pet_id => $pet_id)->method_documentation->{$method_name};
Each of these calls returns a hashref with various useful pieces of information.
=cut
1;
| coinapi/coinapi-sdk | oeml-sdk/perl/lib/WWW/OpenAPIClient/Role.pm | Perl | mit | 11,320 |
#!/usr/bin/perl
# ********************************************************************
# * Copyright (C) 2016 and later: Unicode, Inc. and others.
# * License & terms of use: http://www.unicode.org/copyright.html#License
# ********************************************************************
# ********************************************************************
# * COPYRIGHT:
# * Copyright (c) 2002-2013, International Business Machines Corporation and
# * others. All Rights Reserved.
# ********************************************************************
# Driver script that compares conversion performance of ICU against the
# Windows XP ANSI (IMultiLanguage2) interface for several encodings.
#
# NOTE: strict is intentionally disabled -- configuration globals such as
# $OnWindows, $ICULatestVersion, $ICULatest, $ICUPathLatest and
# $WindowsPlatform are injected as package variables by Common.pl.
#use strict;
require "../perldriver/Common.pl";
use lib '../perldriver';
use PerfFramework;

# The IMultiLanguage2 interface exists only on Windows; bail out elsewhere.
unless ($OnWindows) {
    print "This test only works on Windows.\n";
    exit(1);
}

# Options handed to the performance framework: report titles, pass count,
# per-pass duration, and where the results are written.
my $options = {
    "title"       => "Conversion Performance: ICU " . $ICULatestVersion . " vs. Windows XP ANSI Interface",
    "headers"     => "WindowsXP(IMultiLanguage2) ICU" . $ICULatestVersion,
    "operationIs" => "code point",
    "passes"      => "10",
    "time"        => "5",
    #"outputType"=>"HTML",
    "dataDir"     => "Not Using Data Files",
    "outputDir"   => "../results"
};

# Command prefix shared by every test invocation: change into the ICU bin
# directory, then run the convperf executable for the current platform.
my $p = "cd " . $ICULatest . "/bin && "
      . $ICUPathLatest . "/convperf/$WindowsPlatform/Release/convperf.exe";

# Each entry maps a human-readable label to the [Windows, ICU] pair of
# test functions whose results the framework runs and compares.
my $tests = {
    "UTF-8 From Unicode"      => ["$p,TestWinANSI_UTF8_FromUnicode",   "$p,TestICU_UTF8_FromUnicode"],
    "UTF-8 To Unicode"        => ["$p,TestWinANSI_UTF8_ToUnicode",     "$p,TestICU_UTF8_ToUnicode"],
    ####
    "ISO-8859-1 From Unicode" => ["$p,TestWinANSI_Latin1_FromUnicode", "$p,TestICU_Latin1_FromUnicode"],
    "ISO-8859-1 To Unicode"   => ["$p,TestWinANSI_Latin1_ToUnicode",   "$p,TestICU_Latin1_ToUnicode"],
    ####
    "Shift-JIS From Unicode"  => ["$p,TestWinANSI_SJIS_FromUnicode",   "$p,TestICU_SJIS_FromUnicode"],
    "Shift-JIS To Unicode"    => ["$p,TestWinANSI_SJIS_ToUnicode",     "$p,TestICU_SJIS_ToUnicode"],
    ####
    "EUC-JP From Unicode"     => ["$p,TestWinANSI_EUCJP_FromUnicode",  "$p,TestICU_EUCJP_FromUnicode"],
    "EUC-JP To Unicode"       => ["$p,TestWinANSI_EUCJP_ToUnicode",    "$p,TestICU_EUCJP_ToUnicode"],
    ####
    "GB2312 From Unicode"     => ["$p,TestWinANSI_GB2312_FromUnicode", "$p,TestICU_GB2312_FromUnicode"],
    "GB2312 To Unicode"       => ["$p,TestWinANSI_GB2312_ToUnicode",   "$p,TestICU_GB2312_ToUnicode"],
};

# These benchmarks generate their own input; no external data files needed.
my $dataFiles = "";

runTests($options, $tests, $dataFiles);
| quyse/flaw | flaw-font-icu/src/icu/source/test/perf/convperf/convperf_ansi.pl | Perl | mit | 2,529 |
Puszka szparagów, 73 gołębie, trochę ostrej amunicji i zamrożone daiquiri !!
-- Bob Violence
%
Krasnolud przechodzi gdzieś w Detroit!
-- Bob Violence
%
A zgrabna KATOLICKI uczennica wiercić w moim stroju ..
-- Bob Violence
%
Szeroki oczach, niewinny UNICORN, delikatnie gotowy na łące wypełnionyz bzu, lizaki i małych dzieci w Hush zmierzchu ??
-- Bob Violence
%
Właściwie, co chciałbym to mała zabawka kosmiczny !!
-- Bob Violence
%
Wszystko, co mogę myśleć o to talerz organicznej przycinać chipsy deptaneprzez armię smagłych, włoskich WOKALISTÓW Hol ...
-- Bob Violence
%
Nagle, chcę rzucać nad moją obiecującą aktorską karierę, rosnądługą, czarną brodę i nosić kapelusz BASEBALL !! ... Chociaż nie wiem dlaczego !!
-- Bob Violence
%
Całe życie jest rozmycie Republikanów i mięsa!
-- Bob Violence
%
Dobra, degeneruje! Chcę to miejsce wydostające się w 20 sekund!
-- Bob Violence
%
Przez cały ten czas byłem przeglądasz ROSYJSKI Midget sodomize Housecat!
-- Bob Violence
%
Dobra, ty !! Naśladować rannych SEAL błagając o miejsce parkingowe !!
-- Bob Violence
%
Czy jestem w towarzystwie rodzica lub opiekuna?
-- Bob Violence
%
Mam jeszcze decyzji?
-- Bob Violence
%
Jestem na studiach jeszcze?
-- Bob Violence
%
Ja Shoplifting?
-- Bob Violence
%
Ameryka!! Widziałem to wszystko !! Wymioty! Falowanie! JERRY FALWELLING sięTwój nieważne tubka UHF zapomnienia !! Safeway umysłu ...
-- Bob Violence
%
Powietrze frytek przenika moje nozdrza !!
-- Bob Violence
%
Tusz-ling? Pewnie - Bierze jeden !! Kupiłeś żadnych mundurów komunistycznych ??
-- Bob Violence
%
Włoskie czesze sobie włosy na przedmieściach Des Moines!
-- Bob Violence
%
A ponadto, mój średni Bowling jest bez zarzutu !!!
-- Bob Violence
%
ANN Jillian włosy sprawia Loni Anderson włosy wyglądają jak RICARDOMontalban włosy!
-- Bob Violence
%
Są duszone śliwek wciąż w suszarki do włosów?
-- Bob Violence
%
Czy żyjemy lub na taśmie?
-- Bob Violence
%
Czy jesteśmy jeszcze strajkują?
-- Bob Violence
%
Czy już dotarliśmy?
-- Bob Violence
%
Czy już dotarliśmy? Mój umysł jest okręt podwodny !!
-- Bob Violence
%
Czy jesteś psychicznie tutaj w Pizza Hut ??
-- Bob Violence
%
Sprzedajesz Nylon szybów naftowych ?? Jeśli tak, to możemy wykorzystać dwa tuziny !!
-- Bob Violence
%
Czy wciąż jesteś alkoholikiem?
-- Bob Violence
%
Jako przewodniczący muszę iść odkurzyć moją kolekcję monet!
-- Bob Violence
%
Awright, która z was ukrywała mój penis ENVY?
-- Bob Violence
%
Barbara Stanwyck Denerwuję !!
-- Bob Violence
%
Barbie mówi, Take quaaludes w ginie, i iść na dyskotekę od razu!Ale Ken mówi, woo-woo !! Nie kredytowej w "Panu Liquor" !!
-- Bob Violence
%
BARRY ... To był najbardziej na duchu wydawania "Zrobiłem to mojaWAY ", jaką kiedykolwiek słyszałem !!
-- Bob Violence
%
Będąc BALD HERO jest prawie tak Szczęśliwego jako tatuażem Knockwurst.
-- Bob Violence
%
Bela Lugosi jest mój pilot ...
-- Bob Violence
%
BI-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-bi-
-- Bob Violence
%
... Ponurość ... pustka ... widelce plastikowe ...
-- Bob Violence
%
Bo Derek zrujnował mi życie!
-- Bob Violence
%
Rany, jestem zadowolony, że to tylko 1971 ...
-- Bob Violence
%
Chłopaki, to wszystkie zostały wybrane do ZOSTAW th 'Planet w 15 minut !!
-- Bob Violence
%
Ale poszli na Marsa około 1953 !!
-- Bob Violence
%
Ale on był wystarczająco dojrzały ostatnia noc w lesbijskiej maskaradę?
-- Bob Violence
%
Czy mogę mieć impuls ITEM zamiast tego?
-- Bob Violence
%
Możecie pisać tort Bean?
-- Bob Violence
%
Keczup i musztarda w każdym miejscu! To Hamburger człowiekiem!
-- Bob Violence
%
Chubby Checker właśnie miałem kanapkę z kurczakiem w centrum Duluth!
-- Bob Violence
%
Cywilizacja jest zabawa! W każdym razie, to trzyma mnie zajęty !!
-- Bob Violence
%
Wyczyść pralniczy !! Ten wir-o-matic prostu miał nuklearnej katastrofy !!
-- Bob Violence
%
Skoncentruj się na th'cute, Cartoon Li'l faceci! Pamiętaj szeregowejTAKTY MUZYCZNE!! Wykonaj Whipple AVE. WYJŚCIE!! Mieć wolne Pepsi !! SkręcaćW lewo na th'HOLIDAY INN !! Dołącz do świata CREDIT !! Make me an offer !!!
-- Bob Violence
%
GRATULACJE! Teraz powinienem zrobić słabo zawoalowane komentarze na tematGODNOŚĆ, samoocena i znalezienie prawdziwej zabawy w swojej prawej komory ??
-- Bob Violence
%
Treść: 80% poliester, 20% DACRONi ... wiaty mundur kelnerkaTARTAR SOS jak 8 "x 10" GLOSSY ...
-- Bob Violence
%
Mógłbym mieć przedawkowania narkotyków?
-- Bob Violence
%
Czy włoska CRANE OPERATOR prostu doświadczyć wrażeń w niehamowaneMalibu HOT TUB?
-- Bob Violence
%
Zrobiłem błędnej THING ??
-- Bob Violence
%
Czy mogę powiedzieć, że jestem sardynka? Albo autobus ???
-- Bob Violence
%
Did I WYPRZEDAŻE jeszcze ??
-- Bob Violence
%
Znalazłeś zegarek cyfrowy w skrzynce z Velveeta?
-- Bob Violence
%
Czy Ci wiele koreańskich NOŻE stek przenieść tę podróż, obskurne?
-- Bob Violence
%
DIDI ... jest to, że MARTIAN nazwa, albo jesteśmy w Izraelu?
-- Bob Violence
%
nie mogę kupić 1951 Packard od ostatniego marca w Kairze?
-- Bob Violence
%
Disco Rozprowadzenie oleju stworzy pulsujący naugahide przebieg rurociąguprosto do tropików z regionów dywan produkcji i dewaluacji dolara!
-- Bob Violence
%
Czy mam jeszcze życia?
-- Bob Violence
%
Czy znacie właśnie przeszedł thru czarna dziura w kosmosie?
-- Bob Violence
%
Czy masz dokładnie to, co chcę w kratę Poindexter bar bat ??
-- Bob Violence
%
Lubisz "Vittles Tender"?
-- Bob Violence
%
Czy uważasz, że "Małpki" powinien dostać gaz na nieparzystych lub parzystych dni?
-- Bob Violence
%
Czy ktoś z Peoria mają krótszą uwagi niż mnie?
-- Bob Violence
%
twoja PRZEBIERALNIA mają wystarczająco dużo szparagów?
-- Bob Violence
%
NIE go !! Nie jestem HOWARD Cosell !! Wiem Polish jokes ... CZEKAJ !!Nie jedź !! JESTEM Howard Cosell! ... I nie wiem polskich żartów !!
-- Bob Violence
%
Nie bij mnie !! Jestem w Twilight Zone !!!
-- Bob Violence
%
Nie SANFORIZE mnie !!
-- Bob Violence
%
Nie martw się, nikt tak naprawdę nie słucha wykładów w Moskwie, albo! ...FRANCUSKI, historia, zaawansowanych rachunku, Informatyk, CZARNYSTUDIA, socjobiologii! ... Czy są jakieś pytania??
-- Bob Violence
%
Edwin Meese mnie nosić kurdybany !!
-- Bob Violence
%
Eisenhower !! Urządzenie mimeograph denerwuje brzuch !!
-- Bob Violence
%
Albo Wyznaj teraz albo idziemy do "sąd ludowa" !!
-- Bob Violence
%
Każdy dostaje darmowy barszcz!
-- Bob Violence
%
Każdy będzie gdzieś !! To chyba sprzedaży garażu lubfilm katastroficzny!!
-- Bob Violence
%
Wszędzie patrzę widzę negatywności i asfalt ...
-- Bob Violence
%
Przepraszam, ale nie mogę nie powiedzieć, że nie ma nadziei na przetrwanieDRUK OFFSETOWY?
-- Bob Violence
%
Uczucia są kaskadowe nade mną !!!
-- Bob Violence
%
Wreszcie Zippy napędza jego 1958 Rambler Metropolitan do wydziałujadalnia.
-- Bob Violence
%
Po pierwsze, mam zamiar dać Ci wszystkie odpowiedzi do dzisiejszego testu ... Więcwystarczy podłączyć SONY walkmany i relaks !!
-- Bob Violence
%
BŁAŹ you! Pochłaniają promienie EGO wstrząsające impulsowych, poliester poltroon !!
-- Bob Violence
%
do sztucznego aromatyzowania !!
-- Bob Violence
%
Cztery tysiące różnych magnatów, magnatów i NABOBS są romping w moimgothic solarium !!
-- Bob Violence
%
MROŻONE przystawek można rzucił przez członków przeciwnych Swanson sekt ...
-- Bob Violence
%
Zabawa nigdy się konieczności powiedzieć, że jesteś SUSHI !!
-- Bob Violence
%
Gee, czuję rodzaj światła w głowie teraz, wiedząc, że nie może zrobić mójPŁATNOŚCI talerz satelitarny!
-- Bob Violence
%
Gibble, Gobble, przyjmujemy, że ...
-- Bob Violence
%
Daj im Radar PROWADZONE pasach SKEE kulkowe i burritos Velveeta !!
-- Bob Violence
%
Dalej, Emote! Wychowałem się na balony myśli !!
-- Bob Violence
%
Dobranoc, wszyscy ... Teraz muszę iść administrowania Pierwsza pomoc do mojegopet Leisure Suit !!
-- Bob Violence
%
HAIR toniki, proszę !!
-- Bob Violence
%
Pół umysł jest straszne marnotrawstwo!
-- Bob Violence
%
Podaj mi parę skórzanych spodniach i klawiaturę CASIO - Żyję na dzisiaj!
-- Bob Violence
%
Posiada każdy dostał Halvah rozsianych po całym kostek ?? ... Teraz jestczas "mają NAGEELA" !!
-- Bob Violence
%
... On dominuje dekadenckiego SUBWAY sceny.
-- Bob Violence
%
On jest MELBA samopoczucie ... Cake ANIOŁ ... XEROX XEROX mu go ... -
-- Bob Violence
%
Pewnie po prostu chce przejąć moich komórek, a następnie eksplodować wewnątrz mniejak lufa cieknący posiekane WĄTROBIE! A może chciałbyPSYCHOLIGICALLY Terroryzuj mnie, dopóki nie mam zastrzeżeń co do prawicowegoWOJSKOWY przejęciu mojego mieszkania !! Chyba powinienem zadzwonić Al Pacino!
-- Bob Violence
%
HELLO KITTY gang terroryzuje miasto, rodzina stickered na śmierć!
-- Bob Violence
%
Witam wszystkich, jestem człowiekiem !!
-- Bob Violence
%
Witam, Gorry-O !! Jestem geniuszem Harvard !!
-- Bob Violence
%
Cześć. Wiem, że współczynnik rozwodów wśród niezamężnych kobiet katolickich Alaski !!
-- Bob Violence
%
Cześć. Wystarczy spacer wzdłuż a nie próbować myśleć o jelitachjest prawie czterdzieści STOCZNIE LONG !!
-- Bob Violence
%
Hello ... żelaznej kurtyny? Wyślij na pizzę kiełbasą! III wojna światowa? Nie, dziękuję!
-- Bob Violence
%
Cześć? Lewatywa Bondage? Dzwonię, bo chcę być szczęśliwy, myślę, że ...
-- Bob Violence
%
Oto jestem na pchlim targu, ale nikt nie kupuje moich moczu butelek na próbki ...
-- Bob Violence
%
Tu jestem w 53 roku p.n.e. i wszystko czego chcę to koperkiem !!
-- Bob Violence
%
Oto jestem w lobule POSTERIOR węchowych, ale nie widzę Carl Sagangdziekolwiek!!
-- Bob Violence
%
Oto jesteśmy w Ameryce ... kiedy zbieramy bezrobocie?
-- Bob Violence
%
Hej, czekaj !! Chcę rozwodu!! ... Nie jesteś Clint Eastwood !!
-- Bob Violence
%
Hej, kelner! Chcę nową koszulę i koński ogon z sosem cytrynowym!
-- Bob Violence
%
Czkawki i drżeniem do hałd New Jersey, jak niektórzyPijany KAPUSTA łata lalki, kaszel w kolejce w FIORUCCI'S !!
-- Bob Violence
%
Hmmm ... kalekiego KSIĘGOWY z kanapką falafel jest trafionyWÓZEK-CAR ...
-- Bob Violence
%
Hmmm ... Hash-piosenkarka i zezowaty facet spali na bezludnejwyspa, kiedy ...
-- Bob Violence
%
Hmmm ... główki od szpilki, podczas trzęsienia ziemi, napotka ALL-MidgetFiddle ORKIESTRA ... ha ... ha ...
-- Bob Violence
%
Hmmm ... arogancki bukiet z subtelną sugestię poliwinyloweCHLOREK ...
-- Bob Violence
%
Trzymając Mayo i przekazać kosmicznego ŚWIADOMOŚCI ...
-- Bob Violence
%
Brawo, Ronald !! Teraz możesz poślubić Linda Ronstadt też !!
-- Bob Violence
%
Jak mogę dostać się do domu?
-- Bob Violence
%
Jak wytłumaczyć MOC Wayne'a Newtona przez miliony? To th 'Wąsy... Czy zauważyłeś th 'sposób emanuje szczerość, uczciwość i ciepło?Jest to Moustache chcesz zabrać do domu i wprowadzić do Nancy Sinatra!
-- Bob Violence
%
Ilu emerytowanych murarzy z Florydy kupuje w tej chwili TEMPERÓWKI doołówków ??
%
Jak leci w tych modułowe urządzenie MIŁOSNE ??
-- Bob Violence
%
Jak się czuje żona? Czy ona w domu cieszyć kapitalizmu?
-- Bob Violence
%
hubub, hubub, hubub, hubub, hubub, hubub, hubub, hubub, hubub, hubub.
-- Bob Violence
%
HUGH BEAUMONT zmarł w 1982 roku !!
-- Bob Violence
%
REPLIKI człowieka są wstawiane do kadzi drożdże odżywcze ...
-- Bob Violence
%
Zawsze dobrze się bawić, bo jestem z mojego umysłu !!!
-- Bob Violence
%
Jestem galaretki cukierka. Jestem galaretki cukierka.
-- Bob Violence
%
Jestem sygnalizacji świetlnej, a Alan Ginzberg porwał moje pranie w 1927 roku!
-- Bob Violence
%
Ja pokryte Czysty olej roślinny i piszę bestsellerem!
-- Bob Violence
%
Jestem głęboko zaniepokojony i chcę coś dobrego na śniadanie!
-- Bob Violence
%
Jestem zabawy ... Zastanawiam się, czy to NET FUN FUN lub brutto?
-- Bob Violence
%
Nie jestem nut ....
-- Bob Violence
%
Ja przekazuję wam ambasadora Fantasy Island !!!
-- Bob Violence
%
I przyniósł mi Bowling Ball - i niektóre leki !!
-- Bob Violence
%
Nie mogę się zdecydować, który Wrong Turn, aby pierwszy !! Zastanawiam się, czy BOBGuccione ma te problemy!
-- Bob Violence
%
Nie mogę o tym myśleć. Nie idź z ZABEZPIECZEŃ w kształcieLITTLE LULU - lub roboty produkcji cegieł ...
-- Bob Violence
%
Żądam bezkarnie!
-- Bob Violence
%
Nie zamawiałem żadnej woo-woo ... może YUBBA ... ale nie woo-woo!
-- Bob Violence
%
Nie wierzę, że naprawdę brakuje GAS .. Myślę, że to wszystko to tylkoBIG oszustwem ze strony sprzedawców plastik migowych - sprzedać więcej numerów !!
-- Bob Violence
%
... Nie wiem dlaczego, ale nagle, chcę omówić spadkową iloraz inteligencjiPOZIOMY z niebieską wstążką podkomisji Senatu!
-- Bob Violence
%
Nie wiem, dlaczego to powiedziałem, że ... Myślę, że to pochodzi z wypełnieniami wmoje tylne trzonowce ...
-- Bob Violence
%
... Nie podoba mi się Frank Sinatra czy jego dzieci.
-- Bob Violence
%
I nie rozumieją Humor z Three Stooges !!
-- Bob Violence
%
Czuję ... szyjnych ...
-- Bob Violence
%
Czuję się lepiej o problemach światowych teraz!
-- Bob Violence
%
Czuję się jak mokry parkometru na Darvon!
-- Bob Violence
%
Czuję, że dzielę się `` corn-DOG '' Nikita Chruszczow ...
-- Bob Violence
%
Czuję, że jestem w muszli klozetowej z pinezkę na moim czole !!
-- Bob Violence
%
Czuję się częściowo utwardzony!
-- Bob Violence
%
Wypełniam MOICH przemysłowych pojemników na odpady ze starych egzemplarzy "Strażnica"a następnie dodać Hawaiian dziurkowania do góry ... Wyglądają ładnie w zagrodzie ...
-- Bob Violence
%
Myślę, że to był tylko sen ... albo odcinek Hawaii Five-O ...
-- Bob Violence
%
Chyba dostaliście duże mięśnie przed robieniem zbyt STUDIOWANIE!
-- Bob Violence
%
Miałem dzierżawę kompleksu Edypa tyłu w '81 ...
-- Bob Violence
%
Miałem naleśnik makijaż na brunch!
-- Bob Violence
%
Mam maleńką Bowl w mojej głowie
-- Bob Violence
%
Mam bardzo dobrą DENTAL planu. Dziękuję Ci.
-- Bob Violence
%
Mam wizję! Jest to zjełczały dwukrotnie FISHWICH na wzbogacona BUN !!
-- Bob Violence
%
Przyjąłem Provolone w moim życiu!
-- Bob Violence
%
Mam wiele wykresy i diagramy ..
-- Bob Violence
%
... Mam przeczytać instrukcję ...
-- Bob Violence
%
- Widziałem Fun -
-- Bob Violence
%
Widziałem te przedłużacze jajka w moim supermarkecie ... Mam przeczytaćINSTRUKCJA ...
-- Bob Violence
%
Mam prawo do wstrzymania produkcji we wszystkich komediach NASTOLETNI SEX !!
-- Bob Violence
%
Muszę kupić nowy "dodge Miser" i dwa tuziny jordache JEANS, ponieważmoja viewscreen jest "user-friendly" !!
-- Bob Violence
%
Nie są małżeństwem od ponad sześciu lat, ale mieliśmy poradnictwa seksualnegocodziennie od Oral Roberts !!
-- Bob Violence
%
Mam nadzieję, że kupiłem odpowiedni smakiem ... zzzzzzzzz ...
-- Bob Violence
%
Mam nadzieję, że coś dobrego przyszedł w mailu dzisiaj więc mam powód do życia !!
-- Bob Violence
%
Mam nadzieję, że `` 'antykoncepcję Eurythmics praktykę ...
-- Bob Violence
%
Mam nadzieję, że milionerzy jesteś zabawę! Właśnie zainwestował połowę swojego życiaoszczędności w drożdżach !!
-- Bob Violence
%
Wymyśliłem skoki spadochronowe w 1989 roku!
-- Bob Violence
%
Dołączyłem Scjentologię na sprzedaż garażu !!
-- Bob Violence
%
Właśnie zapomniałem całe filozofię życia !!!
-- Bob Violence
%
Właśnie mój książę zderzaka naklejki ... Ale teraz nie mogę sobie przypomnieć, kim jest ...
-- Bob Violence
%
Po prostu miałem operacji nosa !!
-- Bob Violence
%
Właśnie pokrył całe moje pokarmowego z teflonem!
-- Bob Violence
%
Właśnie słyszałem latach siedemdziesiątych było ponad !! I właśnie nawiązanie kontaktuz moim Leisure Suit !!
-- Bob Violence
%
Właśnie sobie przypomniałem coś o ropuchę!
-- Bob Violence
%
I KAISER ROLL ?! Co dobrego jest Kaiser rolki bez trochę coleslawod strony?
-- Bob Violence
%
Wiem żart !!
-- Bob Violence
%
Wiem, jak to zrobić Efekty specjalne !!
-- Bob Violence
%
Wiem th'MAMBO !! Mam Dwukolorowe chemii SET !!
-- Bob Violence
%
Znam rzeczy o: Troy Donahue, który nie może być nawet wydrukowane !!
-- Bob Violence
%
Zostawiłem mój portfel w łazience !!
-- Bob Violence
%
Lubię sposób w jaki tylko ich usta przejść ... Wyglądają jak DYING ostryg
-- Bob Violence
%
Lubię swoją Snoopy PLAKAT !!
-- Bob Violence
%
- Kocham Katrinka bo jeździ Pontiac. Jedziemy z dalateraz. I nakarmił kota.
-- Bob Violence
%
Kocham rock n roll! I na pamięć wszystkie słowa z "WIPE-out"1965 !!
-- Bob Violence
%
Muszę omówić PRZEPISY odkupu z co najmniej sześciu SLEAZEBALLS studio !!
-- Bob Violence
%
Kiedyś moje mieszkanie urządzone w całości w dziesięciu widelców do sałatek kolana !!
-- Bob Violence
%
Jestem właścicielem siedmiu ósmych wszystkich artystów w centrum miasta Burbank!
-- Bob Violence
%
I odłożyć mój egzemplarz "BOWLING WORLD" i myśleć o GUN KONTROLIustawodawstwo...
-- Bob Violence
%
Reprezentuję sardynki !!
-- Bob Violence
%
Wnioskuję weekend w Hawanie z Phil Silvers!
-- Bob Violence
%
... Widzę sedesy ...
-- Bob Violence
%
Wybrałem E5 ... ale nie słyszałem "Sam Sham i faraonów"!
-- Bob Violence
%
I zapach zjełczałego corn dog!
-- Bob Violence
%
Czuję jak mokry redukującego klinice na Dzień Kolumba!
-- Bob Violence
%
Myślę, że jestem sensacją teraz !!
-- Bob Violence
%
... Myślę, że lepiej wrócić do mojego biurka i zabawki z kilkoma wspólneNieporozumienia ...
-- Bob Violence
%
Myślę, że będę się zabić skacząc z tej chwili okno 14-ci STORYczytanie poezji Erica Jong'S !!
-- Bob Violence
%
Myślę, że moja kariera jest zniszczony!
-- Bob Violence
%
Kiedyś być fundamentalistycznych, ale potem usłyszałem o HIGHPROMIENIOWANIE poziomach i kupił Encyklopedia !!
-- Bob Violence
%
... Chcę T.V. kolor i wibrującym łóżku !!!
-- Bob Violence
%
Chcę wegetarianinem BURRITO iść ... z dodatkowym MSG !!
-- Bob Violence
%
Chcę dzierżawy OIL Wesson !!
-- Bob Violence
%
Chcę kolejnego ponownego zapisu na mojej sałatka Cezara !!
-- Bob Violence
%
Chcę uszami! Chcę dwie kule czarny uszy do sprawiają, że czuję ciepło 'n zabezpieczyć !!
-- Bob Violence
%
... Chcę czterdzieści dwa TRYNEL SYSTEMS flotacji zainstalowane wGODZINY sześć i pół !!!
-- Bob Violence
%
Chcę prezydencja tak źle, mogę już spróbować przystawek.
-- Bob Violence
%
Chcę się ubrać cię tak Tallulah Bankhead i obejmować cię wazelinąi pszenicy rozrzedza ...
-- Bob Violence
%
Chcę zabić wszystkich tutaj z cute kolorowe bomby wodorowej !!
-- Bob Violence
%
... Chcę do wykonywania czynności czaszkowych z Tuesday Weld !!
-- Bob Violence
%
Chcę przeczytać mój nowy wiersz o mózgi wieprzowe i kosmosu ...
-- Bob Violence
%
Chcę tak szczęśliwy, żyły na szyi wyróżniać !!
-- Bob Violence
%
Chcę Ci zapamiętać zebrane wiersze Edna St. Vincent Millay... WSTECZ!!
-- Bob Violence
%
Chcę Ci zorganizować swoje tace cieście ... moje herbaty TINS są lśniące wPowstawanie takich jak rzędu bębna mażoretek - proszę nie bądź na mnie wściekły -
-- Bob Violence
%
Urodziłem się w fabryce Gospodyni Cupcake przed rewolucją seksualną!
-- Bob Violence
%
Robiłem pączki, a teraz jestem w autobusie!
-- Bob Violence
%
Chciałbym manikiurzystka sex-zagłodzony znaleziony martwy w Bronksie !!
-- Bob Violence
%
Chciałbym być na rogu ulicy Cincinnati trzyma czystej psa!
-- Bob Violence
%
Zastanawiam się, czy mógłbym zacząć w świecie kredytową?
-- Bob Violence
%
Zastanawiam się, czy powinienem powiedzieć im o moim poprzednim życiu jako kompletnyNIEZNAJOMY?
-- Bob Violence
%
Zastanawiam się, czy powinienem postawić się w depozycie !!
-- Bob Violence
%
Zastanawiam się, czy jest coś, co dobrze na dzisiejszy wieczór?
-- Bob Violence
%
Chciałbym oddać mocz w puli owalne, porcelany -
-- Bob Violence
%
Chciałbym moje dane bazy julienned i mieszać-smażone!
-- Bob Violence
%
Chciałbym jakiś fast-food ... a potem chcę być sam -
-- Bob Violence
%
Zjem wszystko, co jest jasne, niebieskie !!
-- Bob Violence
%
pokażę ci mój numer teleksu, jeśli pokażesz mi ciebie ...
-- Bob Violence
%
Jestem piłki fuschia bowling gdzieś w Bretanii
-- Bob Violence
%
Jestem geniuszem! Chcę zakwestionować konstrukcji zdaniowych z Susan Sontag !!
-- Bob Violence
%
Jestem atomową łódź podwodną pod polarnej pokrywy lodowej i muszę Kleenex!
-- Bob Violence
%
Jestem też przeciwko ciała surfowania !!
-- Bob Violence
%
Jestem również pre-pre-wylewa medytował i prerafaelitów !!
-- Bob Violence
%
Jestem ANN LANDERS !! Mogę okradać sklepy !!
-- Bob Violence
%
Jestem Zmiana kanału ... Ale mogę to reklamy dla "roncoCUD BAMBUS parowce "!
-- Bob Violence
%
Jestem ciągle zdumiony th'breathtaking skutków erozją wietrzną !!
-- Bob Violence
%
I na pewno nie jestem w Omaha!
-- Bob Violence
%
Jestem przygnębiony ... Mam nadzieję, że coś głęboko smażone w ramach tegoMiniaturowy kopulaste STADIUM ...
-- Bob Violence
%
Jestem ubiera w źle dopasowanym IVY-League SUIT !! Za późno...
-- Bob Violence
%
Jestem teraz, bo mam Merchandising Clout EMOCJONALNA !!
-- Bob Violence
%
Jestem zamknięty w podszewce czystego kiełbasa wieprzowa !!
-- Bob Violence
%
Cieszę się, że pamiętał, żeby wszystkie moje XEROX podkoszulki !!
-- Bob Violence
%
Jestem szybując nad jądrowego wysypiska śmieci w pobliżu Atlanta, Georgia !!
-- Bob Violence
%
Mam Big Bang Theory !!
-- Bob Violence
%
Mam kryzys wieku średniego tydzień!
-- Bob Violence
%
Mam doświadczenie religijne ... i nie biorę żadnych leków
-- Bob Violence
%
Mam doświadczenie odpisać! Potrzebuję kryzysu energetycznego !!
-- Bob Violence
%
Mam emocjonalny wybuch !!
-- Bob Violence
%
Mam emocjonalny wybuch !! Ale, uh, dlaczego jest wafel wmoja piżama POCKET ??
-- Bob Violence
%
Mam PIĘKNE myśli o mdły żony i smugbogatych firmowych prawników ...
-- Bob Violence
%
Ja bawię autostopem do Cincinnati lub Far Rockaway !!
-- Bob Violence
%
... Wyobrażam sobie zmysłową żyrafa, cavorting na zapleczuz koszerną DELI -
-- Bob Violence
%
Jestem w bezpośrednim kontakcie z wielu zaawansowanych koncepcji zabawy.
-- Bob Violence
%
Jestem w programie!
-- Bob Violence
%
Jestem medytacji na formaldehyd i azbestu przecieka do mojegoPRZESTRZEŃ OSOBISTA!!
-- Bob Violence
%
Jestem psychicznie przesadzony! Co to DROGOWSKAZ się do przodu? Gdzie RODSTERLING kiedy naprawdę go potrzebujemy?
-- Bob Violence
%
Nie jestem irański !! Głosowałem za Dianne Feinstein !!
-- Bob Violence
%
Nie jestem dostępny dla komentarza ..
-- Bob Violence
%
Udaję jestem pociągnięcie pstrąga! Czy robię to poprawnie ??
-- Bob Violence
%
Udaję, że wszyscy jesteśmy oglądania Phil Silvers zamiast RICARDOMontalban!
-- Bob Violence
%
Ja spokojnie czytając najnowszy numer "BOWLING WORLD", podczas gdy moja żonai dwoje dzieci stoją cicho, ...
-- Bob Violence
%
Jestem oceniane PG-34 !!
-- Bob Violence
%
Odbieram zaszyfrowaną wiadomość od EUBIE BLAKE !!
-- Bob Violence
%
Jestem RELIGIJNE !! Kocham człowieka z Hairpiece !! Wyposaż mnie pociskami !!
-- Bob Violence
%
Jestem zgłoszenie się do służby jako współczesnego człowieka. Chcę teraz zrobić łacińskiego Hustle!
-- Bob Violence
%
Jestem golenia !! Jestem Golenie !!
-- Bob Violence
%
Siedzę na moim QUEEN szybkość ... Dla mnie to ENJOYABLE ... Jestem CIEPŁY... Jestem WIBRACYJNY ...
-- Bob Violence
%
Myślę o cyfrowych systemów odczytu i generowanych przez komputerIMAGE SKŁADY ...
-- Bob Violence
%
Jestem totalnie przybity nad libijską sytuacją a ceną z kurczaka ...
-- Bob Violence
%
Używam rentgenowskiej VISION aby uzyskać rzadką spojrzenie wewnętrznejFunkcjonowania tego ZIEMNIAKA !!
-- Bob Violence
%
Noszę Pampers !!
-- Bob Violence
%
Jestem mokry! Jestem dzika!
-- Bob Violence
%
Jestem młody ... jestem zdrowy ... mogę podwyżka THRU CAPT Grogan'S lędźwiowego REGIONÓW!
-- Bob Violence
%
Jestem ZIPPY Pinhead i jestem w pełni zaangażowana w trybie świątecznym.
-- Bob Violence
%
Mam kuzyna, który pracuje w dzielnicy mody ...
-- Bob Violence
%
Mam pomysł!! Dlaczego nie patrzą na ciebie tak mocno, gdybyś zapomniałNUMER UBEZPIECZENIA SPOŁECZNEGO!!
-- Bob Violence
%
Czytałem siedem milionów książek !!
-- Bob Violence
%
... Ich bin in einem dusenjet ins Jahr 53 vor chr ... Ich im Landeantiken Rom ... einige gladiatoren spielen scrabble ... Ich riechePIZZA ...
-- Bob Violence
%
Jeżeli dana osoba jest znana w tym kraju, to trzeba iść na drodze doMiesięcy w czasie i mają swoje nazwy błędnie na bokuGREYHOUND SCENICRUISER !!
-- Bob Violence
%
Jeśli wybrany, Zippy zobowiązuje się do każdego i każdy Amerykanin 55-letni Houseboy ...
-- Bob Violence
%
Jeśli jestem wybrany nikt nigdy nie będzie trzeba zrobić pranie raz!
-- Bob Violence
%
Jeśli jestem wybrany, betonowe bariery wokół Białego Domu będziezastąpiony przez gustowne replik pianki z Ann-Margret!
-- Bob Violence
%
Gdybym czuła się bardziej wyrafinowane, że umrę ze wstydu!
-- Bob Violence
%
Gdybym miał Q-Tip, mogę zapobiec th 'załamanie negocjacji !!
-- Bob Violence
%
... Gdybym miał niewydolność serca teraz, nie mogę być bardziej szczęśliwy, człowiek !!
-- Bob Violence
%
Gdybym wyciągnąć ten przełącznik będę Rita Hayworth !! Albo Scjentologiem!
-- Bob Violence
%
jeśli błyszczy, pochłaniają go !!
-- Bob Violence
%
Jeśli nasze zachowanie jest surowa, nie musimy się dobrze!
-- Bob Violence
%
Jeśli Robert Di Niro morduje Walter Slezak, będą Jodie Foster poślubić Bonza ??
-- Bob Violence
%
W 1962 roku można było kupić parę spodni ze skóry rekina, z paskiem"Continental", za 10,99 dolara !!
%
W Newark na pralnie są otwarte 24 godziny na dobę!
-- Bob Violence
%
INSIDE, mam to samo zaburzenie osobowości LUCY RICARDO !!
-- Bob Violence
%
Wewnątrz jestem już szlochając!
-- Bob Violence
%
Jest to prawdziwy tatuaż, jak krawężnik lub pancernik? Albo jesteśmy cierpienia Safewaya?
-- Bob Violence
%
Czy on MAGIC INCA niosąc żaba na ramionach ?? Czy FROGJego GuideLight ?? To ciekawe, że pies biegnie już na schodach ...
-- Bob Violence
%
Czy 1974? Co na kolację? Mogę spędzać FUND College w jednymdzikie południu ??
-- Bob Violence
%
Czy to jest czysta w innych wymiarach?
-- Bob Violence
%
Czy nouvelle cuisine, gdy 3 oliwki walczą z muszelki w miłymPłyta Sos Mornay?
-- Bob Violence
%
Czy coś się wydarzy GWAŁTOWNY do kosza?
-- Bob Violence
%
Czy to out-take z "Brady Bunch"?
-- Bob Violence
%
Czy to będzie obejmować RAW ludzkiego ecstasy?
-- Bob Violence
%
Czy to TERMINAL zabawy?
-- Bob Violence
%
Jest to linia do najnowszej kapryśny dramatu jugosłowiańskiego które równieżaż chce się płakać i ponowne wojny w Wietnamie?
-- Bob Violence
%
Czy to nie jest mój STOP ?!
-- Bob Violence
%
To nie znaczy nic, jeśli nie mam tego SWING !!
-- Bob Violence
%
To był żart!! Zdobyć?? I otrzymywał wiadomości od Davida Lettermana !!YOW !!
-- Bob Violence
%
Jest dużo zabawy, że żyje ... Zastanawiam się, czy moje łóżko jest?!?
-- Bob Violence
%
To nie ma sensu ... Poszedłem do "Club Med" !!
-- Bob Violence
%
To oczywiste ... futra nigdy nie osiągnął Stambule ... Byłaś EXTRAw remake'u "Topkapi" ... Wracaj do domu, do żony ... RobiFrancuskie tosty!
-- Bob Violence
%
To jest w porządku - jestem intelektualistą, too.
-- Bob Violence
%
Jest to cykl płukania !! Oni wszyscy zignorowali płukanie !!
-- Bob Violence
%
Japonia jest wspaniałym planeta - Zastanawiam się, czy my kiedykolwiek osiągnąć swój poziomPorównawczych ZAKUPY ...
-- Bob Violence
%
Jezuickich księży Dating dyplomatów karierę !!
-- Bob Violence
%
Jezus jest moim POSTMASTER OGÓLNE ...
-- Bob Violence
%
Dzieci, nie zarobił mnie ... "Przygody z Higieny Psychicznej" może byćprzeprowadza się za daleko!
-- Bob Violence
%
Dzieci, siedem podstawowych grup żywności są GUM, ciasto francuskie, Pizza,Pestycydy, antybiotyki, NUTRA-słodkie i niewypałów MLEKO !!
-- Bob Violence
%
Pralnia jest piąty wymiar !! ... Hm ... hm ... th 'pralkaJest to czarna dziura i różowe skarpetki są kierowcy autobusów, którzy po prostu zakochałem się w !!
-- Bob Violence
%
LBJ, LBJ, ile żartów powiedziałaś dzisiaj ??!
-- Bob Violence
%
Leona, chcę wyznać, co do ciebie ... Chcę zawijać cię w ScarletROBE obszyta polichlorku winylu ... Chcę opróżnić popielniczki ...
-- Bob Violence
%
Pozwól mi zrobić mój hołd pończochy ...
-- Bob Violence
%
Niech wszyscy pokazać ludzką troskę o trudnościach prawnych REVERAND Księżyca !!
-- Bob Violence
%
Załóżmy, wysyłać Rosjanom wadliwych akcesoriów stylu życia!
-- Bob Violence
%
Życie jest konkurs popularności! Jestem ŚWIEŻYM Szczery !!
-- Bob Violence
%
Jak zawsze mówię - nic nie może pokonać kiełbasa tutaj w Düsseldorfie !!
-- Bob Violence
%
włosy Loni Anderson powinna zostać zalegalizowana !!
-- Bob Violence
%
Spójrz w głąb otworów !! Czy widzisz jakieś elfy lub EDSELS ... alboCOCKTAIL WHISKY Z SODĄ?? ...
-- Bob Violence
%
Spójrz mi w oczy i staram się zapomnieć, że masz kartę pobierają Macy!
-- Bob Violence
%
Wygląd! Drabina! Może to prowadzi do nieba, albo kanapkę!
-- Bob Violence
%
WYGLĄD!! Ponury amerykańskich nastolatków szortach Madras i "StadoMewy "Strzyżenie!
-- Bob Violence
%
Make me wyglądać Linda Ronstadt ponownie !!
-- Bob Violence
%
SIÓDMA MĄŻ Mary Tyler Moore nosi moje dacron rękawów w sposóbtani hotel w Honolulu!
-- Bob Violence
%
Może moglibyśmy pomalować Goldie Hawn bogatym błękit pruski -
-- Bob Violence
%
Meryl Streep jest mój ginekolog!
-- Bob Violence
%
MMM-MM !! Więc to BIO-NEBULATION!
-- Bob Violence
%
Mmmmmm-MMMMMM !! Talerz z kawałków na parze świni miesza się zstrzępy kilku KURCZAKACH !! ... O chłopie!! Mam zamiar połknąćSekcja DRZEJĄCY-off krowiego lewej nogi moczone w oleju z nasion bawełny iCUKIER!! ... Zobaczmy ... Następnie będę miał ciało MASA-up z cute,BABY LAMBS smażone w roztopionym, tłuste tkanki z ciepłą krwiąktoś zwierzę raz pogłaskał !! ... MNIAM !! To było dobre!! Na deser,Muszę hamburgera tofu z kiełków fasoli na kamiennym ziemi, całePSZENICA BUN !!
-- Bob Violence
%
Pan i Pani PED, mogę pożyczyć 26,7% produkcji RAYON WŁÓKIENNICZEindonezyjskiego archipelagu?
-- Bob Violence
%
Ciotka MAUREEN był doradcą wojskowym Ike & Tina Turner !!
-- Bob Violence
%
Moim biologicznym BUDZIK prostu poszedł ... Ma bezgłośne drzemkiFunkcja i kuchnia pełna !!
-- Bob Violence
%
Moja Kodeks Etyki jest wakacji w słynnym Schroon Lake, w stanie Nowy Jork !!
-- Bob Violence
%
Moje uszy są GONE !!
-- Bob Violence
%
Moja twarz jest nowa, moja licencja wygasła, i jestem pod opieką lekarza !!!!
-- Bob Violence
%
Moja fryzura jest całkowicie tradycyjny!
-- Bob Violence
%
Mój dochód rozporządzalny jest wszystko!
-- Bob Violence
%
Mój rekord LESLIE GORE jest złamany ...
-- Bob Violence
%
Moje życie jest patio zabawy!
-- Bob Violence
%
Mój umysł jest kartoflisko ...
-- Bob Violence
%
Mój umysł czyni popielniczki w Dayton ...
-- Bob Violence
%
Mój nos czuje się jak zły Ronald Reagan filmu ...
-- Bob Violence
%
Mój nos jest NUMB!
-- Bob Violence
%
... Moje spodnie po prostu poszedł na dzikim szał przez Long Island Bowling Alley !!
-- Bob Violence
%
Moje spodnie po prostu poszedł do liceum w Carlsbad Caverns !!!
-- Bob Violence
%
Moja winylu kowbojem portfel powstał w Hong Kongu Montgomery Clift!
-- Bob Violence
%
Mój wuj Murray pokonał Egipt w 53 roku p.n.e. I mogę to udowodnić też !!
-- Bob Violence
%
Moja wazeliny w toku ...
-- Bob Violence
%
NANCY !! Dlaczego wszystko jest RED ?!
-- Bob Violence
%
NATHAN ... twoi rodzice byli w Carcrash !! Oni WYGAŚNIĘCIE - OniZwinięty Nie mieli pilarek ... Nie mieli pieniędzy ... Oni MASZYNYrobił pigułki w skąpym GRASS spódnice ... Nathan, ja EMULATED je ... alebyli OFF-KEY ...
-- Bob Violence
%
NEWARK został REZONED !! DES MOINES został REZONED !!
-- Bob Violence
%
Sutki, dołeczki, kostki, Nickles, zmarszczki, pryszcze !!
-- Bob Violence
%
Nie ZMYSŁOWA ... Tylko "figlarny" ... i potrzebuje dentystyczne pracy ... w bólu !!!
-- Bob Violence
%
Teraz jestem w depresji ...
-- Bob Violence
%
Teraz myślę, że po prostu osiągnął stan nadciśnienia tętniczego, które zbiega sięPRZED zobaczyć łączną przy kasie LICZNIKA Safeway!
-- Bob Violence
%
Teraz rozumiem znaczenie "mod squad"!
-- Bob Violence
%
Teraz mam czym mimowolnie tasuje bliżej DIP z małżBROKEN plastikowe widelce w nim !!
-- Bob Violence
%
Teraz jestem koncentrując się na konkretnej bitwie pancernej pod koniec II wojny światowej!
-- Bob Violence
%
Teraz mam mdły myśli o pięknej, okrągłe żonyHOLLYWOOD potentatów MOVIE obłożony CARS z pleksiglasu i zbliżyłprzez małych chłopców sprzedających owoce ...
-- Bob Violence
%
Teraz BARBIE KEN i są trwale uzależnieni od HALUCYNOGENNYCH ...
-- Bob Violence
%
Teraz moje EMOCJONALNE zasoby są mocno zaangażowana w 23% z wytopui przemysłu rafinacji stanie Nevada !!
-- Bob Violence
%
Teraz, gdy mam "Apple" Rozumiem księgowania kosztów !!
-- Bob Violence
%
A teraz wysłać do quiche !!
-- Bob Violence
%
Oczywiście, że rozumiesz o Pledy w cyklu SPIN -
-- Bob Violence
%
O mój Boże - słońce właśnie wpadł Yankee Stadium !!
-- Bob Violence
%
O, teraz rozumiem!! "Idzie na plaży", co, synku ??
-- Bob Violence
%
Dobra ... Idę do domu, aby napisać "Nienawidzę Rubika Podręcznik dlaDEAD miłośników kotów "...
-- Bob Violence
%
W PORZĄDKU!! Włącz dźwięk tylko dla TRYNEL wykładzina, w pełni wyposażoneR.V. i SYSTEMS flotacji !!
-- Bob Violence
%
OMNIVERSAL ŚWIADOMOŚĆ ?? Och, YEH !! Najpierw trzeba czterech litrów galaretkii wielkim kluczem !! ... Myślę, że spadnie th'WRENCH w galaretki jakbyto był smak, lub składnik ... ... albo ... Ja ... hm ... gdzie jestpralek?
-- Bob Violence
%
Na sekundę myśli, być może będę się nagrzewać niektórych ziaren pieczone i oglądać REGISPhilbin ... To wspaniale, że żyje !!
-- Bob Violence
%
Z drugiej strony, życie może być niekończąca się parada TRANSSEKSUALIŚCIEPikowanie PSZCZOŁY na pokładzie statku do DisneyWorld jeśli tylko pozwolimy go !!
-- Bob Violence
%
Na drodze, ZIPPY jest pinhead bez celu, ale nigdy bez punktu.
-- Bob Violence
%
Dawno, dawno temu, cztery osoby dzwoniące amfibia HOG zaatakował rodzinęBezbronni, wrażliwy kolekcjonerów i sprowadzić ich własnościWARTOŚĆ!!
-- Bob Violence
%
Gdy nie było zabawne ... To było przed planowaniem MENU, modaoświadczenia lub urządzenia NAUTILUS ... Wtedy, w 1985 roku był ... FUNcałkowicie zakodowany w tym maleńkim MICROCHIP ... ona zawierać 14,768 niejasnozabawne piloci SIT-COM !! Musieliśmy czekać cztery miliardy lat, alewreszcie Jerry Lewis, MTV oraz duży wybór creme wypełnioneprzekąska ciastka!
-- Bob Violence
%
Jeden FISHWICH wymyślanie !!
-- Bob Violence
%
Jedno życie do życia dla wszystkich moich dzieci w innym świecie przez wszystkie dni naszego życia.
-- Bob Violence
%
ONE: będę przekazać całą moją "Baby Huey" kolekcję komiksów dośródmieście PLASMA CENTER ...DRUGA: Nie będę założyć zespół o nazwie "Kadafi i kadrze HIT" ...TRZECIA: Nie będę kiedykolwiek bębnowych wyschnie mój foksterier !!
-- Bob Violence
%
... Czy ty jazdy Pontiac, który zatrąbił na mnie w Miami ostatni wtorek?
-- Bob Violence
%
Ojcze nasz, któryś jest w niebie ... szczerze modlić się, że ktoś na totabela będzie zapłacić za co i rozdrobnione English muffin, a także ...zostawić hojny napiwek ....
-- Bob Violence
%
nad w zachodniej Filadelfii szczeniak wymiotuje ...
-- Bob Violence
%
Nad podziemnym! Pod wiaduktem! Wokół przyszłości i nie do naprawienia !!
-- Bob Violence
%
Wybaczcie, mam mówić po angielsku?
-- Bob Violence
%
Wybaczcie, ale czy wiesz, co to znaczy być naprawdę jeden z swoim stoisku!
-- Bob Violence
%
Peggy Fleming jest kradzież BALLS kosz do karmienia niemowląt w Vermont.
-- Bob Violence
%
Ludzie upokarzając salami!
-- Bob Violence
%
PIZZA!!
-- Bob Violence
%
Umieść mnie na liczniku BUFOR podczas umniejszać kilka boye wTrianon pokoju !! Daj mi jeden ze swoich ZALEŻNYCH!
-- Bob Violence
%
Proszę się ze mną do domu ... Mam Tylenol !!
-- Bob Violence
%
Psychoanaliza?? Myślałem, że to była naga sesja rap !!!
-- Bob Violence
%
PUNK ROCK !! DISCO KACZKA !! Antykoncepcyjnej !!
-- Bob Violence
%
Szybkie, śpiewaj mi hymnu BUDAPEST NATIONAL !!
-- Bob Violence
%
KREWNYCH !!
-- Bob Violence
%
Pamiętaj, że w 2039, pianka i makaron będą dostępne tylko na receptę !!
-- Bob Violence
%
Rhapsody in kleju!
-- Bob Violence
%
Mikołaj przychodzi ewakuacyjne sobie jasny niebieski podgrzewacze nogę... On Scrub do papieża z łagodnym mydłem przez 15 minut,Jane Fonda w roli głównej !!
-- Bob Violence
%
Wyślij swoje pytania do `` ASK ZIPPY '', Box 40474, San Francisco, CA94140, USA
-- Bob Violence
%
Ciii !! Słyszę SZEŚĆ tatuażem Podnośniki DRIVERS podrzucając w blokach silnikówpustych beczek po oleju ...
-- Bob Violence
%
Czy mogę wykonywać moją Bobbie VINTON składanka?
-- Bob Violence
%
Czy mam zablokowane w biurem PRINCICAL dziś - lub mieć wazektomii ??
-- Bob Violence
%
Powinienem zacząć od czasu przeszedłem osobistości z beatnikstylista włosów albo mój brak odnoszą nastolatków pięciu do dobrego okulisty?
-- Bob Violence
%
Zarejestruj moje prośby.
-- Bob Violence
%
Tak to jest, jakie to uczucie być sałatka ziemniaczana
-- Bob Violence
%
Tak więc, jeśli przeliczymy podażowych FUTURES sojowy o wysokiej wydajności do bonówWskaźników, PRE-inflacyjnych ryzyko będzie maleć w tempie 2ZAKUPY sprees na bakłażan !!
-- Bob Violence
%
Ktoś w Dayton, Ohio jest sprzedaż używanych CARPETS do serbochorwacki
-- Bob Violence
%
Gdzieś w 1993 roku Nancy Sinatra doprowadzi bezkrwawym zamachu stanu na Guam !!
-- Bob Violence
%
Gdzieś, w centrum miasta Burbank prostytutka, jest przywieraniu kotlet LAMB !!
-- Bob Violence
%
Gdzieś na przedmieściach Honolulu, bezrobotny Tragarz jest biczowanie Uppartia nielegalna psylocybina Chop Suey !!
-- Bob Violence
%
Gdzieś w Tenafly, New Jersey, kręgarz ogląda "Zostaw goBeaver "!
-- Bob Violence
%
Rozprzestrzenianie masła orzechowego przypomina mi opery !! Zastanawiam się dlaczego?
-- Bob Violence
%
Statecznikach !! ... Kliknij ...
-- Bob Violence
%
Mówiąc Pinhead Blues:Och, ja straciłem `hello kitty '' Lalki i mam zły odbiór na kanale DWADZIEŚCIA SZEŚĆ!!Th'HOSTESS fabrycznym jest closin "w dół i właśnie usłyszałem Zasu Pitts został DEAD od lat .. (sniff)Moja kolekcja buty platformy zostało przeżute przez psa th ', Alexander Haig nie pozwoli mi wziąć prysznic 'til Wielkanocy ... (snurf)Więc poszłam do kuchni, ale WALNUT boazerii whup mi wzrostowy mah Haid !! (Na nie, nie, nie .. Heh, heh)
-- Bob Violence
%
STUKAJĄCY? Ty POLITYCY! Nie zdajesz sobie sprawę, że w końcu "WashCykl "jest ceniona moment dla większości ludzi ?!
-- Bob Violence
%
Tex SEX! HOME kół! Kapanie kawy !! Zabierz mnie doMinnesota, ale mnie nie ZAWTYDZAĆ !!
-- Bob Violence
%
Th 'umysł jest Pizza Palace of th "DUSZY
-- Bob Violence
%
Dzięki Bogu!! ... To HENNY Youngman !!
-- Bob Violence
%
Aprecjacja samej średniej graphisticator wizualnej wartocała suaveness i dekadencji, która obfituje !!
-- Bob Violence
%
Cały chińskich kobiet siatkówka TEAM wszystkie akcje jednej osobowości -i mają od urodzenia !!
-- Bob Violence
%
Fakt, że 47 osób są krzyki i pot kaskadowych w dół mojegoKręgosłupa jest dość przyjemne !!
-- Bob Violence
%
Ziemie falafel na mojej głowie i stać się wegetarianinem ...
-- Bob Violence
%
... Autostradzie jest wykonana z wapna Jello i moja HONDA jest barbequeuedOSTRYGA! Yum!
-- Bob Violence
%
Wojna koreańska musiało być zabawne.
-- Bob Violence
%
... Że Mysterians są tutaj z moim sztruks Mydelniczka !!
-- Bob Violence
%
The Osmonds! Wszyscy jesteście Osmonds !! Rzucanie się na autostradzie o świcie !!!
-- Bob Violence
%
Pillsbury Doughboy płacze za kres filmów Burt Reynolds !!
-- Bob Violence
%
Różowa skarpety były pierwotnie z 1952 !! Ale poszli na Marsaokoło 1953 !!
-- Bob Violence
%
SAME WAVE utrzymuje najbliższych i zawaleniem jak sztuczny jedwab MUU-MUU ...
-- Bob Violence
%
Nie ma prawdy. Nie ma rzeczywistości. Nie ma spójności.Istnieje żaden bezwzględny oświadczenia. Jestem bardzo pewnie źle.
-- Bob Violence
%
Jest to trochę obraz Ed McMahon robi złe rzeczy, aby Joan Rivers$ 200.000 w Malibu Beach DOMU !!
-- Bob Violence
%
Nie ma wystarczająco dużo pieniędzy, żeby kupić 5000 puszek Kluski-Roni!
-- Bob Violence
%
"Są to mroczne czasy dla najwyższych wartości całej ludzkości za!""Są to mroczne czasy dla wolności i dobrobytu""Są to wspaniały czas, aby umieścić swoje pieniądze w złym facetem kick bzduraz Megaton człowieka! "
-- Bob Violence
%
Te PRZETWORY powinna być karmiona siłą do PENTAGON URZĘDNIKÓW !!
-- Bob Violence
%
Oni zawalił ... jak mniszek na ulicy ... nie mieli nastolatkaodwołać się!
-- Bob Violence
%
Ten bezpłciowe PIG naprawdę sprowadza krew moja ... On jest taki ... taki ... PILNE !!
-- Bob Violence
%
"To jest zadanie dla Boba przemocy i szumowiny, niewiarygodnie głupia MUTANT DOG".
-- Bob Violence
%
Loty bez fanaberii - trzymaj th 'boczkiem kanadyjskiej !!
-- Bob Violence
%
To musi być dobra partia - Moja klatka piersiowa jest wciśnięty aż do bóluprzed czyjąś MARTINI !!
-- Bob Violence
%
... Musi to być, jak to jest być po studiach !!
-- Bob Violence
%
Ta pizza symbolizuje mój pełny emocjonalnej RECOVERY !!
-- Bob Violence
%
Ten PORCUPINE zna kod pocztowy ... I ma "VISA" !!
-- Bob Violence
%
Ten TOPS od mojego partygoing doświadczenie! Ktoś mi się nie podoba toze mną rozmawiać o na duchu europejskim filmie ...
-- Bob Violence
%
Są to nie Winos - to moja żongler, mój Aerialist, mój mieczSwallower, a mój LATEX NOWOŚĆ DOSTAWCA !!
-- Bob Violence
%
Tysiące dni cywilów ... przyniosły uczucie dla ...Moduły estetyczne -
-- Bob Violence
%
Dziś, trzy Winos z Detroit sprzedała mi oprawione zdjęcie Tab Hunterprzed jego metamorfozę!
-- Bob Violence
%
Stóp, kolan, sutki. Stóp, kolan, sutki, KNUCKLES ...Sutki, dołeczki, kostki, Nickles, zmarszczki, pryszcze !!
-- Bob Violence
%
Tony Randall! Czy twoje życie patio ZABAWY ??
-- Bob Violence
%
Uh-oh - Dlaczego nagle myśli czcigodną przywódcy religijnegobeztroskie w weekend Fort Lauderdale?
-- Bob Violence
%
O o!! Zapomniałem złożyć obowiązkowej analizy moczu!
-- Bob Violence
%
O O!! Włożyłem "Great czołowo kolizje kolejowego w latach 50-tych przez"błąd!!!
-- Bob Violence
%
O O!! Myślę KEN jest nadmiernie DUE na jego R.V. Płatności i sędziegoZałamanie nerwowe też !! Ha ha.
-- Bob Violence
%
O o!! Mam za dużo zabawy !!
-- Bob Violence
%
O O!! Jesteśmy z części samochodowych i wyroby gumowe!
-- Bob Violence
%
Używane zszywki są dobre z sosem sojowym!
-- Bob Violence
%
Zastępczo wystąpić pewne powody, by żyć !!
-- Bob Violence
%
Głosujcie na mnie - jestem dobrze stożkowy, pochopnie, nieprzemyślane i podatku odroczonego!
-- Bob Violence
%
Czekaj ... to jest rzeczą zabawy lub do końca życia w Petticoat Junction ??
-- Bob Violence
%
Był mój SOY LOAF pominięte w th'RAIN? Smakuje naprawdę dobry !!
-- Bob Violence
%
Jesteśmy teraz cieszyć całkowitą wzajemnej interakcji w wyimaginowanej hydromasażem ...
-- Bob Violence
%
Mamy różne ilości włosów -
-- Bob Violence
%
Właśnie dołączył do spraw patrol włosów!
-- Bob Violence
%
Kładziemy dwie kopie magazynu People w ciemnych, wilgotnych mobile home.45 minut później Cyndi Lauper wyłania sobie klatka dla ptaków na głowie!
-- Bob Violence
%
Cóż, jestem tutaj w Ameryce .. I like it. Nienawidzę tego. Lubię to. jaNienawidzę tego. Lubię to. Nienawidzę tego. Lubię to. Nienawidzę tego. LUBIĘ ...Emocje są ogarniająca mnie !!
-- Bob Violence
%
Cóż, jestem klasycznym ANAL zapamiętywaniem !! A ja szukam sposobu naZastępczo wystąpić pewne powody, by żyć !!
-- Bob Violence
%
Cóż, jestem NIEWIDOCZNE znowu ... równie dobrze mogę złożyć wizytę do pańPOKÓJ ...
-- Bob Violence
%
Cóż, O.K. Będę kompromis z moich zasad, ponieważ egzystencjalnej rozpaczy!
-- Bob Violence
%
Gdyby te pasternak POPRAWNIE marynowane w TACO SOSIE?
-- Bob Violence
%
Co za zbieg okoliczności! Jestem autoryzowane "SNOOTS gwiazd" dystrybutorów !!
-- Bob Violence
%
Co dobrego jest walizka CARDBOARD Anyway?
-- Bob Violence
%
Co potrzebne jest relacja dojrzały z dyskietki ...
-- Bob Violence
%
Co chcę, aby dowiedzieć się - nie wiem zbyt wiele o papugi Astro-Turf?
-- Bob Violence
%
Jaki program ich oglądanie?
-- Bob Violence
%
Co wszechświecie jest to, proszę ??
-- Bob Violence
%
Co się stało Sid? ... Czy Twoje napoje niezadowalające?
-- Bob Violence
%
Kiedy spotkałem th'POPE powrotem w '58, I szorowała go z mydłem lubDetergent do 15 minut. Wydawało się, że się spodoba ...
-- Bob Violence
%
Gdy obciążenie odbywa myślę, że będę go myć ponownie ...
-- Bob Violence
%
Po otrzymaniu Ph.D. dostaniesz w stanie pracować w Burger King?
-- Bob Violence
%
Kiedy powiedziałeś "tereny silnie zalesionych" przypomniało mi zaległych CZYSZCZENIABILL ... Nie widzisz? O'Grogan POŁKNIĘTY zbiór WARTOŚCIOWA COINi miał zamordować jedynym człowiekiem, który wiedział !!
-- Bob Violence
%
Gdzie skarpetkach przejść kiedy straci je w th "WASHER?
-- Bob Violence
%
Gdzie pójść, kiedy opróżnić?
-- Bob Violence
%
Gdzie Sandy Duncan
-- Bob Violence
%
Gdzie jest th "Kaczor Daffy EXHIBIT ??
-- Bob Violence
%
Gdzie jest maszyna Koks? Opowiedz mi żart!!
-- Bob Violence
%
Podczas gdy moja brainpan jest odmówił służby w Burger King, jezuityKapłani są randki dyplomatów karierę !!
-- Bob Violence
%
Gdy jesteś żucia, myśli o rachunku bankowym Stevena Spielberga ... jegobędą miały taki sam skutek, jak dwóch "blokery skrobi"!
-- Bob Violence
%
WHO widzi BEACH BUNNY szlochając na dywanie Shag ?!
-- Bob Violence
%
WHOA !! Ken i Barbie mają za dużo zabawy !! musi byćJony ujemne !!
-- Bob Violence
%
Dlaczego te sportowe buty sprzedawców następujące mi ??
-- Bob Violence
%
Dlaczego nie kiedykolwiek wprowadzić jakiekolwiek konkursy, Marvin ?? Nie wiesz swojewłasna ZIPCODE?
-- Bob Violence
%
Dlaczego wszystko jest wykonane z Lycra Spandex?
-- Bob Violence
%
Dlaczego jest tak, że kiedy umrzesz, nie można zabrać do rozrywki domowejCENTER z wami ??
-- Bob Violence
%
Poprawi moją CASH FLOW?
-- Bob Violence
%
Czy trzecia wojna światowa utrzymać "łonem Buddies" off powietrzu?
-- Bob Violence
%
Czy to niekończąca się seria przyjemnych ZDARZEŃ nigdy nie przestanie?
-- Bob Violence
%
Z tobą mogę być sobą ... nie musimy Dan Rather ...
-- Bob Violence
%
III wojna światowa? Nie, dziękuję!
-- Bob Violence
%
Trzy wojny światowej można uniknąć dzięki przestrzeganiu ściśle egzekwowane ubioru!
-- Bob Violence
%
Łoł! Wygląd!! Zabłąkany klopsik !! Załóżmy, przesłuchać go!
-- Bob Violence
%
Xerox lunch i złożyć je na "przestępców seksualnych"!
-- Bob Violence
%
Tak, ale widzę Zajączek w Skitnight skóry na żelazkoMAIDEN koncert?
-- Bob Violence
%
Nie możesz mnie zranić !! Mam assumable KREDYTY !!
-- Bob Violence
%
Chcesz powiedzieć, że teraz mogę cię zastrzelić w plecy i dalej rozmycie th 'Rozróżnienie pomiędzy fantazją a rzeczywistością?
-- Bob Violence
%
To znaczy, że nie chcą oglądać wrestlingu z Atlanty?
-- Bob Violence
%
Wybrałaś NOS Karl Malden'S !!
-- Bob Violence
%
Należy wszystko skakać przez dwie godziny, a ja decyduje o nowej kariery !!
-- Bob Violence
%
Byłaś s'posed się śmiać!
-- Bob Violence
%
TY!! Daj mi Bystry, PINKEST i najbardziej uroczy mały wiktoriańskiejDollhouse można znaleźć !! Sprawiają, że sztuczki !!
-- Bob Violence
%
Policzki siedzieć bliźniaczych nektarynki powyższych usta, które nie zna granic -
-- Bob Violence
%
Młodzież dziś! Dołącz do mnie w masowym wiecu dla tradycyjnych psychicznegopostawy!
-- Bob Violence
%
Yow!
-- Bob Violence
%
Yow! Ja bawimy się jeszcze?
-- Bob Violence
%
Yow! Czy jestem w Milwaukee?
-- Bob Violence
%
Yow! A potem mogliśmy siedzieć na kapturach samochodów na światłach stop!
-- Bob Violence
%
Yow! Czy ułożyliśmy już wrócił?
-- Bob Violence
%
Yow! Czy mamy jeszcze mokre?
-- Bob Violence
%
Yow! Czy jesteś self-smażenia prezydentem?
-- Bob Violence
%
Yow! Czy coś złego stało czy jestem w filmie drive-in ??
-- Bob Violence
%
Yow! Poszedłem po prostu poniżej granicy ubóstwa!
-- Bob Violence
%
Yow! Rzuciłem się na moim oknem!
-- Bob Violence
%
Yow! Chcę mój nos w światłach!
-- Bob Violence
%
Yow! Chcę mail z opalony karczocha do Nikaragui!
-- Bob Violence
%
Yow! Mam quadrophonic odczucie samych dwóch Winos w hucie!
-- Bob Violence
%
Yow! Wyobrażam sobie surfer van wypełniony sosem sojowym!
-- Bob Violence
%
Yow! Czy mój fallout shelter termitów dowód?
-- Bob Violence
%
Yow! Czy jednak ten stosunek seksualny ?? Czy to, co, to jest ??
-- Bob Violence
%
Yow! To dziura aż do centrum Burbank!
-- Bob Violence
%
Yow! To niektórzy ludzie wewnątrz ścianie! To jest lepsze niż mopem!
-- Bob Violence
%
Yow! Może powinienem prosić o moim bomba neutronowa w Paisley -
-- Bob Violence
%
Yow! Teraz mogę myśleć o wszystkich złych rzeczy, które zrobiłem do BOWLINGPIŁKA kiedy byłem w gimnazjum!
-- Bob Violence
%
Yow! Teraz możemy stać się alkoholikami!
-- Bob Violence
%
Yow! Ci ludzie wyglądają dokładnie tak, jak Donnie i Marie Osmond !!
-- Bob Violence
%
Yow! Idziemy do nowego disco!
-- Bob Violence
%
YOW !! Każdy z puli genetycznej!
-- Bob Violence
%
YOW !! Jestem w bardzo sprytny i urocza wariatów !!
-- Bob Violence
%
YOW !! Teraz rozumiem zaawansowaną mikrobiologii i Th 'nowych przepisów podatkowych reformy !!
-- Bob Violence
%
YOW !! Ziemia z wschodzącego SONY !!
-- Bob Violence
%
YOW !! Przed nami! To pączka HUT !!
-- Bob Violence
%
YOW !! Co należy cała rasa ludzka ROBIĆ ?? Spożywać jedną piątąChivas Regal, nartach nago MT dół. EVEREST i mają dzikiego seksu weekend!
-- Bob Violence
%
YOW !!! Dobrze się bawię!!!
-- Bob Violence
%
Komórki mózgowe zippy są starając się zlikwidować synaps ...
-- Bob Violence
%
| kevingnet/fortunes_translated | fortunes/pl/zippy.pl | Perl | mit | 53,737 |
% Load every component of the FLP tool chain.  consult/1 accepts a
% list and loads the files strictly in the order given, which matches
% the original one-directive-per-file sequence.
:- consult([ 'configuration',
             'iem_parser',
             'normal_form',
             'export_domain',
             'export_problem',
             'dates',
             'profile',
             'world_knowledge',
             'atTime',
             'baseKB',
             'flp_convert_pl_to_verb',
             'generate_glosses',
             'dialog_interface'
           ]).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_ec/ext/flp/loader.pl | Perl | mit | 362 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/BU3Xn7v6Kb/africa. Olson data version 2015g
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Indian::Reunion;
$DateTime::TimeZone::Indian::Reunion::VERSION = '1.94';

use strict;

use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;

@DateTime::TimeZone::Indian::Reunion::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );

# Observance spans for Indian/Reunion.  Each span is a 7-element array:
#   [ utc_start, utc_end, local_start, local_end,
#     offset_seconds, is_dst, short_name ]
# The zone has only two spans: local mean time (LMT, +13312s) until
# 1911-06-01, then a fixed UTC+4 offset (RET) with no DST changes.
my $spans =
[
    [
        DateTime::TimeZone::NEG_INFINITY, # utc_start
        60286796288, # utc_end 1911-05-31 20:18:08 (Wed)
        DateTime::TimeZone::NEG_INFINITY, # local_start
        60286809600, # local_end 1911-06-01 00:00:00 (Thu)
        13312,
        0,
        'LMT',
    ],
    [
        60286796288, # utc_start 1911-05-31 20:18:08 (Wed)
        DateTime::TimeZone::INFINITY, # utc_end
        60286810688, # local_start 1911-06-01 00:18:08 (Thu)
        DateTime::TimeZone::INFINITY, # local_end
        14400,
        0,
        'RET',
    ],
];

# Version of the Olson (tz) database this data was generated from.
sub olson_version {'2015g'}

# This zone never observes daylight saving time.
sub has_dst_changes {0}

# Last year for which span data was pre-generated.
sub _max_year {2025}

# Class::Singleton hook: build the singleton with the span table above.
sub _new_instance {
    return shift->_init( @_, spans => $spans );
}

1;
| rosiro/wasarabi | local/lib/perl5/DateTime/TimeZone/Indian/Reunion.pm | Perl | mit | 1,224 |
#!/bin/sh
. $(perl -e'print((getpwnam"interiot")[7])')/oraenv # source ~interiot/oraenv
exec ${ORAPERL:-$ORACLE_HOME/perl/bin/perl} -x "$0" "$@"; exit
#!/usr/bin/perl
#line 6

## This is a "Hello World"-level example of running a query via Oracle, within Perl.
## The first three lines ensure that it works even when running from within Cron,
## or when run by a user who doesn't have an 'oraenv' properly setup.

use strict;
use warnings;

use DBI;
use Data::Dumper;

# Connection parameters -- replace with real credentials (better yet,
# read them from a protected configuration file instead of hard-coding).
my $database = "database";
my $user     = "username";
my $passwd   = "password";

my $dbh = DBI->connect("dbi:Oracle:$database", $user, $passwd)
        or die "Couldn't connect to database: " . DBI->errstr;

my $example_query = "SELECT DISTINCT owner FROM all_objects";

my @results = $dbh->selectall_listofhashes($example_query);

print Dumper \@results;

# DBI has selectall_arrayref() and selectall_hashref(), but no
# selectall_listofhashes().  Fix that: each helper returns a list of
# hashrefs, one per row, keyed by column name (via the Slice => {}
# attribute of selectall_arrayref / fetchall_arrayref).
sub DBI::db::selectall_listofhashes {
    my ($dbh, $stmt, $attr, @bind) = @_;
    return @{ $dbh->selectall_arrayref($stmt, { %{ $attr || {} }, Slice => {} }, @bind) };
}

sub DBI::st::fetchall_listofhashes {
    my ($sth, $max_rows) = @_;
    return @{ $sth->fetchall_arrayref({}, $max_rows) };
}
| DeeNewcum/individual_scripts | oracle/basic_oracle_example.pl | Perl | mit | 1,156 |
package Google::Ads::AdWords::v201809::ProductCanonicalCondition;

# NOTE: auto-generated by SOAP::WSDL from the AdWords v201809 WSDL
# (see POD below).  Regenerate instead of editing by hand.

use strict;
use warnings;

__PACKAGE__->_set_element_form_qualified(1);

# XML namespace for this complexType.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' };

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201809::ProductDimension);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out object storage: one hash per element, keyed by object ID.
my %ProductDimension__Type_of :ATTR(:get<ProductDimension__Type>);
my %condition_of :ATTR(:get<condition>);

# Register elements with the SOAP typelib: names, storage, XML types,
# and the on-the-wire XML element names.
__PACKAGE__->_factory(
    [ qw(        ProductDimension__Type
        condition

    ) ],
    {
        'ProductDimension__Type' => \%ProductDimension__Type_of,
        'condition' => \%condition_of,
    },
    {
        'ProductDimension__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'condition' => 'Google::Ads::AdWords::v201809::ProductCanonicalCondition::Condition',
    },
    {

        'ProductDimension__Type' => 'ProductDimension.Type',
        'condition' => 'condition',
    }
);

} # end BLOCK

1;

=pod

=head1 NAME

Google::Ads::AdWords::v201809::ProductCanonicalCondition

=head1 DESCRIPTION

Perl data type class for the XML Schema defined complexType
ProductCanonicalCondition from the namespace https://adwords.google.com/api/adwords/cm/v201809.

A canonical condition. Only supported by campaigns of {@link AdvertisingChannelType#SHOPPING}.

=head2 PROPERTIES

The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:

=over

=item * condition

=back

=head1 METHODS

=head2 new

Constructor. The following data structure may be passed to new():

=head1 AUTHOR

Generated by SOAP::WSDL

=cut
:- module('prolog_to_xml_swi', [sort_highlights/2,
output_file_as_xml_element/3,
output_string_as_xml_element/3,
output_xml_decl/1,
my_html_quoted_chars/3,
output_file_as_html_no_ann/1,
output_file_as_html/3]).
:- use_module('matcher_swi.pl').
:- use_module('pillow/pillow.pl').
% output_xml_decl(+Stylesheet)
% Emit the XML declaration plus an xml-stylesheet processing
% instruction that references Stylesheet (an XSL file name/URL).
output_xml_decl(Stylesheet) :-
    output_html([xmldecl(['version="1.0"','encoding="UTF-8"']), nl,
                 '<?xml-stylesheet type="text/xsl" href="', Stylesheet, '"?>', nl]).
% output_string_as_xml_element(+Element, +String, +Highlights)
% Wrap String in an <Element>...</Element> pair, rendering the sorted
% Highlights annotations inline via gen_html_from_string/2.
output_string_as_xml_element(Element, String, Highlights) :-
    output_html([begin(Element), nl]),
    gen_html_from_string(Highlights, String),
    output_html([end(Element), nl]).
% my_html_quoted_chars(+Codes)//
% DCG that copies a list of character codes, replacing the four
% XML-significant characters (&, <, > and ") with their entity
% references.
my_html_quoted_chars([]) --> [].
my_html_quoted_chars([Code|Codes]) -->
    my_html_quoted_char(Code),
    my_html_quoted_chars(Codes).

% my_html_quoted_char(+Code)//
% Emit a single code, escaped when necessary.  The escaping clauses
% commit with a cut and are mutually exclusive; the pass-through
% clause must remain last.
my_html_quoted_char(0'&) --> !, "&amp;".
my_html_quoted_char(0'<) --> !, "&lt;".
my_html_quoted_char(0'>) --> !, "&gt;".
my_html_quoted_char(0'") --> !, "&quot;".
my_html_quoted_char(Code) --> [Code].
% get_tuples(+Flat, -Anns)
% Convert a flat triple list [Start1, Stop1, Tag1, Start2, ...] into
% a list of ann(Start, Stop, Tag) terms.
get_tuples([], []).
get_tuples([Start, Stop, Tag|Rest], [ann(Start, Stop, Tag)|Anns]) :-
    get_tuples(Rest, Anns).
% sort_highlights(+Anns, -Sorted)
% Order ann(Start, Stop, Tag) terms so that earlier starts come
% first and, for equal start/end pairs, the tag-priority rules below
% apply.  predsort/3 also removes terms that compare equal.
sort_highlights(Anns, NAnns) :- predsort(ann_compare, Anns, NAnns).

% ann_compare(-Order, +Ann1, +Ann2)
% Comparison callback for predsort/3.  Clause order is significant:
% each clause commits with a cut once its condition holds.
% same ann (shouldn't ever happen)!
ann_compare(=, ann(S, E, A), ann(S, E, A)) :- !.
ann_compare(>, ann(S1, _, _), ann(S2, _, _)) :- S1 > S2, !.
ann_compare(<, ann(S1, _, _), ann(S2, _, _)) :- S1 < S2, !.
% At this point the starts must be equal: the longer (enclosing) span
% sorts first so nesting is generated outermost-first.
ann_compare(<, ann(_, E1, _), ann(_, E2, _)) :- E1 > E2, !.
ann_compare(>, ann(_, E1, _), ann(_, E2, _)) :- E1 < E2, !.
% starts and ends are the same: hide_nf opens first, atom opens last.
ann_compare(<, ann(_, _, hide_nf), ann(_, _, _)) :- !.
ann_compare(>, ann(_, _, _), ann(_, _, hide_nf)) :- !.
ann_compare(>, ann(_, _, atom), ann(_, _, _)) :- !.
ann_compare(<, ann(_, _, _), ann(_, _, atom)) :- !.
ann_compare(=, _, _).
% output_file_as_html(+Filename, +Annfile, +Extra)
% Render Filename as a <source> element annotated with the positions
% parsed from Annfile, followed by a <filters> element built from the
% filter declarations in Extra.  Fails if the annotation file cannot
% be parsed (the cut commits only after a successful parse).
output_file_as_html(Filename, Annfile, Extra) :-
    get_ann_positions(Filename, Annfile, UnsortedAnns, Extra), !,
    collect_filters(FiltStream, FilSyntax, Extra),
    sort_highlights(UnsortedAnns, SortedAnns),
    sort_highlights(FilSyntax, SortedFil),
    output_file_as_xml_element(source, Filename, SortedAnns),
    output_stream_as_xml_element(filters, FiltStream, SortedFil).

% output_file_as_html_no_ann(+Filename)
% As above, but with syntax highlighting only (no analysis
% annotations) and no filter section.
output_file_as_html_no_ann(Filename) :-
    get_syntax_positions(Filename, Syntax), !,
    sort_highlights(Syntax, SortedSyn),
    output_file_as_xml_element(source, Filename, SortedSyn).
% read_bytes(+Stream, +Count, -Bytes)
% Read Count bytes from Stream, or all remaining bytes when Count is
% the atom 'rest'.  Stops early at end of stream.  Clause order is
% significant: the zero and end-of-stream guards must precede the
% reading clauses.
read_bytes(_, 0, []) :- !.
read_bytes(Stream, _, []) :-
    at_end_of_stream(Stream), !.
read_bytes(Stream, rest, [Byte|Bytes]) :-
    !, get_byte(Stream, Byte),
    read_bytes(Stream, rest, Bytes).
read_bytes(Stream, Count, [Byte|Bytes]) :-
    get_byte(Stream, Byte),
    Remaining is Count - 1,
    read_bytes(Stream, Remaining, Bytes).

% read_bytes(+Stream, +Start, +End, -Bytes)
% Read the End - Start bytes between two absolute byte offsets,
% assuming Stream is already positioned at offset Start.
read_bytes(Stream, Start, End, Bytes) :-
    Count is End - Start,
    read_bytes(Stream, Count, Bytes).
% output_stream_as_xml_element(+Element, +Stream, +Syntax)
% Emit <Element>, interleave the stream's bytes with the sorted
% Syntax annotations via gen_html/5 (starting at offset 0, reading to
% end of stream), then close the element.
output_stream_as_xml_element(Element, Stream, Syntax) :-
    output_html([begin(Element)]),
    gen_html(Stream, 0, _, Syntax, _),
    output_html([end(Element), nl]).
% output_file_as_xml_element(+Element, +Filename, +Syntax)
% Open Filename, emit its contents as an XML <Element> with the
% sorted Syntax highlights applied, and guarantee the stream is
% closed even when generation fails or throws (the original version
% leaked the stream in that case).
output_file_as_xml_element(Element, Filename, Syntax) :-
    setup_call_cleanup(
        open(Filename, read, Stream),
        output_stream_as_xml_element(Element, Stream, Syntax),
        close(Stream)).
% convert_tag(+Ann, -Tag)
% Map an annotation name to an XML-safe element name: ';' cannot be
% used as an element name, so disjunctions become 'disj'; everything
% else passes through unchanged.
convert_tag(';', disj) :- !.
convert_tag(A,A).
% gen_html(+Stream, +Pos, ?NPos, +Tags, -RemainingTags)
% Walk Stream from byte offset Pos up to NPos (unbound NPos means
% "to end of stream"), emitting escaped text and opening/closing the
% sorted ann(Start, End, Tag) annotations at the correct byte
% positions.  Recursion nests: the tags inside an annotation are
% consumed while its enclosing element is open; unconsumed tags are
% handed back via RemainingTags.  Clause order and cuts are essential.
% no terms remaining
gen_html(Stream, Pos, NPos, [], []) :-
    nonvar(NPos), !,
    read_bytes(Stream, Pos, NPos, Read),
    my_html_quoted_chars(Read, Output, []),
    output_html(Output).
% No terms remaining, no end position: flush the rest of the stream,
% wrapping comments via add_comment_markup/2.
gen_html(Stream, _, _, [], []) :-
    !, read_bytes(Stream, rest, Read),
    add_comment_markup(Read, Output),
    output_html(Output).
% Next annotation starts at or beyond NPos: emit plain escaped text
% up to NPos and keep the annotation for the caller.
gen_html(Stream, Pos, NPos, [ann(S,E,Tag)|Tags], [ann(S,E,Tag)|Tags]) :-
    nonvar(NPos), NPos =< S, !,
    read_bytes(Stream, Pos, NPos, Read),
    my_html_quoted_chars(Read, Output, []),
    output_html(Output).
% 'atom' annotations produce no enclosing element; just recurse over
% the annotated region, then continue after it.
gen_html(Stream, Pos, NPos, [ann(S, E, atom)|Tags], NTags) :-
    !, read_bytes(Stream, Pos, S, Read),
    add_comment_markup(Read, Out1),
    output_html(Out1),
    gen_html(Stream, S, E, Tags, NTags2),
    gen_html(Stream, E, NPos, NTags2, NTags).
% General case: open the element for Tag, render its interior
% (consuming nested tags), close it, then continue after it.
gen_html(Stream, Pos, NPos, [ann(S, E, Tag)|Tags], NTags) :-
    !, read_bytes(Stream, Pos, S, Read),
    add_comment_markup(Read, Out1),
    output_html(Out1),
    enclosing_tag(Tag, TagName),
    gen_html(Stream, S, E, Tags, NTags2),
    output_html([end(TagName)]),
    gen_html(Stream, E, NPos, NTags2, NTags).
% enclosing_tag(+Ann, -TagName)
% Open the XML element corresponding to an annotation term.
% Annotations may carry fixity (Ann+Fix), arity (Ann/Arity), both,
% or a filter replacement (Ann*Replacement); clauses are ordered
% most-specific first, so their order matters.
enclosing_tag((Ann+Fix)/Arity, Tag) :-
    convert_tag(Ann, Tag),
    output_html([begin(Tag, [arity=Arity, fix=Fix])]).
enclosing_tag(Ann+Fix, Tag) :-
    convert_tag(Ann, Tag),
    output_html([begin(Tag, [fix=Fix])]).
enclosing_tag(Ann/Arity, Tag) :-
    convert_tag(Ann, Tag),
    output_html([begin(Tag, [arity=Arity])]).
enclosing_tag(Ann*Replacement, Tag) :-
    convert_tag(Ann, Tag),
    output_html([begin(Tag)]),
    write_replace_block(Replacement).
enclosing_tag(Ann, Tag) :-
    convert_tag(Ann, Tag),
    output_html([begin(Tag)]).
% write_replace_block(+Replace)
% This will create a
%   <replacement>:- <directive>filter</directive>
%   <filter>name</filter>(<dynamic>dynamic</dynamic>,...)</replacement>
% block from a parsed filter declaration.  This then goes inside the
% badfilter block, and the XSLT generates the code to replace the
% existing filter when "fix" is clicked.
write_replace_block(Replace) :-
    collect_filters_from_list(FiltStream, FilSyntax, [],
                              [ann_decl(filter, Replace)]),
    % the first element will be a wholefilter tag; this needs removing
    sort_highlights(FilSyntax, [_WholeFilter|SortedFil]),
    output_html([begin(replacement)]),
    gen_html(FiltStream, 0, _, SortedFil, _),
    output_html([end(replacement)]).
% add_comment_markup(+Codes, -Marked)
% Scan raw source codes and wrap Prolog comments ('%' to end of line
% and '/* ... */') in comm(Codes) terms, while XML-escaping every
% other character via add_quoted_char/3.
add_comment_markup([], []).
add_comment_markup([0'%|T], [comm([0'%|T1])|T2]) :-
    consume_till_line_end(T, T1, TR),
    add_comment_markup(TR, T2).
add_comment_markup([0'/,0'*|T], [comm([0'/,0'*|T1])|T2]) :-
    consume_till_comment_end(T, T1, TR),
    add_comment_markup(TR, T2).
add_comment_markup([H|T], NT) :-
    add_quoted_char(H, T2, NT),
    add_comment_markup(T, T2).
% consume_till_line_end(+Codes, -Comment, -Rest)
% Collect (XML-escaped) codes up to and including the newline that
% terminates a '%' comment; Rest is what follows the newline.
% NOTE(review): there is no base case for end-of-input without a
% newline, so an unterminated final comment makes this fail --
% presumably inputs always end in a newline; confirm with callers.
consume_till_line_end([10|T], [10], T).
consume_till_line_end([H|T], NT, TR) :-
    add_quoted_char(H, T2, NT),
    consume_till_line_end(T, T2, TR).

% consume_till_comment_end(+Codes, -Comment, -Rest)
% Same for '/* ... */' block comments; stops after the closing '*/'.
% NOTE(review): also fails on an unterminated block comment.
consume_till_comment_end([0'*,0'/|T], [0'*,0'/], T).
consume_till_comment_end([H|T], NT, TR) :-
    add_quoted_char(H, T2, NT),
    consume_till_comment_end(T, T2, TR).
% add_quoted_char(+Code, +Rest, -Quoted)
% Prepend one character code to Rest, expanding the XML-significant
% characters (&, <, >, ") into their entity references
% (difference-list style, so it can be folded over a string).
add_quoted_char(0'&, Rest, [0'&, 0'a, 0'm, 0'p, 0';|Rest]).
add_quoted_char(0'<, Rest, [0'&, 0'l, 0't, 0';|Rest]).
add_quoted_char(0'>, Rest, [0'&, 0'g, 0't, 0';|Rest]).
add_quoted_char(0'", Rest, [0'&, 0'q, 0'u, 0'o, 0't, 0';|Rest]).
add_quoted_char(Code, Rest, [Code|Rest]).
| leuschel/logen | weblogen/backend/prolog_to_xml_swi.pl | Perl | apache-2.0 | 6,252 |
package OpenXPKI::Server::Workflow::Activity::Tools::Datapool::SetEntry;
use strict;
use English;
use base qw( OpenXPKI::Server::Workflow::Activity );
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Exception;
use OpenXPKI::Debug;
use OpenXPKI::Serialization::Simple;
use OpenXPKI::DateTime;
use DateTime;
use Template;
use Data::Dumper;
# execute($self, $workflow)
# Workflow activity entry point: collect datapool parameters (either
# from the modern 'key'/'value' activity params or, for backward
# compatibility, from the deprecated 'ds_*' params that indirect via
# workflow-context keys), validate them, resolve a relative
# expiration date to an epoch, and write the entry via the server
# API.  Always returns 1; errors are signalled via
# OpenXPKI::Exception->throw.
sub execute {
    ##! 1: 'start'
    my $self     = shift;
    my $workflow = shift;
    my $context  = $workflow->context();
    my $serializer = OpenXPKI::Serialization::Simple->new();
    my $params = { PKI_REALM => CTX('api')->get_pki_realm(), };

    # fallback to old parameter format
    my $prefix = '';
    if ($self->param('ds_namespace')) {

        $prefix = 'ds_';

        # get the name of the key and resolve it
        # (the ds_* params name CONTEXT keys, not the values themselves)
        my $dp_key_param = $self->param('ds_key_param');
        if ( not $dp_key_param ) {
            OpenXPKI::Exception->throw(
                message => 'I18N_OPENXPKI_SERVER_WORKFLOW_ACTIVITY_TOOLS_DATAPOOL_MISSPARAM_KEY_PARAM'
            );
        }
        $params->{ KEY } = $context->param( $dp_key_param );

        my $dp_value_param = $self->param('ds_value_param');
        if ( not $dp_value_param ) {
            OpenXPKI::Exception->throw(
                message => 'I18N_OPENXPKI_SERVER_WORKFLOW_ACTIVITY_TOOLS_DATAPOOL_MISSPARAM_VALUE_PARAM'
            );
        }
        $params->{ VALUE } = $context->param( $dp_value_param );

        CTX('log')->application()->debug('Old parameter format found in set datapool activity');

    } else {
        # modern format: key/value come straight from the activity params
        $params->{ KEY }   = $self->param( 'key' );
        $params->{ VALUE } = $self->param( 'value' );
    }

    # map those parameters 1:1 to the API method
    # ($prefix is 'ds_' in legacy mode, empty otherwise)
    foreach my $key (qw( namespace encrypt force expiration_date )) {
        my $val = $self->param($prefix.$key);
        if (defined $val) {
            $params->{ uc($key) } = $val;
        }
    }

    # check for mandatory fields
    foreach my $key (qw( namespace key encrypt force )) {
        if ( not defined $params->{ uc($key) } ) {
            OpenXPKI::Exception->throw( message =>
                'I18N_OPENXPKI_SERVER_WORKFLOW_ACTIVITY_TOOLS_DATAPOOL_' .
                'MISSPARAM_' . uc($key)
            );
        }
    }

    # Resolve a relative/terse expiration spec into an epoch timestamp,
    # which is what set_data_pool_entry expects.
    if (defined $params->{EXPIRATION_DATE}) {
        my $then = OpenXPKI::DateTime::get_validity({
            REFERENCEDATE  => DateTime->now(),
            VALIDITY       => $params->{EXPIRATION_DATE},
            VALIDITYFORMAT => 'detect',
        });
        $params->{EXPIRATION_DATE} = $then->epoch();
    }

    CTX('api')->set_data_pool_entry($params);

    # we support this feature only in legacy mode
    if ($self->param('ds_unset_context_value')) {
        ##! 16: 'clearing context parameter ' . $valparam
        my $valparam = $self->param('ds_value_param');
        $context->param( $valparam => undef );
    }

    CTX('log')->application()->info('Set datapool entry for key '.$params->{KEY}.' in namespace '.$params->{NAMESPACE});

    # TODO: handle return code from set_data_pool_entry()
    return 1;
}
1;
__END__
=head1 Name
OpenXPKI::Server::Workflow::Activity::Tools::Datapool::SetEntry
=head1 Description
This class sets an entry in the Datapool.
=head1 Configuration
=head2 Parameters
In the activity definition, the following parameters must be set. The syntax
using the I<ds_> prefix is deprecated, use the I<_map> syntax to load key and
value from the context. It is not allowed to mix prefixed and non-prefixed
parameters!
=over 8
=item namespace / ds_namespace
The namespace to use for storing the key-value pair. Generally speaking,
there are no rigid naming conventions. The namespace I<sys>, however,
is reserved for internal server and system related data.
=item encrypt / ds_encrypt
A boolean value that specifies whether the value of the entry is to be
encrypted. [optional - default is I<0>]
=item force / ds_force
Causes the set action to overwrite an existing entry.
=item expiration_date / ds_expiration_date
Sets expiration date of the datapool entry to the specified value.
The value should be a time specification recognized by OpenXPKI::DateTime
autodetection (such as '+000001', which means one day), a terse date or
epoch. See OpenXPKI::DateTime::get_validity for details.
=item key
The value used as datapool key, use I<_map> syntax to use values from context!
=item value
The actual value written to the datapool, use I<_map> syntax to use values
from context!
=item ds_key_param, deprecated
The name of the context parameter that contains the key for this
datastore entry. Deprecated, use key with _map syntax instead.
=item ds_value_param, deprecated
The name of the context parameter that contains the value for this
datastore entry. Deprecated, use value with _map syntax instead.
=item ds_unset_context_value, deprecated
If this parameter is set to 1 the activity clears the workflow context
value specified via ds_value_param after storing the value in the datapool.
This option is deprecated and will be removed in the future. Use volatile
parameters or clear them afterwards.
=back
=head2 Arguments
The workflow action requires two parameters that are passed via the
workflow context. The names are set above with the I<ds_key_param> and
I<ds_value_param> parameters.
=head2 Example
set_puk_in_datapool:
class: OpenXPKI::Server::Workflow::Activity::Tools::Datapool::SetEntry
param:
namespace: puk_namespace
_map_key: $token_id
_map_value: $_puk
encrypt: 1
force: 1
expiration_date: "+10"
=head2 Example (Legacy format - same result as above)
set_puk_in_datapool:
class: OpenXPKI::Server::Workflow::Activity::Tools::Datapool::SetEntry
param:
ds_namespace: puk_namespace
ds_key_param: token_id
ds_value_param: _puk
ds_encrypt: 1
ds_force: 1
ds_unset_context_value: 0
ds_expiration_date: "+10"
| stefanomarty/openxpki | core/server/OpenXPKI/Server/Workflow/Activity/Tools/Datapool/SetEntry.pm | Perl | apache-2.0 | 6,045 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::ups::mge::snmp::mode::outputlines;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
# Declare the counters this mode exposes. The 'counter' template base class
# derives the --warning-<label>/--critical-<label> options and the perfdata
# output from these definitions.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        # 'global' (type 0): a single value set for the whole device.
        { name => 'global', type => 0, skipped_code => { -10 => 1 } },
        # 'oline' (type 1): one instance per output line.
        { name => 'oline', type => 1, cb_prefix_output => 'prefix_oline_output', message_multiple => 'All output lines are ok', skipped_code => { -10 => 1 } },
    ];

    # Standard deviation of the load across the phases, computed in stdev().
    $self->{maps_counters}->{global} = [
        { label => 'stdev-3phases', nlabel => 'output.3phases.stdev.gauge', set => {
                key_values => [ { name => 'stdev' } ],
                output_template => 'Load Standard Deviation : %.2f',
                perfdatas => [
                    { label => 'stdev', value => 'stdev_absolute', template => '%.2f' },
                ],
            }
        },
    ];

    # Per-output-line metrics; 'no_value' is the sentinel meaning "metric
    # not reported by the device" (skips the counter instead of alerting).
    $self->{maps_counters}->{oline} = [
        { label => 'load', nlabel => 'line.output.load.percentage', set => {
                key_values => [ { name => 'mgoutputLoadPerPhase', no_value => 0 } ],
                output_template => 'Load : %.2f %%',
                perfdatas => [
                    { value => 'mgoutputLoadPerPhase_absolute', template => '%.2f',
                      min => 0, max => 100, unit => '%', label_extra_instance => 1 },
                ],
            }
        },
        { label => 'current', nlabel => 'line.output.current.ampere', set => {
                key_values => [ { name => 'mgoutputCurrent', no_value => 0 } ],
                output_template => 'Current : %.2f A',
                perfdatas => [
                    { value => 'mgoutputCurrent_absolute', template => '%.2f',
                      min => 0, unit => 'A', label_extra_instance => 1 },
                ],
            }
        },
        { label => 'voltage', nlabel => 'line.output.voltage.volt', set => {
                key_values => [ { name => 'mgoutputVoltage', no_value => 0 } ],
                output_template => 'Voltage : %.2f V',
                perfdatas => [
                    { value => 'mgoutputVoltage_absolute', template => '%.2f',
                      unit => 'V', label_extra_instance => 1 },
                ],
            }
        },
        # NOTE: 'frequence' label kept as-is for CLI backward compatibility.
        { label => 'frequence', nlabel => 'line.output.frequence.hertz', set => {
                key_values => [ { name => 'mgoutputFrequency', no_value => -1 } ],
                output_template => 'Frequence : %.2f Hz',
                perfdatas => [
                    { value => 'mgoutputFrequency_absolute', template => '%.2f',
                      unit => 'Hz', label_extra_instance => 1 },
                ],
            }
        },
    ];
}
# Constructor: delegates to the counter-template base class.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # This mode adds no command-line options of its own; threshold options
    # are generated by the counter template from set_counters().
    $options{options}->add_options(arguments => {
    });

    return $self;
}
# Prefix callback for 'oline' counters: identifies the output line being
# reported, e.g. "Output Line '1' ".
sub prefix_oline_output {
    my ($self, %options) = @_;

    my $display = $options{instance_value}->{display};
    return sprintf("Output Line '%s' ", $display);
}
# Compute the standard deviation of the per-phase load across all output
# lines and store it in $self->{global}{stdev}.
#
# Fix: the original divided by the total number of lines even though lines
# without a defined mgoutputLoadPerPhase were skipped from the sum, which
# skewed both the mean and the deviation. Only lines that actually report
# a load are counted now, and an all-undefined set is a no-op (avoids a
# division by zero).
sub stdev {
    my ($self, %options) = @_;

    my $total = 0;
    my $num_present = 0;
    foreach my $instance (keys %{$self->{oline}}) {
        next if (!defined($self->{oline}->{$instance}->{mgoutputLoadPerPhase}));
        $total += $self->{oline}->{$instance}->{mgoutputLoadPerPhase};
        $num_present++;
    }
    return if ($num_present == 0);

    my $mean = $total / $num_present;
    $total = 0;
    foreach my $instance (keys %{$self->{oline}}) {
        next if (!defined($self->{oline}->{$instance}->{mgoutputLoadPerPhase}));
        $total += ($mean - $self->{oline}->{$instance}->{mgoutputLoadPerPhase}) ** 2;
    }

    my $stdev = sqrt($total / $num_present);
    $self->{global} = { stdev => $stdev };
}
# SNMP column layout of the MGE output-phase table; raw values are in
# tenths of a unit (dV, dHz, dA) except the load percentage.
my $mapping = {
    mgoutputVoltage      => { oid => '.1.3.6.1.4.1.705.1.7.2.1.2' }, # in dV
    mgoutputFrequency    => { oid => '.1.3.6.1.4.1.705.1.7.2.1.3' }, # in dHz
    mgoutputLoadPerPhase => { oid => '.1.3.6.1.4.1.705.1.7.2.1.4' }, # in %
    mgoutputCurrent      => { oid => '.1.3.6.1.4.1.705.1.7.2.1.5' }, # in dA
};
my $oid_upsmgOutputPhaseEntry = '.1.3.6.1.4.1.705.1.7.2.1';

# Walk the output-phase table and populate $self->{oline} keyed by instance.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{oline} = {};
    my $snmp_result = $options{snmp}->get_table(
        oid => $oid_upsmgOutputPhaseEntry,
        nothing_quit => 1
    );
    foreach my $oid (keys %{$snmp_result}) {
        # Fix: skip OIDs that do not match the entry layout instead of
        # silently reusing the previous successful capture left in $1.
        next if ($oid !~ /^$oid_upsmgOutputPhaseEntry\.\d+\.(.*)$/);
        my $instance = $1;
        next if (defined($self->{oline}->{$instance}));
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);

        # Scale tenths to base units.
        $result->{mgoutputVoltage} *= 0.1 if (defined($result->{mgoutputVoltage}));
        $result->{mgoutputFrequency} *= 0.1 if (defined($result->{mgoutputFrequency}));
        $result->{mgoutputCurrent} *= 0.1 if (defined($result->{mgoutputCurrent}));

        $self->{oline}->{$instance} = { display => $instance, %$result };
    }

    # The load standard deviation only makes sense across several lines.
    if (scalar(keys %{$self->{oline}}) > 1) {
        $self->stdev();
    }
}
1;
__END__
=head1 MODE
Check Output lines metrics (load, voltage, current).
=over 8
=item B<--warning-*>
Threshold warning.
Can be: 'load', 'voltage', 'current', 'frequence', 'stdev-3phases'.
=item B<--critical-*>
Threshold critical.
Can be: 'load', 'voltage', 'current', 'frequence', 'stdev-3phases'.
=back
=cut
| Sims24/centreon-plugins | hardware/ups/mge/snmp/mode/outputlines.pm | Perl | apache-2.0 | 6,224 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Common::VideoResponsiveAdInfo;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructor: copies the known fields 1:1 from %$args, then strips any
# that were not supplied so the JSON payload stays concise.
sub new {
  my ($class, $args) = @_;

  my @fields = qw(
    callToActions companionBanners descriptions
    headlines longHeadlines videos
  );
  my $self = { map { $_ => $args->{$_} } @fields };

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Common/VideoResponsiveAdInfo.pm | Perl | apache-2.0 | 1,274 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::ZMenu::Idhistory::Node;

use strict;

use base qw(EnsEMBL::Web::ZMenu::Idhistory);

# Build the zmenu content for one node of the stable-ID history tree:
# a linked entry for the archived object followed by informational rows.
sub content {
  my $self = shift;
  my $hub  = $self->hub;

  my $stable_id = $hub->param('node') || die 'No node value in params';
  my $archive   = $self->archive_adaptor->fetch_by_stable_id_dbname($stable_id, $hub->param('db_name'));
  my $versioned = $archive->stable_id . '.' . $archive->version;

  $self->caption($versioned);

  # Translations are presented as "Protein" in the menu.
  $self->add_entry({
    type       => $archive->type eq 'Translation' ? 'Protein' : $archive->type,
    label_html => $versioned,
    link       => $self->archive_link($archive),
  });

  my @rows = (
    [ 'Release',  $archive->release  ],
    [ 'Assembly', $archive->assembly ],
    [ 'Database', $archive->db_name  ],
  );
  $self->add_entry({ type => $_->[0], label => $_->[1] }) for @rows;
}

1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/ZMenu/Idhistory/Node.pm | Perl | apache-2.0 | 1,580 |
%% ---------------------------------------------
%% db.pl - database module - conditional assert
%% ---------------------------------------------
:- module(db, [cond_assert_size/1, fact/1, get_prev_fact/2]).
:- use_module(constraints, [more_general_constraints/2,
equiv_constraints/2]).
:- use_module(library(terms), [variant/2]).
:- use_module(library(lists), [member/2, delete/3]).
:- dynamic fact/1.
%% -------------------------------------------------------
%% get_prev_fact(+Head, -Fact):
%%   retrieve a stored fact (HeadCopy :- PrevCs) whose head matches a
%%   fresh copy of Head; copy_term/2 keeps the caller's Head unbound.
get_prev_fact(Head,(HeadCopy :- PrevCs)) :-
    copy_term(Head,HeadCopy),
    fact((HeadCopy :- PrevCs)).

%% --------------------------------------------------------------------
%% delete_variants(+L1, +L2, -L3) : L3 contains all the elements
%% from L1 that are not variants of elements in L2.
%% NOTE(review): membership is tested with member/2 (unification), not
%% variant/2, so the implementation is looser than the name suggests —
%% confirm against callers before relying on variant semantics.
delete_variants(List,[],List).
delete_variants(List, [Elt|Elts], Rest) :-
    member(Elt, List),!,
    delete(List, Elt, Rest1),
    delete_variants(Rest1, Elts, Rest).
delete_variants(List, [_|Elts], Rest) :-
    delete_variants(List, Elts, Rest).
%% in_database_size(+fact((F :- D))):
%%   succeeds when a stored fact with the same functor exists whose
%%   (head, constraints) pair is a syntactic variant of (F, D).
in_database_size(fact((F:-D))) :- !,
    functor(F,N,A), functor(F1,N,A),
    fact((F1:-D1)),
    variant((F,D),(F1,D1)).

%% --------------------------------------------------------------------
%% cond_assert_size(+F) : conditional assertion of a
%% size syntactic object to the database. F is only asserted when no
%% stored fact is more general than it; stored facts that are strictly
%% less general (and not also more general) are retracted first.
%% Asserting also raises user:flag to signal that the database changed.
cond_assert_size(F) :-
    in_database_size(F),!.
cond_assert_size(F) :- !,
    findall(Fact1, compare_size(F, Fact1, more), More),
    findall(Fact2, compare_size(F, Fact2, less), Less),
    delete_variants(Less, More, StrictLess),
    retract_list(StrictLess),
    (More == [] -> assert(F), assert(user:flag); true),!.
%% compare_size(+fact((P :- Dep1)), -fact((P :- Dep2)), ?M):
%%   enumerate stored facts sharing head P and relate their constraint
%%   stores; M is 'more', 'less' or 'same' generality w.r.t. Dep1.
compare_size(fact((P:-Dep1)), fact((P:-Dep2)),M) :-
    fact((P:-Dep2)), compare_constraints(Dep2,Dep1,M).

compare_constraints(C1,C2,more) :- more_general_constraints(C1,C2).
compare_constraints(C1,C2,less) :- more_general_constraints(C2,C1).
compare_constraints(C1,C2,same) :- equiv_constraints(C1,C2).

%% retract_list(+Facts): retract every fact in the list.
retract_list([]).
retract_list([F|Fs]) :- safe_retract(F), retract_list(Fs).

%% safe_retract(+F): ground F with numbervars/3 before retracting so
%% retract/1 matches the stored (numbered) fact exactly.
safe_retract(F) :-
    numbervars(F,0,_),
    retract(F).
| leuschel/logen | old_logen/convex/db.pl | Perl | apache-2.0 | 2,121 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::QueryStore::Cache::PrecacheFile;
use strict;
use warnings;
use bytes;
use JSON;
use Fcntl qw(SEEK_SET SEEK_END SEEK_CUR :flock);
use Compress::Zlib;
use DB_File;
use File::Copy;
use List::Util qw(max);
use List::MoreUtils qw(any);
use Digest::MD5 qw(md5_base64);
use File::Basename;
use EnsEMBL::Web::QueryStore::Cache::PrecacheBuilder qw(identity);
our $DEBUG = 1;
# Constructor.
#
# $conf keys:
#   base / dir - basename (default 'precache') and directory of the
#                .idx/.dat cache pair
#   filename   - alternatively an explicit path to either half of the
#                pair; basename and directory are derived from it and
#                override base/dir
#   write      - open the cache writable (creates files when missing)
sub new {
    my ($proto, $conf) = @_;
    my $class = ref($proto) || $proto;

    my @parts = ($conf->{'base'}, $conf->{'dir'});
    $parts[0] ||= 'precache';
    if ($conf->{'filename'}) {
        # fileparse returns (basename-without-suffix, directory, suffix).
        @parts = fileparse($conf->{'filename'}, qr{\.(idx|dat)$});
    }
    my $self = {
        dir   => $parts[1],
        base  => $parts[0],
        write => $conf->{'write'},
    };
    bless $self, $class;
    $self->cache_open;
    return $self;
}
# Current byte offset of a sysread/syswrite filehandle.
sub systell {
    my ($fh) = @_;
    return sysseek($fh, 0, SEEK_CUR);
}

# Path of one half of the cache pair: "<dir>/<base>.<type>".
# An explicit (true) $base overrides the object's own basename.
sub fn {
    my ($self, $type, $base) = @_;

    $base ||= $self->{'base'};
    return "$self->{'dir'}/$base.$type";
}
# Recursively strip hash entries whose value is undef; array elements are
# descended into but never removed (only hash keys are deleted).
sub _remove_undef {
    my ($self, $node) = @_;

    my $kind = ref($node);
    if ($kind eq 'HASH') {
        for my $key (keys %$node) {
            if (defined $node->{$key}) {
                $self->_remove_undef($node->{$key});
            }
            else {
                delete $node->{$key};
            }
        }
    }
    elsif ($kind eq 'ARRAY') {
        $self->_remove_undef($_) for @$node;
    }
}
# Open the tied DB_File index and the raw data file.
#
#   $mode  - O_* flags for the DB_File tie
#   $datop - open() mode string for the data file (e.g. '<:raw')
#   $name  - 'reading' or 'writing'; selects the failure policy
#
# When reading, a missing cache is not an error: the index becomes an
# empty in-memory hash so every lookup simply misses. When writing, any
# failure warns and leaves the object closed.
sub _open {
    my ($self, $mode, $datop, $name) = @_;

    return if $self->{'open'};
    my %idx;
    if (tie(%idx, 'DB_File', $self->fn("idx"), $mode, 0600, $DB_HASH)) {
        $self->{'idx'} = \%idx;
    } else {
        if ($name eq 'reading') {
            $self->{'idx'} = {};
        } else {
            # NOTE(review): this message reports the .dat path although the
            # failure is on the .idx tie — looks like a copy/paste slip.
            warn "Cannot open '" . $self->fn('dat') . "' $name: $!\n";
            return;
        }
    }
    unless (open($self->{'dat'}, $datop, $self->fn('dat'))) {
        if ($name ne 'reading') {
            warn "Cannot open '" . $self->fn('dat') . "' $name: $!\n";
            return;
        }
        # NOTE(review): when reading, a failed data-file open still falls
        # through to mark the cache open with no handle — presumably relying
        # on the index being empty too; verify before changing.
    }
    $self->{'open'} = 1;
}
# Open the cache pair: writers get read/write handles (creating files as
# needed), everyone else opens read-only.
sub cache_open {
    my ($self) = @_;

    my @how = $self->{'write'}
        ? (O_RDWR | O_CREAT, '+>>:raw', 'writing')
        : (O_RDONLY, '<:raw', 'reading');
    $self->_open(@how);
}

# Close both halves of the cache pair and drop the in-memory index.
sub cache_close {
    my ($self) = @_;

    return unless $self->{'open'};
    $self->{'open'} = 0;
    untie $self->{'idx'};
    $self->{'idx'} = {};
    close $self->{'dat'};
}
# Build the three components of an index key for a query:
# [md5(class), version, md5(canonical-JSON of the arguments)].
sub _keys {
    my ($self, $class, $ver, $args) = @_;

    # Shallow-copy so stripping undef values does not replace entries in
    # the caller's top-level hash.
    my %query = %$args;
    $self->_remove_undef($query{'args'});

    # Canonical ordering makes identical queries hash identically.
    my $canonical = JSON->new->canonical(1)->encode(\%query);
    warn "$canonical\n" if $DEBUG > 1;

    return [ md5_base64($class), $ver, md5_base64($canonical) ];
}
# Close the cache and rename its pair to $name (optionally suffixed with
# the worker identity and a collision counter, reserving the data file
# under the new name first).
#
# Fix: the original collision loop tested a candidate name built once
# before the loop and only incremented $idx inside it, so it spun forever
# whenever the first candidate already existed (and would have used a
# stale name even if it terminated). The candidate is now rebuilt on every
# iteration.
sub launch_as {
    my ($self, $name, $suffix) = @_;

    my $id = identity();
    $self->cache_close;
    if ($suffix) {
        my $idx = 0;
        my $newname = "$name.$id.$idx";
        while (-e $self->fn('dat', $newname)) {
            $idx++;
            $newname = "$name.$id.$idx";
        }
        $name = $newname;
        # Reserve the data file under the chosen name.
        open(my $dat, '>', $self->fn('dat', $name))
            or die "$!: " . $self->fn('dat', $name);
        close $dat;
    }
    rename($self->fn('dat'), $self->fn('dat', $name)) or die "$!";
    rename($self->fn('idx'), $self->fn('idx', $name)) or die "$!";
}
# Public write API: store one query result.
#   $class/$ver/$keyin - identify the query (hashed into the index key)
#   $valuein           - JSON-serialisable result, stored gzip-compressed
#   $build             - tag recorded alongside the entry (see addgood)
# Returns undef when the cache is not writable, otherwise the result of
# _set_key (0 if the key already existed, 1 if written).
sub set {
    my ($self, $class, $ver, $keyin, $valuein, $build) = @_;

    return undef unless $self->{'open'} and $self->{'write'};
    my $key = join(':', @{ $self->_keys($class, $ver, $keyin) });
    #warn "set $class $ver ".JSON->new->canonical(1)->encode($keyin)."\n";
    my $value = Compress::Zlib::memGzip(JSON->new->encode($valuein));
    return $self->_set_key($key, $value, $build);
}

# Append $value (already compressed) to the data file and index it under
# $key as the JSON triple [start offset, byte length, build tag]. Entries
# are write-once: returns 0 untouched when the key exists, 1 after a
# write. When a scalar ref $length is given it is incremented by the size
# of the index record (per-kind accounting for addgood).
sub _set_key {
    my ($self, $key, $value, $build, $length) = @_;

    return 0 if exists $self->{'idx'}{$key};
    my $start = systell $self->{'dat'};
    syswrite $self->{'dat'}, $value, length($value);
    my $end = systell $self->{'dat'};
    $self->{'idx'}{$key} = JSON->new->encode([$start, $end - $start, $build]);
    $$length += length $self->{'idx'}{$key} if $length;
    return 1;
}

# Low-level lookup: return the raw (still gzipped) value and its build
# tag for $key, or undef when the key is not indexed.
sub _get_key {
    my ($self, $key) = @_;

    return undef unless exists $self->{'idx'}{$key};
    my $d = JSON->new->decode($self->{'idx'}{$key});
    sysseek($self->{'dat'}, $d->[0], SEEK_SET);
    my $out;
    sysread($self->{'dat'}, $out, $d->[1]);
    return ($out, $d->[2]);
}

# Public read API: look up a query result. Returns the decoded structure,
# undef on a cache miss, and dies when a stored entry cannot be
# decompressed/decoded (corrupt cache).
sub get {
    my ($self, $class, $ver, $keyin) = @_;

    return undef unless $self->{'open'};
    my $key = join(':', @{ $self->_keys($class, $ver, $keyin) });
    my ($data) = $self->_get_key($key);
    my $out;
    return undef unless defined $data;
    eval {
        $out = JSON->new->decode(Compress::Zlib::memGunzip($data));
    };
    die "Get failed" unless $out;
    return $out;
}
# Chunk size for streaming copies, to bound memory usage.
my $FILESIZE = 20_000_000;

# Append the contents of $src to $target and return the byte offset at
# which the appended data starts (i.e. the size of $target beforehand).
#
# Improvements over the original: lexical filehandles instead of the
# package-global barewords IN/OUT, error messages on every failed I/O
# call (the original died with no message), a checked close on the write
# handle, and an explicit seek to EOF before tell() — with O_APPEND the
# initial position is not guaranteed to be end-of-file until the first
# write, so the returned offset could have been wrong.
sub _cat {
    my ($self, $target, $src) = @_;

    open(my $in, '<:raw', $src)
        or die "Cannot open '$src' for reading: $!";
    open(my $out, '>>:raw', $target)
        or die "Cannot open '$target' for appending: $!";

    seek($out, 0, SEEK_END) or die "Cannot seek '$target': $!";
    my $offset = tell $out;

    while (1) {
        my $r = read($in, my $data, $FILESIZE);
        die "Cannot read '$src': $!" unless defined $r;
        last if $r == 0;
        print {$out} $data or die "Cannot write '$target': $!";
    }
    close $out or die "Cannot close '$target': $!";
    close $in;
    return $offset;
}
# Merge the whole of $source into this cache: its data file is appended
# to ours in one pass, then every source index record is copied with its
# start offset shifted by the append position.
sub addall {
    my ($self, $source) = @_;

    # Close around the append so the file handles/offsets stay consistent.
    $self->cache_close;
    my $offset = $self->_cat($self->fn('dat'), $source->fn('dat'));
    $self->cache_open;
    foreach my $k (keys %{ $source->{'idx'} }) {
        my $v = $source->{'idx'}{$k};
        my $d = JSON->new->decode($v);
        $self->{'idx'}{$k} = JSON->new->encode([$d->[0] + $offset, $d->[1], $d->[2]]);
    }
}

# Merge only the still-relevant entries of $source into this cache.
#   $versions - class name => current version; an entry is copied only if
#               its key starts with "md5(class):version:" for such a pair
#   $seen     - optional hashref, counts copied entries per build kind
#   $lengths  - per-kind running byte totals of the written index records
#   $kindin   - optional filter: only consider entries of this build kind
# The counters are cumulative gates ($nskip counts everything that passed
# the kind filter, $ndups everything that additionally was not a
# duplicate, ...), so the summary line derives each stage's count by
# subtracting adjacent counters.
sub addgood {
    my ($self, $source, $versions, $seen, $lengths, $kindin) = @_;

    my @good_prefixes =
        map { md5_base64($_) . ":" . $versions->{$_} . ":" } keys %$versions;
    my ($all, $ndups, $nold, $nskip) = (0, 0, 0, 0);
    foreach my $k (keys %{ $source->{'idx'} }) {
        $all++;
        my ($data, $kind) = $source->_get_key($k);
        next if $kindin and $kindin ne $kind;
        $nskip++;
        next if $self->{'idx'}{$k}; # duplicate
        $ndups++;
        next unless any { substr($k, 0, length $_) eq $_ } @good_prefixes;
        $nold++;
        ($seen->{$kind} ||= 0)++ if $seen;
        my $length = $lengths->{$kind};
        $self->_set_key($k, $data, $kind, \$length);
        $lengths->{$kind} = $length;
    }
    my $f = $source->fn('idx');
    $f =~ s!^.*/!!;
    warn sprintf("add %s: keys=%d skipped=%d dups=%d old=%d\n", $f, $all, $all - $nskip, $nskip - $ndups, $ndups - $nold);
}
# Delete both halves of the cache pair from disk.
sub remove {
    my ($self) = @_;

    unlink $self->fn('idx');
    unlink $self->fn('dat');
}

# Map an index filename onto its companion data filename.
sub _idx_to_dat {
    my (undef, $path) = @_;

    (my $dat = $path) =~ s/\.idx$/.dat/;
    return $dat;
}
# Atomically claim between $min and $max cache pairs matching $pattern,
# renaming each from /$from/ to /$to/ and returning the opened caches.
# Returns undef (with all renames undone) when fewer than $min pairs
# could be claimed.
#
# Fix: on failure, the original had "return undef" inside the undo loop,
# so only the first claimed pair was renamed back before giving up; the
# return now happens after every pair has been restored.
sub select {
    my ($self, $pattern, $from, $to, $min, $max) = @_;

    my %name_undo;
    # Smallest files first, so a $max cut keeps the cheapest ones.
    my @files = sort { ((-s $a) || 0) <=> ((-s $b) || 0) } glob("$pattern.idx");
    return undef if @files < $min;
    splice(@files, $max) if $max and @files > $max;

    my @out;
    foreach my $f (@files) {
        my @parts = split('/', $f);
        # Claim the pair under a unique temporary name first so concurrent
        # processes cannot grab the same files.
        my $tmpname = "selected." . identity() . ".idx";
        my @tparts = @parts;
        $tparts[-1] = $tmpname;
        my $tmp = join('/', @tparts);
        rename $f, $tmp;
        rename $self->_idx_to_dat($f), $self->_idx_to_dat($tmp);
        unless (-e $tmp and -e $self->_idx_to_dat($tmp)) {
            # One of the two renames may have succeeded: undo both.
            rename $tmp, $f;
            rename $self->_idx_to_dat($tmp), $self->_idx_to_dat($f);
            next;
        }
        $parts[-1] =~ s/$from/$to/;
        my $out = join('/', @parts);
        rename $tmp, $out or die;
        rename $self->_idx_to_dat($tmp), $self->_idx_to_dat($out) or die;
        $name_undo{$out} = $f;
        push @out, EnsEMBL::Web::QueryStore::Cache::PrecacheFile->new({ filename => $out });
    }

    if (@out < $min) {
        # Not enough pairs claimed: restore every one before giving up.
        foreach my $new (keys %name_undo) {
            rename $new, $name_undo{$new} or die;
            rename $self->_idx_to_dat($new), $self->_idx_to_dat($name_undo{$new}) or die;
        }
        return undef;
    }
    return \@out;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/QueryStore/Cache/PrecacheFile.pm | Perl | apache-2.0 | 7,867 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::h3c::snmp::mode::components::default;
use strict;
use warnings;
# Numeric error-status values from the vendor entity-extension table,
# mapped to symbolic names. 2 ('normal') is the healthy state;
# 4 ('entityAbsent') is handled separately in check().
my %map_default_status = (
    1 => 'notSupported',
    2 => 'normal',
    3 => 'postFailure',
    4 => 'entityAbsent',
    11 => 'poeError',
    21 => 'stackError',
    22 => 'stackPortBlocked',
    23 => 'stackPortFailed',
    31 => 'sfpRecvError',
    32 => 'sfpSendError',
    33 => 'sfpBothError',
    41 => 'fanError',
    51 => 'psuError',
    61 => 'rpsError',
    71 => 'moduleFaulty',
    81 => 'sensorError',
    91 => 'hardwareFaulty',
);
# Generic component check: walk every entity of the requested class and
# report its error status (column .19 of the vendor entity table).
#
# %options:
#   component       - section name used for filtering, counting and output
#   component_class - entity class selecting the instances to inspect
sub check {
    my ($self, %options) = @_;

    $self->{output}->output_add(long_msg => "Checking " . $options{component});
    $self->{components}->{$options{component}} = {name => $options{component}, total => 0, skip => 0};
    return if ($self->check_filter(section => $options{component}));

    my $mapping = {
        EntityExtErrorStatus => { oid => $self->{branch} . '.19', map => \%map_default_status },
    };
    foreach my $instance (sort $self->get_instance_class(class => { $options{component_class} => 1 })) {
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$self->{branch} . '.19'}, instance => $instance);

        next if (!defined($result->{EntityExtErrorStatus}));
        next if ($self->check_filter(section => $options{component}, instance => $instance));
        # Absent entities are tracked separately, not counted as failures.
        if ($result->{EntityExtErrorStatus} =~ /entityAbsent/i) {
            $self->absent_problem(section => $options{component}, instance => $instance);
            next;
        }

        # Prefer the short entity name when configured and non-empty.
        my $name = '';
        $name = $self->get_short_name(instance => $instance) if (defined($self->{short_name}) && $self->{short_name} == 1);
        $name = $self->get_long_name(instance => $instance) unless (defined($self->{short_name}) && $self->{short_name} == 1 && defined($name) && $name ne '');

        $self->{components}->{$options{component}}->{total}++;
        $self->{output}->output_add(long_msg => sprintf("%s '%s' status is '%s' [instance = %s]",
            ucfirst($options{component}), $name, $result->{EntityExtErrorStatus}, $instance));
        # Escalate in the short message only when severity is not OK.
        my $exit = $self->get_severity(section => $options{component}, value => $result->{EntityExtErrorStatus});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                short_msg => sprintf("%s '%s' status is '%s'", $options{component}, $name, $result->{EntityExtErrorStatus}));
        }
    }
}
1; | wilfriedcomte/centreon-plugins | network/h3c/snmp/mode/components/default.pm | Perl | apache-2.0 | 3,363 |
package DocSet::Source::POD;
use strict;
use warnings;
use DocSet::Util;
use DocSet::RunTime;
use vars qw(@ISA);
require DocSet::Doc;
@ISA = qw(DocSet::Doc);
use constant HEAD_MAX_LEVEL => 4;
use constant MAX_DESC_LENGTH => 500;
# META: we are presenting too early, or this code should be moved to
# POD2HTML specific module
require Pod::POM::View::HTML;
my $mode = 'Pod::POM::View::HTML';
# Parse the POD and fill $self->{meta} (title, stitle, abstract, link) and
# $self->{toc} (the nested table-of-contents datastructure).
sub retrieve_meta_data {
    my ($self) = @_;

    $self->parse_pod;
    #print Pod::POM::View::HTML->print($pom);

    my $meta = {
        title    => 'No Title',
        abstract => '',
    };

    my $pom = $self->{parsed_tree};

    my @sections = $pom->head1();
    if (@sections) {
        # extract the title from the NAME section and remove it from content
        if ($sections[0]->title =~ /NAME/) {
            # don't present on purpose ->present($mode); there should
            # be no markup in NAME a problem with
            # <TITLE><CODE>....</CODE><TITLE> and alike
            $meta->{title} = (shift @sections)->content();
            $meta->{title} =~ s/^\s*|\s*$//sg;
        }

        # stitle is the same in docs
        $meta->{stitle} = $meta->{title};

        # locate the DESCRIPTION section (should be in the first three
        # sections)
        for (0..2) {
            next unless defined $sections[$_]
                && $sections[$_]->title =~ /DESCRIPTION/i;
            my $abstract = $sections[$_]->content->present($mode);
# cannot do this now, as it might cut some markup in the middle: <i>1 2</i>
#            # we are interested only in the first paragraph, or if its
#            # too big first MAX_DESC_LENGTH chars.
#            my $index = index $abstract, " ", MAX_DESC_LENGTH;
#            # cut only if index didn't return '-1' which is when the the
#            # space wasn't found starting from location MAX_DESC_LENGTH
#            unless ($index == -1) {
#                $abstract = substr $abstract, 0, $index+1;
#                $abstract .= " ... <i>(continued)</i>";
#            }
#
#            # temp workaround, but can only split on paras
            # keep only the first rendered <p>...</p> as the abstract
            $abstract =~ s|<p>(.*?)</p>.*|$1|s;
            $meta->{abstract} = $abstract;
            last;
        }
    }

    $meta->{link} = $self->{rel_dst_path};

    # put all the meta data under the same attribute
    $self->{meta} = $meta;

    # build the toc datastructure (one entry per remaining =head1)
    my @toc = ();
    my $level = 1;
    for my $node (@sections) {
        push @toc, $self->render_toc_level($node, $level);
    }
    $self->{toc} = \@toc;
}
# Build one TOC entry (hashref with title/link/toc_link and an optional
# 'subs' list) for $node at heading depth $level, recursing into the
# head($level+1) children up to HEAD_MAX_LEVEL.
sub render_toc_level {
    my ($self, $node, $level) = @_;

    my $title = $node->title;
    my $link = "$title"; # must stringify to get the raw string
    $link =~ s/^\s*|\s*$//g; # strip leading and closing spaces
    $link =~ s/\W/_/g; # META: put into a sub? see Doc::Common::pod_pom_html_anchor

    # prepend '#' for internal links
    my $toc_link = "toc_$link"; # self referring toc entry
    $link = "#$link";
    my %toc_entry = (
        title    => $title->present($mode), # run the formatting if any
        link     => $link,
        toc_link => $toc_link,
    );

    my @sub = ();
    $level++;
    if ($level <= HEAD_MAX_LEVEL) {
        # if there are deeper than =head4 levels we don't go down (spec is 1-4)
        my $method = "head$level";
        for my $sub_node ($node->$method()) {
            push @sub, $self->render_toc_level($sub_node, $level);
        }
    }
    $toc_entry{subs} = \@sub if @sub;

    return \%toc_entry;
}
# Parse ${ $self->{content} } with Pod::POM, caching the resulting tree in
# $self->{parsed_tree} (repeat calls are no-ops). Parser warnings are
# printed with the source path for context; a hard parse failure dies.
sub parse_pod {
    my ($self) = @_;

    # already parsed
    return if exists $self->{parsed_tree} && $self->{parsed_tree};

    # print ${ $self->{content} };

    use Pod::POM;
    my %options;
    my $parser = Pod::POM->new(\%options);
    my $pom = $parser->parse_text(${ $self->{content} })
        or die $parser->error();
    $self->{parsed_tree} = $pom;

    # examine any warnings raised
    if (my @warnings = $parser->warnings()) {
        print "\n", '-' x 40, "\n";
        print "File: $self->{src_path}\n";
        warn "$_\n" for @warnings;
    }
}
# Pipeline of source-level transformations applied before rendering.
sub src_filter {
    my ($self) = @_;

    $self->extract_pod;

    my $docset = $self->{docset};
    # In slides mode, every =headX opens a new slide.
    $self->head2page_breaks() if $docset->options('slides_mode');
    # Optionally expand the "* item" shorthand into real POD lists.
    $self->podify_items()     if $docset->options('podify_items');
}
# Reduce ${ $self->{content} } to its POD paragraphs only, replacing the
# content in place. A file containing no POD at all gets a minimal stub.
sub extract_pod {
    my ($self) = @_;

    # Paragraphs are separated by one or more blank lines.
    my @pod_paras;
    my $inside_pod = 0;
    for my $para (split /\n{2,}/, ${ $self->{content} }) {
        if (!$inside_pod) {
            # Outside POD: drop leading whitespace, then check whether this
            # paragraph opens a POD section (an '=' directive).
            $para =~ s/^[\s\n]*//ms;
            $inside_pod = ($para =~ /^=/s);
        }
        next unless $inside_pod;
        # '=cut' closes the section but still belongs to the extracted POD.
        $inside_pod = 0 if $para =~ /^=cut/;
        push @pod_paras, $para;
    }

    @pod_paras = ("=head1 NAME", "=head1 Not documented", "=cut")
        unless @pod_paras;

    my $content = join "\n\n", @pod_paras;
    $self->{content} = \$content;
}
# Convert the "* item" / "1 item" shorthand into real POD lists, rewriting
# ${ $self->{content} } in place. A doubled prefix ("* *" or "* 1") opens
# a nested second-level =over block. See the POD at the end of this file
# for the accepted shorthand and worked examples.
sub podify_items {
    my ($self) = @_;

    # tmp storage
    my @paras = ();
    my $items = 0;   # >0 while inside a first-level list (item counter)
    my $second = 0;  # >0 while inside a nested second-level list

    # we want the source in paragraphs
    my @content = split /\n\n/, ${ $self->{content} };
    foreach (@content) {
        # is it an item?
        if (/^(\*|\d+)\s+((\*|\d+)\s+)?/) {
            $items++;
            if ($2) {
                # doubled prefix: second-level item
                $second++;
                s/^(\*|\d+)\s+//;             # strip the first level shortcut
                s/^(\*|\d+)\s+/=item $1\n\n/; # do the second
                s/^/=over 4\n\n/ if $second == 1; # start 2nd level
            } else {
                # first time insert the =over pod tag
                s/^(\*|\d+)\s+/=item $1\n\n/; # start 1st level
                s/^/=over 4\n\n/ if $items == 1;
                s/^/=back\n\n/ if $second;    # complete 2nd level
                $second = 0;                  # end 2nd level section
            }
            push @paras, split /\n\n/, $_;
        } else {
            # complete the =over =item =back tag
            $second=0, push @paras, "=back" if $second; # if 2nd level is not closed
            push @paras, "=back" if $items;
            push @paras, $_;
            # not a tag item
            $items = 0;
        }
    }

    my $content = join "\n\n", @paras;
    $self->{content} = \$content;
}
# Slides mode: insert an explicit page break before each =headX paragraph,
# starting from the third one. (The first header, NAME, is stripped later,
# and the first remaining header should not begin with a blank slide.)
sub head2page_breaks {
    my ($self) = @_;

    my @rewritten;
    my $heads_seen = 0;
    for my $para (split /\n\n/, ${ $self->{content} }) {
        if ($para =~ /^=head/) {
            $heads_seen++;
            push @rewritten, q{=for html <?page-break>} if $heads_seen > 2;
        }
        push @rewritten, $para;
    }

    my $content = join "\n\n", @rewritten;
    $self->{content} = \$content;
}
1;
__END__
=head1 NAME
C<DocSet::Source::POD> - A class for parsing input document in the POD format
=head1 SYNOPSIS
=head1 DESCRIPTION
META: not sure if the customized implementation of L<> belongs
here. But it works as follows:
Assuming that the main I<config.cfg> specifies the following argument:
dir => {
...
# search path for pods, etc. must put more specific paths first!
search_paths => [qw(
docs/2.0/api/mod_perl-2.0
docs/2.0/api/ModPerl-Registry
docs/2.0
docs/1.0
.
)],
# what extensions to search for
search_exts => [qw(pod pm html)],
},
Whenever the pod includes L<Title|foo::bar/section>, the code will
first convert C<foo::bar> into I<foo/bar> and then will try to find
the file I<foo/bar.pod> in the search path (similar to C<@INC>), as
well as files I<foo/bar.pm> and I<foo/bar.html> under dir I<src>. If
other C<search_exts> are specified they will be searched as well. If
there is a much the link will be created, otherwise only the title of
the link will be displayed.
Notice that the C<search_paths> must specify more specific paths
first. If you don't they won't be searched. Currently this is done
only to optimize memory usage and some speed, not sure if that's very
important. But this is different from how Perl does search with
C<@INC> since DocSet reads all the files in memory once and then
reuses this data.
=head2 METHODS
=over
=item retrieve_meta_data()
=item parse_pod()
=item podify_items()
podify_items();
Podify text to represent items in pod, e.g:
1 Some text from item Item1
2 Some text from item Item2
becomes:
=over 4
=item 1
Some text from item Item1
=item 2
Some text from item Item2
=back
podify_items() accepts 'C<*>' and digits as bullets
podify_items() receives a ref to array of paragraphs as a parameter
and modifies it. Nothing returned.
Moreover, you can use a second level of indentation. So you can have
* title
* * item
* * item
or
* title
* 1 item
* 2 item
where the second mark tells whether to use a ball bullet or a
numbered item.
=item head2page_breaks
in the I<slides_mode> we want each =headX to start a new slide, so
this mode inserts the page-breaks:
=for html <?page-break>
starting from the second header (well actually from the third in the
raw POD, because the first one (NAME) gets stripped before it's seen
by the rendering engine.
=back
=head1 AUTHORS
Stas Bekman E<lt>stas (at) stason.orgE<gt>
=cut
| Distrotech/mod_perl | docs/lib/DocSet/Source/POD.pm | Perl | apache-2.0 | 9,508 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.